From 7f20a727fdbda02dd8fa91b85d62534e59b37bc4 Mon Sep 17 00:00:00 2001
From: Mark Rossetti
Date: Tue, 9 Jul 2024 10:23:27 -0700
Subject: [PATCH 1/7] Don't reuse windows cnm image if it has os.version set

---
 scripts/ci-build-azure-ccm.sh | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/scripts/ci-build-azure-ccm.sh b/scripts/ci-build-azure-ccm.sh
index cefbd3f376e..febe0b18346 100755
--- a/scripts/ci-build-azure-ccm.sh
+++ b/scripts/ci-build-azure-ccm.sh
@@ -95,6 +95,12 @@ can_reuse_artifacts() {
     echo "false" && return
   fi
 
+  # Do not reuse the image if there is a Windows image built with an older version of this script that did not
+  # build the images as host-process-container images. Those images cannot be pulled on mismatched Windows Server versions.
+  if docker manifest inspect "${REGISTRY}/${CNM_IMAGE_NAME}:${IMAGE_TAG_CNM}" | grep -q "\"os.version\": \"10.0."; then
+    echo "false" && return
+  fi
+
   for BINARY in azure-acr-credential-provider azure-acr-credential-provider.exe credential-provider-config.yaml credential-provider-config-win.yaml; do
     if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then
       echo "false" && return

From 7a93b59065f2450141cdc8dc1cd03ff13f787f25 Mon Sep 17 00:00:00 2001
From: Jon Huhn
Date: Tue, 9 Jul 2024 19:48:31 +0000
Subject: [PATCH 2/7] bump AKS preview API version to 2023-11-02-preview

---
 azure/services/agentpools/agentpools_test.go           | 2 +-
 azure/services/agentpools/spec.go                      | 2 +-
 azure/services/agentpools/spec_test.go                 | 2 +-
 azure/services/managedclusters/managedclusters_test.go | 2 +-
 azure/services/managedclusters/spec.go                 | 2 +-
 azure/services/managedclusters/spec_test.go            | 2 +-
 controllers/azuremanagedmachinepool_reconciler.go      | 2 +-
 exp/mutators/azureasomanagedcontrolplane_test.go       | 2 +-
 test/e2e/aks_patches.go                                | 2 +-
 test/e2e/azure_clusterproxy.go                         | 2 +-
 10 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/azure/services/agentpools/agentpools_test.go b/azure/services/agentpools/agentpools_test.go
index 0008051801e..ab0b759a6fc 100644
--- a/azure/services/agentpools/agentpools_test.go
+++ b/azure/services/agentpools/agentpools_test.go
@@ -20,8 +20,8 @@ import (
 	"context"
 	"testing"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
 	"go.uber.org/mock/gomock"
diff --git a/azure/services/agentpools/spec.go b/azure/services/agentpools/spec.go
index 5830d220871..913fe14d562 100644
--- a/azure/services/agentpools/spec.go
+++ b/azure/services/agentpools/spec.go
@@ -19,9 +19,9 @@ package agentpools
 import (
 	"context"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
 	asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/azure/services/agentpools/spec_test.go b/azure/services/agentpools/spec_test.go
index c75c639c8d3..ae25f798133 100644
--- a/azure/services/agentpools/spec_test.go
+++ b/azure/services/agentpools/spec_test.go
@@ -20,8 +20,8 @@ import (
 	"context"
 	"testing"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"github.com/google/go-cmp/cmp"
 	. "github.com/onsi/gomega"
diff --git a/azure/services/managedclusters/managedclusters_test.go b/azure/services/managedclusters/managedclusters_test.go
index 7948ed8b78c..c7c718935a0 100644
--- a/azure/services/managedclusters/managedclusters_test.go
+++ b/azure/services/managedclusters/managedclusters_test.go
@@ -21,8 +21,8 @@ import (
 	"errors"
 	"testing"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	. "github.com/onsi/gomega"
 	"go.uber.org/mock/gomock"
 	corev1 "k8s.io/api/core/v1"
diff --git a/azure/services/managedclusters/spec.go b/azure/services/managedclusters/spec.go
index 8a6245417c0..0e8865b3194 100644
--- a/azure/services/managedclusters/spec.go
+++ b/azure/services/managedclusters/spec.go
@@ -22,9 +22,9 @@ import (
 	"fmt"
 	"net"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
 	asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"github.com/pkg/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/azure/services/managedclusters/spec_test.go b/azure/services/managedclusters/spec_test.go
index de4e54259d1..23e0dc1c164 100644
--- a/azure/services/managedclusters/spec_test.go
+++ b/azure/services/managedclusters/spec_test.go
@@ -21,8 +21,8 @@ import (
 	"encoding/base64"
 	"testing"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"github.com/google/go-cmp/cmp"
 	. "github.com/onsi/gomega"
diff --git a/controllers/azuremanagedmachinepool_reconciler.go b/controllers/azuremanagedmachinepool_reconciler.go
index f63083967d7..f7039e4bc02 100644
--- a/controllers/azuremanagedmachinepool_reconciler.go
+++ b/controllers/azuremanagedmachinepool_reconciler.go
@@ -22,8 +22,8 @@ import (
 	"time"
 
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5"
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	"github.com/pkg/errors"
 	azprovider "sigs.k8s.io/cloud-provider-azure/pkg/provider"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
diff --git a/exp/mutators/azureasomanagedcontrolplane_test.go b/exp/mutators/azureasomanagedcontrolplane_test.go
index 2c2f11a4d80..a72fba1c82b 100644
--- a/exp/mutators/azureasomanagedcontrolplane_test.go
+++ b/exp/mutators/azureasomanagedcontrolplane_test.go
@@ -21,8 +21,8 @@ import (
 	"encoding/json"
 	"testing"
 
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	"github.com/Azure/azure-service-operator/v2/pkg/genruntime"
 	"github.com/google/go-cmp/cmp"
 	. "github.com/onsi/gomega"
diff --git a/test/e2e/aks_patches.go b/test/e2e/aks_patches.go
index 2fb22aa5fd9..f5dcbb22e41 100644
--- a/test/e2e/aks_patches.go
+++ b/test/e2e/aks_patches.go
@@ -25,8 +25,8 @@ import (
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v4"
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
diff --git a/test/e2e/azure_clusterproxy.go b/test/e2e/azure_clusterproxy.go
index f07ae7cfedd..90e12364e1d 100644
--- a/test/e2e/azure_clusterproxy.go
+++ b/test/e2e/azure_clusterproxy.go
@@ -34,8 +34,8 @@ import (
 
 	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
 	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/monitor/armmonitor"
-	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20230202preview"
 	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
+	asocontainerservicev1preview "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231102preview"
 	asoresourcesv1 "github.com/Azure/azure-service-operator/v2/api/resources/v1api20200601"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
From c2959a5b861f18f3f6bf50e3642be8718816dcd4 Mon Sep 17 00:00:00 2001
From: Matt Boersma
Date: Mon, 8 Jul 2024 09:41:51 -0600
Subject: [PATCH 3/7] Bump CAPI to v1.7.4

---
 Makefile                       |  6 +++---
 Tiltfile                       |  4 ++--
 go.mod                         |  6 +++---
 go.sum                         |  8 ++++----
 hack/install-cert-manager.sh   |  2 +-
 test/e2e/config/azure-dev.yaml | 34 +++++++++++++++++-----------------
 6 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/Makefile b/Makefile
index d06ecf685e6..d3a925cd8fd 100644
--- a/Makefile
+++ b/Makefile
@@ -33,7 +33,7 @@ export GOPROXY
 export GO111MODULE=on
 
 # Kubebuilder.
-export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.29.0
+export KUBEBUILDER_ENVTEST_KUBERNETES_VERSION ?= 1.30.0
 export KUBEBUILDER_CONTROLPLANE_START_TIMEOUT ?= 60s
 export KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT ?= 60s
@@ -101,7 +101,7 @@ RELEASE_NOTES_VER := v0.16.6-0.20240222112346-71feb57b59a4
 RELEASE_NOTES_BIN := release-notes
 RELEASE_NOTES := $(TOOLS_BIN_DIR)/$(RELEASE_NOTES_BIN)-$(RELEASE_NOTES_VER)
 
-KPROMO_VER := v4.0.4
+KPROMO_VER := v4.0.5
 KPROMO_BIN := kpromo
 KPROMO := $(TOOLS_BIN_DIR)/$(KPROMO_BIN)-$(KPROMO_VER)
@@ -314,7 +314,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST) $(KUBECTL) $(KIND) ## Create
 	./hack/create-custom-cloud-provider-config.sh
 
 	# Deploy CAPI
-	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.3/cluster-api-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"
+	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.4/cluster-api-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"
 
 	# Deploy CAAPH
 	timeout --foreground 300 bash -c "until curl --retry $(CURL_RETRIES) -sSL https://github.com/kubernetes-sigs/cluster-api-addon-provider-helm/releases/download/v0.1.0-alpha.10/addon-components.yaml | $(ENVSUBST) | $(KUBECTL) apply -f -; do sleep 5; done"
diff --git a/Tiltfile b/Tiltfile
index 55468a5fdf6..8ea4cc528f5 100644
--- a/Tiltfile
+++ b/Tiltfile
@@ -20,9 +20,9 @@ settings = {
     "deploy_cert_manager": True,
     "preload_images_for_kind": True,
     "kind_cluster_name": "capz",
-    "capi_version": "v1.7.3",
+    "capi_version": "v1.7.4",
     "caaph_version": "v0.2.1",
-    "cert_manager_version": "v1.15.0",
+    "cert_manager_version": "v1.15.1",
     "kubernetes_version": "v1.28.3",
     "aks_kubernetes_version": "v1.28.3",
     "flatcar_version": "3374.2.1",
diff --git a/go.mod b/go.mod
index 2fab61cba40..348a6a88af3 100644
--- a/go.mod
+++ b/go.mod
@@ -52,8 +52,8 @@ require (
 	k8s.io/kubectl v0.29.3
 	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
 	sigs.k8s.io/cloud-provider-azure v1.29.3
-	sigs.k8s.io/cluster-api v1.7.3
-	sigs.k8s.io/cluster-api/test v1.7.3
+	sigs.k8s.io/cluster-api v1.7.4
+	sigs.k8s.io/cluster-api/test v1.7.4
 	sigs.k8s.io/controller-runtime v0.17.3
 	sigs.k8s.io/kind v0.23.0
 )
@@ -216,7 +216,7 @@ require (
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
 
-replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.3
+replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.4
 
 // kube-openapi should match the version imported by CAPI.
 replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00
diff --git a/go.sum b/go.sum
index 88fa32fdaa8..2ccac367382 100644
--- a/go.sum
+++ b/go.sum
@@ -718,10 +718,10 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2 h1:9Zp+uWnxdUOoy/FaQK1DjPfL
 sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.2/go.mod h1:JKWYkoOyET3wsN0Kk9WxA+zpopkuCy+v4+mrnJ60Yyk=
 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1 h1:Lp0nALZmvMJoiVeVV6XjnZv1uClfArnThhuDAjaqE5A=
 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.1/go.mod h1:pPkJPx/eMVWP3R+LhPoOYGoY7lywcMJev5L2uSfH+Jo=
-sigs.k8s.io/cluster-api v1.7.3 h1:DsSRxsA+18jxLqPAo29abZ9kOPK1/xwhSuQb/MROzSs=
-sigs.k8s.io/cluster-api v1.7.3/go.mod h1:V9ZhKLvQtsDODwjXOKgbitjyCmC71yMBwDcMyNNIov0=
-sigs.k8s.io/cluster-api/test v1.7.3 h1:Nl1IOF03MZzjr6x45IJOFWlV4cNHpJ45qsn3A+1Tf98=
-sigs.k8s.io/cluster-api/test v1.7.3/go.mod h1:KbK8+zZEmSopCm6IGd9Vk+573sQ+HL6hnPvqelJEYi4=
+sigs.k8s.io/cluster-api v1.7.4 h1:gT9WGbLXKE19pNR6s/cTLRqK2G0EbwxxQrUrw7/w5P4=
+sigs.k8s.io/cluster-api v1.7.4/go.mod h1:V9ZhKLvQtsDODwjXOKgbitjyCmC71yMBwDcMyNNIov0=
+sigs.k8s.io/cluster-api/test v1.7.4 h1:yBeWaQoPcAbLMF/ddgz0IWJiFgfJe+QfZJ4QWexMVOw=
+sigs.k8s.io/cluster-api/test v1.7.4/go.mod h1:KbK8+zZEmSopCm6IGd9Vk+573sQ+HL6hnPvqelJEYi4=
 sigs.k8s.io/controller-runtime v0.17.3 h1:65QmN7r3FWgTxDMz9fvGnO1kbf2nu+acg9p2R9oYYYk=
 sigs.k8s.io/controller-runtime v0.17.3/go.mod h1:N0jpP5Lo7lMTF9aL56Z/B2oWBJjey6StQM0jRbKQXtY=
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/hack/install-cert-manager.sh b/hack/install-cert-manager.sh
index 3e07d1251b9..f3acc672f3c 100755
--- a/hack/install-cert-manager.sh
+++ b/hack/install-cert-manager.sh
@@ -54,7 +54,7 @@ source "${REPO_ROOT}/hack/common-vars.sh"
 make --directory="${REPO_ROOT}" "${KUBECTL##*/}"
 
 ## Install cert manager and wait for availability
-"${KUBECTL}" apply -f https://github.com/jetstack/cert-manager/releases/download/v1.15.0/cert-manager.yaml
+"${KUBECTL}" apply -f https://github.com/jetstack/cert-manager/releases/download/v1.15.1/cert-manager.yaml
 "${KUBECTL}" wait --for=condition=Available --timeout=5m -n cert-manager deployment/cert-manager
 "${KUBECTL}" wait --for=condition=Available --timeout=5m -n cert-manager deployment/cert-manager-cainjector
 "${KUBECTL}" wait --for=condition=Available --timeout=5m -n cert-manager deployment/cert-manager-webhook
diff --git a/test/e2e/config/azure-dev.yaml b/test/e2e/config/azure-dev.yaml
index 678835e30fc..734d8370e71 100644
--- a/test/e2e/config/azure-dev.yaml
+++ b/test/e2e/config/azure-dev.yaml
@@ -3,11 +3,11 @@ managementClusterName: capz-e2e
 images:
   - name: ${MANAGER_IMAGE}
     loadBehavior: mustLoad
-  - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.7.3
+  - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.7.4
     loadBehavior: tryLoad
-  - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.7.3
+  - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.7.4
     loadBehavior: tryLoad
-  - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.7.3
+  - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.7.4
     loadBehavior: tryLoad
   - name: registry.k8s.io/cluster-api-helm/cluster-api-helm-controller:v0.1.1-alpha.1
     loadBehavior: tryLoad
@@ -16,8 +16,8 @@ providers:
   - name: cluster-api
     type: CoreProvider
     versions:
-      - name: v1.6.6 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.6/core-components.yaml"
+      - name: v1.6.7 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.7/core-components.yaml"
         type: "url"
         contract: v1beta1
         replacements:
@@ -25,8 +25,8 @@ providers:
             new: --metrics-addr=:8080
         files:
           - sourcePath: "../data/shared/v1beta1/metadata.yaml"
-      - name: v1.7.3
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.3/core-components.yaml
+      - name: v1.7.4
+        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.4/core-components.yaml
         type: url
         contract: v1beta1
         files:
@@ -39,8 +39,8 @@ providers:
   - name: kubeadm
     type: BootstrapProvider
     versions:
-      - name: v1.6.6 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.6/bootstrap-components.yaml"
+      - name: v1.6.7 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.7/bootstrap-components.yaml"
         type: "url"
         contract: v1beta1
         replacements:
@@ -48,8 +48,8 @@ providers:
             new: --metrics-addr=:8080
         files:
          - sourcePath: "../data/shared/v1beta1/metadata.yaml"
-      - name: v1.7.3
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.3/bootstrap-components.yaml
+      - name: v1.7.4
+        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.4/bootstrap-components.yaml
         type: url
         contract: v1beta1
         files:
@@ -61,8 +61,8 @@ providers:
   - name: kubeadm
     type: ControlPlaneProvider
     versions:
-      - name: v1.6.6 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
-        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.6/control-plane-components.yaml"
+      - name: v1.6.7 # latest patch of earliest minor in supported v1beta1 releases; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only.
+        value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.7/control-plane-components.yaml"
         type: "url"
         contract: v1beta1
         replacements:
@@ -70,8 +70,8 @@ providers:
             new: --metrics-addr=:8080
         files:
           - sourcePath: "../data/shared/v1beta1/metadata.yaml"
-      - name: v1.7.3
-        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.3/control-plane-components.yaml
+      - name: v1.7.4
+        value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.4/control-plane-components.yaml
         type: url
         contract: v1beta1
         files:
@@ -224,8 +224,8 @@ variables:
   SECURITY_SCAN_FAIL_THRESHOLD: "${SECURITY_SCAN_FAIL_THRESHOLD:-100}"
   SECURITY_SCAN_CONTAINER: "${SECURITY_SCAN_CONTAINER:-quay.io/armosec/kubescape:v2.0.167}"
   AZURE_CNI_V1_MANIFEST_PATH: "${PWD}/templates/addons/azure-cni-v1.yaml"
-  OLD_CAPI_UPGRADE_VERSION: "v1.6.6"
-  LATEST_CAPI_UPGRADE_VERSION: "v1.7.3"
+  OLD_CAPI_UPGRADE_VERSION: "v1.6.7"
+  LATEST_CAPI_UPGRADE_VERSION: "v1.7.4"
   OLD_PROVIDER_UPGRADE_VERSION: "v1.14.5"
   LATEST_PROVIDER_UPGRADE_VERSION: "v1.15.1"
   OLD_CAAPH_UPGRADE_VERSION: "v0.1.0-alpha.10"

From 89d2566fbd5728fb33ced9a9eee3e016acff64d9 Mon Sep 17 00:00:00 2001
From: Nawaz Hussain Khazielakha
Date: Mon, 24 Jun 2024 16:17:40 -0700
Subject: [PATCH 4/7] login using Azure Workload ID when available

- use AZURE_FEDERATED_TOKEN_FILE while logging in via wi
- overload Azure client ID with Azure workload ID for WI based auth
- use auth mode for azure storage commands when using federated identity
- SC2086 use unquoted expansion with alt value
---
 hack/ensure-acr-login.sh      | 11 +++--------
 hack/ensure-azcli.sh          | 13 ++++++++++++-
 scripts/kind-with-registry.sh | 22 +++++++++++++++++-----
 3 files changed, 32 insertions(+), 14 deletions(-)

diff --git a/hack/ensure-acr-login.sh b/hack/ensure-acr-login.sh
index 2dc79ac3fc1..3122022290f 100755
--- a/hack/ensure-acr-login.sh
+++ b/hack/ensure-acr-login.sh
@@ -22,16 +22,11 @@ set +o xtrace
 REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
 cd "${REPO_ROOT}" || exit 1
 
-if [[ "${REGISTRY:-}" =~ capzci\.azurecr\.io ]]; then
+if [[ "${REGISTRY:-}" =~ \.azurecr\.io ]]; then
   # if we are using the prow Azure Container Registry, login.
   "${REPO_ROOT}/hack/ensure-azcli.sh"
   : "${AZURE_SUBSCRIPTION_ID:?Environment variable empty or not defined.}"
   az account set -s "${AZURE_SUBSCRIPTION_ID}"
-  az acr login --name capzci
-  # TODO(mainred): When using ACR, `az acr login` impacts the authentication of `docker buildx build --push` when the
-  # ACR, capzci in our case, has anonymous pull enabled.
-  # Use `docker login` as a suggested workaround and remove this target when the issue is resolved.
-  # Issue link: https://github.com/Azure/acr/issues/582
-  # Failed building link: https://prow.k8s.io/view/gs/kubernetes-jenkins/pr-logs/pull/kubernetes-sigs_cloud-provider-azure/974/pull-cloud-provider-azure-e2e-ccm-capz/1480459040440979456
-  docker login -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" capzci.azurecr.io
+  acrname="${REGISTRY%%.*}"
+  az acr login --name "$acrname"
 fi
diff --git a/hack/ensure-azcli.sh b/hack/ensure-azcli.sh
index 288656e153c..c3abb3a3941 100755
--- a/hack/ensure-azcli.sh
+++ b/hack/ensure-azcli.sh
@@ -25,5 +25,16 @@ if [[ -z "$(command -v az)" ]]; then
   AZ_REPO=$(lsb_release -cs)
   echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ ${AZ_REPO} main" | tee /etc/apt/sources.list.d/azure-cli.list
   apt-get update && apt-get install -y azure-cli
-  az login --service-principal -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}" > /dev/null
+
+  if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then
+    # AZURE_CLIENT_ID has been overloaded with Azure Workload ID in the preset-azure-cred-wi.
+    # This is done to avoid exporting Azure Workload ID as AZURE_CLIENT_ID in the test scenarios.
+    az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" > /dev/null
+
+    # Use --auth-mode "login" in az storage commands.
+    ENABLE_AUTH_MODE_LOGIN="true"
+    export ENABLE_AUTH_MODE_LOGIN
+  else
+    az login --service-principal -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}" > /dev/null
+  fi
 fi
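The federated branch above is where this script meets Azure workload identity: the CI pod has a short-lived OIDC token projected to disk, and az exchanges that token for an AAD credential, so the job never handles a client secret. A minimal standalone sketch of the same login follows; every value in it is a placeholder (the real client ID and token path come from the Prow preset, not from these names):

    export AZURE_CLIENT_ID="<user-assigned-identity-client-id>"  # overloaded with the workload identity client ID
    export AZURE_TENANT_ID="<tenant-id>"
    # The path below is the azure-workload-identity webhook's usual projection path; illustrative only.
    export AZURE_FEDERATED_TOKEN_FILE="/var/run/secrets/azure/tokens/azure-identity-token"

    # Succeeds only if the identity carries a federated credential whose issuer
    # and subject match the projected token; no AZURE_CLIENT_SECRET is used.
    az login --service-principal \
      --username "${AZURE_CLIENT_ID}" \
      --tenant "${AZURE_TENANT_ID}" \
      --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")"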
diff --git a/scripts/kind-with-registry.sh b/scripts/kind-with-registry.sh
index 7a9f8ee4284..8a32aebbc7e 100755
--- a/scripts/kind-with-registry.sh
+++ b/scripts/kind-with-registry.sh
@@ -93,22 +93,27 @@ function checkAZWIENVPreReqsAndCreateFiles() {
     echo "AZWI_RESOURCE_GROUP environment variable required - Azure resource group to store required Workload Identity artifacts"
     exit 1
   fi
+
   if [ "$(az group exists --name "${AZWI_RESOURCE_GROUP}" --output tsv)" == 'false' ]; then
     echo "Creating resource group '${AZWI_RESOURCE_GROUP}' in '${AZWI_LOCATION}'"
     az group create --name "${AZWI_RESOURCE_GROUP}" --location "${AZWI_LOCATION}" --output none --only-show-errors --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}"
   fi
+
   # Ensure that our connection to storage is inherited from the existing Azure login context
   unset AZURE_STORAGE_KEY
   unset AZURE_STORAGE_ACCOUNT
+
   if ! az storage account show --name "${AZWI_STORAGE_ACCOUNT}" --resource-group "${AZWI_RESOURCE_GROUP}" > /dev/null 2>&1; then
     echo "Creating storage account '${AZWI_STORAGE_ACCOUNT}' in '${AZWI_RESOURCE_GROUP}'"
     az storage account create --resource-group "${AZWI_RESOURCE_GROUP}" --name "${AZWI_STORAGE_ACCOUNT}" --output none --only-show-errors --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}"
-    az storage blob service-properties update --account-name "${AZWI_STORAGE_ACCOUNT}" --static-website
+    az storage blob service-properties update ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --account-name "${AZWI_STORAGE_ACCOUNT}" --static-website
   fi
+
   if ! az storage container show --name "${AZWI_STORAGE_CONTAINER}" --account-name "${AZWI_STORAGE_ACCOUNT}" > /dev/null 2>&1; then
     echo "Creating storage container '${AZWI_STORAGE_CONTAINER}' in '${AZWI_STORAGE_ACCOUNT}'"
-    az storage container create --name "${AZWI_STORAGE_CONTAINER}" --account-name "${AZWI_STORAGE_ACCOUNT}" --output none --only-show-errors
+    az storage container create ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZWI_STORAGE_CONTAINER}" --account-name "${AZWI_STORAGE_ACCOUNT}" --output none --only-show-errors
   fi
+
   SERVICE_ACCOUNT_ISSUER=$(az storage account show --name "${AZWI_STORAGE_ACCOUNT}" -o json | jq -r .primaryEndpoints.web)
   export SERVICE_ACCOUNT_ISSUER
   AZWI_OPENID_CONFIG_FILEPATH="${REPO_ROOT}/openid-configuration.json"
@@ -131,30 +136,37 @@ EOF
   openssl rsa -in "${SERVICE_ACCOUNT_SIGNING_KEY_FILEPATH}" -pubout -out "${SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH}"
   AZWI_JWKS_JSON_FILEPATH="${REPO_ROOT}/jwks.json"
   "${AZWI}" jwks --public-keys "${SERVICE_ACCOUNT_SIGNING_PUB_FILEPATH}" --output-file "${AZWI_JWKS_JSON_FILEPATH}"
+
   echo "Uploading openid-configuration document to '${AZWI_STORAGE_ACCOUNT}' storage account"
   upload_to_blob "${AZWI_OPENID_CONFIG_FILEPATH}" ".well-known/openid-configuration"
+
   echo "Uploading jwks document to '${AZWI_STORAGE_ACCOUNT}' storage account"
   upload_to_blob "${AZWI_JWKS_JSON_FILEPATH}" "openid/v1/jwks"
-
-  echo "Removing key access on storage account as no further data writes are required"
-  az storage account update -n "${AZWI_STORAGE_ACCOUNT}" -g "${AZWI_RESOURCE_GROUP}" --subscription "${AZURE_SUBSCRIPTION_ID}" --allow-shared-key-access=false --output none --only-show-errors
 fi
+
 if [ -z "${AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY}" ]; then
   if [ -z "${USER_IDENTITY}" ]; then
     echo "USER_IDENTITY environment variable required if not bringing your own identity via AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY"
     exit 1
   fi
+
   az identity create -n "${USER_IDENTITY}" -g "${AZWI_RESOURCE_GROUP}" -l "${AZWI_LOCATION}" --output none --only-show-errors --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}"
   AZURE_IDENTITY_ID=$(az identity show -n "${USER_IDENTITY}" -g "${AZWI_RESOURCE_GROUP}" --query clientId -o tsv)
   AZURE_IDENTITY_ID_PRINCIPAL_ID=$(az identity show -n "${USER_IDENTITY}" -g "${AZWI_RESOURCE_GROUP}" --query principalId -o tsv)
+
   echo "${AZURE_IDENTITY_ID}" > "${AZURE_IDENTITY_ID_FILEPATH}"
   until az role assignment create --assignee-object-id "${AZURE_IDENTITY_ID_PRINCIPAL_ID}" --role "Owner" --scope "/subscriptions/${AZURE_SUBSCRIPTION_ID}" --assignee-principal-type ServicePrincipal --output none --only-show-errors; do
     sleep 5
   done
+
+  echo "Creating federated credentials for capz-federated-identity"
   az identity federated-credential create -n "capz-federated-identity" \
     --identity-name "${USER_IDENTITY}" \
     -g "${AZWI_RESOURCE_GROUP}" \
     --issuer "${SERVICE_ACCOUNT_ISSUER}" \
     --subject "system:serviceaccount:capz-system:capz-manager" --output none --only-show-errors
+
+  echo "Creating federated credentials for aso-federated-identity"
   az identity federated-credential create -n "aso-federated-identity" \
     --identity-name "${USER_IDENTITY}" \
     -g "${AZWI_RESOURCE_GROUP}" \
@@ -168,7 +180,7 @@ function upload_to_blob() {
   local blob_name=$2
 
   echo "Uploading ${file_path} to '${AZWI_STORAGE_ACCOUNT}' storage account"
-  az storage blob upload \
+  az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} \
     --container-name "${AZWI_STORAGE_CONTAINER}" \
     --file "${file_path}" \
     --name "${blob_name}" \

From c3f6e881f19f30afe7801212d3d35aaa1a135ee4 Mon Sep 17 00:00:00 2001
From: James Sturtevant
Date: Mon, 1 Jul 2024 17:12:42 -0700
Subject: [PATCH 5/7] Remove storage key for azure storage commands

- use auth mode for azure storage commands when using federated identity
- SC2086 use unquoted expansion with alt value

Signed-off-by: James Sturtevant
---
 scripts/ci-build-azure-ccm.sh  | 17 ++++++++---------
 scripts/ci-build-kubernetes.sh | 15 +++++++--------
 2 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/scripts/ci-build-azure-ccm.sh b/scripts/ci-build-azure-ccm.sh
index febe0b18346..67f64a2ec07 100755
--- a/scripts/ci-build-azure-ccm.sh
+++ b/scripts/ci-build-azure-ccm.sh
@@ -29,7 +29,6 @@ source "${REPO_ROOT}/hack/ensure-go.sh"
 source "${REPO_ROOT}/hack/parse-prow-creds.sh"
 
 : "${AZURE_STORAGE_ACCOUNT:?Environment variable empty or not defined.}"
-: "${AZURE_STORAGE_KEY:?Environment variable empty or not defined.}"
 : "${REGISTRY:?Environment variable empty or not defined.}"
 
 # cloud controller manager image
@@ -69,16 +68,16 @@ main() {
     echo "Building and pushing Linux and Windows amd64 Azure ACR credential provider"
     make -C "${AZURE_CLOUD_PROVIDER_ROOT}" bin/azure-acr-credential-provider bin/azure-acr-credential-provider.exe
 
-    if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then
+    if [[ "$(az storage container exists ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then
       echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container"
-      az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null
-      az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
+      az storage container create ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null
+      az storage container set-permission ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
     fi
 
-    az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider"
-    az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider.exe" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe"
-    az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml"
-    az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config-win.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config-win.yaml"
+    az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider"
+    az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider.exe" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe"
+    az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml"
+    az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config-win.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config-win.yaml"
   fi
 }
@@ -102,7 +101,7 @@ can_reuse_artifacts() {
   fi
 
   for BINARY in azure-acr-credential-provider azure-acr-credential-provider.exe credential-provider-config.yaml credential-provider-config-win.yaml; do
-    if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then
+    if [[ "$(az storage blob exists ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then
       echo "false" && return
     fi
   done
diff --git a/scripts/ci-build-kubernetes.sh b/scripts/ci-build-kubernetes.sh
index 3a81b761ac5..8e8ebd10804 100755
--- a/scripts/ci-build-kubernetes.sh
+++ b/scripts/ci-build-kubernetes.sh
@@ -33,7 +33,6 @@ source "${REPO_ROOT}/hack/parse-prow-creds.sh"
 source "${REPO_ROOT}/hack/util.sh"
 
 : "${AZURE_STORAGE_ACCOUNT:?Environment variable empty or not defined.}"
-: "${AZURE_STORAGE_KEY:?Environment variable empty or not defined.}"
 : "${REGISTRY:?Environment variable empty or not defined.}"
 
 declare -a BINARIES=("kubeadm" "kubectl" "kubelet" "e2e.test")
@@ -80,10 +79,10 @@ setup() {
 }
 
 main() {
-  if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then
+  if [[ "$(az storage container exists ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then
     echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container"
-    az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null
-    az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
+    az storage container create ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null
+    az storage container set-permission ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
   fi
 
   if [[ "${KUBE_BUILD_CONFORMANCE:-}" =~ [yY] ]]; then
@@ -116,7 +115,7 @@ main() {
   for BINARY in "${BINARIES[@]}"; do
     BIN_PATH="${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}"
     echo "uploading ${BIN_PATH}"
-    az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${BIN_PATH}"
+    az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${BIN_PATH}"
   done
 
   if [[ "${TEST_WINDOWS:-}" == "true" ]]; then
@@ -129,7 +128,7 @@ main() {
     for BINARY in "${WINDOWS_BINARIES[@]}"; do
       BIN_PATH="${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe"
       echo "uploading ${BIN_PATH}"
-      az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${BIN_PATH}"
+      az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${BIN_PATH}"
     done
   fi
 fi
@@ -144,14 +143,14 @@ can_reuse_artifacts() {
   done
 
   for BINARY in "${BINARIES[@]}"; do
-    if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" --query exists --output tsv)" == "false" ]]; then
+    if [[ "$(az storage blob exists ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" --query exists --output tsv)" == "false" ]]; then
       echo "false" && return
     fi
   done
 
   if [[ "${TEST_WINDOWS:-}" == "true" ]]; then
     for BINARY in "${WINDOWS_BINARIES[@]}"; do
-      if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists --output tsv)" == "false" ]]; then
+      if [[ "$(az storage blob exists ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login} --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists --output tsv)" == "false" ]]; then
        echo "false" && return
       fi
     done
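Patches 4 and 5 lean on one shell idiom worth spelling out: the unquoted `${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login}` expansion injects the two extra arguments only when the variable is set, and expands to nothing at all (not even an empty argument) when it is unset. ShellCheck reports the unquoted expansion as SC2086, which is why both commit messages call it out: the word splitting here is deliberate, and quoting the alternative value instead would glue the flag and its value into a single word that az cannot parse. A self-contained demonstration, with printf standing in for az:

    unset ENABLE_AUTH_MODE_LOGIN
    printf '[%s]' az ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login}; echo
    # [az]                      the expansion vanishes entirely

    ENABLE_AUTH_MODE_LOGIN="true"
    printf '[%s]' az ${ENABLE_AUTH_MODE_LOGIN:+--auth-mode login}; echo
    # [az][--auth-mode][login]  two separate arguments, as az expects

    printf '[%s]' az ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"}; echo
    # [az][--auth-mode login]   inner quotes yield ONE argument, which az rejects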
From d3a3fc568d82447283d52163efc850ac90e5800e Mon Sep 17 00:00:00 2001
From: Jon Huhn
Date: Mon, 8 Jul 2024 17:28:58 +0000
Subject: [PATCH 6/7] move ASOAPI out of experimental

---
 .../v1alpha1/azureasomanagedcluster_types.go   |   0
 .../azureasomanagedcluster_webhook.go          |   0
 .../azureasomanagedclustertemplate_types.go    |   0
 .../azureasomanagedcontrolplane_types.go       |   0
 .../azureasomanagedcontrolplane_webhook.go     |   0
 ...ureasomanagedcontrolplanetemplate_types.go  |   0
 .../azureasomanagedmachinepool_types.go        |   0
 .../azureasomanagedmachinepool_webhook.go      |   0
 ...zureasomanagedmachinepooltemplate_types.go  |   0
 {exp/api => api}/v1alpha1/doc.go               |   0
 {exp/api => api}/v1alpha1/groupversion_info.go |   0
 .../v1alpha1/zz_generated.deepcopy.go          |   0
 config/webhook/manifests.yaml                  | 116 +++++++++---------
 .../agentpooladopt_controller.go               |  26 ++--
 .../azureasomanagedcluster_controller.go       |  51 ++++----
 .../azureasomanagedcluster_controller_test.go  |  56 ++++-----
 .../azureasomanagedcontrolplane_controller.go  |  49 ++++----
 ...easomanagedcontrolplane_controller_test.go  |  60 ++++-----
 .../azureasomanagedmachinepool_controller.go   |  37 +++---
 ...reasomanagedmachinepool_controller_test.go  |  76 ++++++------
 .../managedclusteradopt_controller.go          |  33 ++---
 .../resource_reconciler.go                     |  20 +--
 .../resource_reconciler_test.go                |  42 +++----
 docs/book/install-and-build.sh                 |   2 +-
 docs/book/src/SUMMARY.md                       |   2 +-
 docs/book/src/reference/v1alpha1-api.md        |   1 +
 docs/book/src/reference/v1alpha1-exp-api.md    |   1 -
 docs/book/src/topics/aso.md                    |   6 +-
 feature/feature.go                             |   2 +-
 main.go                                        |  20 +--
 .../mutators/azureasomanagedcontrolplane.go    |   8 +-
 .../azureasomanagedcontrolplane_test.go        |  62 +++++-----
 .../mutators/azureasomanagedmachinepool.go     |   8 +-
 .../azureasomanagedmachinepool_test.go         |  32 ++---
 {exp => pkg}/mutators/mutator.go               |   0
 {exp => pkg}/mutators/mutator_test.go          |   0
 test/e2e/aks.go                                |   4 +-
 test/e2e/aks_machinepools.go                   |   8 +-
 test/e2e/azure_clusterproxy.go                 |   4 +-
 test/e2e/azure_logcollector.go                 |  10 +-
 40 files changed, 368 insertions(+), 368 deletions(-)
 rename {exp/api => api}/v1alpha1/azureasomanagedcluster_types.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedcluster_webhook.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedclustertemplate_types.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedcontrolplane_types.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedcontrolplane_webhook.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedcontrolplanetemplate_types.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedmachinepool_types.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedmachinepool_webhook.go (100%)
 rename {exp/api => api}/v1alpha1/azureasomanagedmachinepooltemplate_types.go (100%)
 rename {exp/api => api}/v1alpha1/doc.go (100%)
 rename {exp/api => api}/v1alpha1/groupversion_info.go (100%)
 rename {exp/api => api}/v1alpha1/zz_generated.deepcopy.go (100%)
 rename {exp/controllers => controllers}/agentpooladopt_controller.go (86%)
 rename {exp/controllers => controllers}/azureasomanagedcluster_controller.go (81%)
 rename {exp/controllers => controllers}/azureasomanagedcluster_controller_test.go (84%)
 rename {exp/controllers => controllers}/azureasomanagedcontrolplane_controller.go (84%)
 rename {exp/controllers => controllers}/azureasomanagedcontrolplane_controller_test.go (85%)
 rename {exp/controllers => controllers}/azureasomanagedmachinepool_controller.go (87%)
 rename {exp/controllers => controllers}/azureasomanagedmachinepool_controller_test.go (86%)
 rename {exp/controllers => controllers}/managedclusteradopt_controller.go (83%)
 rename {exp/controllers => controllers}/resource_reconciler.go (92%)
 rename {exp/controllers => controllers}/resource_reconciler_test.go (93%)
 create mode 100644 docs/book/src/reference/v1alpha1-api.md
 delete mode 100644 docs/book/src/reference/v1alpha1-exp-api.md
 rename {exp => pkg}/mutators/azureasomanagedcontrolplane.go (97%)
 rename {exp => pkg}/mutators/azureasomanagedcontrolplane_test.go (90%)
 rename {exp => pkg}/mutators/azureasomanagedmachinepool.go (96%)
 rename {exp => pkg}/mutators/azureasomanagedmachinepool_test.go (91%)
 rename {exp => pkg}/mutators/mutator.go (100%)
 rename {exp => pkg}/mutators/mutator_test.go (100%)

diff --git a/exp/api/v1alpha1/azureasomanagedcluster_types.go b/api/v1alpha1/azureasomanagedcluster_types.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedcluster_types.go
rename to api/v1alpha1/azureasomanagedcluster_types.go
diff --git a/exp/api/v1alpha1/azureasomanagedcluster_webhook.go b/api/v1alpha1/azureasomanagedcluster_webhook.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedcluster_webhook.go
rename to api/v1alpha1/azureasomanagedcluster_webhook.go
diff --git a/exp/api/v1alpha1/azureasomanagedclustertemplate_types.go b/api/v1alpha1/azureasomanagedclustertemplate_types.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedclustertemplate_types.go
rename to api/v1alpha1/azureasomanagedclustertemplate_types.go
diff --git a/exp/api/v1alpha1/azureasomanagedcontrolplane_types.go b/api/v1alpha1/azureasomanagedcontrolplane_types.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedcontrolplane_types.go
rename to api/v1alpha1/azureasomanagedcontrolplane_types.go
diff --git a/exp/api/v1alpha1/azureasomanagedcontrolplane_webhook.go b/api/v1alpha1/azureasomanagedcontrolplane_webhook.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedcontrolplane_webhook.go
rename to api/v1alpha1/azureasomanagedcontrolplane_webhook.go
diff --git a/exp/api/v1alpha1/azureasomanagedcontrolplanetemplate_types.go b/api/v1alpha1/azureasomanagedcontrolplanetemplate_types.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedcontrolplanetemplate_types.go
rename to api/v1alpha1/azureasomanagedcontrolplanetemplate_types.go
diff --git a/exp/api/v1alpha1/azureasomanagedmachinepool_types.go b/api/v1alpha1/azureasomanagedmachinepool_types.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedmachinepool_types.go
rename to api/v1alpha1/azureasomanagedmachinepool_types.go
diff --git a/exp/api/v1alpha1/azureasomanagedmachinepool_webhook.go b/api/v1alpha1/azureasomanagedmachinepool_webhook.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedmachinepool_webhook.go
rename to api/v1alpha1/azureasomanagedmachinepool_webhook.go
diff --git a/exp/api/v1alpha1/azureasomanagedmachinepooltemplate_types.go b/api/v1alpha1/azureasomanagedmachinepooltemplate_types.go
similarity index 100%
rename from exp/api/v1alpha1/azureasomanagedmachinepooltemplate_types.go
rename to api/v1alpha1/azureasomanagedmachinepooltemplate_types.go
diff --git a/exp/api/v1alpha1/doc.go b/api/v1alpha1/doc.go
similarity index 100%
rename from exp/api/v1alpha1/doc.go
rename to api/v1alpha1/doc.go
diff --git a/exp/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go
similarity index 100%
rename from exp/api/v1alpha1/groupversion_info.go
rename to api/v1alpha1/groupversion_info.go
diff --git a/exp/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
similarity index 100%
rename from exp/api/v1alpha1/zz_generated.deepcopy.go
rename to api/v1alpha1/zz_generated.deepcopy.go
diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml
index ef09112747f..de5ab8e6417 100644
--- a/config/webhook/manifests.yaml
+++ b/config/webhook/manifests.yaml
@@ -211,20 +211,18 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azurecluster
+      path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-azureasomanagedcluster
   failurePolicy: Fail
-  matchPolicy: Equivalent
-  name: validation.azurecluster.infrastructure.cluster.x-k8s.io
+  name: validation.azureasomanagedcluster.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
-    - v1beta1
+    - v1alpha1
     operations:
     - CREATE
-    - UPDATE
     resources:
-    - azureclusters
+    - azureasomanagedclusters
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -233,20 +231,18 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azureclusteridentity
+      path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-azureasomanagedcontrolplane
   failurePolicy: Fail
-  matchPolicy: Equivalent
-  name: validation.azureclusteridentity.infrastructure.cluster.x-k8s.io
+  name: validation.azureasomanagedcontrolplane.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
-    - v1beta1
+    - v1alpha1
     operations:
     - CREATE
-    - UPDATE
     resources:
-    - azureclusteridentities
+    - azureasomanagedcontrolplanes
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -255,20 +251,18 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azureclustertemplate
+      path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-azureasomanagedmachinepool
   failurePolicy: Fail
-  matchPolicy: Equivalent
-  name: validation.azureclustertemplate.infrastructure.cluster.x-k8s.io
+  name: validation.azureasomanagedmachinepool.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
-    - v1beta1
+    - v1alpha1
     operations:
     - CREATE
-    - UPDATE
     resources:
-    - azureclustertemplates
+    - azureasomanagedmachinepools
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -277,10 +271,10 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremachine
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azurecluster
   failurePolicy: Fail
   matchPolicy: Equivalent
-  name: validation.azuremachine.infrastructure.cluster.x-k8s.io
+  name: validation.azurecluster.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
     - CREATE
     - UPDATE
     resources:
-    - azuremachines
+    - azureclusters
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -299,10 +293,10 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremachinetemplate
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azureclusteridentity
   failurePolicy: Fail
   matchPolicy: Equivalent
-  name: validation.azuremachinetemplate.infrastructure.cluster.x-k8s.io
+  name: validation.azureclusteridentity.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
     - CREATE
     - UPDATE
     resources:
-    - azuremachinetemplates
+    - azureclusteridentities
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -321,9 +315,10 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcluster
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azureclustertemplate
   failurePolicy: Fail
-  name: validation.azuremanagedclusters.infrastructure.cluster.x-k8s.io
+  matchPolicy: Equivalent
+  name: validation.azureclustertemplate.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
     - CREATE
     - UPDATE
     resources:
-    - azuremanagedclusters
+    - azureclustertemplates
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -342,18 +337,20 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedclustertemplate
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremachine
   failurePolicy: Fail
-  name: validation.azuremanagedclustertemplates.infrastructure.cluster.x-k8s.io
+  matchPolicy: Equivalent
+  name: validation.azuremachine.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
+    - CREATE
     - UPDATE
     resources:
-    - azuremanagedclustertemplates
+    - azuremachines
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -362,9 +359,10 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplane
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremachinetemplate
   failurePolicy: Fail
-  name: validation.azuremanagedcontrolplanes.infrastructure.cluster.x-k8s.io
+  matchPolicy: Equivalent
+  name: validation.azuremachinetemplate.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
     - CREATE
     - UPDATE
     resources:
-    - azuremanagedcontrolplanes
+    - azuremachinetemplates
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -383,9 +381,9 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplanetemplate
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcluster
   failurePolicy: Fail
-  name: validation.azuremanagedcontrolplanetemplates.infrastructure.cluster.x-k8s.io
+  name: validation.azuremanagedclusters.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
     - CREATE
     - UPDATE
     resources:
-    - azuremanagedcontrolplanetemplates
+    - azuremanagedclusters
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -404,21 +402,18 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepool
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedclustertemplate
   failurePolicy: Fail
-  matchPolicy: Equivalent
-  name: validation.azuremanagedmachinepools.infrastructure.cluster.x-k8s.io
+  name: validation.azuremanagedclustertemplates.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
-    - CREATE
     - UPDATE
-    - DELETE
     resources:
-    - azuremanagedmachinepools
+    - azuremanagedclustertemplates
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -427,9 +422,9 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplane
   failurePolicy: Fail
-  name: validation.azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io
+  name: validation.azuremanagedcontrolplanes.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
     - v1beta1
     operations:
     - CREATE
     - UPDATE
-    - DELETE
     resources:
-    - azuremanagedmachinepooltemplates
+    - azuremanagedcontrolplanes
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -449,18 +443,19 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-azureasomanagedcluster
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedcontrolplanetemplate
   failurePolicy: Fail
-  name: validation.azureasomanagedcluster.infrastructure.cluster.x-k8s.io
+  name: validation.azuremanagedcontrolplanetemplates.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
-    - v1alpha1
+    - v1beta1
     operations:
     - CREATE
+    - UPDATE
     resources:
-    - azureasomanagedclusters
+    - azuremanagedcontrolplanetemplates
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -469,18 +464,21 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-azureasomanagedcontrolplane
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepool
   failurePolicy: Fail
-  name: validation.azureasomanagedcontrolplane.infrastructure.cluster.x-k8s.io
+  matchPolicy: Equivalent
+  name: validation.azuremanagedmachinepools.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
-    - v1alpha1
+    - v1beta1
     operations:
     - CREATE
+    - UPDATE
+    - DELETE
     resources:
-    - azureasomanagedcontrolplanes
+    - azuremanagedmachinepools
   sideEffects: None
 - admissionReviewVersions:
   - v1
@@ -489,18 +487,20 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /validate-infrastructure-cluster-x-k8s-io-v1alpha1-azureasomanagedmachinepool
+      path: /validate-infrastructure-cluster-x-k8s-io-v1beta1-azuremanagedmachinepooltemplate
   failurePolicy: Fail
-  name: validation.azureasomanagedmachinepool.infrastructure.cluster.x-k8s.io
+  name: validation.azuremanagedmachinepooltemplates.infrastructure.cluster.x-k8s.io
   rules:
   - apiGroups:
     - infrastructure.cluster.x-k8s.io
     apiVersions:
-    - v1alpha1
+    - v1beta1
     operations:
     - CREATE
+    - UPDATE
+    - DELETE
     resources:
-    - azureasomanagedmachinepools
+    - azuremanagedmachinepooltemplates
   sideEffects: None
 - admissionReviewVersions:
   - v1
diff --git a/exp/controllers/agentpooladopt_controller.go b/controllers/agentpooladopt_controller.go
similarity index 86%
rename from exp/controllers/agentpooladopt_controller.go
rename to controllers/agentpooladopt_controller.go
index 3c6dcf606c5..863863b07d8 100644
--- a/exp/controllers/agentpooladopt_controller.go
+++ b/controllers/agentpooladopt_controller.go
@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/utils/ptr"
-	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
+	infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
@@ -76,13 +76,13 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 		return ctrl.Result{}, client.IgnoreNotFound(err)
 	}
 
-	if agentPool.GetAnnotations()[adoptAnnotation] != "true" {
+	if agentPool.GetAnnotations()[adoptAnnotation] != adoptAnnotationValue {
 		return ctrl.Result{}, nil
 	}
 
 	for _, owner := range agentPool.GetOwnerReferences() {
-		if owner.APIVersion == infrav1exp.GroupVersion.Identifier() &&
-			owner.Kind == infrav1exp.AzureASOManagedMachinePoolKind {
+		if owner.APIVersion == infrav1alpha.GroupVersion.Identifier() &&
+			owner.Kind == infrav1alpha.AzureASOManagedMachinePoolKind {
 			return ctrl.Result{}, nil
 		}
 	}
@@ -123,8 +123,8 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 	var managedControlPlaneOwner *metav1.OwnerReference
 	for _, owner := range managedCluster.GetOwnerReferences() {
-		if owner.APIVersion == infrav1exp.GroupVersion.Identifier() &&
-			owner.Kind == infrav1exp.AzureASOManagedControlPlaneKind &&
+		if owner.APIVersion == infrav1alpha.GroupVersion.Identifier() &&
+			owner.Kind == infrav1alpha.AzureASOManagedControlPlaneKind &&
 			owner.Name == agentPool.Owner().Name {
 			managedControlPlaneOwner = ptr.To(owner)
 			break
@@ -133,7 +133,7 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	if managedControlPlaneOwner == nil {
 		return ctrl.Result{}, fmt.Errorf("ManagedCluster %s is not owned by any AzureASOManagedControlPlane", managedClusterKey)
 	}
-	asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{}
+	asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{}
 	managedControlPlaneKey := client.ObjectKey{
 		Namespace: namespace,
 		Name:      managedControlPlaneOwner.Name,
@@ -144,13 +144,13 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 	clusterName := asoManagedControlPlane.Labels[clusterv1.ClusterNameLabel]
 
-	asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{
+	asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
 			Name:      agentPool.Name,
 		},
-		Spec: infrav1exp.AzureASOManagedMachinePoolSpec{
-			AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{
+		Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{
+			AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{
 				Resources: []runtime.RawExtension{
 					{Object: agentPool},
 				},
@@ -173,8 +173,8 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 			},
 			ClusterName: clusterName,
 			InfrastructureRef: corev1.ObjectReference{
-				APIVersion: infrav1exp.GroupVersion.Identifier(),
-				Kind:       infrav1exp.AzureASOManagedMachinePoolKind,
+				APIVersion: infrav1alpha.GroupVersion.Identifier(),
+				Kind:       infrav1alpha.AzureASOManagedMachinePoolKind,
 				Name:       asoManagedMachinePool.Name,
 			},
 		},
@@ -184,7 +184,7 @@ func (r *AgentPoolAdoptReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 
 	if ptr.Deref(agentPool.Spec.EnableAutoScaling, false) {
 		machinePool.Annotations = map[string]string{
-			clusterv1.ReplicasManagedByAnnotation: infrav1exp.ReplicasManagedByAKS,
+			clusterv1.ReplicasManagedByAnnotation: infrav1alpha.ReplicasManagedByAKS,
 		}
 	}
diff --git a/exp/controllers/azureasomanagedcluster_controller.go b/controllers/azureasomanagedcluster_controller.go
similarity index 81%
rename from exp/controllers/azureasomanagedcluster_controller.go
rename to controllers/azureasomanagedcluster_controller.go
index d651f57d8d0..e3b6014a244 100644
--- a/exp/controllers/azureasomanagedcluster_controller.go
+++ b/controllers/azureasomanagedcluster_controller.go
@@ -23,9 +23,8 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
-	infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers"
-	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1"
-	"sigs.k8s.io/cluster-api-provider-azure/exp/mutators"
+	infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1"
+	"sigs.k8s.io/cluster-api-provider-azure/pkg/mutators"
 	"sigs.k8s.io/cluster-api-provider-azure/util/tele"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
@@ -50,7 +49,7 @@ type AzureASOManagedClusterReconciler struct {
 	client.Client
 	WatchFilterValue string
 
-	newResourceReconciler func(*infrav1exp.AzureASOManagedCluster, []*unstructured.Unstructured) resourceReconciler
+	newResourceReconciler func(*infrav1alpha.AzureASOManagedCluster, []*unstructured.Unstructured) resourceReconciler
 }
 
 type resourceReconciler interface {
@@ -70,38 +69,38 @@ type resourceReconciler interface {
 func (r *AzureASOManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error {
 	ctx, log, done := tele.StartSpanWithLogger(ctx,
 		"controllers.AzureASOManagedClusterReconciler.SetupWithManager",
-		tele.KVP("controller", infrav1exp.AzureASOManagedClusterKind),
+		tele.KVP("controller", infrav1alpha.AzureASOManagedClusterKind),
 	)
 	defer done()
 
 	c, err := ctrl.NewControllerManagedBy(mgr).
-		For(&infrav1exp.AzureASOManagedCluster{}).
+		For(&infrav1alpha.AzureASOManagedCluster{}).
 		WithEventFilter(predicates.ResourceHasFilterLabel(log, r.WatchFilterValue)).
 		WithEventFilter(predicates.ResourceIsNotExternallyManaged(log)).
 		// Watch clusters for pause/unpause notifications
 		Watches(
 			&clusterv1.Cluster{},
 			handler.EnqueueRequestsFromMapFunc(
-				util.ClusterToInfrastructureMapFunc(ctx, infrav1exp.GroupVersion.WithKind(infrav1exp.AzureASOManagedClusterKind), mgr.GetClient(), &infrav1exp.AzureASOManagedCluster{}),
+				util.ClusterToInfrastructureMapFunc(ctx, infrav1alpha.GroupVersion.WithKind(infrav1alpha.AzureASOManagedClusterKind), mgr.GetClient(), &infrav1alpha.AzureASOManagedCluster{}),
 			),
 			builder.WithPredicates(
 				predicates.ResourceHasFilterLabel(log, r.WatchFilterValue),
-				infracontroller.ClusterUpdatePauseChange(log),
+				ClusterUpdatePauseChange(log),
 			),
 		).
 		Watches(
-			&infrav1exp.AzureASOManagedControlPlane{},
+			&infrav1alpha.AzureASOManagedControlPlane{},
 			handler.EnqueueRequestsFromMapFunc(asoManagedControlPlaneToManagedClusterMap(r.Client)),
 			builder.WithPredicates(
 				predicates.ResourceHasFilterLabel(log, r.WatchFilterValue),
 				predicate.Funcs{
 					CreateFunc: func(ev event.CreateEvent) bool {
-						controlPlane := ev.Object.(*infrav1exp.AzureASOManagedControlPlane)
+						controlPlane := ev.Object.(*infrav1alpha.AzureASOManagedControlPlane)
 						return !controlPlane.Status.ControlPlaneEndpoint.IsZero()
 					},
 					UpdateFunc: func(ev event.UpdateEvent) bool {
-						oldControlPlane := ev.ObjectOld.(*infrav1exp.AzureASOManagedControlPlane)
-						newControlPlane := ev.ObjectNew.(*infrav1exp.AzureASOManagedControlPlane)
+						oldControlPlane := ev.ObjectOld.(*infrav1alpha.AzureASOManagedControlPlane)
+						newControlPlane := ev.ObjectNew.(*infrav1alpha.AzureASOManagedControlPlane)
 						return oldControlPlane.Status.ControlPlaneEndpoint !=
 							newControlPlane.Status.ControlPlaneEndpoint
 					},
@@ -118,7 +117,7 @@ func (r *AzureASOManagedClusterReconciler) SetupWithManager(ctx context.Context
 		Controller: c,
 	}
 
-	r.newResourceReconciler = func(asoManagedCluster *infrav1exp.AzureASOManagedCluster, resources []*unstructured.Unstructured) resourceReconciler {
+	r.newResourceReconciler = func(asoManagedCluster *infrav1alpha.AzureASOManagedCluster, resources []*unstructured.Unstructured) resourceReconciler {
 		return &ResourceReconciler{
 			Client:    r.Client,
 			resources: resources,
@@ -132,7 +131,7 @@ func (r *AzureASOManagedClusterReconciler) SetupWithManager(ctx context.Context
 
 func asoManagedControlPlaneToManagedClusterMap(c client.Client) handler.MapFunc {
 	return func(ctx context.Context, o client.Object) []reconcile.Request {
-		asoManagedControlPlane := o.(*infrav1exp.AzureASOManagedControlPlane)
+		asoManagedControlPlane := o.(*infrav1alpha.AzureASOManagedControlPlane)
 
 		cluster, err := util.GetOwnerCluster(ctx, c, asoManagedControlPlane.ObjectMeta)
 		if err != nil {
@@ -141,8 +140,8 @@ func asoManagedControlPlaneToManagedClusterMap(c client.Client) handler.MapFunc
 		if cluster == nil ||
 			cluster.Spec.InfrastructureRef == nil ||
-			cluster.Spec.InfrastructureRef.APIVersion != infrav1exp.GroupVersion.Identifier() ||
-			cluster.Spec.InfrastructureRef.Kind != infrav1exp.AzureASOManagedClusterKind {
+			cluster.Spec.InfrastructureRef.APIVersion != infrav1alpha.GroupVersion.Identifier() ||
+			cluster.Spec.InfrastructureRef.Kind != infrav1alpha.AzureASOManagedClusterKind {
 			return nil
 		}
@@ -167,11 +166,11 @@ func (r *AzureASOManagedClusterReconciler) Reconcile(ctx context.Context, req ct
 		"controllers.AzureASOManagedClusterReconciler.Reconcile",
 		tele.KVP("namespace", req.Namespace),
 		tele.KVP("name", req.Name),
-		tele.KVP("kind", infrav1exp.AzureASOManagedClusterKind),
+		tele.KVP("kind", infrav1alpha.AzureASOManagedClusterKind),
 	)
 	defer done()
 
	asoManagedCluster := &infrav1exp.AzureASOManagedCluster{}
+ asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{} err := r.Get(ctx, req.NamespacedName, asoManagedCluster) if err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) @@ -208,7 +207,7 @@ func (r *AzureASOManagedClusterReconciler) Reconcile(ctx context.Context, req ct return r.reconcileNormal(ctx, asoManagedCluster, cluster) } -func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, asoManagedCluster *infrav1exp.AzureASOManagedCluster, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, asoManagedCluster *infrav1alpha.AzureASOManagedCluster, cluster *clusterv1.Cluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedClusterReconciler.reconcileNormal", ) @@ -220,13 +219,13 @@ func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, nil } if cluster.Spec.ControlPlaneRef == nil || - cluster.Spec.ControlPlaneRef.APIVersion != infrav1exp.GroupVersion.Identifier() || - cluster.Spec.ControlPlaneRef.Kind != infrav1exp.AzureASOManagedControlPlaneKind { + cluster.Spec.ControlPlaneRef.APIVersion != infrav1alpha.GroupVersion.Identifier() || + cluster.Spec.ControlPlaneRef.Kind != infrav1alpha.AzureASOManagedControlPlaneKind { return ctrl.Result{}, reconcile.TerminalError(errInvalidControlPlaneKind) } needsPatch := controllerutil.AddFinalizer(asoManagedCluster, clusterv1.ClusterFinalizer) - needsPatch = infracontroller.AddBlockMoveAnnotation(asoManagedCluster) || needsPatch + needsPatch = AddBlockMoveAnnotation(asoManagedCluster) || needsPatch if needsPatch { return ctrl.Result{Requeue: true}, nil } @@ -246,7 +245,7 @@ func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, } } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Spec.ControlPlaneRef.Namespace, Name: cluster.Spec.ControlPlaneRef.Name, @@ -264,7 +263,7 @@ func (r *AzureASOManagedClusterReconciler) reconcileNormal(ctx context.Context, } //nolint:unparam // an empty ctrl.Result is always returned here, leaving it as-is to avoid churn in refactoring later if that changes. -func (r *AzureASOManagedClusterReconciler) reconcilePaused(ctx context.Context, asoManagedCluster *infrav1exp.AzureASOManagedCluster) (ctrl.Result, error) { +func (r *AzureASOManagedClusterReconciler) reconcilePaused(ctx context.Context, asoManagedCluster *infrav1alpha.AzureASOManagedCluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedClusterReconciler.reconcilePaused") defer done() log.V(4).Info("reconciling pause") @@ -279,13 +278,13 @@ func (r *AzureASOManagedClusterReconciler) reconcilePaused(ctx context.Context, return ctrl.Result{}, fmt.Errorf("failed to pause resources: %w", err) } - infracontroller.RemoveBlockMoveAnnotation(asoManagedCluster) + RemoveBlockMoveAnnotation(asoManagedCluster) return ctrl.Result{}, nil } //nolint:unparam // an empty ctrl.Result is always returned here, leaving it as-is to avoid churn in refactoring later if that changes. 
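The pause path above follows one shape in all three reconcilers: pause the owned ASO resources, then drop clusterctl's block-move annotation so "clusterctl move" can relocate the object. A minimal sketch of that shape, assuming hypothetical pauser and annotated interfaces in place of the real resourceReconciler and CAPZ types (the annotation key is normally taken from clusterctlv1, not hardcoded):

package controllers

import (
	"context"
	"fmt"
)

// blockMoveAnnotation mirrors clusterctlv1.BlockMoveAnnotation.
const blockMoveAnnotation = "clusterctl.cluster.x-k8s.io/block-move"

// pauser stands in for the Pause side of resourceReconciler.
type pauser interface {
	Pause(ctx context.Context) error
}

// annotated is the minimal object surface this sketch needs.
type annotated interface {
	GetAnnotations() map[string]string
	SetAnnotations(map[string]string)
}

// reconcilePaused pauses the owned resources, then removes the block-move
// annotation so clusterctl move is free to proceed.
func reconcilePaused(ctx context.Context, rr pauser, obj annotated) error {
	if err := rr.Pause(ctx); err != nil {
		return fmt.Errorf("failed to pause resources: %w", err)
	}
	anns := obj.GetAnnotations()
	delete(anns, blockMoveAnnotation)
	obj.SetAnnotations(anns)
	return nil
}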
-func (r *AzureASOManagedClusterReconciler) reconcileDelete(ctx context.Context, asoManagedCluster *infrav1exp.AzureASOManagedCluster) (ctrl.Result, error) { +func (r *AzureASOManagedClusterReconciler) reconcileDelete(ctx context.Context, asoManagedCluster *infrav1alpha.AzureASOManagedCluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedClusterReconciler.reconcileDelete", ) diff --git a/exp/controllers/azureasomanagedcluster_controller_test.go b/controllers/azureasomanagedcluster_controller_test.go similarity index 84% rename from exp/controllers/azureasomanagedcluster_controller_test.go rename to controllers/azureasomanagedcluster_controller_test.go index a4d6e1af598..4767e64469b 100644 --- a/exp/controllers/azureasomanagedcluster_controller_test.go +++ b/controllers/azureasomanagedcluster_controller_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" ctrl "sigs.k8s.io/controller-runtime" @@ -69,7 +69,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( - infrav1exp.AddToScheme, + infrav1alpha.AddToScheme, clusterv1.AddToScheme, ) NewGomegaWithT(t).Expect(sb.AddToScheme(s)).To(Succeed()) @@ -77,7 +77,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder(). WithScheme(s). - WithStatusSubresource(&infrav1exp.AzureASOManagedCluster{}) + WithStatusSubresource(&infrav1alpha.AzureASOManagedCluster{}) } t.Run("AzureASOManagedCluster does not exist", func(t *testing.T) { @@ -96,7 +96,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { t.Run("Cluster does not exist", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: "ns", @@ -129,12 +129,12 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: cluster.Namespace, @@ -172,12 +172,12 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: cluster.Namespace, @@ -195,7 +195,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { clusterctlv1.BlockMoveAnnotation: "true", }, }, 
- Status: infrav1exp.AzureASOManagedClusterStatus{ + Status: infrav1alpha.AzureASOManagedClusterStatus{ Ready: true, }, } @@ -204,11 +204,11 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Build() r := &AzureASOManagedClusterReconciler{ Client: c, - newResourceReconciler: func(asoManagedCluster *infrav1exp.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(asoManagedCluster *infrav1alpha.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ owner: asoManagedCluster, reconcileFunc: func(ctx context.Context, o client.Object) error { - asoManagedCluster.SetResourceStatuses([]infrav1exp.ResourceStatus{ + asoManagedCluster.SetResourceStatuses([]infrav1alpha.ResourceStatus{ {Ready: true}, {Ready: false}, {Ready: true}, @@ -236,14 +236,14 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, Name: "amcp", Namespace: "ns", }, }, } - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: cluster.Namespace, @@ -261,16 +261,16 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { clusterctlv1.BlockMoveAnnotation: "true", }, }, - Status: infrav1exp.AzureASOManagedClusterStatus{ + Status: infrav1alpha.AzureASOManagedClusterStatus{ Ready: false, }, } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: cluster.Namespace, }, - Status: infrav1exp.AzureASOManagedControlPlaneStatus{ + Status: infrav1alpha.AzureASOManagedControlPlaneStatus{ ControlPlaneEndpoint: clusterv1.APIEndpoint{Host: "endpoint"}, }, } @@ -279,7 +279,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Build() r := &AzureASOManagedClusterReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ reconcileFunc: func(ctx context.Context, o client.Object) error { return nil @@ -308,7 +308,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Paused: true, }, } - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: cluster.Namespace, @@ -329,7 +329,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Build() r := &AzureASOManagedClusterReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ pauseFunc: func(_ context.Context, _ client.Object) error { return nil @@ -348,7 +348,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { t.Run("successfully reconciles in-progress delete", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedCluster := 
&infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: "ns", @@ -363,13 +363,13 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Build() r := &AzureASOManagedClusterReconciler{ Client: c, - newResourceReconciler: func(asoManagedCluster *infrav1exp.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(asoManagedCluster *infrav1alpha.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ owner: asoManagedCluster, deleteFunc: func(ctx context.Context, o client.Object) error { - asoManagedCluster.SetResourceStatuses([]infrav1exp.ResourceStatus{ + asoManagedCluster.SetResourceStatuses([]infrav1alpha.ResourceStatus{ { - Resource: infrav1exp.StatusResource{ + Resource: infrav1alpha.StatusResource{ Name: "still-deleting", }, }, @@ -391,7 +391,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { t.Run("successfully reconciles finished delete", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "amc", Namespace: "ns", @@ -406,7 +406,7 @@ func TestAzureASOManagedClusterReconcile(t *testing.T) { Build() r := &AzureASOManagedClusterReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedCluster, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ deleteFunc: func(ctx context.Context, o client.Object) error { return nil diff --git a/exp/controllers/azureasomanagedcontrolplane_controller.go b/controllers/azureasomanagedcontrolplane_controller.go similarity index 84% rename from exp/controllers/azureasomanagedcontrolplane_controller.go rename to controllers/azureasomanagedcontrolplane_controller.go index 8b91b55b9d2..969ee3728b2 100644 --- a/exp/controllers/azureasomanagedcontrolplane_controller.go +++ b/controllers/azureasomanagedcontrolplane_controller.go @@ -26,9 +26,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" - "sigs.k8s.io/cluster-api-provider-azure/exp/mutators" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-azure/pkg/mutators" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" @@ -52,31 +51,31 @@ type AzureASOManagedControlPlaneReconciler struct { client.Client WatchFilterValue string - newResourceReconciler func(*infrav1exp.AzureASOManagedControlPlane, []*unstructured.Unstructured) resourceReconciler + newResourceReconciler func(*infrav1alpha.AzureASOManagedControlPlane, []*unstructured.Unstructured) resourceReconciler } // SetupWithManager sets up the controller with the Manager. 
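A pattern worth noting across these controllers: each holds a newResourceReconciler factory field rather than a concrete ResourceReconciler, so SetupWithManager wires in the real implementation while the tests substitute a fakeResourceReconciler per case. A stripped-down sketch of that seam; the names and method signatures here are illustrative (the real interface also threads client.Object through its funcs):

package controllers

import "context"

// resourceReconciler is the narrow seam the controllers depend on.
type resourceReconciler interface {
	Reconcile(ctx context.Context) error
	Pause(ctx context.Context) error
	Delete(ctx context.Context) error
}

// reconciler keeps a factory so production wiring and tests can supply
// different implementations without touching Reconcile itself.
type reconciler struct {
	newResourceReconciler func(resources []string) resourceReconciler
}

// fakeRR is a fakeResourceReconciler-style double with pluggable behavior.
type fakeRR struct {
	reconcileFunc func(context.Context) error
}

func (f *fakeRR) Reconcile(ctx context.Context) error { return f.reconcileFunc(ctx) }
func (f *fakeRR) Pause(context.Context) error         { return nil }
func (f *fakeRR) Delete(context.Context) error        { return nil }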
func (r *AzureASOManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { _, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedControlPlaneReconciler.SetupWithManager", - tele.KVP("controller", infrav1exp.AzureASOManagedControlPlaneKind), + tele.KVP("controller", infrav1alpha.AzureASOManagedControlPlaneKind), ) defer done() c, err := ctrl.NewControllerManagedBy(mgr). - For(&infrav1exp.AzureASOManagedControlPlane{}). + For(&infrav1alpha.AzureASOManagedControlPlane{}). WithEventFilter(predicates.ResourceHasFilterLabel(log, r.WatchFilterValue)). Watches(&clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterToAzureASOManagedControlPlane), builder.WithPredicates( predicates.ResourceHasFilterLabel(log, r.WatchFilterValue), - infracontroller.ClusterPauseChangeAndInfrastructureReady(log), + ClusterPauseChangeAndInfrastructureReady(log), ), ). // User errors that CAPZ passes through agentPoolProfiles on create must be fixed in the // AzureASOManagedMachinePool, so trigger a reconciliation to consume those fixes. Watches( - &infrav1exp.AzureASOManagedMachinePool{}, + &infrav1alpha.AzureASOManagedMachinePool{}, handler.EnqueueRequestsFromMapFunc(r.azureASOManagedMachinePoolToAzureASOManagedControlPlane), ). Owns(&corev1.Secret{}). @@ -90,7 +89,7 @@ func (r *AzureASOManagedControlPlaneReconciler) SetupWithManager(ctx context.Con Controller: c, } - r.newResourceReconciler = func(asoManagedCluster *infrav1exp.AzureASOManagedControlPlane, resources []*unstructured.Unstructured) resourceReconciler { + r.newResourceReconciler = func(asoManagedCluster *infrav1alpha.AzureASOManagedControlPlane, resources []*unstructured.Unstructured) resourceReconciler { return &ResourceReconciler{ Client: r.Client, resources: resources, @@ -105,15 +104,15 @@ func (r *AzureASOManagedControlPlaneReconciler) SetupWithManager(ctx context.Con func clusterToAzureASOManagedControlPlane(_ context.Context, o client.Object) []ctrl.Request { controlPlaneRef := o.(*clusterv1.Cluster).Spec.ControlPlaneRef if controlPlaneRef != nil && - controlPlaneRef.APIVersion == infrav1exp.GroupVersion.Identifier() && - controlPlaneRef.Kind == infrav1exp.AzureASOManagedControlPlaneKind { + controlPlaneRef.APIVersion == infrav1alpha.GroupVersion.Identifier() && + controlPlaneRef.Kind == infrav1alpha.AzureASOManagedControlPlaneKind { return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} } return nil } func (r *AzureASOManagedControlPlaneReconciler) azureASOManagedMachinePoolToAzureASOManagedControlPlane(ctx context.Context, o client.Object) []ctrl.Request { - asoManagedMachinePool := o.(*infrav1exp.AzureASOManagedMachinePool) + asoManagedMachinePool := o.(*infrav1alpha.AzureASOManagedMachinePool) clusterName := asoManagedMachinePool.Labels[clusterv1.ClusterNameLabel] if clusterName == "" { return nil @@ -135,11 +134,11 @@ func (r *AzureASOManagedControlPlaneReconciler) Reconcile(ctx context.Context, r "controllers.AzureASOManagedControlPlaneReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", infrav1exp.AzureASOManagedControlPlaneKind), + tele.KVP("kind", infrav1alpha.AzureASOManagedControlPlaneKind), ) defer done() - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{} + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{} err := r.Get(ctx, req.NamespacedName, asoManagedControlPlane) if err != nil { return ctrl.Result{}, 
client.IgnoreNotFound(err) @@ -177,7 +176,7 @@ func (r *AzureASOManagedControlPlaneReconciler) Reconcile(ctx context.Context, r return r.reconcileNormal(ctx, asoManagedControlPlane, cluster) } -func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedControlPlaneReconciler.reconcileNormal", ) @@ -189,13 +188,13 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Cont return ctrl.Result{}, nil } if cluster.Spec.InfrastructureRef == nil || - cluster.Spec.InfrastructureRef.APIVersion != infrav1exp.GroupVersion.Identifier() || - cluster.Spec.InfrastructureRef.Kind != infrav1exp.AzureASOManagedClusterKind { + cluster.Spec.InfrastructureRef.APIVersion != infrav1alpha.GroupVersion.Identifier() || + cluster.Spec.InfrastructureRef.Kind != infrav1alpha.AzureASOManagedClusterKind { return ctrl.Result{}, reconcile.TerminalError(errInvalidClusterKind) } - needsPatch := controllerutil.AddFinalizer(asoManagedControlPlane, infrav1exp.AzureASOManagedControlPlaneFinalizer) - needsPatch = infracontroller.AddBlockMoveAnnotation(asoManagedControlPlane) || needsPatch + needsPatch := controllerutil.AddFinalizer(asoManagedControlPlane, infrav1alpha.AzureASOManagedControlPlaneFinalizer) + needsPatch = AddBlockMoveAnnotation(asoManagedControlPlane) || needsPatch if needsPatch { return ctrl.Result{Requeue: true}, nil } @@ -252,7 +251,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileNormal(ctx context.Cont return ctrl.Result{}, nil } -func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, cluster *clusterv1.Cluster, managedCluster *asocontainerservicev1.ManagedCluster) error { +func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane, cluster *clusterv1.Cluster, managedCluster *asocontainerservicev1.ManagedCluster) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedControlPlaneReconciler.reconcileKubeconfig", ) @@ -284,7 +283,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context. Name: secret.Name(cluster.Name, secret.Kubeconfig), Namespace: cluster.Namespace, OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(asoManagedControlPlane, infrav1exp.GroupVersion.WithKind(infrav1exp.AzureASOManagedControlPlaneKind)), + *metav1.NewControllerRef(asoManagedControlPlane, infrav1alpha.GroupVersion.WithKind(infrav1alpha.AzureASOManagedControlPlaneKind)), }, Labels: map[string]string{clusterv1.ClusterNameLabel: cluster.Name}, }, @@ -297,7 +296,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileKubeconfig(ctx context. } //nolint:unparam // an empty ctrl.Result is always returned here, leaving it as-is to avoid churn in refactoring later if that changes. 
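reconcileKubeconfig above writes the AKS kubeconfig into the secret CAPI expects and makes the control plane its controller owner so the secret is garbage-collected with it. A compact sketch of just that wiring, assuming a hypothetical newKubeconfigSecret helper; the name "<cluster>-kubeconfig", the "value" data key, and the cluster-name label follow CAPI's secret conventions (the real code derives them via secret.Name and clusterv1):

package controllers

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// newKubeconfigSecret builds the CAPI kubeconfig secret for a cluster,
// controller-owned by the given object (e.g. the AzureASOManagedControlPlane).
func newKubeconfigSecret(clusterName, namespace string, kubeconfig []byte, owner metav1.Object, gvk schema.GroupVersionKind) *corev1.Secret {
	return &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterName + "-kubeconfig", // what secret.Name(cluster.Name, secret.Kubeconfig) returns
			Namespace: namespace,
			OwnerReferences: []metav1.OwnerReference{
				*metav1.NewControllerRef(owner, gvk),
			},
			Labels: map[string]string{"cluster.x-k8s.io/cluster-name": clusterName},
		},
		Data: map[string][]byte{"value": kubeconfig},
	}
}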
-func (r *AzureASOManagedControlPlaneReconciler) reconcilePaused(ctx context.Context, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane) (ctrl.Result, error) { +func (r *AzureASOManagedControlPlaneReconciler) reconcilePaused(ctx context.Context, asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedControlPlaneReconciler.reconcilePaused") defer done() log.V(4).Info("reconciling pause") @@ -312,13 +311,13 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcilePaused(ctx context.Cont return ctrl.Result{}, fmt.Errorf("failed to pause resources: %w", err) } - infracontroller.RemoveBlockMoveAnnotation(asoManagedControlPlane) + RemoveBlockMoveAnnotation(asoManagedControlPlane) return ctrl.Result{}, nil } //nolint:unparam // an empty ctrl.Result is always returned here, leaving it as-is to avoid churn in refactoring later if that changes. -func (r *AzureASOManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane) (ctrl.Result, error) { +func (r *AzureASOManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedControlPlaneReconciler.reconcileDelete", ) @@ -338,7 +337,7 @@ func (r *AzureASOManagedControlPlaneReconciler) reconcileDelete(ctx context.Cont return ctrl.Result{}, nil } - controllerutil.RemoveFinalizer(asoManagedControlPlane, infrav1exp.AzureASOManagedControlPlaneFinalizer) + controllerutil.RemoveFinalizer(asoManagedControlPlane, infrav1alpha.AzureASOManagedControlPlaneFinalizer) return ctrl.Result{}, nil } diff --git a/exp/controllers/azureasomanagedcontrolplane_controller_test.go b/controllers/azureasomanagedcontrolplane_controller_test.go similarity index 85% rename from exp/controllers/azureasomanagedcontrolplane_controller_test.go rename to controllers/azureasomanagedcontrolplane_controller_test.go index 7e80e80b583..707ddf9db81 100644 --- a/exp/controllers/azureasomanagedcontrolplane_controller_test.go +++ b/controllers/azureasomanagedcontrolplane_controller_test.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/util/secret" @@ -46,7 +46,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( - infrav1exp.AddToScheme, + infrav1alpha.AddToScheme, clusterv1.AddToScheme, asocontainerservicev1.AddToScheme, corev1.AddToScheme, @@ -55,7 +55,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder(). WithScheme(s). 
- WithStatusSubresource(&infrav1exp.AzureASOManagedControlPlane{}) + WithStatusSubresource(&infrav1alpha.AzureASOManagedControlPlane{}) } t.Run("AzureASOManagedControlPlane does not exist", func(t *testing.T) { @@ -74,7 +74,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { t.Run("Cluster does not exist", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: "ns", @@ -107,12 +107,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedClusterKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedClusterKind, }, }, } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: cluster.Namespace, @@ -136,7 +136,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { g.Expect(result).To(Equal(ctrl.Result{Requeue: true})) g.Expect(c.Get(ctx, client.ObjectKeyFromObject(asoManagedControlPlane), asoManagedControlPlane)).To(Succeed()) - g.Expect(asoManagedControlPlane.GetFinalizers()).To(ContainElement(infrav1exp.AzureASOManagedControlPlaneFinalizer)) + g.Expect(asoManagedControlPlane.GetFinalizers()).To(ContainElement(infrav1alpha.AzureASOManagedControlPlaneFinalizer)) g.Expect(asoManagedControlPlane.GetAnnotations()).To(HaveKey(clusterctlv1.BlockMoveAnnotation)) }) @@ -150,12 +150,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedClusterKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedClusterKind, }, }, } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: cluster.Namespace, @@ -167,14 +167,14 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, }, Finalizers: []string{ - infrav1exp.AzureASOManagedControlPlaneFinalizer, + infrav1alpha.AzureASOManagedControlPlaneFinalizer, }, Annotations: map[string]string{ clusterctlv1.BlockMoveAnnotation: "true", }, }, - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: mcJSON(g, &asocontainerservicev1.ManagedCluster{ @@ -186,7 +186,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, }, }, - Status: infrav1exp.AzureASOManagedControlPlaneStatus{ + Status: infrav1alpha.AzureASOManagedControlPlaneStatus{ Ready: true, }, } @@ -195,11 +195,11 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { Build() r := &AzureASOManagedControlPlaneReconciler{ Client: c, - newResourceReconciler: func(asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) 
resourceReconciler { + newResourceReconciler: func(asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ owner: asoManagedControlPlane, reconcileFunc: func(ctx context.Context, o client.Object) error { - asoManagedControlPlane.SetResourceStatuses([]infrav1exp.ResourceStatus{ + asoManagedControlPlane.SetResourceStatuses([]infrav1alpha.ResourceStatus{ {Ready: true}, {Ready: false}, {Ready: true}, @@ -227,8 +227,8 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedClusterKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedClusterKind, }, }, } @@ -261,7 +261,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { CurrentKubernetesVersion: ptr.To("Current"), }, } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: cluster.Namespace, @@ -273,14 +273,14 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, }, Finalizers: []string{ - infrav1exp.AzureASOManagedControlPlaneFinalizer, + infrav1alpha.AzureASOManagedControlPlaneFinalizer, }, Annotations: map[string]string{ clusterctlv1.BlockMoveAnnotation: "true", }, }, - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: mcJSON(g, &asocontainerservicev1.ManagedCluster{ @@ -292,7 +292,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { }, }, }, - Status: infrav1exp.AzureASOManagedControlPlaneStatus{ + Status: infrav1alpha.AzureASOManagedControlPlaneStatus{ Ready: false, }, } @@ -310,7 +310,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { return nil }, }, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ reconcileFunc: func(ctx context.Context, o client.Object) error { return nil @@ -341,7 +341,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { Paused: true, }, } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: cluster.Namespace, @@ -362,7 +362,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { Build() r := &AzureASOManagedControlPlaneReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ pauseFunc: func(_ context.Context, _ client.Object) error { return nil @@ -381,12 +381,12 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { t.Run("successfully reconciles 
delete", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: "amcp", Namespace: "ns", Finalizers: []string{ - infrav1exp.AzureASOManagedControlPlaneFinalizer, + infrav1alpha.AzureASOManagedControlPlaneFinalizer, }, DeletionTimestamp: &metav1.Time{Time: time.Date(1, 0, 0, 0, 0, 0, 0, time.UTC)}, }, @@ -396,7 +396,7 @@ func TestAzureASOManagedControlPlaneReconcile(t *testing.T) { Build() r := &AzureASOManagedControlPlaneReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedControlPlane, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ deleteFunc: func(ctx context.Context, o client.Object) error { return nil diff --git a/exp/controllers/azureasomanagedmachinepool_controller.go b/controllers/azureasomanagedmachinepool_controller.go similarity index 87% rename from exp/controllers/azureasomanagedmachinepool_controller.go rename to controllers/azureasomanagedmachinepool_controller.go index b0f521b3627..3d2aa614756 100644 --- a/exp/controllers/azureasomanagedmachinepool_controller.go +++ b/controllers/azureasomanagedmachinepool_controller.go @@ -26,9 +26,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - infracontroller "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" - "sigs.k8s.io/cluster-api-provider-azure/exp/mutators" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-azure/pkg/mutators" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" @@ -52,7 +51,7 @@ type AzureASOManagedMachinePoolReconciler struct { WatchFilterValue string Tracker ClusterTracker - newResourceReconciler func(*infrav1exp.AzureASOManagedMachinePool, []*unstructured.Unstructured) resourceReconciler + newResourceReconciler func(*infrav1alpha.AzureASOManagedMachinePool, []*unstructured.Unstructured) resourceReconciler } // ClusterTracker wraps a CAPI remote.ClusterCacheTracker. @@ -64,17 +63,17 @@ type ClusterTracker interface { func (r *AzureASOManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { _, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedMachinePoolReconciler.SetupWithManager", - tele.KVP("controller", infrav1exp.AzureASOManagedMachinePoolKind), + tele.KVP("controller", infrav1alpha.AzureASOManagedMachinePoolKind), ) defer done() - clusterToAzureASOManagedMachinePools, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1exp.AzureASOManagedMachinePoolList{}, mgr.GetScheme()) + clusterToAzureASOManagedMachinePools, err := util.ClusterToTypedObjectsMapper(mgr.GetClient(), &infrav1alpha.AzureASOManagedMachinePoolList{}, mgr.GetScheme()) if err != nil { return fmt.Errorf("failed to get Cluster to AzureASOManagedMachinePool mapper: %w", err) } c, err := ctrl.NewControllerManagedBy(mgr). - For(&infrav1exp.AzureASOManagedMachinePool{}). + For(&infrav1alpha.AzureASOManagedMachinePool{}). WithEventFilter(predicates.ResourceHasFilterLabel(log, r.WatchFilterValue)). 
Watches( &clusterv1.Cluster{}, @@ -83,14 +82,14 @@ func (r *AzureASOManagedMachinePoolReconciler) SetupWithManager(ctx context.Cont predicates.ResourceHasFilterLabel(log, r.WatchFilterValue), predicates.Any(log, predicates.ClusterControlPlaneInitialized(log), - infracontroller.ClusterUpdatePauseChange(log), + ClusterUpdatePauseChange(log), ), ), ). Watches( &expv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc( - infrav1exp.GroupVersion.WithKind(infrav1exp.AzureASOManagedMachinePoolKind), log), + infrav1alpha.GroupVersion.WithKind(infrav1alpha.AzureASOManagedMachinePoolKind), log), ), builder.WithPredicates( predicates.ResourceHasFilterLabel(log, r.WatchFilterValue), @@ -106,7 +105,7 @@ func (r *AzureASOManagedMachinePoolReconciler) SetupWithManager(ctx context.Cont Controller: c, } - r.newResourceReconciler = func(asoManagedCluster *infrav1exp.AzureASOManagedMachinePool, resources []*unstructured.Unstructured) resourceReconciler { + r.newResourceReconciler = func(asoManagedCluster *infrav1alpha.AzureASOManagedMachinePool, resources []*unstructured.Unstructured) resourceReconciler { return &ResourceReconciler{ Client: r.Client, resources: resources, @@ -128,11 +127,11 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re "controllers.AzureASOManagedMachinePoolReconciler.Reconcile", tele.KVP("namespace", req.Namespace), tele.KVP("name", req.Name), - tele.KVP("kind", infrav1exp.AzureASOManagedMachinePoolKind), + tele.KVP("kind", infrav1alpha.AzureASOManagedMachinePoolKind), ) defer done() - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{} + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{} err := r.Get(ctx, req.NamespacedName, asoManagedMachinePool) if err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) @@ -180,8 +179,8 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re return ctrl.Result{}, nil } if cluster.Spec.ControlPlaneRef == nil || - cluster.Spec.ControlPlaneRef.APIVersion != infrav1exp.GroupVersion.Identifier() || - cluster.Spec.ControlPlaneRef.Kind != infrav1exp.AzureASOManagedControlPlaneKind { + cluster.Spec.ControlPlaneRef.APIVersion != infrav1alpha.GroupVersion.Identifier() || + cluster.Spec.ControlPlaneRef.Kind != infrav1alpha.AzureASOManagedControlPlaneKind { return ctrl.Result{}, reconcile.TerminalError(fmt.Errorf("AzureASOManagedMachinePool cannot be used without AzureASOManagedControlPlane")) } @@ -196,7 +195,7 @@ func (r *AzureASOManagedMachinePoolReconciler) Reconcile(ctx context.Context, re return r.reconcileNormal(ctx, asoManagedMachinePool, machinePool, cluster) } -func (r *AzureASOManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, asoManagedMachinePool *infrav1exp.AzureASOManagedMachinePool, machinePool *expv1.MachinePool, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *AzureASOManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, asoManagedMachinePool *infrav1alpha.AzureASOManagedMachinePool, machinePool *expv1.MachinePool, cluster *clusterv1.Cluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedMachinePoolReconciler.reconcileNormal", ) @@ -204,7 +203,7 @@ func (r *AzureASOManagedMachinePoolReconciler) reconcileNormal(ctx context.Conte log.V(4).Info("reconciling normally") needsPatch := controllerutil.AddFinalizer(asoManagedMachinePool, clusterv1.ClusterFinalizer) - needsPatch = 
infracontroller.AddBlockMoveAnnotation(asoManagedMachinePool) || needsPatch + needsPatch = AddBlockMoveAnnotation(asoManagedMachinePool) || needsPatch if needsPatch { return ctrl.Result{Requeue: true}, nil } @@ -293,7 +292,7 @@ func expectedNodeLabels(poolName, nodeRG string) map[string]string { } //nolint:unparam // an empty ctrl.Result is always returned here, leaving it as-is to avoid churn in refactoring later if that changes. -func (r *AzureASOManagedMachinePoolReconciler) reconcilePaused(ctx context.Context, asoManagedMachinePool *infrav1exp.AzureASOManagedMachinePool) (ctrl.Result, error) { +func (r *AzureASOManagedMachinePoolReconciler) reconcilePaused(ctx context.Context, asoManagedMachinePool *infrav1alpha.AzureASOManagedMachinePool) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedMachinePoolReconciler.reconcilePaused") defer done() log.V(4).Info("reconciling pause") @@ -308,13 +307,13 @@ func (r *AzureASOManagedMachinePoolReconciler) reconcilePaused(ctx context.Conte return ctrl.Result{}, fmt.Errorf("failed to pause resources: %w", err) } - infracontroller.RemoveBlockMoveAnnotation(asoManagedMachinePool) + RemoveBlockMoveAnnotation(asoManagedMachinePool) return ctrl.Result{}, nil } //nolint:unparam // an empty ctrl.Result is always returned here, leaving it as-is to avoid churn in refactoring later if that changes. -func (r *AzureASOManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, asoManagedMachinePool *infrav1exp.AzureASOManagedMachinePool, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *AzureASOManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, asoManagedMachinePool *infrav1alpha.AzureASOManagedMachinePool, cluster *clusterv1.Cluster) (ctrl.Result, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.AzureASOManagedMachinePoolReconciler.reconcileDelete", ) diff --git a/exp/controllers/azureasomanagedmachinepool_controller_test.go b/controllers/azureasomanagedmachinepool_controller_test.go similarity index 86% rename from exp/controllers/azureasomanagedmachinepool_controller_test.go rename to controllers/azureasomanagedmachinepool_controller_test.go index 53e6d974bd5..c66caf1630c 100644 --- a/exp/controllers/azureasomanagedmachinepool_controller_test.go +++ b/controllers/azureasomanagedmachinepool_controller_test.go @@ -31,7 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -56,7 +56,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( - infrav1exp.AddToScheme, + infrav1alpha.AddToScheme, clusterv1.AddToScheme, expv1.AddToScheme, asocontainerservicev1.AddToScheme, @@ -65,7 +65,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder(). WithScheme(s). 
- WithStatusSubresource(&infrav1exp.AzureASOManagedMachinePool{}) + WithStatusSubresource(&infrav1alpha.AzureASOManagedMachinePool{}) } t.Run("AzureASOManagedMachinePool does not exist", func(t *testing.T) { @@ -84,7 +84,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { t.Run("MachinePool does not exist", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: "ns", @@ -112,7 +112,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { t.Run("Cluster does not exist", func(t *testing.T) { g := NewGomegaWithT(t) - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: "ns", @@ -156,12 +156,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: cluster.Namespace, @@ -208,12 +208,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: cluster.Namespace, @@ -231,8 +231,8 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { clusterctlv1.BlockMoveAnnotation: "true", }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ @@ -244,7 +244,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, }, }, - Status: infrav1exp.AzureASOManagedMachinePoolStatus{ + Status: infrav1alpha.AzureASOManagedMachinePoolStatus{ Ready: true, }, } @@ -262,11 +262,11 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Build() r := &AzureASOManagedMachinePoolReconciler{ Client: c, - newResourceReconciler: func(asoManagedMachinePool *infrav1exp.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(asoManagedMachinePool *infrav1alpha.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ owner: asoManagedMachinePool, reconcileFunc: func(ctx context.Context, o client.Object) error { - asoManagedMachinePool.SetResourceStatuses([]infrav1exp.ResourceStatus{ + 
asoManagedMachinePool.SetResourceStatuses([]infrav1alpha.ResourceStatus{ {Ready: true}, {Ready: false}, {Ready: true}, @@ -294,8 +294,8 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } @@ -323,7 +323,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Count: ptr.To(3), }, } - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: cluster.Namespace, @@ -341,8 +341,8 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { clusterctlv1.BlockMoveAnnotation: "true", }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, asoAgentPool), @@ -350,7 +350,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, }, }, - Status: infrav1exp.AzureASOManagedMachinePoolStatus{ + Status: infrav1alpha.AzureASOManagedMachinePoolStatus{ Ready: false, }, } @@ -371,7 +371,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Build() r := &AzureASOManagedMachinePoolReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ reconcileFunc: func(ctx context.Context, o client.Object) error { return nil @@ -436,8 +436,8 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } @@ -466,7 +466,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Count: ptr.To(3), }, } - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: cluster.Namespace, @@ -484,8 +484,8 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { clusterctlv1.BlockMoveAnnotation: "true", }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, asoAgentPool), @@ -493,7 +493,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, }, }, - Status: infrav1exp.AzureASOManagedMachinePoolStatus{ + Status: infrav1alpha.AzureASOManagedMachinePoolStatus{ Ready: false, }, } @@ -511,7 +511,7 @@ func TestAzureASOManagedMachinePoolReconcile(t 
*testing.T) { Build() r := &AzureASOManagedMachinePoolReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ reconcileFunc: func(ctx context.Context, o client.Object) error { return nil @@ -547,12 +547,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Spec: clusterv1.ClusterSpec{ Paused: true, ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: cluster.Namespace, @@ -582,7 +582,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Build() r := &AzureASOManagedMachinePoolReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ pauseFunc: func(_ context.Context, _ client.Object) error { return nil @@ -608,12 +608,12 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { }, Spec: clusterv1.ClusterSpec{ ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, }, }, } - asoManagedMachinePool := &infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "ammp", Namespace: cluster.Namespace, @@ -644,7 +644,7 @@ func TestAzureASOManagedMachinePoolReconcile(t *testing.T) { Build() r := &AzureASOManagedMachinePoolReconciler{ Client: c, - newResourceReconciler: func(_ *infrav1exp.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { + newResourceReconciler: func(_ *infrav1alpha.AzureASOManagedMachinePool, _ []*unstructured.Unstructured) resourceReconciler { return &fakeResourceReconciler{ deleteFunc: func(ctx context.Context, o client.Object) error { return nil diff --git a/exp/controllers/managedclusteradopt_controller.go b/controllers/managedclusteradopt_controller.go similarity index 83% rename from exp/controllers/managedclusteradopt_controller.go rename to controllers/managedclusteradopt_controller.go index 0b78f3e1fb2..7be79d9466a 100644 --- a/exp/controllers/managedclusteradopt_controller.go +++ b/controllers/managedclusteradopt_controller.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" @@ -34,7 +34,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" ) -const adoptAnnotation = 
"sigs.k8s.io/cluster-api-provider-azure-adopt" +const ( + adoptAnnotation = "sigs.k8s.io/cluster-api-provider-azure-adopt" + adoptAnnotationValue = "true" +) // ManagedClusterAdoptReconciler adopts ASO ManagedCluster resources into a CAPI Cluster. type ManagedClusterAdoptReconciler struct { @@ -77,13 +80,13 @@ func (r *ManagedClusterAdoptReconciler) Reconcile(ctx context.Context, req ctrl. return ctrl.Result{}, client.IgnoreNotFound(err) } - if managedCluster.GetAnnotations()[adoptAnnotation] != "true" { + if managedCluster.GetAnnotations()[adoptAnnotation] != adoptAnnotationValue { return ctrl.Result{}, nil } for _, owner := range managedCluster.GetOwnerReferences() { - if owner.APIVersion == infrav1exp.GroupVersion.Identifier() && - owner.Kind == infrav1exp.AzureASOManagedControlPlaneKind { + if owner.APIVersion == infrav1alpha.GroupVersion.Identifier() && + owner.Kind == infrav1alpha.AzureASOManagedControlPlaneKind { return ctrl.Result{}, nil } } @@ -97,13 +100,13 @@ func (r *ManagedClusterAdoptReconciler) Reconcile(ctx context.Context, req ctrl. }, Spec: clusterv1.ClusterSpec{ InfrastructureRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedClusterKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedClusterKind, Name: managedCluster.Name, }, ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: infrav1exp.GroupVersion.Identifier(), - Kind: infrav1exp.AzureASOManagedControlPlaneKind, + APIVersion: infrav1alpha.GroupVersion.Identifier(), + Kind: infrav1alpha.AzureASOManagedControlPlaneKind, Name: managedCluster.Name, }, }, @@ -137,13 +140,13 @@ func (r *ManagedClusterAdoptReconciler) Reconcile(ctx context.Context, req ctrl. Spec: resourceGroup.Spec, } - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{ + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: managedCluster.Namespace, Name: managedCluster.Name, }, - Spec: infrav1exp.AzureASOManagedClusterSpec{ - AzureASOManagedClusterTemplateResourceSpec: infrav1exp.AzureASOManagedClusterTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedClusterSpec{ + AzureASOManagedClusterTemplateResourceSpec: infrav1alpha.AzureASOManagedClusterTemplateResourceSpec{ Resources: []runtime.RawExtension{ {Object: resourceGroup}, }, @@ -174,13 +177,13 @@ func (r *ManagedClusterAdoptReconciler) Reconcile(ctx context.Context, req ctrl. 
Spec: managedCluster.Spec, } - asoManagedControlPlane := &infrav1exp.AzureASOManagedControlPlane{ + asoManagedControlPlane := &infrav1alpha.AzureASOManagedControlPlane{ ObjectMeta: metav1.ObjectMeta{ Namespace: cluster.Namespace, Name: managedCluster.Name, }, - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Resources: []runtime.RawExtension{ {Object: managedCluster}, }, diff --git a/exp/controllers/resource_reconciler.go b/controllers/resource_reconciler.go similarity index 92% rename from exp/controllers/resource_reconciler.go rename to controllers/resource_reconciler.go index 3788723e2c3..0591a53edf3 100644 --- a/exp/controllers/resource_reconciler.go +++ b/controllers/resource_reconciler.go @@ -29,8 +29,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/klog/v2" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" - "sigs.k8s.io/cluster-api-provider-azure/exp/mutators" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" + "sigs.k8s.io/cluster-api-provider-azure/pkg/mutators" "sigs.k8s.io/cluster-api-provider-azure/util/tele" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -53,8 +53,8 @@ type watcher interface { type resourceStatusObject interface { client.Object - GetResourceStatuses() []infrav1exp.ResourceStatus - SetResourceStatuses([]infrav1exp.ResourceStatus) + GetResourceStatuses() []infrav1alpha.ResourceStatus + SetResourceStatuses([]infrav1alpha.ResourceStatus) } // Reconcile creates or updates the specified resources. 
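A note on the adoption flow above: ManagedClusterAdoptReconciler only acts on an ASO ManagedCluster whose adopt annotation is set to "true" and which is not already owned by an AzureASOManagedControlPlane. A minimal sketch of opting a cluster in, assuming an existing ASO ManagedCluster named "my-cluster" in the "default" namespace (both names are hypothetical):

```bash
# Mark an existing ASO ManagedCluster for adoption into Cluster API.
# The annotation key and value mirror the adoptAnnotation and
# adoptAnnotationValue constants introduced above; the resource and
# namespace names are illustrative.
kubectl annotate managedclusters.containerservice.azure.com my-cluster \
  --namespace default \
  "sigs.k8s.io/cluster-api-provider-azure-adopt=true"
```

On the next reconcile, the controller then creates the Cluster, AzureASOManagedCluster, and AzureASOManagedControlPlane objects that wrap the existing ManagedCluster and ResourceGroup.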
@@ -63,7 +63,7 @@ func (r *ResourceReconciler) Reconcile(ctx context.Context) error { defer done() log.V(4).Info("reconciling resources") - var newResourceStatuses []infrav1exp.ResourceStatus + var newResourceStatuses []infrav1alpha.ResourceStatus for _, spec := range r.resources { gvk := spec.GroupVersionKind() @@ -89,8 +89,8 @@ func (r *ResourceReconciler) Reconcile(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to get ready status: %w", err) } - newResourceStatuses = append(newResourceStatuses, infrav1exp.ResourceStatus{ - Resource: infrav1exp.StatusResource{ + newResourceStatuses = append(newResourceStatuses, infrav1alpha.ResourceStatus{ + Resource: infrav1alpha.StatusResource{ Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind, @@ -163,7 +163,7 @@ func (r *ResourceReconciler) Delete(ctx context.Context) error { defer done() log.V(4).Info("deleting resources") - var newResourceStatuses []infrav1exp.ResourceStatus + var newResourceStatuses []infrav1alpha.ResourceStatus for _, spec := range r.owner.GetResourceStatuses() { newStatus, err := r.deleteResource(ctx, spec.Resource) @@ -180,7 +180,7 @@ func (r *ResourceReconciler) Delete(ctx context.Context) error { return nil } -func (r *ResourceReconciler) deleteResource(ctx context.Context, resource infrav1exp.StatusResource) (*infrav1exp.ResourceStatus, error) { +func (r *ResourceReconciler) deleteResource(ctx context.Context, resource infrav1alpha.StatusResource) (*infrav1alpha.ResourceStatus, error) { ctx, log, done := tele.StartSpanWithLogger(ctx, "controllers.ResourceReconciler.deleteResource") defer done() @@ -214,7 +214,7 @@ func (r *ResourceReconciler) deleteResource(ctx context.Context, resource infrav return nil, fmt.Errorf("failed to get ready status: %w", err) } - return &infrav1exp.ResourceStatus{ + return &infrav1alpha.ResourceStatus{ Resource: resource, Ready: ready, }, nil diff --git a/exp/controllers/resource_reconciler_test.go b/controllers/resource_reconciler_test.go similarity index 93% rename from exp/controllers/resource_reconciler_test.go rename to controllers/resource_reconciler_test.go index 41aabf4187f..ea4b4c528d2 100644 --- a/exp/controllers/resource_reconciler_test.go +++ b/controllers/resource_reconciler_test.go @@ -29,7 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -68,7 +68,7 @@ func TestResourceReconcilerReconcile(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( - infrav1exp.AddToScheme, + infrav1alpha.AddToScheme, asoresourcesv1.AddToScheme, ) NewGomegaWithT(t).Expect(sb.AddToScheme(s)).To(Succeed()) @@ -76,7 +76,7 @@ func TestResourceReconcilerReconcile(t *testing.T) { fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder(). WithScheme(s). 
- WithStatusSubresource(&infrav1exp.AzureASOManagedCluster{}) + WithStatusSubresource(&infrav1alpha.AzureASOManagedCluster{}) } t.Run("empty resources", func(t *testing.T) { @@ -84,7 +84,7 @@ func TestResourceReconcilerReconcile(t *testing.T) { r := &ResourceReconciler{ resources: []*unstructured.Unstructured{}, - owner: &infrav1exp.AzureASOManagedCluster{}, + owner: &infrav1alpha.AzureASOManagedCluster{}, } g.Expect(r.Reconcile(ctx)).To(Succeed()) @@ -97,7 +97,7 @@ func TestResourceReconcilerReconcile(t *testing.T) { c := fakeClientBuilder(). Build() - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{} + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{} unpatchedRGs := map[string]struct{}{ "rg1": {}, @@ -152,9 +152,9 @@ func TestResourceReconcilerReconcile(t *testing.T) { t.Run("delete stale resources", func(t *testing.T) { g := NewGomegaWithT(t) - owner := &infrav1exp.AzureASOManagedCluster{ - Status: infrav1exp.AzureASOManagedClusterStatus{ - Resources: []infrav1exp.ResourceStatus{ + owner := &infrav1alpha.AzureASOManagedCluster{ + Status: infrav1alpha.AzureASOManagedClusterStatus{ + Resources: []infrav1alpha.ResourceStatus{ rgStatus("rg0"), rgStatus("rg1"), rgStatus("rg2"), @@ -234,7 +234,7 @@ func TestResourceReconcilerPause(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( - infrav1exp.AddToScheme, + infrav1alpha.AddToScheme, asoresourcesv1.AddToScheme, ) NewGomegaWithT(t).Expect(sb.AddToScheme(s)).To(Succeed()) @@ -242,7 +242,7 @@ func TestResourceReconcilerPause(t *testing.T) { fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder(). WithScheme(s). - WithStatusSubresource(&infrav1exp.AzureASOManagedCluster{}) + WithStatusSubresource(&infrav1alpha.AzureASOManagedCluster{}) } t.Run("empty resources", func(t *testing.T) { @@ -250,7 +250,7 @@ func TestResourceReconcilerPause(t *testing.T) { r := &ResourceReconciler{ resources: []*unstructured.Unstructured{}, - owner: &infrav1exp.AzureASOManagedCluster{}, + owner: &infrav1alpha.AzureASOManagedCluster{}, } g.Expect(r.Pause(ctx)).To(Succeed()) @@ -262,7 +262,7 @@ func TestResourceReconcilerPause(t *testing.T) { c := fakeClientBuilder(). Build() - asoManagedCluster := &infrav1exp.AzureASOManagedCluster{} + asoManagedCluster := &infrav1alpha.AzureASOManagedCluster{} var patchedRGs []string r := &ResourceReconciler{ @@ -299,7 +299,7 @@ func TestResourceReconcilerDelete(t *testing.T) { s := runtime.NewScheme() sb := runtime.NewSchemeBuilder( - infrav1exp.AddToScheme, + infrav1alpha.AddToScheme, asoresourcesv1.AddToScheme, ) NewGomegaWithT(t).Expect(sb.AddToScheme(s)).To(Succeed()) @@ -307,7 +307,7 @@ func TestResourceReconcilerDelete(t *testing.T) { fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder(). WithScheme(s). 
- WithStatusSubresource(&infrav1exp.AzureASOManagedCluster{}) + WithStatusSubresource(&infrav1alpha.AzureASOManagedCluster{}) } t.Run("empty resources", func(t *testing.T) { @@ -315,7 +315,7 @@ func TestResourceReconcilerDelete(t *testing.T) { r := &ResourceReconciler{ resources: []*unstructured.Unstructured{}, - owner: &infrav1exp.AzureASOManagedCluster{}, + owner: &infrav1alpha.AzureASOManagedCluster{}, } g.Expect(r.Delete(ctx)).To(Succeed()) @@ -324,12 +324,12 @@ func TestResourceReconcilerDelete(t *testing.T) { t.Run("delete several resources", func(t *testing.T) { g := NewGomegaWithT(t) - owner := &infrav1exp.AzureASOManagedCluster{ + owner := &infrav1alpha.AzureASOManagedCluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: "ns", }, - Status: infrav1exp.AzureASOManagedClusterStatus{ - Resources: []infrav1exp.ResourceStatus{ + Status: infrav1alpha.AzureASOManagedClusterStatus{ + Resources: []infrav1alpha.ResourceStatus{ rgStatus("still-deleting"), rgStatus("already-gone"), }, @@ -581,9 +581,9 @@ func rgJSON(g Gomega, scheme *runtime.Scheme, rg *asoresourcesv1.ResourceGroup) return u } -func rgStatus(name string) infrav1exp.ResourceStatus { - return infrav1exp.ResourceStatus{ - Resource: infrav1exp.StatusResource{ +func rgStatus(name string) infrav1alpha.ResourceStatus { + return infrav1alpha.ResourceStatus{ + Resource: infrav1alpha.StatusResource{ Group: asoresourcesv1.GroupVersion.Group, Version: asoresourcesv1.GroupVersion.Version, Kind: "ResourceGroup", diff --git a/docs/book/install-and-build.sh b/docs/book/install-and-build.sh index a454fb76b6f..2a012f93c00 100755 --- a/docs/book/install-and-build.sh +++ b/docs/book/install-and-build.sh @@ -76,7 +76,7 @@ genCRDAPIReferenceDocs="${genCRDAPIReferenceDocsPath}/gen-crd-api-reference-docs ${genCRDAPIReferenceDocs} -config "${genCRDAPIReferenceDocsPath}/example-config.json" -template-dir "${genCRDAPIReferenceDocsPath}/template" -api-dir ./api/v1beta1 -out-file ./docs/book/src/reference/v1beta1-api-raw.html ${genCRDAPIReferenceDocs} -config "${genCRDAPIReferenceDocsPath}/example-config.json" -template-dir "${genCRDAPIReferenceDocsPath}/template" -api-dir ./exp/api/v1beta1 -out-file ./docs/book/src/reference/v1beta1-exp-api-raw.html -${genCRDAPIReferenceDocs} -config "${genCRDAPIReferenceDocsPath}/example-config.json" -template-dir "${genCRDAPIReferenceDocsPath}/template" -api-dir ./exp/api/v1alpha1 -out-file ./docs/book/src/reference/v1alpha1-exp-api-raw.html +${genCRDAPIReferenceDocs} -config "${genCRDAPIReferenceDocsPath}/example-config.json" -template-dir "${genCRDAPIReferenceDocsPath}/template" -api-dir ./api/v1alpha1 -out-file ./docs/book/src/reference/v1alpha1-api-raw.html # Finally build the book. 
(cd docs/book && /tmp/mdbook build) diff --git a/docs/book/src/SUMMARY.md b/docs/book/src/SUMMARY.md index 40045ebfa49..9857c0f30ac 100644 --- a/docs/book/src/SUMMARY.md +++ b/docs/book/src/SUMMARY.md @@ -47,4 +47,4 @@ - [Reference](./reference/reference.md) - [v1beta1 API](./reference/v1beta1-api.md) - [v1beta1 exp API](./reference/v1beta1-exp-api.md) - - [v1alpha1 exp API](./reference/v1alpha1-exp-api.md) + - [v1alpha1 API](./reference/v1alpha1-api.md) diff --git a/docs/book/src/reference/v1alpha1-api.md b/docs/book/src/reference/v1alpha1-api.md new file mode 100644 index 00000000000..919d55f4f02 --- /dev/null +++ b/docs/book/src/reference/v1alpha1-api.md @@ -0,0 +1 @@ +{{ #include v1alpha1-api-raw.html }} diff --git a/docs/book/src/reference/v1alpha1-exp-api.md b/docs/book/src/reference/v1alpha1-exp-api.md deleted file mode 100644 index 50f46f1d2b1..00000000000 --- a/docs/book/src/reference/v1alpha1-exp-api.md +++ /dev/null @@ -1 +0,0 @@ -{{ #include v1alpha1-exp-api-raw.html }} diff --git a/docs/book/src/topics/aso.md b/docs/book/src/topics/aso.md index 20bc151dac4..0607562d56f 100644 --- a/docs/book/src/topics/aso.md +++ b/docs/book/src/topics/aso.md @@ -80,7 +80,7 @@ More details about how ASO manages CRDs can be found [here](https://azure.github **Note:** To install the resource for the newly installed CRDs, make sure that the ASO operator has the authentication to install the resources. Refer [authentication in ASO](https://azure.github.io/azure-service-operator/guide/authentication/) for more details. An example configuration file and demo for `Azure Cache for Redis` can be found [here](https://github.com/Azure-Samples/azure-service-operator-samples/tree/master/azure-votes-redis). -## Experimental ASO API +## ASO-based API New in CAPZ v1.15.0 is a new flavor of APIs that addresses the following limitations of the existing CAPZ APIs for advanced use cases: @@ -93,7 +93,7 @@ the existing CAPZ APIs for advanced use cases: - For each Azure API version known by CAPZ, only a subset of fields defined in that version by the Azure API spec are exposed by the CAPZ API. -This new experimental API defines new AzureASOManagedCluster, AzureASOManagedControlPlane, and +This new API defines new AzureASOManagedCluster, AzureASOManagedControlPlane, and AzureASOManagedMachinePool resources. An AzureASOManagedCluster might look like this: ```yaml @@ -129,5 +129,5 @@ The overall theme of this API is to leverage ASO as much as possible for represe Kubernetes API, thereby making CAPZ the thinnest possible translation layer between ASO and Cluster API. This experiment will help inform CAPZ whether this pattern may be a candidate for a potential v2 API. This -functionality is available behind the `ASOAPI` feature flag (set by the `EXP_ASO_API` environment variable). +functionality is enabled by default and can be disabled with the `ASOAPI` feature flag (set by the `EXP_ASO_API` environment variable). Please try it out and offer any feedback! 
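Because the gate now defaults to enabled (see the feature.go change that follows), opting out must be explicit. A minimal sketch of disabling it, assuming the usual flow where the `EXP_ASO_API` environment variable feeds the `ASOAPI` feature gate at provider installation time (the clusterctl invocation is illustrative):

```bash
# Disable the ASO-based API flavor; EXP_ASO_API maps to the ASOAPI feature
# gate as described in the docs above. The init command is an example only.
export EXP_ASO_API=false
clusterctl init --infrastructure azure
```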
diff --git a/feature/feature.go b/feature/feature.go index a3e78668704..b4af07d23fd 100644 --- a/feature/feature.go +++ b/feature/feature.go @@ -65,5 +65,5 @@ var defaultCAPZFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ AKS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // Remove in 1.12 AKSResourceHealth: {Default: false, PreRelease: featuregate.Alpha}, EdgeZone: {Default: false, PreRelease: featuregate.Alpha}, - ASOAPI: {Default: false, PreRelease: featuregate.Alpha}, + ASOAPI: {Default: true, PreRelease: featuregate.Alpha}, } diff --git a/main.go b/main.go index 70ff1fb14a8..0082b4c4d97 100644 --- a/main.go +++ b/main.go @@ -41,9 +41,9 @@ import ( "k8s.io/client-go/tools/leaderelection/resourcelock" cgrecord "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/controllers" - infrav1expalpha "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" infrav1controllersexp "sigs.k8s.io/cluster-api-provider-azure/exp/controllers" "sigs.k8s.io/cluster-api-provider-azure/feature" @@ -75,7 +75,7 @@ func init() { _ = clientgoscheme.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = infrav1exp.AddToScheme(scheme) - _ = infrav1expalpha.AddToScheme(scheme) + _ = infrav1alpha.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) _ = expv1.AddToScheme(scheme) _ = kubeadmv1.AddToScheme(scheme) @@ -511,7 +511,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { } if feature.Gates.Enabled(feature.ASOAPI) { - if err := (&infrav1controllersexp.AzureASOManagedClusterReconciler{ + if err := (&controllers.AzureASOManagedClusterReconciler{ Client: mgr.GetClient(), WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr); err != nil { @@ -519,7 +519,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { os.Exit(1) } - if err := (&infrav1controllersexp.AzureASOManagedControlPlaneReconciler{ + if err := (&controllers.AzureASOManagedControlPlaneReconciler{ Client: mgr.GetClient(), WatchFilterValue: watchFilterValue, }).SetupWithManager(ctx, mgr); err != nil { @@ -551,7 +551,7 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { os.Exit(1) } - if err := (&infrav1controllersexp.AzureASOManagedMachinePoolReconciler{ + if err := (&controllers.AzureASOManagedMachinePoolReconciler{ Client: mgr.GetClient(), WatchFilterValue: watchFilterValue, Tracker: tracker, @@ -560,14 +560,14 @@ func registerControllers(ctx context.Context, mgr manager.Manager) { os.Exit(1) } - if err := (&infrav1controllersexp.ManagedClusterAdoptReconciler{ + if err := (&controllers.ManagedClusterAdoptReconciler{ Client: mgr.GetClient(), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ManagedCluster") os.Exit(1) } - if err := (&infrav1controllersexp.AgentPoolAdoptReconciler{ + if err := (&controllers.AgentPoolAdoptReconciler{ Client: mgr.GetClient(), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "AgentPool") @@ -644,17 +644,17 @@ func registerWebhooks(mgr manager.Manager) { os.Exit(1) } - if err := infrav1expalpha.SetupAzureASOManagedClusterWebhookWithManager(mgr); err != nil { + if err := infrav1alpha.SetupAzureASOManagedClusterWebhookWithManager(mgr); err != nil { setupLog.Error(err, 
"unable to create webhook", "webhook", "AzureASOManagedCluster") os.Exit(1) } - if err := infrav1expalpha.SetupAzureASOManagedControlPlaneWebhookWithManager(mgr); err != nil { + if err := infrav1alpha.SetupAzureASOManagedControlPlaneWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureASOManagedControlPlane") os.Exit(1) } - if err := infrav1expalpha.SetupAzureASOManagedMachinePoolWebhookWithManager(mgr); err != nil { + if err := infrav1alpha.SetupAzureASOManagedMachinePoolWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "AzureASOManagedMachinePool") os.Exit(1) } diff --git a/exp/mutators/azureasomanagedcontrolplane.go b/pkg/mutators/azureasomanagedcontrolplane.go similarity index 97% rename from exp/mutators/azureasomanagedcontrolplane.go rename to pkg/mutators/azureasomanagedcontrolplane.go index a535aedbc0d..400fb65987e 100644 --- a/exp/mutators/azureasomanagedcontrolplane.go +++ b/pkg/mutators/azureasomanagedcontrolplane.go @@ -25,8 +25,8 @@ import ( asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" asocontainerservicev1hub "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001/storage" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-azure/azure" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" exputil "sigs.k8s.io/cluster-api/exp/util" @@ -45,7 +45,7 @@ var ( ) // SetManagedClusterDefaults propagates values defined by Cluster API to an ASO ManagedCluster. 
-func SetManagedClusterDefaults(ctrlClient client.Client, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) ResourcesMutator { +func SetManagedClusterDefaults(ctrlClient client.Client, asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane, cluster *clusterv1.Cluster) ResourcesMutator { return func(ctx context.Context, us []*unstructured.Unstructured) error { ctx, _, done := tele.StartSpanWithLogger(ctx, "mutators.SetManagedClusterDefaults") defer done() @@ -88,7 +88,7 @@ func SetManagedClusterDefaults(ctrlClient client.Client, asoManagedControlPlane } } -func setManagedClusterKubernetesVersion(ctx context.Context, asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane, managedClusterPath string, managedCluster *unstructured.Unstructured) error { +func setManagedClusterKubernetesVersion(ctx context.Context, asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane, managedClusterPath string, managedCluster *unstructured.Unstructured) error { _, log, done := tele.StartSpanWithLogger(ctx, "mutators.setManagedClusterKubernetesVersion") defer done() @@ -246,7 +246,7 @@ func agentPoolsFromManagedMachinePools(ctx context.Context, ctrlClient client.Cl ctx, log, done := tele.StartSpanWithLogger(ctx, "mutators.agentPoolsFromManagedMachinePools") defer done() - asoManagedMachinePools := &infrav1exp.AzureASOManagedMachinePoolList{} + asoManagedMachinePools := &infrav1alpha.AzureASOManagedMachinePoolList{} err := ctrlClient.List(ctx, asoManagedMachinePools, client.InNamespace(namespace), client.MatchingLabels{ diff --git a/exp/mutators/azureasomanagedcontrolplane_test.go b/pkg/mutators/azureasomanagedcontrolplane_test.go similarity index 90% rename from exp/mutators/azureasomanagedcontrolplane_test.go rename to pkg/mutators/azureasomanagedcontrolplane_test.go index a72fba1c82b..ad4e414dffb 100644 --- a/exp/mutators/azureasomanagedcontrolplane_test.go +++ b/pkg/mutators/azureasomanagedcontrolplane_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util/secret" @@ -44,16 +44,16 @@ func TestSetManagedClusterDefaults(t *testing.T) { tests := []struct { name string - asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane + asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane cluster *clusterv1.Cluster expected []*unstructured.Unstructured expectedErr error }{ { name: "no ManagedCluster", - asoManagedControlPlane: &infrav1exp.AzureASOManagedControlPlane{ - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + asoManagedControlPlane: &infrav1alpha.AzureASOManagedControlPlane{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Resources: []runtime.RawExtension{}, }, }, @@ -62,9 +62,9 @@ func TestSetManagedClusterDefaults(t *testing.T) { }, { name: "success", - asoManagedControlPlane: &infrav1exp.AzureASOManagedControlPlane{ - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: 
infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + asoManagedControlPlane: &infrav1alpha.AzureASOManagedControlPlane{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Version: "vCAPI k8s version", Resources: []runtime.RawExtension{ { @@ -117,7 +117,7 @@ func TestSetManagedClusterDefaults(t *testing.T) { s := runtime.NewScheme() g.Expect(asocontainerservicev1.AddToScheme(s)).To(Succeed()) - g.Expect(infrav1exp.AddToScheme(s)).To(Succeed()) + g.Expect(infrav1alpha.AddToScheme(s)).To(Succeed()) c := fakeclient.NewClientBuilder(). WithScheme(s). Build() @@ -139,14 +139,14 @@ func TestSetManagedClusterKubernetesVersion(t *testing.T) { tests := []struct { name string - asoManagedControlPlane *infrav1exp.AzureASOManagedControlPlane + asoManagedControlPlane *infrav1alpha.AzureASOManagedControlPlane managedCluster *asocontainerservicev1.ManagedCluster expected *asocontainerservicev1.ManagedCluster expectedErr error }{ { name: "no CAPI opinion", - asoManagedControlPlane: &infrav1exp.AzureASOManagedControlPlane{}, + asoManagedControlPlane: &infrav1alpha.AzureASOManagedControlPlane{}, managedCluster: &asocontainerservicev1.ManagedCluster{ Spec: asocontainerservicev1.ManagedCluster_Spec{ KubernetesVersion: ptr.To("user k8s version"), @@ -160,9 +160,9 @@ func TestSetManagedClusterKubernetesVersion(t *testing.T) { }, { name: "set from CAPI opinion", - asoManagedControlPlane: &infrav1exp.AzureASOManagedControlPlane{ - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + asoManagedControlPlane: &infrav1alpha.AzureASOManagedControlPlane{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Version: "vCAPI k8s version", }, }, @@ -176,9 +176,9 @@ func TestSetManagedClusterKubernetesVersion(t *testing.T) { }, { name: "user value matching CAPI ok", - asoManagedControlPlane: &infrav1exp.AzureASOManagedControlPlane{ - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + asoManagedControlPlane: &infrav1alpha.AzureASOManagedControlPlane{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Version: "vCAPI k8s version", }, }, @@ -196,9 +196,9 @@ func TestSetManagedClusterKubernetesVersion(t *testing.T) { }, { name: "incompatible", - asoManagedControlPlane: &infrav1exp.AzureASOManagedControlPlane{ - Spec: infrav1exp.AzureASOManagedControlPlaneSpec{ - AzureASOManagedControlPlaneTemplateResourceSpec: infrav1exp.AzureASOManagedControlPlaneTemplateResourceSpec{ + asoManagedControlPlane: &infrav1alpha.AzureASOManagedControlPlane{ + Spec: infrav1alpha.AzureASOManagedControlPlaneSpec{ + AzureASOManagedControlPlaneTemplateResourceSpec: infrav1alpha.AzureASOManagedControlPlaneTemplateResourceSpec{ Version: "vCAPI k8s version", }, }, @@ -507,7 +507,7 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { ctx := context.Background() s := runtime.NewScheme() g.Expect(asocontainerservicev1.AddToScheme(s)).To(Succeed()) - g.Expect(infrav1exp.AddToScheme(s)).To(Succeed()) + g.Expect(infrav1alpha.AddToScheme(s)).To(Succeed()) 
g.Expect(expv1.AddToScheme(s)).To(Succeed()) fakeClientBuilder := func() *fakeclient.ClientBuilder { return fakeclient.NewClientBuilder().WithScheme(s) @@ -564,8 +564,8 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { managedCluster := &asocontainerservicev1.ManagedCluster{} umc := mcUnstructured(g, managedCluster) - asoManagedMachinePools := &infrav1exp.AzureASOManagedMachinePoolList{ - Items: []infrav1exp.AzureASOManagedMachinePool{ + asoManagedMachinePools := &infrav1alpha.AzureASOManagedMachinePoolList{ + Items: []infrav1alpha.AzureASOManagedMachinePool{ { ObjectMeta: metav1.ObjectMeta{ Name: "wrong-label", @@ -581,8 +581,8 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { }, }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ @@ -610,8 +610,8 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { }, }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ @@ -639,8 +639,8 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { }, }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ @@ -668,8 +668,8 @@ func TestSetManagedClusterAgentPoolProfiles(t *testing.T) { }, }, }, - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{ diff --git a/exp/mutators/azureasomanagedmachinepool.go b/pkg/mutators/azureasomanagedmachinepool.go similarity index 96% rename from exp/mutators/azureasomanagedmachinepool.go rename to pkg/mutators/azureasomanagedmachinepool.go index 8a1b70129be..9b1b670acd2 100644 --- a/exp/mutators/azureasomanagedmachinepool.go +++ b/pkg/mutators/azureasomanagedmachinepool.go @@ -23,7 +23,7 @@ import ( asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" "sigs.k8s.io/cluster-api-provider-azure/util/tele" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" @@ -114,11 +114,11 @@ func 
reconcileAutoscaling(agentPool *unstructured.Unstructured, machinePool *exp if machinePool.Annotations == nil { machinePool.Annotations = make(map[string]string) } - machinePool.Annotations[clusterv1.ReplicasManagedByAnnotation] = infrav1exp.ReplicasManagedByAKS - } else if replicaManager != infrav1exp.ReplicasManagedByAKS { + machinePool.Annotations[clusterv1.ReplicasManagedByAnnotation] = infrav1alpha.ReplicasManagedByAKS + } else if replicaManager != infrav1alpha.ReplicasManagedByAKS { return fmt.Errorf("failed to enable autoscaling, replicas are already being managed by %s according to MachinePool %s's %s annotation", replicaManager, machinePool.Name, clusterv1.ReplicasManagedByAnnotation) } - } else if !autoscaling && replicaManager == infrav1exp.ReplicasManagedByAKS { + } else if !autoscaling && replicaManager == infrav1alpha.ReplicasManagedByAKS { // Removing this annotation informs the MachinePool controller that this MachinePool is no longer // being autoscaled. delete(machinePool.Annotations, clusterv1.ReplicasManagedByAnnotation) diff --git a/exp/mutators/azureasomanagedmachinepool_test.go b/pkg/mutators/azureasomanagedmachinepool_test.go similarity index 91% rename from exp/mutators/azureasomanagedmachinepool_test.go rename to pkg/mutators/azureasomanagedmachinepool_test.go index b3d42101006..2749e190092 100644 --- a/exp/mutators/azureasomanagedmachinepool_test.go +++ b/pkg/mutators/azureasomanagedmachinepool_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/ptr" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -41,16 +41,16 @@ func TestSetAgentPoolDefaults(t *testing.T) { tests := []struct { name string - asoManagedMachinePool *infrav1exp.AzureASOManagedMachinePool + asoManagedMachinePool *infrav1alpha.AzureASOManagedMachinePool machinePool *expv1.MachinePool expected []*unstructured.Unstructured expectedErr error }{ { name: "no ManagedClustersAgentPool", - asoManagedMachinePool: &infrav1exp.AzureASOManagedMachinePool{ - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + asoManagedMachinePool: &infrav1alpha.AzureASOManagedMachinePool{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{}, }, }, @@ -59,9 +59,9 @@ func TestSetAgentPoolDefaults(t *testing.T) { }, { name: "success", - asoManagedMachinePool: &infrav1exp.AzureASOManagedMachinePool{ - Spec: infrav1exp.AzureASOManagedMachinePoolSpec{ - AzureASOManagedMachinePoolTemplateResourceSpec: infrav1exp.AzureASOManagedMachinePoolTemplateResourceSpec{ + asoManagedMachinePool: &infrav1alpha.AzureASOManagedMachinePool{ + Spec: infrav1alpha.AzureASOManagedMachinePoolSpec{ + AzureASOManagedMachinePoolTemplateResourceSpec: infrav1alpha.AzureASOManagedMachinePoolTemplateResourceSpec{ Resources: []runtime.RawExtension{ { Raw: apJSON(g, &asocontainerservicev1.ManagedClustersAgentPool{}), @@ -250,7 +250,7 @@ func TestReconcileAutoscaling(t *testing.T) { machinePool: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - 
clusterv1.ReplicasManagedByAnnotation: infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: infrav1alpha.ReplicasManagedByAKS, }, }, }, @@ -266,14 +266,14 @@ func TestReconcileAutoscaling(t *testing.T) { machinePool: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1alpha.ReplicasManagedByAKS, }, }, }, expected: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1alpha.ReplicasManagedByAKS, }, }, }, @@ -289,7 +289,7 @@ func TestReconcileAutoscaling(t *testing.T) { expected: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: infrav1alpha.ReplicasManagedByAKS, }, }, }, @@ -300,14 +300,14 @@ func TestReconcileAutoscaling(t *testing.T) { machinePool: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: infrav1alpha.ReplicasManagedByAKS, }, }, }, expected: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: infrav1alpha.ReplicasManagedByAKS, }, }, }, @@ -319,7 +319,7 @@ func TestReconcileAutoscaling(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "mp", Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: "not-" + infrav1alpha.ReplicasManagedByAKS, }, }, }, @@ -383,7 +383,7 @@ func TestSetAgentPoolCount(t *testing.T) { machinePool: &expv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - clusterv1.ReplicasManagedByAnnotation: infrav1exp.ReplicasManagedByAKS, + clusterv1.ReplicasManagedByAnnotation: infrav1alpha.ReplicasManagedByAKS, }, }, Spec: expv1.MachinePoolSpec{ diff --git a/exp/mutators/mutator.go b/pkg/mutators/mutator.go similarity index 100% rename from exp/mutators/mutator.go rename to pkg/mutators/mutator.go diff --git a/exp/mutators/mutator_test.go b/pkg/mutators/mutator_test.go similarity index 100% rename from exp/mutators/mutator_test.go rename to pkg/mutators/mutator_test.go diff --git a/test/e2e/aks.go b/test/e2e/aks.go index 4b6885aa2c9..3b76e1c3a53 100644 --- a/test/e2e/aks.go +++ b/test/e2e/aks.go @@ -28,8 +28,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" @@ -166,7 +166,7 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC var capzMPs []client.Object ammpList := &infrav1.AzureManagedMachinePoolList{} - asommpList := &infrav1exp.AzureASOManagedMachinePoolList{} + asommpList := &infrav1alpha.AzureASOManagedMachinePoolList{} 
if err := input.Lister.List(ctx, ammpList, opt1, opt2, opt3); err != nil { LogWarningf("Failed to list AzureManagedMachinePools: %+v", err) diff --git a/test/e2e/aks_machinepools.go b/test/e2e/aks_machinepools.go index ac0810e81d0..81313f13556 100644 --- a/test/e2e/aks_machinepools.go +++ b/test/e2e/aks_machinepools.go @@ -28,9 +28,9 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" - "sigs.k8s.io/cluster-api-provider-azure/exp/mutators" + "sigs.k8s.io/cluster-api-provider-azure/pkg/mutators" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" @@ -87,8 +87,8 @@ func AKSMachinePoolSpec(ctx context.Context, inputGetter func() AKSMachinePoolSp if ammp.Spec.Mode != string(infrav1.NodePoolModeSystem) { isUserPool = true } - case infrav1exp.AzureASOManagedMachinePoolKind: - ammp := &infrav1exp.AzureASOManagedMachinePool{} + case infrav1alpha.AzureASOManagedMachinePoolKind: + ammp := &infrav1alpha.AzureASOManagedMachinePool{} err := bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{ Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace, Name: mp.Spec.Template.Spec.InfrastructureRef.Name, diff --git a/test/e2e/azure_clusterproxy.go b/test/e2e/azure_clusterproxy.go index 90e12364e1d..c3c138e84ec 100644 --- a/test/e2e/azure_clusterproxy.go +++ b/test/e2e/azure_clusterproxy.go @@ -46,8 +46,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/kubectl/pkg/describe" "k8s.io/utils/ptr" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" - infrav1expalpha "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" @@ -75,7 +75,7 @@ func initScheme() *runtime.Scheme { framework.TryAddDefaultSchemes(scheme) Expect(infrav1.AddToScheme(scheme)).To(Succeed()) Expect(infrav1exp.AddToScheme(scheme)).To(Succeed()) - Expect(infrav1expalpha.AddToScheme(scheme)).To(Succeed()) + Expect(infrav1alpha.AddToScheme(scheme)).To(Succeed()) Expect(expv1.AddToScheme(scheme)).To(Succeed()) Expect(asoresourcesv1.AddToScheme(scheme)).To(Succeed()) Expect(asocontainerservicev1.AddToScheme(scheme)).To(Succeed()) diff --git a/test/e2e/azure_logcollector.go b/test/e2e/azure_logcollector.go index 0f7fdc69198..e0909b7f05e 100644 --- a/test/e2e/azure_logcollector.go +++ b/test/e2e/azure_logcollector.go @@ -32,9 +32,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1" infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1" "sigs.k8s.io/cluster-api-provider-azure/azure" - infrav1expalpha "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha1" infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1" azureutil "sigs.k8s.io/cluster-api-provider-azure/util/azure" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -210,13 +210,13 @@ func getAzureManagedControlPlane(ctx context.Context, managementClusterClient cl return azManagedControlPlane, err } -func 
getAzureASOManagedCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1expalpha.AzureASOManagedCluster, error) { +func getAzureASOManagedCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1alpha.AzureASOManagedCluster, error) { key := client.ObjectKey{ Namespace: namespace, Name: name, } - azManagedCluster := &infrav1expalpha.AzureASOManagedCluster{} + azManagedCluster := &infrav1alpha.AzureASOManagedCluster{} err := managementClusterClient.Get(ctx, key, azManagedCluster) return azManagedCluster, err } @@ -254,13 +254,13 @@ func getAzureManagedMachinePool(ctx context.Context, managementClusterClient cli return azManagedMachinePool, err } -func getAzureASOManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1expalpha.AzureASOManagedMachinePool, error) { +func getAzureASOManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1alpha.AzureASOManagedMachinePool, error) { key := client.ObjectKey{ Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace, Name: mp.Spec.Template.Spec.InfrastructureRef.Name, } - azManagedMachinePool := &infrav1expalpha.AzureASOManagedMachinePool{} + azManagedMachinePool := &infrav1alpha.AzureASOManagedMachinePool{} err := managementClusterClient.Get(ctx, key, azManagedMachinePool) return azManagedMachinePool, err } From caff3a814a58628278967e20d8822517d51122bd Mon Sep 17 00:00:00 2001 From: James Sturtevant Date: Thu, 11 Jul 2024 14:07:17 -0700 Subject: [PATCH 7/7] Use the env AZURE_STORAGE_AUTH_MODE when using federated auth Signed-off-by: James Sturtevant --- hack/ensure-azcli.sh | 6 +++--- scripts/ci-build-azure-ccm.sh | 17 +++++++++-------- scripts/ci-build-kubernetes.sh | 14 +++++++------- scripts/kind-with-registry.sh | 6 +++--- 4 files changed, 22 insertions(+), 21 deletions(-) diff --git a/hack/ensure-azcli.sh b/hack/ensure-azcli.sh index c3abb3a3941..c6180814107 100755 --- a/hack/ensure-azcli.sh +++ b/hack/ensure-azcli.sh @@ -27,13 +27,13 @@ if [[ -z "$(command -v az)" ]]; then apt-get update && apt-get install -y azure-cli if [[ -n "${AZURE_FEDERATED_TOKEN_FILE:-}" ]]; then + echo "Logging in with federated token" # AZURE_CLIENT_ID has been overloaded with Azure Workload ID in the preset-azure-cred-wi. # This is done to avoid exporting Azure Workload ID as AZURE_CLIENT_ID in the test scenarios. az login --service-principal -u "${AZURE_CLIENT_ID}" -t "${AZURE_TENANT_ID}" --federated-token "$(cat "${AZURE_FEDERATED_TOKEN_FILE}")" > /dev/null - # Use --auth-mode "login" in az storage commands. - ENABLE_AUTH_MODE_LOGIN="true" - export ENABLE_AUTH_MODE_LOGIN + # Use --auth-mode "login" in az storage commands to use RBAC permissions of login identity. 
This is a well-known environment variable recognized by the Azure CLI, so individual az storage commands no longer need an explicit --auth-mode flag.
+ export AZURE_STORAGE_AUTH_MODE="login"
 else
 az login --service-principal -u "${AZURE_CLIENT_ID}" -p "${AZURE_CLIENT_SECRET}" --tenant "${AZURE_TENANT_ID}" > /dev/null
 fi
diff --git a/scripts/ci-build-azure-ccm.sh b/scripts/ci-build-azure-ccm.sh
index 67f64a2ec07..c42e947bb97 100755
--- a/scripts/ci-build-azure-ccm.sh
+++ b/scripts/ci-build-azure-ccm.sh
@@ -68,16 +68,17 @@ main() {
 echo "Building and pushing Linux and Windows amd64 Azure ACR credential provider"
 make -C "${AZURE_CLOUD_PROVIDER_ROOT}" bin/azure-acr-credential-provider bin/azure-acr-credential-provider.exe
- if [[ "$(az storage container exists ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then
+ if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then
 echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container"
- az storage container create ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null
- az storage container set-permission ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
+ az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null
+ # If the storage account has public access disabled at the account level, this will return a 404.
+ az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null
 fi
- az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider"
- az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider.exe" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe"
- az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml"
- az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config-win.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config-win.yaml"
+ az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider"
+ az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/bin/azure-acr-credential-provider.exe" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/azure-acr-credential-provider.exe"
+ az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config.yaml"
+ az storage blob upload --overwrite --container-name 
"${AZURE_BLOB_CONTAINER_NAME}" --file "${AZURE_CLOUD_PROVIDER_ROOT}/examples/out-of-tree/credential-provider-config-win.yaml" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/credential-provider-config-win.yaml" fi } @@ -101,7 +102,7 @@ can_reuse_artifacts() { fi for BINARY in azure-acr-credential-provider azure-acr-credential-provider.exe credential-provider-config.yaml credential-provider-config-win.yaml; do - if [[ "$(az storage blob exists ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${IMAGE_TAG_ACR_CREDENTIAL_PROVIDER}/${BINARY}" --query exists --output tsv)" == "false" ]]; then echo "false" && return fi done diff --git a/scripts/ci-build-kubernetes.sh b/scripts/ci-build-kubernetes.sh index 8e8ebd10804..3abe5349c11 100755 --- a/scripts/ci-build-kubernetes.sh +++ b/scripts/ci-build-kubernetes.sh @@ -79,10 +79,10 @@ setup() { } main() { - if [[ "$(az storage container exists ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage container exists --name "${AZURE_BLOB_CONTAINER_NAME}" --query exists --output tsv)" == "false" ]]; then echo "Creating ${AZURE_BLOB_CONTAINER_NAME} storage container" - az storage container ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null - az storage container ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null + az storage container create --name "${AZURE_BLOB_CONTAINER_NAME}" > /dev/null + az storage container set-permission --name "${AZURE_BLOB_CONTAINER_NAME}" --public-access container > /dev/null fi if [[ "${KUBE_BUILD_CONFORMANCE:-}" =~ [yY] ]]; then @@ -115,7 +115,7 @@ main() { for BINARY in "${BINARIES[@]}"; do BIN_PATH="${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" echo "uploading ${BIN_PATH}" - az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${BIN_PATH}" + az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/${BINARY}" --name "${BIN_PATH}" done if [[ "${TEST_WINDOWS:-}" == "true" ]]; then @@ -128,7 +128,7 @@ main() { for BINARY in "${WINDOWS_BINARIES[@]}"; do BIN_PATH="${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" echo "uploading ${BIN_PATH}" - az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${BIN_PATH}" + az storage blob upload --overwrite --container-name "${AZURE_BLOB_CONTAINER_NAME}" --file "${KUBE_ROOT}/_output/dockerized/bin/windows/amd64/${BINARY}.exe" --name "${BIN_PATH}" done fi fi @@ -143,14 +143,14 @@ can_reuse_artifacts() { done for BINARY in "${BINARIES[@]}"; do - if [[ "$(az storage blob exists ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage blob exists --container-name 
"${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/linux/amd64/${BINARY}" --query exists --output tsv)" == "false" ]]; then echo "false" && return fi done if [[ "${TEST_WINDOWS:-}" == "true" ]]; then for BINARY in "${WINDOWS_BINARIES[@]}"; do - if [[ "$(az storage blob exists ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists --output tsv)" == "false" ]]; then + if [[ "$(az storage blob exists --container-name "${AZURE_BLOB_CONTAINER_NAME}" --name "${KUBE_GIT_VERSION}/bin/windows/amd64/${BINARY}.exe" --query exists --output tsv)" == "false" ]]; then echo "false" && return fi done diff --git a/scripts/kind-with-registry.sh b/scripts/kind-with-registry.sh index 8a32aebbc7e..3c2830842cd 100755 --- a/scripts/kind-with-registry.sh +++ b/scripts/kind-with-registry.sh @@ -106,12 +106,12 @@ function checkAZWIENVPreReqsAndCreateFiles() { if ! az storage account show --name "${AZWI_STORAGE_ACCOUNT}" --resource-group "${AZWI_RESOURCE_GROUP}" > /dev/null 2>&1; then echo "Creating storage account '${AZWI_STORAGE_ACCOUNT}' in '${AZWI_RESOURCE_GROUP}'" az storage account create --resource-group "${AZWI_RESOURCE_GROUP}" --name "${AZWI_STORAGE_ACCOUNT}" --output none --only-show-errors --tags creationTimestamp="${TIMESTAMP}" jobName="${JOB_NAME}" buildProvenance="${BUILD_PROVENANCE}" - az storage blob service-properties ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} update --account-name "${AZWI_STORAGE_ACCOUNT}" --static-website + az storage blob service-properties update --account-name "${AZWI_STORAGE_ACCOUNT}" --static-website fi if ! az storage container show --name "${AZWI_STORAGE_CONTAINER}" --account-name "${AZWI_STORAGE_ACCOUNT}" > /dev/null 2>&1; then echo "Creating storage container '${AZWI_STORAGE_CONTAINER}' in '${AZWI_STORAGE_ACCOUNT}'" - az storage container ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} create --name "${AZWI_STORAGE_CONTAINER}" --account-name "${AZWI_STORAGE_ACCOUNT}" --output none --only-show-errors + az storage container create --name "${AZWI_STORAGE_CONTAINER}" --account-name "${AZWI_STORAGE_ACCOUNT}" --output none --only-show-errors fi SERVICE_ACCOUNT_ISSUER=$(az storage account show --name "${AZWI_STORAGE_ACCOUNT}" -o json | jq -r .primaryEndpoints.web) @@ -180,7 +180,7 @@ function upload_to_blob() { local blob_name=$2 echo "Uploading ${file_path} to '${AZWI_STORAGE_ACCOUNT}' storage account" - az storage blob upload ${ENABLE_AUTH_MODE_LOGIN:+"--auth-mode login"} \ + az storage blob upload \ --container-name "${AZWI_STORAGE_CONTAINER}" \ --file "${file_path}" \ --name "${blob_name}" \