Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add v1beta2 and clusterclass test for e2e #124

Merged
merged 1 commit into from
Jun 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,7 @@
bin/
out/
_releasenotes
_artifacts/
_artifacts/

# E2E test templates
test/e2e/data/infrastructure-docker/cluster-template*.yaml
12 changes: 11 additions & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -218,15 +218,25 @@ all-controlplane: manager-controlplane
test-controlplane: envtest generate-controlplane generate-controlplane-conversions lint manifests-controlplane
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(TOOLS_BIN_DIR) -p path)" go test $(shell pwd)/controlplane/... -coverprofile cover.out

DOCKER_TEMPLATES := test/e2e/data/infrastructure-docker

.PHONY: docker-build-e2e
docker-build-e2e: ## Run docker-build-* targets for all the images with settings to be used for the e2e tests
# please ensure the generated image name matches image names used in the E2E_CONF_FILE
# and that they also match the image tags in bootstrap/config/default and controlplane/config/default
$(MAKE) BOOTSTRAP_IMG_TAG=dev docker-build-bootstrap
$(MAKE) CONTROLPLANE_IMG_TAG=dev docker-build-controlplane

# Render each e2e cluster template from its kustomize directory into the flat
# YAML file referenced by the e2e config (see test/e2e/config/k3s-docker.yaml).
# --load-restrictor LoadRestrictionsNone lets the kustomizations reference
# files outside their own directory (shared bases).
# NOTE: generated cluster-template*.yaml files are git-ignored (see .gitignore).
.PHONY: generate-e2e-templates
generate-e2e-templates: $(KUSTOMIZE)
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-v1beta1 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-v1beta1.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-kcp-remediation.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-md-remediation.yaml
$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-topology.yaml

.PHONY: test-e2e
test-e2e: $(GINKGO) $(KUSTOMIZE) ## Run the end-to-end tests
test-e2e: generate-e2e-templates $(GINKGO) $(KUSTOMIZE) ## Run the end-to-end tests
CAPI_KUSTOMIZE_PATH="$(KUSTOMIZE)" $(GINKGO) -v --trace -poll-progress-after=$(GINKGO_POLL_PROGRESS_AFTER) \
-poll-progress-interval=$(GINKGO_POLL_PROGRESS_INTERVAL) --tags=e2e --focus="$(GINKGO_FOCUS)" \
$(_SKIP_ARGS) --nodes=$(GINKGO_NODES) --timeout=$(GINKGO_TIMEOUT) --no-color=$(GINKGO_NOCOLOR) \
Expand Down
11 changes: 2 additions & 9 deletions bootstrap/config/rbac/role.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -47,17 +47,10 @@ rules:
resources:
- clusters
- clusters/status
- machines
- machines/status
verbs:
- get
- list
- watch
- apiGroups:
- exp.cluster.x-k8s.io
resources:
- machinepools
- machinepools/status
- machines
- machines/status
verbs:
- get
- list
Expand Down
3 changes: 1 addition & 2 deletions bootstrap/controllers/kthreesconfig_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,7 @@ var (

// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kthreesconfigs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kthreesconfigs/status,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status;machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete

func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ reconcile.Result, rerr error) {
Expand Down
1 change: 1 addition & 0 deletions docs/tilt-setup.md
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ Also, visit the [Cluster API documentation on Tilt][cluster_api_tilt] for more i
# add template for fast workload cluster creation, change to a different path if needed
# you could also add more templates
k3s-bootstrap:
# please run `make generate-e2e-templates` to generate the templates first
- ../cluster-api-k3s/test/e2e/data/infrastructure-docker
```
5. Run `tilt` in the `cluster-api` directory
Expand Down
98 changes: 78 additions & 20 deletions test/e2e/cluster_upgrade.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,13 @@ import (
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"

"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
)

// ClusterUpgradeSpecInput is the input for ClusterUpgradeConformanceSpec.
Expand Down Expand Up @@ -76,8 +78,9 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSp
namespace *corev1.Namespace
cancelWatches context.CancelFunc

controlPlaneMachineCount int64
workerMachineCount int64
controlPlaneMachineCount int64
workerMachineCount int64
kubernetesVersionUpgradeTo string

result *ApplyClusterTemplateAndWaitResult
clusterName string
Expand Down Expand Up @@ -109,6 +112,8 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSp
workerMachineCount = *input.WorkerMachineCount
}

kubernetesVersionUpgradeTo = input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo)

// Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)

Expand Down Expand Up @@ -154,30 +159,83 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSp
WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)

By("Upgrading the Kubernetes control-plane")
UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: result.Cluster,
ControlPlane: result.ControlPlane,
KubernetesUpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
})

By("Upgrading the machine deployment")
framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: result.Cluster,
UpgradeVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
MachineDeployments: result.MachineDeployments,
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
})
if result.Cluster.Spec.Topology != nil {
// Cluster is using ClusterClass, upgrade via topology.
By("Upgrading the Cluster topology")
mgmtClient := input.BootstrapClusterProxy.GetClient()

By("Patching the new Kubernetes version to Cluster topology")
patchHelper, err := patch.NewHelper(result.Cluster, mgmtClient)
Expect(err).ToNot(HaveOccurred())

result.Cluster.Spec.Topology.Version = kubernetesVersionUpgradeTo

Eventually(func() error {
return patchHelper.Patch(ctx, result.Cluster)
}, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch Cluster topology %s with version %s", klog.KObj(result.Cluster), kubernetesVersionUpgradeTo)

By("Waiting for control-plane machines to have the upgraded kubernetes version")
framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
Lister: mgmtClient,
Cluster: result.Cluster,
MachineCount: int(*result.ControlPlane.Spec.Replicas),
KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
}, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)

for _, deployment := range result.MachineDeployments {
if *deployment.Spec.Replicas > 0 {
Byf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded to %s",
klog.KObj(deployment), kubernetesVersionUpgradeTo)
framework.WaitForMachineDeploymentMachinesToBeUpgraded(ctx, framework.WaitForMachineDeploymentMachinesToBeUpgradedInput{
Lister: mgmtClient,
Cluster: result.Cluster,
MachineCount: int(*deployment.Spec.Replicas),
KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
MachineDeployment: *deployment,
}, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)
}
}

for _, pool := range result.MachinePools {
if *pool.Spec.Replicas > 0 {
Byf("Waiting for Kubernetes versions of machines in MachinePool %s to be upgraded to %s",
klog.KObj(pool), kubernetesVersionUpgradeTo)
framework.WaitForMachinePoolInstancesToBeUpgraded(ctx, framework.WaitForMachinePoolInstancesToBeUpgradedInput{
Getter: mgmtClient,
WorkloadClusterGetter: input.BootstrapClusterProxy.GetWorkloadCluster(ctx, result.Cluster.Namespace, result.Cluster.Name).GetClient(),
Cluster: result.Cluster,
MachineCount: int(*pool.Spec.Replicas),
KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
MachinePool: pool,
}, input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade")...)
}
}
} else {
By("Upgrading the Kubernetes control-plane")
UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: result.Cluster,
ControlPlane: result.ControlPlane,
KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
})

By("Upgrading the machine deployment")
framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{
ClusterProxy: input.BootstrapClusterProxy,
Cluster: result.Cluster,
UpgradeVersion: kubernetesVersionUpgradeTo,
MachineDeployments: result.MachineDeployments,
WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
})
}

By("Waiting until nodes are ready")
workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, result.Cluster.Name)
workloadClient := workloadProxy.GetClient()
framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{
Lister: workloadClient,
KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
KubernetesVersion: kubernetesVersionUpgradeTo,
Count: int(result.ExpectedTotalNodes()),
WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
})
Expand Down
16 changes: 16 additions & 0 deletions test/e2e/cluster_upgrade_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -54,4 +54,20 @@ var _ = Describe("Workload cluster upgrade [K3s-Upgrade]", func() {
}
})
})

// Exercises the upgrade flow for a ClusterClass-based cluster: with the
// "topology" flavor the created Cluster has Spec.Topology set, so
// ClusterUpgradeSpec upgrades it by patching the topology version instead
// of rolling the control plane and MachineDeployments directly.
Context("Upgrading a cluster with ClusterClass [ClusterClass]", func() {
ClusterUpgradeSpec(ctx, func() ClusterUpgradeSpecInput {
return ClusterUpgradeSpecInput{
E2EConfig: e2eConfig,
ClusterctlConfigPath: clusterctlConfigPath,
BootstrapClusterProxy: bootstrapClusterProxy,
ArtifactFolder: artifactFolder,
SkipCleanup: skipCleanup,
// "topology" selects the ClusterClass-based cluster template
// (generated by `make generate-e2e-templates`).
Flavor: ptr.To("topology"),
InfrastructureProvider: ptr.To("docker"),
ControlPlaneMachineCount: ptr.To[int64](1),
WorkerMachineCount: ptr.To[int64](2),
}
})
})
})
22 changes: 14 additions & 8 deletions test/e2e/config/k3s-docker.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,8 @@ providers:
- name: cluster-api
type: CoreProvider
versions:
- name: v1.6.2
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/core-components.yaml
- name: v1.7.2
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/core-components.yaml
type: url
files:
- sourcePath: "../data/shared/v1beta1/metadata.yaml"
Expand All @@ -28,21 +28,21 @@ providers:
versions:
# By default, will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
# to init the management cluster
- name: v1.6.2 # used during e2e-test
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/infrastructure-components-development.yaml
- name: v1.7.2 # used during e2e-test
value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/infrastructure-components-development.yaml
type: url
files:
- sourcePath: "../data/shared/v1beta1/metadata.yaml"
replacements:
- old: "imagePullPolicy: Always"
new: "imagePullPolicy: IfNotPresent"

# Add v1.7.99 to support tilt (not presented in ../data/shared/v1beta1/metadata.yaml)
# Add v1.8.99 to support tilt (not presented in ../data/shared/v1beta1/metadata.yaml)
# when bootstrapping with tilt, it will use
# the defaultProviderVersion in https://github.com/kubernetes-sigs/cluster-api/blob/main/hack/tools/internal/tilt-prepare/main.go as
# default version for docker infrastructure provider
# name here should match defaultProviderVersion
- name: v1.7.99 # next; use manifest from source files
- name: v1.8.99 # next; use manifest from source files
value: https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/infrastructure-components-development.yaml
type: url
files:
Expand All @@ -54,6 +54,9 @@ providers:
- sourcePath: "../data/infrastructure-docker/cluster-template.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-kcp-remediation.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-md-remediation.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-v1beta1.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-topology.yaml"
- sourcePath: "../data/infrastructure-docker/clusterclass-k3s.yaml"
- name: k3s
type: BootstrapProvider
versions:
Expand Down Expand Up @@ -82,6 +85,9 @@ variables:
KUBERNETES_VERSION_UPGRADE_TO: "v1.28.7+k3s1"
IP_FAMILY: "IPv4"
KIND_IMAGE_VERSION: "v1.28.0"
# Enabling the feature flags by setting the env variables.
CLUSTER_TOPOLOGY: "true"
EXP_MACHINE_POOL: "true"

intervals:
# The array is defined as [timeout, polling interval]
Expand All @@ -90,10 +96,10 @@ intervals:
default/wait-cluster: ["5m", "10s"]
default/wait-control-plane: ["10m", "10s"]
default/wait-worker-nodes: ["10m", "10s"]
default/wait-machine-pool-nodes: ["5m", "10s"]
default/wait-machine-pool-nodes: ["10m", "10s"]
default/wait-delete-cluster: ["3m", "10s"]
default/wait-machine-upgrade: ["30m", "10s"]
default/wait-machine-pool-upgrade: ["5m", "10s"]
default/wait-machine-pool-upgrade: ["30m", "10s"]
default/wait-nodes-ready: ["10m", "10s"]
default/wait-machine-remediation: ["5m", "10s"]
default/wait-autoscaler: ["5m", "10s"]
Expand Down
48 changes: 48 additions & 0 deletions test/e2e/create_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,4 +97,52 @@ var _ = Describe("Workload cluster creation", func() {
}, result)
})
})

// Creates a workload cluster from the "v1beta1" flavor template so the
// conversion path for the older v1beta1 API resources is exercised
// end-to-end (the controllers serve a newer storage version).
Context("Creating a cluster with v1beta1 api version for testing conversion", func() {
It("Should create a workload cluster with 1 control plane and 3 worker nodes", func() {
By("Creating a workload cluster with v1beta1 controlplane")
ApplyClusterTemplateAndWait(ctx, ApplyClusterTemplateAndWaitInput{
ClusterProxy: bootstrapClusterProxy,
ConfigCluster: clusterctl.ConfigClusterInput{
LogFolder: clusterctlLogFolder,
ClusterctlConfigPath: clusterctlConfigPath,
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(),
InfrastructureProvider: infrastructureProvider,
Flavor: "v1beta1",
Namespace: namespace.Name,
ClusterName: clusterName,
KubernetesVersion: e2eConfig.GetVariable(KubernetesVersion),
ControlPlaneMachineCount: pointer.Int64Ptr(1),
WorkerMachineCount: pointer.Int64Ptr(3),
},
WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"),
WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
}, result)
})
})

// Creates a workload cluster from the "topology" flavor — a Cluster that
// references a ClusterClass — to verify managed-topology provisioning works.
Context("Creating a cluster with clusterclass [ClusterClass]", func() {
	It("Should create a workload cluster with clusterclass", func() {
		// Fixed: this By message previously said "v1beta1 controlplane",
		// copy-pasted from the conversion test above.
		By("Creating a workload cluster with clusterclass")
		ApplyClusterTemplateAndWait(ctx, ApplyClusterTemplateAndWaitInput{
			ClusterProxy: bootstrapClusterProxy,
			ConfigCluster: clusterctl.ConfigClusterInput{
				LogFolder:              clusterctlLogFolder,
				ClusterctlConfigPath:   clusterctlConfigPath,
				KubeconfigPath:         bootstrapClusterProxy.GetKubeconfigPath(),
				InfrastructureProvider: infrastructureProvider,
				// "topology" selects the ClusterClass-based template
				// generated by `make generate-e2e-templates`.
				Flavor:                   "topology",
				Namespace:                namespace.Name,
				ClusterName:              clusterName,
				KubernetesVersion:        e2eConfig.GetVariable(KubernetesVersion),
				ControlPlaneMachineCount: pointer.Int64Ptr(1),
				WorkerMachineCount:       pointer.Int64Ptr(1),
			},
			WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
			WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
			WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
		}, result)
	})
})
})
Loading
Loading