Commit

add upgrade test
Signed-off-by: nasusoba <[email protected]>
nasusoba committed May 29, 2024
1 parent 9468f3d commit d89ac39
Showing 12 changed files with 337 additions and 43 deletions.
4 changes: 2 additions & 2 deletions Makefile
@@ -92,7 +92,7 @@ KUSTOMIZE := $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER)
# Ginkgo
TEST_DIR := $(shell pwd)/test
ARTIFACTS ?= $(shell pwd)/_artifacts
GINKGO_FOCUS ?=
GINKGO_FOCUS ?=
GINKGO_SKIP ?=
GINKGO_NODES ?= 1 # GINKGO_NODES is the number of parallel nodes to run
# when running the e2e tests, 1 means no parallelism
@@ -233,7 +233,7 @@ generate-e2e-templates: $(KUSTOMIZE)
	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-v1beta1 --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-v1beta1.yaml
	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-kcp-remediation.yaml
	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-md-remediation --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-md-remediation.yaml
	$(KUSTOMIZE) build $(DOCKER_TEMPLATES)/cluster-template-topology --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/cluster-template-topology.yaml

.PHONY: test-e2e
test-e2e: generate-e2e-templates $(GINKGO) $(KUSTOMIZE) ## Run the end-to-end tests
11 changes: 2 additions & 9 deletions bootstrap/config/rbac/role.yaml
@@ -47,17 +47,10 @@ rules:
  resources:
  - clusters
  - clusters/status
  - machines
  - machines/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - exp.cluster.x-k8s.io
  resources:
  - machinepools
  - machinepools/status
  - machines
  - machines/status
  verbs:
  - get
  - list
3 changes: 1 addition & 2 deletions bootstrap/controllers/kthreesconfig_controller.go
@@ -78,8 +78,7 @@ var (

// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kthreesconfigs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=bootstrap.cluster.x-k8s.io,resources=kthreesconfigs/status,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=exp.cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status;machines;machines/status;machinepools;machinepools/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=secrets;events;configmaps,verbs=get;list;watch;create;update;patch;delete

func (r *KThreesConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ reconcile.Result, rerr error) {
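The consolidated marker above matches the regenerated role.yaml: MachinePools are now read through the cluster.x-k8s.io API group instead of the experimental exp.cluster.x-k8s.io group that earlier Cluster API releases used. A minimal sketch (not part of this commit) of a read the consolidated rule permits; note the MachinePool Go type is still exported from CAPI's exp/api package even though the object is served from cluster.x-k8s.io:

    // Sketch only: fetching a MachinePool under the consolidated RBAC rule.
    package example

    import (
        "context"

        expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    func getMachinePool(ctx context.Context, c client.Client, key client.ObjectKey) (*expv1.MachinePool, error) {
        mp := &expv1.MachinePool{}
        if err := c.Get(ctx, key, mp); err != nil {
            return nil, err
        }
        return mp, nil
    }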
98 changes: 78 additions & 20 deletions test/e2e/cluster_upgrade.go
@@ -29,11 +29,13 @@ import (
. "github.com/onsi/gomega"

corev1 "k8s.io/api/core/v1"
"k8s.io/klog/v2"
"k8s.io/utils/ptr"

"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
)

// ClusterUpgradeSpecInput is the input for ClusterUpgradeSpec.
@@ -76,8 +78,9 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSpecInput) {
        namespace     *corev1.Namespace
        cancelWatches context.CancelFunc

        controlPlaneMachineCount int64
        workerMachineCount       int64
        controlPlaneMachineCount   int64
        workerMachineCount         int64
        kubernetesVersionUpgradeTo string

        result      *ApplyClusterTemplateAndWaitResult
        clusterName string
@@ -109,6 +112,8 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSpecInput) {
            workerMachineCount = *input.WorkerMachineCount
        }

        kubernetesVersionUpgradeTo = input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo)

        // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events.
        namespace, cancelWatches = setupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder)

@@ -154,30 +159,83 @@ func ClusterUpgradeSpec(ctx context.Context, inputGetter func() ClusterUpgradeSpecInput) {
            WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
        }, result)

        By("Upgrading the Kubernetes control-plane")
        UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{
            ClusterProxy:                input.BootstrapClusterProxy,
            Cluster:                     result.Cluster,
            ControlPlane:                result.ControlPlane,
            KubernetesUpgradeVersion:    input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
            WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
        })

        By("Upgrading the machine deployment")
        framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{
            ClusterProxy:                input.BootstrapClusterProxy,
            Cluster:                     result.Cluster,
            UpgradeVersion:              input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
            MachineDeployments:          result.MachineDeployments,
            WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
        })
        if result.Cluster.Spec.Topology != nil {
            // Cluster is using ClusterClass, upgrade via topology.
            By("Upgrading the Cluster topology")
            mgmtClient := input.BootstrapClusterProxy.GetClient()

            By("Patching the new Kubernetes version to Cluster topology")
            patchHelper, err := patch.NewHelper(result.Cluster, mgmtClient)
            Expect(err).ToNot(HaveOccurred())

            result.Cluster.Spec.Topology.Version = kubernetesVersionUpgradeTo

            Eventually(func() error {
                return patchHelper.Patch(ctx, result.Cluster)
            }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to patch Cluster topology %s with version %s", klog.KObj(result.Cluster), kubernetesVersionUpgradeTo)

            By("Waiting for control-plane machines to have the upgraded kubernetes version")
            framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
                Lister:                   mgmtClient,
                Cluster:                  result.Cluster,
                MachineCount:             int(*result.ControlPlane.Spec.Replicas),
                KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
            }, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)

            for _, deployment := range result.MachineDeployments {
                if *deployment.Spec.Replicas > 0 {
                    Byf("Waiting for Kubernetes versions of machines in MachineDeployment %s to be upgraded to %s",
                        klog.KObj(deployment), kubernetesVersionUpgradeTo)
                    framework.WaitForMachineDeploymentMachinesToBeUpgraded(ctx, framework.WaitForMachineDeploymentMachinesToBeUpgradedInput{
                        Lister:                   mgmtClient,
                        Cluster:                  result.Cluster,
                        MachineCount:             int(*deployment.Spec.Replicas),
                        KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
                        MachineDeployment:        *deployment,
                    }, input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)
                }
            }

            for _, pool := range result.MachinePools {
                if *pool.Spec.Replicas > 0 {
                    Byf("Waiting for Kubernetes versions of machines in MachinePool %s to be upgraded to %s",
                        klog.KObj(pool), kubernetesVersionUpgradeTo)
                    framework.WaitForMachinePoolInstancesToBeUpgraded(ctx, framework.WaitForMachinePoolInstancesToBeUpgradedInput{
                        Getter:                   mgmtClient,
                        WorkloadClusterGetter:    input.BootstrapClusterProxy.GetWorkloadCluster(ctx, result.Cluster.Namespace, result.Cluster.Name).GetClient(),
                        Cluster:                  result.Cluster,
                        MachineCount:             int(*pool.Spec.Replicas),
                        KubernetesUpgradeVersion: kubernetesVersionUpgradeTo,
                        MachinePool:              pool,
                    }, input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade")...)
                }
            }
        } else {
            By("Upgrading the Kubernetes control-plane")
            UpgradeControlPlaneAndWaitForUpgrade(ctx, UpgradeControlPlaneAndWaitForUpgradeInput{
                ClusterProxy:                input.BootstrapClusterProxy,
                Cluster:                     result.Cluster,
                ControlPlane:                result.ControlPlane,
                KubernetesUpgradeVersion:    kubernetesVersionUpgradeTo,
                WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-upgrade"),
            })

            By("Upgrading the machine deployment")
            framework.UpgradeMachineDeploymentsAndWait(ctx, framework.UpgradeMachineDeploymentsAndWaitInput{
                ClusterProxy:                input.BootstrapClusterProxy,
                Cluster:                     result.Cluster,
                UpgradeVersion:              kubernetesVersionUpgradeTo,
                MachineDeployments:          result.MachineDeployments,
                WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
            })
        }

        By("Waiting until nodes are ready")
        workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, result.Cluster.Name)
        workloadClient := workloadProxy.GetClient()
        framework.WaitForNodesReady(ctx, framework.WaitForNodesReadyInput{
            Lister:            workloadClient,
            KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
            KubernetesVersion: kubernetesVersionUpgradeTo,
            Count:             int(result.ExpectedTotalNodes()),
            WaitForNodesReady: input.E2EConfig.GetIntervals(specName, "wait-nodes-ready"),
        })
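For orientation, not part of the diff: the Count handed to WaitForNodesReady above comes from result.ExpectedTotalNodes(). A hypothetical sketch of what that helper is assumed to compute — control-plane replicas plus the replicas of every MachineDeployment and MachinePool the template created:

    // Hypothetical reimplementation for illustration only; the real helper is
    // part of the e2e framework's ApplyClusterTemplateAndWaitResult.
    func expectedTotalNodes(result *ApplyClusterTemplateAndWaitResult) int32 {
        total := *result.ControlPlane.Spec.Replicas
        for _, md := range result.MachineDeployments {
            total += *md.Spec.Replicas
        }
        for _, mp := range result.MachinePools {
            total += *mp.Spec.Replicas
        }
        return total
    }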
16 changes: 16 additions & 0 deletions test/e2e/cluster_upgrade_test.go
@@ -54,4 +54,20 @@ var _ = Describe("Workload cluster upgrade [K3s-Upgrade]", func() {
            }
        })
    })

    Context("Upgrading a cluster with ClusterClass [ClusterClass]", func() {
        ClusterUpgradeSpec(ctx, func() ClusterUpgradeSpecInput {
            return ClusterUpgradeSpecInput{
                E2EConfig:                e2eConfig,
                ClusterctlConfigPath:     clusterctlConfigPath,
                BootstrapClusterProxy:    bootstrapClusterProxy,
                ArtifactFolder:           artifactFolder,
                SkipCleanup:              skipCleanup,
                Flavor:                   ptr.To("topology"),
                InfrastructureProvider:   ptr.To("docker"),
                ControlPlaneMachineCount: ptr.To[int64](1),
                WorkerMachineCount:       ptr.To[int64](2),
            }
        })
    })
})
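Flavor: ptr.To("topology") ties this spec to the cluster-template-topology.yaml produced by the new generate-e2e-templates step in the Makefile. A small sketch of the flavor-to-template naming convention assumed here (an empty flavor resolves to the plain cluster-template.yaml):

    // Sketch of the assumed naming convention, not clusterctl's actual code.
    import "fmt"

    func templateForFlavor(flavor string) string {
        if flavor == "" {
            return "cluster-template.yaml"
        }
        return fmt.Sprintf("cluster-template-%s.yaml", flavor)
    }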
21 changes: 13 additions & 8 deletions test/e2e/config/k3s-docker.yaml
@@ -15,8 +15,8 @@ providers:
- name: cluster-api
  type: CoreProvider
  versions:
  - name: v1.6.2
    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/core-components.yaml
  - name: v1.7.2
    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/core-components.yaml
    type: url
    files:
    - sourcePath: "../data/shared/v1beta1/metadata.yaml"
@@ -28,21 +28,21 @@ providers:
  versions:
  # By default, will use the latest version defined in ../data/shared/v1beta1/metadata.yaml
  # to init the management cluster
  - name: v1.6.2 # used during e2e-test
    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.6.2/infrastructure-components-development.yaml
  - name: v1.7.2 # used during e2e-test
    value: https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.7.2/infrastructure-components-development.yaml
    type: url
    files:
    - sourcePath: "../data/shared/v1beta1/metadata.yaml"
      replacements:
      - old: "imagePullPolicy: Always"
        new: "imagePullPolicy: IfNotPresent"

  # Add v1.7.99 to support tilt (not presented in ../data/shared/v1beta1/metadata.yaml)
  # Add v1.8.99 to support tilt (not presented in ../data/shared/v1beta1/metadata.yaml)
  # when bootstrapping with tilt, it will use
  # the defaultProviderVersion in https://github.com/kubernetes-sigs/cluster-api/blob/main/hack/tools/internal/tilt-prepare/main.go as
  # default version for docker infrastructure provider
  # name here should match defaultProviderVersion
  - name: v1.7.99 # next; use manifest from source files
  - name: v1.8.99 # next; use manifest from source files
    value: https://github.com/kubernetes-sigs/cluster-api/releases/latest/download/infrastructure-components-development.yaml
    type: url
    files:
@@ -55,6 +55,8 @@ providers:
- sourcePath: "../data/infrastructure-docker/cluster-template-kcp-remediation.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-md-remediation.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-v1beta1.yaml"
- sourcePath: "../data/infrastructure-docker/cluster-template-topology.yaml"
- sourcePath: "../data/infrastructure-docker/clusterclass-k3s.yaml"
- name: k3s
type: BootstrapProvider
versions:
@@ -83,6 +85,9 @@ variables:
  KUBERNETES_VERSION_UPGRADE_TO: "v1.28.7+k3s1"
  IP_FAMILY: "IPv4"
  KIND_IMAGE_VERSION: "v1.28.0"
  # Enabling the feature flags by setting the env variables.
  CLUSTER_TOPOLOGY: "true"
  EXP_MACHINE_POOL: "true"

intervals:
  # The array is defined as [timeout, polling interval]
@@ -91,10 +96,10 @@ intervals:
  default/wait-cluster: ["5m", "10s"]
  default/wait-control-plane: ["10m", "10s"]
  default/wait-worker-nodes: ["10m", "10s"]
  default/wait-machine-pool-nodes: ["5m", "10s"]
  default/wait-machine-pool-nodes: ["10m", "10s"]
  default/wait-delete-cluster: ["3m", "10s"]
  default/wait-machine-upgrade: ["30m", "10s"]
  default/wait-machine-pool-upgrade: ["5m", "10s"]
  default/wait-machine-pool-upgrade: ["30m", "10s"]
  default/wait-nodes-ready: ["10m", "10s"]
  default/wait-machine-remediation: ["5m", "10s"]
  default/wait-autoscaler: ["5m", "10s"]
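Each intervals entry above is a [timeout, polling interval] pair. A minimal sketch, assuming the clusterctl E2EConfig API, of how the raised wait-machine-pool-upgrade budget reaches Gomega in the upgrade spec:

    // GetIntervals returns the ["30m", "10s"] pair as a []interface{} that is
    // passed straight through to Eventually as timeout and polling interval.
    intervals := e2eConfig.GetIntervals("k3s-upgrade", "wait-machine-pool-upgrade")
    Eventually(func() error {
        return checkMachinePoolUpgraded(ctx) // hypothetical check function
    }, intervals...).Should(Succeed())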
26 changes: 25 additions & 1 deletion test/e2e/create_test.go
@@ -98,7 +98,7 @@ var _ = Describe("Workload cluster creation", func() {
        })
    })

    Context("Creating a v1beta1 cluster for testing conversion", func() {
    Context("Creating a cluster with v1beta1 api version for testing conversion", func() {
        It("Should create a workload cluster with 1 control plane and 3 worker nodes", func() {
            By("Creating a workload cluster with v1beta1 controlplane")
            ApplyClusterTemplateAndWait(ctx, ApplyClusterTemplateAndWaitInput{
@@ -121,4 +121,28 @@ var _ = Describe("Workload cluster creation", func() {
            }, result)
        })
    })

    Context("Creating a cluster with clusterclass [ClusterClass]", func() {
        It("Should create a workload cluster with 1 control plane, 1 machine deployment and 1 machine pool", func() {
            By("Creating a workload cluster with v1beta1 controlplane")
            ApplyClusterTemplateAndWait(ctx, ApplyClusterTemplateAndWaitInput{
                ClusterProxy: bootstrapClusterProxy,
                ConfigCluster: clusterctl.ConfigClusterInput{
                    LogFolder:                clusterctlLogFolder,
                    ClusterctlConfigPath:     clusterctlConfigPath,
                    KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
                    InfrastructureProvider:   infrastructureProvider,
                    Flavor:                   "topology",
                    Namespace:                namespace.Name,
                    ClusterName:              clusterName,
                    KubernetesVersion:        e2eConfig.GetVariable(KubernetesVersion),
                    ControlPlaneMachineCount: pointer.Int64Ptr(1),
                    WorkerMachineCount:       pointer.Int64Ptr(1),
                },
                WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
                WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
                WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
            }, result)
        })
    })
})
@@ -0,0 +1,39 @@
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
      - 10.45.0.0/16
    services:
      cidrBlocks:
      - 10.46.0.0/16
    serviceDomain: cluster.local
  topology:
    class: k3s
    version: ${KUBERNETES_VERSION}
    controlPlane:
      nodeDeletionTimeout: "30s"
      nodeVolumeDetachTimeout: "5m"
      replicas: ${CONTROL_PLANE_MACHINE_COUNT}
    workers:
      machineDeployments:
      - class: k3s-default-worker
        name: ${CLUSTER_NAME}-md-0
        nodeDeletionTimeout: "30s"
        nodeVolumeDetachTimeout: "5m"
        minReadySeconds: 5
        replicas: ${WORKER_MACHINE_COUNT}
      machinePools:
      - class: k3s-default-worker
        name: ${CLUSTER_NAME}-mp-0
        nodeDeletionTimeout: "30s"
        nodeVolumeDetachTimeout: "5m"
        minReadySeconds: 5
        replicas: ${WORKER_MACHINE_COUNT}
    variables:
    - name: kindImageVersion
      value: ${KIND_IMAGE_VERSION}
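The ${...} placeholders in this template are filled by clusterctl's variable substitution from the config variables above (KUBERNETES_VERSION, WORKER_MACHINE_COUNT, KIND_IMAGE_VERSION, and so on). A rough Go illustration of the idea; clusterctl's actual engine is drone/envsubst, which also supports default values:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        os.Setenv("CLUSTER_NAME", "k3s-topology-demo") // assumed value for the demo
        tmpl := "name: ${CLUSTER_NAME}-mp-0"
        fmt.Println(os.Expand(tmpl, os.Getenv)) // name: k3s-topology-demo-mp-0
    }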
@@ -1,5 +1,6 @@
resources:
- ../bases/cluster-with-kcp.yaml
- ../bases/md.yaml
- mhc.yaml
patches:
- path: kcp.yaml
@@ -0,0 +1,2 @@
resources:
- ../bases/cluster-with-topology.yaml