From 4824dbc753320ac58427fa2c5e62a644473d61a4 Mon Sep 17 00:00:00 2001 From: Mateusz Gozdek Date: Tue, 3 May 2022 09:24:53 +0200 Subject: [PATCH] Fix automatically found typos and related spelling Found and fixed using the following command: codespell -f -H -S .git,_artifacts,*.sum -L aks,witht,geting,ot,intepreted -C0 -i0 -w Additionally did kubernetes -> Kubernetes in exp/api/v1beta1 and 'to to' -> 'to'. Signed-off-by: Mateusz Gozdek --- CONTRIBUTING.md | 18 +++++++++--------- api/v1alpha4/conditions_consts.go | 2 +- api/v1beta1/azurecluster_validation_test.go | 2 +- api/v1beta1/conditions_consts.go | 2 +- azure/scope/cluster_test.go | 2 +- azure/scope/identity.go | 2 +- azure/scope/machinepoolmachine.go | 2 +- azure/scope/machinepoolmachine_test.go | 2 +- .../services/bastionhosts/azurebastion_spec.go | 2 +- azure/services/natgateways/natgateways.go | 2 +- azure/services/scalesetvms/client.go | 2 +- ...ter.x-k8s.io_azuremanagedcontrolplanes.yaml | 6 +++--- docs/book/src/developers/development.md | 16 ++++++++-------- .../src/developers/kubernetes-developers.md | 2 +- docs/book/src/topics/windows.md | 2 +- .../20201214-bootstrap-failure-detection.md | 4 ++-- ...6-async-azure-resource-creation-deletion.md | 2 +- .../v1alpha3/azuremanagedcontrolplane_types.go | 2 +- .../v1alpha4/azuremanagedcontrolplane_types.go | 2 +- .../v1beta1/azuremanagedcontrolplane_types.go | 2 +- .../azuremanagedcontrolplane_webhook_test.go | 3 +-- .../prometheus/resources/bundle.yaml | 6 +++--- hack/util.sh | 2 +- scripts/ci-build-kubernetes.sh | 2 +- templates/flavors/aks/cluster-template.yaml | 2 +- ...luster-template-prow-identity-from-env.yaml | 2 +- ...cluster-template-custom-builds-windows.yaml | 4 ++-- .../patches/custom-builds-windows.yaml | 4 ++-- test/e2e/aks.go | 6 +++--- test/e2e/azure_selfhosted.go | 2 +- .../v1alpha4/cluster-template-prow.yaml | 2 +- test/e2e/kubernetes/node/node.go | 2 +- 32 files changed, 56 insertions(+), 57 deletions(-) diff --git a/CONTRIBUTING.md 
b/CONTRIBUTING.md index 0b5801f6c59..dd6f1cf0ccd 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -34,20 +34,20 @@ If you're new to the project and want to help, but don't know where to start, we 1. See the [Development Guide](https://capz.sigs.k8s.io/developers/development.html) for more instructions on setting up your environment and testing changes locally. 3. Submit a pull request. 1. All PRs should be labeled with one of the following kinds - - `/kind feature` for PRs releated to adding new features/tests - - `/kind bug` for PRs releated to bug fixes and patches - - `/kind api-change` for PRs releated to adding, removing, or otherwise changing an API - - `/kind cleanup` for PRs releated to code refactoring and cleanup + - `/kind feature` for PRs related to adding new features/tests + - `/kind bug` for PRs related to bug fixes and patches + - `/kind api-change` for PRs related to adding, removing, or otherwise changing an API + - `/kind cleanup` for PRs related to code refactoring and cleanup - `/kind deprecation` for PRs related to a feature/enhancement marked for deprecation. - - `/kind design` for PRs releated to design proposals - - `/kind documentation` for PRs releated to documentation - - `/kind failing-test` for PRs releated to to a consistently or frequently failing test. + - `/kind design` for PRs related to design proposals + - `/kind documentation` for PRs related to documentation + - `/kind failing-test` for PRs related to a consistently or frequently failing test. - `/kind flake` for PRs related to a flaky test. - - `/kind other` for PRs releated to updating dependencies, minor changes or other + - `/kind other` for PRs related to updating dependencies, minor changes or other 2. If the PR requires additional action from users switching to a new release, include the string "action required" in the PR release-notes. 3. All code changes must be covered by unit tests and E2E tests. 4. All new features should come with user documentation. - 4. 
Once the PR has been reviewed and is ready to be merged, commits should be [squashed](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md#squash-commits). + 4. Once the PR has been reviewed and is ready to be merged, commits should be [squashed](https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md#squash-commits). 1. Ensure that commit message(s) are be meaningful and commit history is readable. All changes must be code reviewed. Coding conventions and standards are explained in the official [developer docs](https://github.com/kubernetes/community/tree/master/contributors/devel). Expect reviewers to request that you avoid common [go style mistakes](https://github.com/golang/go/wiki/CodeReviewComments) in your PRs. diff --git a/api/v1alpha4/conditions_consts.go b/api/v1alpha4/conditions_consts.go index d6f94b9ce8b..d860de17688 100644 --- a/api/v1alpha4/conditions_consts.go +++ b/api/v1alpha4/conditions_consts.go @@ -42,7 +42,7 @@ const ( WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. WaitingForBootstrapDataReason = "WaitingForBootstrapData" - // BootstrapSucceededCondition reports the result of the execution of the boostrap data on the machine. + // BootstrapSucceededCondition reports the result of the execution of the bootstrap data on the machine. BootstrapSucceededCondition = "BoostrapSucceeded" // BootstrapInProgressReason is used to indicate the bootstrap data has not finished executing. 
BootstrapInProgressReason = "BootstrapInProgress" diff --git a/api/v1beta1/azurecluster_validation_test.go b/api/v1beta1/azurecluster_validation_test.go index ad74e966109..e92d1485b0d 100644 --- a/api/v1beta1/azurecluster_validation_test.go +++ b/api/v1beta1/azurecluster_validation_test.go @@ -588,7 +588,7 @@ func TestValidateSubnetCIDR(t *testing.T) { }, }, { - name: "subnet cidr in atleast one vnet's range in case of multiple vnet cidr blocks", + name: "subnet cidr in at least one vnet's range in case of multiple vnet cidr blocks", vnetCidrBlocks: []string{"10.0.0.0/8", "11.0.0.0/8"}, subnetCidrBlocks: []string{"10.1.0.0/16", "10.0.0.0/16", "11.1.0.0/16"}, wantErr: false, diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go index 105a58692cb..681fd20168d 100644 --- a/api/v1beta1/conditions_consts.go +++ b/api/v1beta1/conditions_consts.go @@ -42,7 +42,7 @@ const ( WaitingForClusterInfrastructureReason = "WaitingForClusterInfrastructure" // WaitingForBootstrapDataReason used when machine is waiting for bootstrap data to be ready before proceeding. WaitingForBootstrapDataReason = "WaitingForBootstrapData" - // BootstrapSucceededCondition reports the result of the execution of the boostrap data on the machine. + // BootstrapSucceededCondition reports the result of the execution of the bootstrap data on the machine. BootstrapSucceededCondition clusterv1.ConditionType = "BootstrapSucceeded" // BootstrapInProgressReason is used to indicate the bootstrap data has not finished executing. 
BootstrapInProgressReason = "BootstrapInProgress" diff --git a/azure/scope/cluster_test.go b/azure/scope/cluster_test.go index ad4fdaf0806..f45a87cd8f3 100644 --- a/azure/scope/cluster_test.go +++ b/azure/scope/cluster_test.go @@ -2112,7 +2112,7 @@ func TestFailureDomains(t *testing.T) { }, }, { - name: "Mutiple failure domains present in azure cluster status", + name: "Multiple failure domains present in azure cluster status", expectFailureDomains: []string{"failure-domain-id-1", "failure-domain-id-2", "failure-domain-id-3"}, clusterName: "my-cluster", azureClusterStatus: infrav1.AzureClusterStatus{ diff --git a/azure/scope/identity.go b/azure/scope/identity.go index 6150480d9d6..3a11f6132c2 100644 --- a/azure/scope/identity.go +++ b/azure/scope/identity.go @@ -211,7 +211,7 @@ func createAzureIdentityWithBindings(ctx context.Context, azureIdentity *infrav1 } // AzureIdentity and AzureIdentityBinding will no longer have an OwnerRef starting from capz release v0.5.0 because of the following: - // In Kubenetes v1.20+, if the garbage collector detects an invalid cross-namespace ownerReference, or a cluster-scoped dependent with + // In Kubernetes v1.20+, if the garbage collector detects an invalid cross-namespace ownerReference, or a cluster-scoped dependent with // an ownerReference referencing a namespaced kind, a warning Event with a reason of OwnerRefInvalidNamespace and an involvedObject // of the invalid dependent is reported. You can check for that kind of Event by running kubectl get events -A --field-selector=reason=OwnerRefInvalidNamespace. 
diff --git a/azure/scope/machinepoolmachine.go b/azure/scope/machinepoolmachine.go index cf2e1014944..4f0bbe7f014 100644 --- a/azure/scope/machinepoolmachine.go +++ b/azure/scope/machinepoolmachine.go @@ -281,7 +281,7 @@ func (s *MachinePoolMachineScope) UpdateStatus(ctx context.Context) error { } if err != nil && !apierrors.IsNotFound(err) { - return errors.Wrap(err, "failed to to get node by providerID or object reference") + return errors.Wrap(err, "failed to get node by providerID or object reference") } if node != nil { diff --git a/azure/scope/machinepoolmachine_test.go b/azure/scope/machinepoolmachine_test.go index 69f83991157..9214008c1ff 100644 --- a/azure/scope/machinepoolmachine_test.go +++ b/azure/scope/machinepoolmachine_test.go @@ -192,7 +192,7 @@ func TestMachineScope_UpdateStatus(t *testing.T) { mockNodeGetter.EXPECT().GetNodeByProviderID(gomock2.AContext(), FakeProviderID).Return(nil, errors.New("boom")) return nil, ampm }, - Err: "failed to to get node by providerID or object reference: boom", + Err: "failed to get node by providerID or object reference: boom", }, { Name: "should not mark AMPM ready if node is not ready", diff --git a/azure/services/bastionhosts/azurebastion_spec.go b/azure/services/bastionhosts/azurebastion_spec.go index b70dda169f7..d9cf76dbbf6 100644 --- a/azure/services/bastionhosts/azurebastion_spec.go +++ b/azure/services/bastionhosts/azurebastion_spec.go @@ -60,7 +60,7 @@ func (s *AzureBastionSpec) OwnerResourceName() string { } // Parameters returns the parameters for the bastion host. 
-func (s *AzureBastionSpec) Parameters(existing interface{}) (paramteres interface{}, err error) { +func (s *AzureBastionSpec) Parameters(existing interface{}) (parameters interface{}, err error) { if existing != nil { if _, ok := existing.(network.BastionHost); !ok { return nil, errors.Errorf("%T is not a network.BastionHost", existing) diff --git a/azure/services/natgateways/natgateways.go b/azure/services/natgateways/natgateways.go index 4a1b5ce8bba..2bb9465965d 100644 --- a/azure/services/natgateways/natgateways.go +++ b/azure/services/natgateways/natgateways.go @@ -93,7 +93,7 @@ func (s *Service) Reconcile(ctx context.Context) error { if err == nil { natGateway, ok := result.(network.NatGateway) if !ok { - // Return out of loop since this would be an unexepcted fatal error + // Return out of loop since this would be an unexpected fatal error resultingErr = errors.Errorf("created resource %T is not a network.NatGateway", result) break } diff --git a/azure/services/scalesetvms/client.go b/azure/services/scalesetvms/client.go index 9e862515b54..8a62a824870 100644 --- a/azure/services/scalesetvms/client.go +++ b/azure/services/scalesetvms/client.go @@ -103,7 +103,7 @@ func (ac *azureClient) GetResultIfDone(ctx context.Context, future *infrav1.Futu VirtualMachineScaleSetVMsDeleteFuture: future, } default: - return compute.VirtualMachineScaleSetVM{}, errors.Errorf("unknown furture type %q", future.Type) + return compute.VirtualMachineScaleSetVM{}, errors.Errorf("unknown future type %q", future.Type) } done, err := genericFuture.DoneWithContext(ctx, ac.scalesetvms) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml index 1a590ec36f7..f161c796070 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_azuremanagedcontrolplanes.yaml @@ -110,7 +110,7 @@ 
spec: type: string nodeResourceGroupName: description: NodeResourceGroupName is the name of the resource group - containining cluster IaaS resources. Will be populated to default + containing cluster IaaS resources. Will be populated to default in webhook. type: string resourceGroupName: @@ -368,7 +368,7 @@ spec: type: string nodeResourceGroupName: description: NodeResourceGroupName is the name of the resource group - containining cluster IaaS resources. Will be populated to default + containing cluster IaaS resources. Will be populated to default in webhook. type: string resourceGroupName: @@ -693,7 +693,7 @@ spec: type: string nodeResourceGroupName: description: NodeResourceGroupName is the name of the resource group - containining cluster IaaS resources. Will be populated to default + containing cluster IaaS resources. Will be populated to default in webhook. type: string resourceGroupName: diff --git a/docs/book/src/developers/development.md b/docs/book/src/developers/development.md index b45e0809d3a..c5cc1006f0a 100644 --- a/docs/book/src/developers/development.md +++ b/docs/book/src/developers/development.md @@ -134,7 +134,7 @@ Install [Helm](https://helm.sh/docs/intro/install/): - `choco install kubernetes-helm` on Windows - [Install Instruction](https://helm.sh/docs/intro/install/#from-source-linux-macos) on Linux -You would require installation of Helm for succesfully setting up Tilt. +You would require installation of Helm for successfully setting up Tilt. ### Using Tilt @@ -371,8 +371,8 @@ export WORKER_MACHINE_COUNT=2 export KUBERNETES_VERSION="v1.22.1" # Identity secret. -export AZURE_CLUSTER_IDENTITY_SECRET_NAME="cluster-identity-secret" -export CLUSTER_IDENTITY_NAME="cluster-identity" +export AZURE_CLUSTER_IDENTITY_SECRET_NAME="cluster-identity-secret" +export CLUSTER_IDENTITY_NAME="cluster-identity" export AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE="default" # Generate SSH key. 
@@ -392,7 +392,7 @@ or the use of `envsubst` to replace these values ##### Creating the cluster -⚠️ Make sure you followed the previous two steps to build the dev image and set the required environment variables before proceding. +⚠️ Make sure you followed the previous two steps to build the dev image and set the required environment variables before proceeding. Ensure dev environment has been reset: @@ -435,7 +435,7 @@ defer done() ``` The code above creates a context with a new span stored in the context.Context value bag. If a span already existed in -the `ctx` arguement, then the new span would take on the parentID of the existing span, otherwise the new span +the `ctx` argument, then the new span would take on the parentID of the existing span, otherwise the new span becomes a "root span", one that does not have a parent. The span is also created with labels, or tags, which provide metadata about the span and can be used to query in many distributed tracing systems. @@ -492,7 +492,7 @@ You can optionally set the following variables: |----------------------------|---------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------| | `E2E_CONF_FILE` | The path of the [E2E configuration file](https://cluster-api.sigs.k8s.io/developer/e2e.html#defining-an-e2e-config-file). | `${GOPATH}/src/sigs.k8s.io/cluster-api-provider-azure/test/e2e/config/azure-dev.yaml` | | `SKIP_CLEANUP` | Set to `true` if you do not want the bootstrap and workload clusters to be cleaned up after running E2E tests. | `false` | -| `SKIP_CREATE_MGMT_CLUSTER` | Skip management cluster creation. If skipping managment cluster creation you must specify `KUBECONFIG` and `SKIP_CLEANUP` | `false` | +| `SKIP_CREATE_MGMT_CLUSTER` | Skip management cluster creation. 
If skipping management cluster creation you must specify `KUBECONFIG` and `SKIP_CLEANUP` | `false` | | `LOCAL_ONLY` | Use Kind local registry and run the subset of tests which don't require a remotely pushed controller image. | `true` | | `REGISTRY` | Registry to push the controller image. | `capzci.azurecr.io/ci-e2e` | | `CLUSTER_NAME` | Name of an existing workload cluster. Must be set to run specs against existing workload cluster. Use in conjunction with `SKIP_CREATE_MGMT_CLUSTER`, `GINKGO_FOCUS`, `CLUSTER_NAMESPACE` and `KUBECONFIG`. Must specify **only one** e2e spec to run against with `GINKGO_FOCUS` such as `export GINKGO_FOCUS=Creating.a.VMSS.cluster.with.a.single.control.plane.node`. | @@ -532,7 +532,7 @@ With the following environment variables defined, CAPZ runs `./scripts/ci-build- |-------------------------|------------| | `AZURE_STORAGE_ACCOUNT` | Your Azure storage account name | | `AZURE_STORAGE_KEY` | Your Azure storage key | -| `JOB_NAME` | `test` (an enviroment variable used by CI, can be any non-empty string) | +| `JOB_NAME` | `test` (an environment variable used by CI, can be any non-empty string) | | `LOCAL_ONLY` | `false` | | `REGISTRY` | Your Registry | | `TEST_K8S` | `true` | @@ -558,7 +558,7 @@ You can optionally set the following variables: | `EXP_MACHINE_POOL` | Use [Machine Pool](../topics/machinepools.md) for worker machines. | | `TEST_WINDOWS` | Build a cluster that has Windows worker nodes. | | `REGISTRY` | Registry to push any custom k8s images or cloud provider images built. | -| `CLUSTER_TEMPLATE` | Use a custom cluster template. By default, the script will choose the appropriate cluster template based on existing environment variabes. | +| `CLUSTER_TEMPLATE` | Use a custom cluster template. By default, the script will choose the appropriate cluster template based on existing environment variables. | You can also customize the configuration of the CAPZ cluster (assuming that `SKIP_CREATE_WORKLOAD_CLUSTER` is not set). 
See [Customizing the cluster deployment](#customizing-the-cluster-deployment) for more details. diff --git a/docs/book/src/developers/kubernetes-developers.md b/docs/book/src/developers/kubernetes-developers.md index 5346d907b80..b3ecaf70c2c 100644 --- a/docs/book/src/developers/kubernetes-developers.md +++ b/docs/book/src/developers/kubernetes-developers.md @@ -21,7 +21,7 @@ export AZURE_STORAGE_ACCOUNT= export AZURE_STORAGE_KEY= export REGISTRY= export TEST_K8S="true" -export JOB_NAME="test" # an enviroment variable used by CI, can be any non-empty string +export JOB_NAME="test" # an environment variable used by CI, can be any non-empty string source ./scripts/ci-build-kubernetes.sh ``` diff --git a/docs/book/src/topics/windows.md b/docs/book/src/topics/windows.md index d3bc1cb709f..7bbf613aec2 100644 --- a/docs/book/src/topics/windows.md +++ b/docs/book/src/topics/windows.md @@ -81,7 +81,7 @@ The Windows HostProcess Container feature is Alpha for Kubernetes v1.22 and Beta Current requirements: -- Kuberentes 1.22+ +- Kubernetes 1.22+ - containerd 1.6+ - `WindowsHostProcessContainers` feature-gate (Alpha for v1.22) turned on for kube-apiserver and kubelet if using Kubernetes 1.22 diff --git a/docs/proposals/20201214-bootstrap-failure-detection.md b/docs/proposals/20201214-bootstrap-failure-detection.md index d3d802b5b9d..81ff6c5107b 100644 --- a/docs/proposals/20201214-bootstrap-failure-detection.md +++ b/docs/proposals/20201214-bootstrap-failure-detection.md @@ -63,7 +63,7 @@ For more info see https://docs.microsoft.com/en-us/azure/service-bus-messaging/ #### Pub Sub Cons: - Complicated, lots of additional moving pieces: -- Might have to write our own OS-specific tools to consume bootstrap logs and publish them, and those tools would be installed by additional VM Extentions, or add'l cloud-init configuration +- Might have to write our own OS-specific tools to consume bootstrap logs and publish them, and those tools would be installed by additional VM Extensions, 
or add'l cloud-init configuration - Add’l IaaS cost ### Option 3: Azure Custom Script Extensions @@ -133,7 +133,7 @@ At a very high level, this is what we want our capz-named Azure VM Extension to - Again, we assume using a common exit code for all failure states is acceptable for the initial scope of this work - Set appropriate AzureMachine (and possibly Machine?) conditions -VM Boot Diagnostics should be used in conjunction with the extension. The VM extension provides a simple pass/fail signal that can be used by CAPZ to set conditions and indicate bootstrap status. Boot Diagnostics can provide a quick look at what went wrong to the user by displaying cloud-init logs without needing to SSH into the VM. In the future, boot diagnostics might even used to stream logs programatically at the AzureMachine level. +VM Boot Diagnostics should be used in conjunction with the extension. The VM extension provides a simple pass/fail signal that can be used by CAPZ to set conditions and indicate bootstrap status. Boot Diagnostics can provide a quick look at what went wrong to the user by displaying cloud-init logs without needing to SSH into the VM. In the future, boot diagnostics might even be used to stream logs programmatically at the AzureMachine level. ## Questions diff --git a/docs/proposals/20210716-async-azure-resource-creation-deletion.md b/docs/proposals/20210716-async-azure-resource-creation-deletion.md index 13aee45798e..c9dbcc52057 100644 --- a/docs/proposals/20210716-async-azure-resource-creation-deletion.md +++ b/docs/proposals/20210716-async-azure-resource-creation-deletion.md @@ -316,5 +316,5 @@ This is not mutually exclusive with the proposal above.
In fact, it might be a g ## Implementation History -- 2020/12/04: Inital POC [PR](https://github.com/kubernetes-sigs/cluster-api-provider-azure/pull/1067) for AzureMachinePool opened +- 2020/12/04: Initial POC [PR](https://github.com/kubernetes-sigs/cluster-api-provider-azure/pull/1067) for AzureMachinePool opened - 2021/07/16: Initial proposal diff --git a/exp/api/v1alpha3/azuremanagedcontrolplane_types.go b/exp/api/v1alpha3/azuremanagedcontrolplane_types.go index 532f1a64bc2..c92a7e3a429 100644 --- a/exp/api/v1alpha3/azuremanagedcontrolplane_types.go +++ b/exp/api/v1alpha3/azuremanagedcontrolplane_types.go @@ -32,7 +32,7 @@ type AzureManagedControlPlaneSpec struct { ResourceGroupName string `json:"resourceGroupName"` // NodeResourceGroupName is the name of the resource group - // containining cluster IaaS resources. Will be populated to default + // containing cluster IaaS resources. Will be populated to default // in webhook. NodeResourceGroupName string `json:"nodeResourceGroupName"` diff --git a/exp/api/v1alpha4/azuremanagedcontrolplane_types.go b/exp/api/v1alpha4/azuremanagedcontrolplane_types.go index 8a5677d4e6e..bbc929817ac 100644 --- a/exp/api/v1alpha4/azuremanagedcontrolplane_types.go +++ b/exp/api/v1alpha4/azuremanagedcontrolplane_types.go @@ -41,7 +41,7 @@ type AzureManagedControlPlaneSpec struct { ResourceGroupName string `json:"resourceGroupName"` // NodeResourceGroupName is the name of the resource group - // containining cluster IaaS resources. Will be populated to default + // containing cluster IaaS resources. Will be populated to default // in webhook. 
// +optional NodeResourceGroupName string `json:"nodeResourceGroupName,omitempty"` diff --git a/exp/api/v1beta1/azuremanagedcontrolplane_types.go b/exp/api/v1beta1/azuremanagedcontrolplane_types.go index 67d442c92dd..ce698de16b6 100644 --- a/exp/api/v1beta1/azuremanagedcontrolplane_types.go +++ b/exp/api/v1beta1/azuremanagedcontrolplane_types.go @@ -41,7 +41,7 @@ type AzureManagedControlPlaneSpec struct { ResourceGroupName string `json:"resourceGroupName"` // NodeResourceGroupName is the name of the resource group - // containining cluster IaaS resources. Will be populated to default + // containing cluster IaaS resources. Will be populated to default // in webhook. // +optional NodeResourceGroupName string `json:"nodeResourceGroupName,omitempty"` diff --git a/exp/api/v1beta1/azuremanagedcontrolplane_webhook_test.go b/exp/api/v1beta1/azuremanagedcontrolplane_webhook_test.go index ce377a86728..213202becb2 100644 --- a/exp/api/v1beta1/azuremanagedcontrolplane_webhook_test.go +++ b/exp/api/v1beta1/azuremanagedcontrolplane_webhook_test.go @@ -112,7 +112,7 @@ func TestValidatingWebhook(t *testing.T) { expectErr: true, }, { - name: "not following the kuberntes Version pattern", + name: "not following the Kubernetes Version pattern", amcp: AzureManagedControlPlane{ Spec: AzureManagedControlPlaneSpec{ DNSServiceIP: pointer.StringPtr("192.168.0.0"), @@ -631,7 +631,6 @@ func TestAzureManagedControlPlane_ValidateUpdate(t *testing.T) { Spec: AzureManagedControlPlaneSpec{ Version: "v1.18.0", AADProfile: &AADProfile{ - Managed: false, AdminGroupObjectIDs: []string{ "616077a8-5db7-4c98-b856-b34619afg75h", diff --git a/hack/observability/prometheus/resources/bundle.yaml b/hack/observability/prometheus/resources/bundle.yaml index 9d8e2b9b495..2fce683d225 100644 --- a/hack/observability/prometheus/resources/bundle.yaml +++ b/hack/observability/prometheus/resources/bundle.yaml @@ -4052,7 +4052,7 @@ spec: description: Define which Nodes the Pods are scheduled on. 
type: object paused: - description: If set to true all actions on the underlaying managed + description: If set to true all actions on the underlying managed objects are not goint to be performed, except for delete actions. type: boolean podMetadata: @@ -5921,7 +5921,7 @@ spec: format: int32 type: integer paused: - description: Represents whether any actions on the underlaying managed + description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. type: boolean replicas: @@ -13021,7 +13021,7 @@ spec: format: int32 type: integer paused: - description: Represents whether any actions on the underlaying managed + description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. type: boolean replicas: diff --git a/hack/util.sh b/hack/util.sh index 2daf8a4f2eb..b677101d1b8 100755 --- a/hack/util.sh +++ b/hack/util.sh @@ -31,7 +31,7 @@ capz::util::should_build_kubernetes() { if [[ -n "${TEST_K8S:-}" ]]; then echo "true" && return fi - # JOB_TYPE, REPO_OWNER, and REPO_NAME are enviornment variables set by a prow job - + # JOB_TYPE, REPO_OWNER, and REPO_NAME are environment variables set by a prow job - # https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables if [[ "${JOB_TYPE:-}" == "presubmit" ]] && [[ "${REPO_OWNER:-}/${REPO_NAME:-}" == "kubernetes/kubernetes" ]]; then echo "true" && return diff --git a/scripts/ci-build-kubernetes.sh b/scripts/ci-build-kubernetes.sh index 9a8c9a0e557..d5fe6f0683f 100755 --- a/scripts/ci-build-kubernetes.sh +++ b/scripts/ci-build-kubernetes.sh @@ -33,7 +33,7 @@ source "${REPO_ROOT}/hack/parse-prow-creds.sh" : "${AZURE_STORAGE_ACCOUNT:?Environment variable empty or not defined.}" : "${AZURE_STORAGE_KEY:?Environment variable empty or not defined.}" : "${REGISTRY:?Environment variable empty or not defined.}" -# JOB_NAME is an enviornment variable set by a 
prow job - +# JOB_NAME is an environment variable set by a prow job - # https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables : "${JOB_NAME:?Environment variable empty or not defined.}" diff --git a/templates/flavors/aks/cluster-template.yaml b/templates/flavors/aks/cluster-template.yaml index 0b33da3fc36..5d8274b2934 100644 --- a/templates/flavors/aks/cluster-template.yaml +++ b/templates/flavors/aks/cluster-template.yaml @@ -42,7 +42,7 @@ kind: AzureManagedCluster metadata: name: ${CLUSTER_NAME} --- -# We provision a default machine pool with no boostrap data (AKS will provide it). +# We provision a default machine pool with no bootstrap data (AKS will provide it). # We specify an AzureManagedMachinePool as the infrastructure machine it, which # will be reflected in Azure as VMSS node pools attached to an AKS cluster. apiVersion: cluster.x-k8s.io/v1beta1 diff --git a/templates/test/ci/cluster-template-prow-identity-from-env.yaml b/templates/test/ci/cluster-template-prow-identity-from-env.yaml index ca06b37771a..cbd8a237147 100644 --- a/templates/test/ci/cluster-template-prow-identity-from-env.yaml +++ b/templates/test/ci/cluster-template-prow-identity-from-env.yaml @@ -785,7 +785,7 @@ data: description: 'BPFExtToServiceConnmark in BPF mode, control a 32bit mark that is set on connections from an external client to a local service. This mark allows us to control how packets of that connection - are routed within the host and how is routing intepreted by RPF + are routed within the host and how is routing interpreted by RPF check. 
[Default: 0]' type: integer bpfExternalServiceMode: diff --git a/templates/test/dev/cluster-template-custom-builds-windows.yaml b/templates/test/dev/cluster-template-custom-builds-windows.yaml index eb1c3ae8733..696da355156 100644 --- a/templates/test/dev/cluster-template-custom-builds-windows.yaml +++ b/templates/test/dev/cluster-template-custom-builds-windows.yaml @@ -430,8 +430,8 @@ spec: # We are using a VHD that maps to v1.18.19 so the kubeproxy image is already pulled. (pull it just in case) # Tag it to the ci_version which is the version set when kicking off the CI builds and doesn't match the KUBE_GIT_VERSION - # but matches the kubeproxy image tag when it gets generated. The image configuraiton knows how to use the binary locally. - # This does mean the image tage will not match the verison of the binary running. + # but matches the kubeproxy image tag when it gets generated. The image configuration knows how to use the binary locally. + # This does mean the image tag will not match the version of the binary running. # See: # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/templates/addons/windows/kube-proxy-windows.yaml#L60 # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/scripts/ci-build-kubernetes.sh#L54-L59 diff --git a/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml b/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml index c62fe55e4e2..750f3862b2e 100644 --- a/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml +++ b/templates/test/dev/custom-builds-windows/patches/custom-builds-windows.yaml @@ -14,8 +14,8 @@ # We are using a VHD that maps to v1.18.19 so the kubeproxy image is already pulled. 
(pull it just in case) # Tag it to the ci_version which is the version set when kicking off the CI builds and doesn't match the KUBE_GIT_VERSION - # but matches the kubeproxy image tag when it gets generated. The image configuraiton knows how to use the binary locally. - # This does mean the image tage will not match the verison of the binary running. + # but matches the kubeproxy image tag when it gets generated. The image configuration knows how to use the binary locally. + # This does mean the image tag will not match the version of the binary running. # See: # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/templates/addons/windows/kube-proxy-windows.yaml#L60 # https://github.com/kubernetes-sigs/cluster-api-provider-azure/blob/529dbb507962a52ee9fd5a56f3d3856b9bcc53c1/scripts/ci-build-kubernetes.sh#L54-L59 diff --git a/test/e2e/aks.go b/test/e2e/aks.go index e8e89f6498d..48665aefd00 100644 --- a/test/e2e/aks.go +++ b/test/e2e/aks.go @@ -69,7 +69,7 @@ type DiscoverAndWaitForControlPlaneMachinesInput struct { } // DiscoverAndWaitForControlPlaneInitialized gets the azure managed control plane associated with the cluster, -// and waits for atleast one control plane machine to be up. +// and waits for at least one control plane machine to be up. func DiscoverAndWaitForControlPlaneInitialized(ctx context.Context, input DiscoverAndWaitForControlPlaneMachinesInput, intervals ...interface{}) { Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoverAndWaitForControlPlaneInitialized") Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForControlPlaneInitialized") @@ -145,9 +145,9 @@ type WaitForControlPlaneAndMachinesReadyInput struct { Namespace string } -// WaitForAtLeastOneControlPlaneAndMachineToExist waits for atleast one control plane machine to be provisioned. 
+// WaitForAtLeastOneControlPlaneAndMachineToExist waits for at least one control plane machine to be provisioned. func WaitForAtLeastOneControlPlaneAndMachineToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) { - By("Waiting for atleast one control plane node to exist") + By("Waiting for at least one control plane node to exist") WaitForControlPlaneMachinesToExist(ctx, input, atLeastOne, intervals...) } diff --git a/test/e2e/azure_selfhosted.go b/test/e2e/azure_selfhosted.go index 3eb64365b08..78e4b5411d5 100644 --- a/test/e2e/azure_selfhosted.go +++ b/test/e2e/azure_selfhosted.go @@ -101,7 +101,7 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput) Expect(os.Setenv(ClusterIdentitySecretNamespace, namespace.Name)).NotTo(HaveOccurred()) }) - // Management clusters do not support Windows nodes becuase of cert manager + // Management clusters do not support Windows nodes because of cert manager // We are using the capi specs located in test/e2e/data/infrastructure-azure/v1beta1 that only have linux nodes // to act as the management cluster until Windows nodes are supported for management nodes // Tracking support for cert manager: https://github.com/jetstack/cert-manager/issues/3606 diff --git a/test/e2e/data/infrastructure-azure/v1alpha4/cluster-template-prow.yaml b/test/e2e/data/infrastructure-azure/v1alpha4/cluster-template-prow.yaml index 53a19b865b7..5509ffe2fa3 100644 --- a/test/e2e/data/infrastructure-azure/v1alpha4/cluster-template-prow.yaml +++ b/test/e2e/data/infrastructure-azure/v1alpha4/cluster-template-prow.yaml @@ -541,7 +541,7 @@ data: 'BPFExtToServiceConnmark in BPF mode, control a 32bit\n mark that is set on connections from an external client to a local\n service. This mark allows us to control how packets of that connection\n are - routed within the host and how is routing intepreted by RPF\n check. 
+ routed within the host and how is routing interpreted by RPF\n check. [Default: 0]'\n type: integer\n bpfExternalServiceMode:\n \ description: 'BPFExternalServiceMode in BPF mode, controls how connections\n from outside the cluster to services (node ports diff --git a/test/e2e/kubernetes/node/node.go b/test/e2e/kubernetes/node/node.go index 19d32dee488..a3b47627f28 100644 --- a/test/e2e/kubernetes/node/node.go +++ b/test/e2e/kubernetes/node/node.go @@ -63,7 +63,7 @@ func GetWindowsVersion(ctx context.Context, clientset *kubernetes.Clientset) (wi kernalVersion := result.Items[0].Status.NodeInfo.KernelVersion kernalVersions := strings.Split(kernalVersion, ".") if len(kernalVersions) != 4 { - return windows.Unknown, fmt.Errorf("Not a valid Windows kernal version: %s", kernalVersion) + return windows.Unknown, fmt.Errorf("Not a valid Windows kernel version: %s", kernalVersion) } switch kernalVersions[2] {