
Commit 3ce9958

Working scalable controlplane cluster
1 parent 12562ad commit 3ce9958

9 files changed, +212 -34 lines changed

.terraform.lock.hcl

+19
Some generated files are not rendered by default.

main.tf

+67 -19
@@ -8,10 +8,25 @@ resource "libvirt_pool" "cluster" {
   path = var.pool_path
 }
 
+resource "random_password" "kubeadm_token" {
+  length  = 22
+  special = false
+  upper   = false
+}
+
+resource "random_id" "kubeadm_certificate_key" {
+  byte_length = 32
+}
+
 locals {
-  controleplane_network = var.networks[0]
-  controleplane_ips     = [for i in range(2, 2 + var.controlplane_count) : cidrhost(local.controleplane_network, i)]
-  domainname            = "k8s.lab"
+  controleplane_network      = var.networks[0]
+  controleplane_ips          = [for i in range(2, 2 + var.controlplane_count) : cidrhost(local.controleplane_network, i)]
+  domainname                 = "k8s.lab"
+  cluster_endpoint           = "cluster-endpoint.${local.domainname}"
+  cluster_endpoint_with_user = "${var.ssh_admin}@${local.cluster_endpoint}"
+  kubeadm_token_id           = substr(random_password.kubeadm_token.result, 0, 6)
+  kubeadm_token              = join(".", [local.kubeadm_token_id, substr(random_password.kubeadm_token.result, 6, 16)])
+  kubeadm_certificate_key    = random_id.kubeadm_certificate_key.hex
 }
 
 resource "libvirt_network" "default" {
@@ -24,6 +39,13 @@ resource "libvirt_network" "default" {
       ip       = local.controleplane_ips[0]
       hostname = "cluster-endpoint"
     }
+    dynamic "hosts" {
+      for_each = local.controleplane_ips
+      content {
+        ip       = hosts.value
+        hostname = format("controlplane-%02d", hosts.key + 1)
+      }
+    }
   }
 }

@@ -58,37 +80,63 @@ module "control_plane" {
     file("${var.ssh_private_key}.pub"),
   ]
 }
-
-resource "ssh_resource" "control_plane" {
+resource "ssh_resource" "control_plane_certs" {
   host        = module.control_plane.ip_address[0]
   user        = var.ssh_admin
   private_key = var.ssh_private_key
-  timeout     = "10m"
+  timeout     = "1m"
 
-  file {
-    source      = "preinstall-kubeadm.sh"
-    destination = "/tmp/preinstall-kubeadm.sh"
-    permissions = "0700"
+  triggers = {
+    count_changes = length(local.controleplane_ips)
   }
+  commands = [
+    "sudo kubeadm init phase upload-certs --upload-certs --certificate-key ${local.kubeadm_certificate_key}",
+    "sudo kubeadm token create ${local.kubeadm_token} || true",
+  ]
+}
+resource "ssh_resource" "control_plane" {
+  count       = length(local.controleplane_ips)
+  host        = module.control_plane.ip_address[count.index]
+  user        = var.ssh_admin
+  private_key = var.ssh_private_key
+
+  commands = [
+    "sudo /usr/local/bin/install-kubeadm.sh cluster-endpoint.k8s.lab:6443 ${local.kubeadm_token} ${local.kubeadm_certificate_key} --control-plane --discovery-token-unsafe-skip-ca-verification"
+  ]
+}
+
+resource "ssh_resource" "control_plane_destroy" {
+  count       = length(local.controleplane_ips)
+  host        = module.control_plane.ip_address[count.index]
+  user        = var.ssh_admin
+  private_key = var.ssh_private_key
+  when        = "destroy"
+  timeout     = "30s"
 
   file {
-    #content = "sudo kubeadm --token qjtm24.wnu9yb6ls0zl9zkx --discovery-token-ca-cert-hash --control-plane cluster-endpoint.k8s.local"
-    content     = "sudo kubeadm init --control-plane-endpoint=cluster-endpoint.k8s.local"
-    destination = "/tmp/install-kubeadm.sh"
+    source      = "remove-node.sh"
+    destination = "/tmp/remove-node.sh"
     permissions = "0700"
   }
 
   commands = [
-    "/tmp/preinstall-kubeadm.sh",
-    "/tmp/install-kubeadm.sh"
+    "sudo /tmp/remove-node.sh"
   ]
 }
 
-# kubeadm init --control-plane-endpoint=cluster-endpoint --token [a-z0-9]{6}.[a-z0-9]{16}
+# kubeadm init phase upload-certs --upload-certs --certificate-key d9456efcc50c12d8f5fff93c097a16d2495fb5df9cb17cd2fd26f8022a926af4
+# kubeadm token create qahkjs.ru8katsu52fep1ea
+
+## kubectl cordon controlplane-02
+# kubectl drain controlplane-02 --ignore-daemonsets
+# kubectl delete node controlplane-02
+
+# sudo etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint status
+# sudo etcdctl --endpoints=cluster-endpoint.k8s.lab:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member remove c7b9a74f4a348e3d
 
 output "outputs" {
   value = module.control_plane
 }
-#output "run" {
-#  value = ssh_resource.control_plane.result
-#}
+output "run" {
+  value = ssh_resource.control_plane[*].result
+}
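Reviewer note, not part of the commit: the locals above slice the 22-character random password into the shape kubeadm requires for bootstrap tokens, a 6-character token ID and a 16-character secret joined by a dot. A minimal bash sketch, reusing the example token already quoted in the comments:

# token value copied from the comment in main.tf; any [a-z0-9]{6}.[a-z0-9]{16} string is valid
TOKEN="qahkjs.ru8katsu52fep1ea"
# kubeadm rejects bootstrap tokens that do not match this pattern
[[ "$TOKEN" =~ ^[a-z0-9]{6}\.[a-z0-9]{16}$ ]] && echo "token format ok"
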
modules/vm/templates/10-containerd-net.conflist

+33
@@ -0,0 +1,33 @@
+{
+  "cniVersion": "1.0.0",
+  "name": "containerd-net",
+  "plugins": [
+    {
+      "type": "bridge",
+      "bridge": "cni0",
+      "isGateway": true,
+      "ipMasq": true,
+      "promiscMode": true,
+      "ipam": {
+        "type": "host-local",
+        "ranges": [
+          [{
+            "subnet": "10.88.0.0/16"
+          }],
+          [{
+            "subnet": "2001:4860:4860::/64"
+          }]
+        ],
+        "routes": [
+          { "dst": "0.0.0.0/0" },
+          { "dst": "::/0" }
+        ]
+      }
+    },
+    {
+      "type": "portmap",
+      "capabilities": {"portMappings": true},
+      "externalSetMarkChain": "KUBE-MARK-MASQ"
+    }
+  ]
+}
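Reviewer note, not part of the commit: this conflist references the bridge, host-local and portmap CNI plugins, which containerd is told to look for under /usr/lib/cni in the config below (the path the containernetworking-plugins package added in cloud_init.tpl is assumed to populate). A quick sanity check on a provisioned node might look like:

# confirm the plugin binaries named in the conflist are installed where bin_dir points
ls /usr/lib/cni | grep -E 'bridge|host-local|portmap'
# confirm the file written by cloud-init is well-formed JSON (assumes jq is available)
jq . /etc/cni/net.d/10-containerd-net.conflist > /dev/null && echo "conflist ok"
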

modules/vm/templates/cloud_init.tpl

+16
@@ -25,6 +25,7 @@ packages:
   - kubectl
   - containerd
   - containernetworking-plugins
+  - etcd-client
 
 runcmd:
 ${runcmd}
@@ -98,6 +99,21 @@
       content: "br_netfilter"
     - path: /etc/sysctl.d/50-kubeadm.conf
       content: "net.ipv4.ip_forward = 1"
+    - path: /etc/crictl.yaml
+      content: |
+        runtime-endpoint: unix:///var/run/containerd/containerd.sock
+        image-endpoint: unix:///var/run/containerd/containerd.sock
+    - path: /etc/containerd/config.toml
+      content: |
+        ${ indent(8, file("${path}/templates/containerd-config.toml")) }
+    - path: /etc/cni/net.d/10-containerd-net.conflist
+      content: |
+        ${ indent(8, file("${path}/templates/10-containerd-net.conflist")) }
+    - path: /usr/local/bin/install-kubeadm.sh
+      permissions: 0o755
+      content: |
+        ${ indent(8, file("${path}/templates/install-kubeadm.sh")) }
+
 growpart:
   mode: auto
   devices:
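Reviewer note, not part of the commit: /etc/crictl.yaml points crictl at containerd's CRI socket, so CRI-level checks need no --runtime-endpoint flag on these nodes. A rough verification sketch:

# crictl picks up /etc/crictl.yaml automatically
sudo crictl info > /dev/null && echo "containerd CRI reachable"
sudo crictl ps   # lists running containers once kubelet has started pods
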
modules/vm/templates/containerd-config.toml

+34
@@ -0,0 +1,34 @@
+version = 2
+
+[plugins]
+  [plugins."io.containerd.grpc.v1.cri"]
+    sandbox_image = "registry.k8s.io/pause:3.9"
+    [plugins."io.containerd.grpc.v1.cri".cni]
+      bin_dir = "/usr/lib/cni"
+      conf_dir = "/etc/cni/net.d"
+  [plugins."io.containerd.internal.v1.opt"]
+    path = "/var/lib/containerd/opt"
+  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+    base_runtime_spec = ""
+    cni_conf_dir = ""
+    cni_max_conf_num = 0
+    container_annotations = []
+    pod_annotations = []
+    privileged_without_host_devices = false
+    runtime_engine = ""
+    runtime_path = ""
+    runtime_root = ""
+    runtime_type = "io.containerd.runc.v2"
+
+    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+      BinaryName = ""
+      CriuImagePath = ""
+      CriuPath = ""
+      CriuWorkPath = ""
+      IoGid = 0
+      IoUid = 0
+      NoNewKeyring = false
+      NoPivotRoot = false
+      Root = ""
+      ShimCgroup = ""
+      SystemdCgroup = true
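Reviewer note, not part of the commit: SystemdCgroup = true keeps the runc shim on the systemd cgroup driver, matching the kubelet default that kubeadm configures on recent releases. A rough check on a node, assuming the containerd CLI is on PATH:

# the file is rendered onto the node by cloud-init from this template
grep SystemdCgroup /etc/containerd/config.toml
# containerd can also print its effective merged configuration
containerd config dump | grep SystemdCgroup
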
modules/vm/templates/install-kubeadm.sh

+25
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+set -e
+
+apt-mark hold kubelet kubeadm kubectl
+
+modprobe br_netfilter
+sysctl --system
+
+# don't rerun install when not needed
+[ -e /etc/kubernetes/admin.conf ] && exit 0
+
+ENDPOINT=$1
+TOKEN=$2
+CERT_KEY=$3
+OTHER_JOIN_ARGS=${*:4}
+
+INIT=0
+if [ `hostname` = 'controlplane-01' ]; then INIT=1 ; fi
+
+if [ $INIT -eq 1 ]; then
+  kubeadm init --control-plane-endpoint=$ENDPOINT --upload-certs --certificate-key $CERT_KEY --token $TOKEN
+else
+  kubeadm join $ENDPOINT --certificate-key $CERT_KEY --token $TOKEN $OTHER_JOIN_ARGS
+fi
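Reviewer note, not part of the commit: this script is what ssh_resource.control_plane in main.tf runs on every node; controlplane-01 bootstraps the cluster with kubeadm init, the remaining controlplanes join it. An invocation sketch, using the example token and certificate key quoted in main.tf's comments:

# on controlplane-01 this ends up calling `kubeadm init`, elsewhere `kubeadm join`
sudo /usr/local/bin/install-kubeadm.sh \
  cluster-endpoint.k8s.lab:6443 \
  qahkjs.ru8katsu52fep1ea \
  d9456efcc50c12d8f5fff93c097a16d2495fb5df9cb17cd2fd26f8022a926af4 \
  --control-plane --discovery-token-unsafe-skip-ca-verification
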

preinstall-kubeadm.sh

-15
This file was deleted.

remove-node.sh

+14
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+NODE_NAME=`hostname`
+
+export KUBECONFIG=/etc/kubernetes/admin.conf
+
+kubectl drain $NODE_NAME --delete-emptydir-data --force --ignore-daemonsets
+kubectl delete node $NODE_NAME
+
+ETCD_ID=$(etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint status | awk -F, '{print $2}')
+
+etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member remove $ETCD_ID
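Reviewer note, not part of the commit: etcdctl endpoint status prints one comma-separated line per endpoint, and field 2 is the member ID, which is what the awk -F, '{print $2}' above extracts before member remove. A small sketch with an illustrative status line (only the member ID is taken from main.tf's comments; the other values are placeholders):

# same parsing as the script; prints " c7b9a74f4a348e3d" (the field separator leaves a leading space)
echo "127.0.0.1:2379, c7b9a74f4a348e3d, 3.5.9, 25 MB, false, false, 3, 42, 42," | awk -F, '{print $2}'
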

versions.tf

+4
@@ -1,6 +1,10 @@
 terraform {
   required_version = ">= 0.13"
   required_providers {
+    random = {
+      source  = "hashicorp/random"
+      version = "3.5.1"
+    }
     libvirt = {
       source  = "dmacvicar/libvirt"
       version = ">= 0.7.0"

0 commit comments
