@@ -8,10 +8,25 @@ resource "libvirt_pool" "cluster" {
  path = var.pool_path
}

+ resource "random_password" "kubeadm_token" {
+   length  = 22
+   special = false
+   upper   = false
+ }
+
+ resource "random_id" "kubeadm_certificate_key" {
+   byte_length = 32
+ }
+
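+ # Sizing note: a kubeadm bootstrap token has the form [a-z0-9]{6}.[a-z0-9]{16},
+ # so the 22 lowercase alphanumeric characters generated above are split 6+16
+ # in the locals below, and `kubeadm init phase upload-certs` expects a
+ # 32-byte key encoded as hex.
+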
locals {
-   controleplane_network = var.networks[0]
-   controleplane_ips     = [for i in range(2, 2 + var.controlplane_count) : cidrhost(local.controleplane_network, i)]
-   domainname            = "k8s.lab"
+   controleplane_network      = var.networks[0]
+   controleplane_ips          = [for i in range(2, 2 + var.controlplane_count) : cidrhost(local.controleplane_network, i)]
+   domainname                 = "k8s.lab"
+   cluster_endpoint           = "cluster-endpoint.${local.domainname}"
+   cluster_endpoint_with_user = "${var.ssh_admin}@${local.cluster_endpoint}"
+   kubeadm_token_id           = substr(random_password.kubeadm_token.result, 0, 6)
+   kubeadm_token              = join(".", [local.kubeadm_token_id, substr(random_password.kubeadm_token.result, 6, 16)])
+   kubeadm_certificate_key    = random_id.kubeadm_certificate_key.hex
}
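+ # Worked example (illustrative CIDR): with var.networks[0] = "10.17.3.0/24"
+ # and controlplane_count = 3, cidrhost yields ["10.17.3.2", "10.17.3.3",
+ # "10.17.3.4"]; the range starts at 2, presumably leaving .1 to the gateway.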
resource "libvirt_network" "default" {
@@ -24,6 +39,13 @@ resource "libvirt_network" "default" {
      ip       = local.controleplane_ips[0]
      hostname = "cluster-endpoint"
    }
+     dynamic "hosts" {
+       for_each = local.controleplane_ips
+       content {
+         ip       = hosts.value
+         hostname = format("controlplane-%02d", hosts.key + 1)
+       }
+     }
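+     # Publishes one A record per node (controlplane-01, controlplane-02, ...)
+     # through libvirt's dnsmasq, next to the shared cluster-endpoint record
+     # above; hosts.key is the zero-based list index, hence the +1.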
  }
}

@@ -58,37 +80,63 @@ module "control_plane" {
    file("${var.ssh_private_key}.pub"),
  ]
}
-
- resource "ssh_resource" "control_plane" {
+ resource "ssh_resource" "control_plane_certs" {
  host        = module.control_plane.ip_address[0]
  user        = var.ssh_admin
  private_key = var.ssh_private_key
-   timeout     = "10m"
+   timeout     = "1m"

-   file {
-     source      = "preinstall-kubeadm.sh"
-     destination = "/tmp/preinstall-kubeadm.sh"
-     permissions = "0700"
+   triggers = {
+     count_changes = length(local.controleplane_ips)
  }
+   commands = [
+     "sudo kubeadm init phase upload-certs --upload-certs --certificate-key ${local.kubeadm_certificate_key}",
+     "sudo kubeadm token create ${local.kubeadm_token} || true",
+   ]
+ }
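+ # `upload-certs` re-uploads the control-plane certificates as the
+ # kubeadm-certs Secret, encrypted with the certificate key (the Secret
+ # expires after ~2h); the trigger reruns both commands whenever the
+ # control-plane count changes, and `|| true` tolerates a token that
+ # already exists.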
+ resource "ssh_resource" "control_plane" {
+   count       = length(local.controleplane_ips)
+   host        = module.control_plane.ip_address[count.index]
+   user        = var.ssh_admin
+   private_key = var.ssh_private_key
+
+   commands = [
+     "sudo /usr/local/bin/install-kubeadm.sh cluster-endpoint.k8s.lab:6443 ${local.kubeadm_token} ${local.kubeadm_certificate_key} --control-plane --discovery-token-unsafe-skip-ca-verification"
+   ]
+ }
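+ # --discovery-token-unsafe-skip-ca-verification skips CA pinning on join,
+ # which is fine for a throwaway lab; a hardened setup would pass
+ # --discovery-token-ca-cert-hash sha256:<hash> instead.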
+
+ resource "ssh_resource" "control_plane_destroy" {
+   count       = length(local.controleplane_ips)
+   host        = module.control_plane.ip_address[count.index]
+   user        = var.ssh_admin
+   private_key = var.ssh_private_key
+   when        = "destroy"
+   timeout     = "30s"

  file {
-     # content = "sudo kubeadm --token qjtm24.wnu9yb6ls0zl9zkx --discovery-token-ca-cert-hash --control-plane cluster-endpoint.k8s.local"
-     content     = "sudo kubeadm init --control-plane-endpoint=cluster-endpoint.k8s.local"
-     destination = "/tmp/install-kubeadm.sh"
+     source      = "remove-node.sh"
+     destination = "/tmp/remove-node.sh"
    permissions = "0700"
  }

  commands = [
-     "/tmp/preinstall-kubeadm.sh",
-     "/tmp/install-kubeadm.sh"
+     "sudo /tmp/remove-node.sh"
  ]
}

- # kubeadm init --control-plane-endpoint=cluster-endpoint --token [a-z0-9]{6}.[a-z0-9]{16}
+ # kubeadm init phase upload-certs --upload-certs --certificate-key d9456efcc50c12d8f5fff93c097a16d2495fb5df9cb17cd2fd26f8022a926af4
+ # kubeadm token create qahkjs.ru8katsu52fep1ea
+
+ # # kubectl cordon controlplane-02
+ # kubectl drain controlplane-02 --ignore-daemonsets
+ # kubectl delete node controlplane-02
+
+ # sudo etcdctl --endpoints=127.0.0.1:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt endpoint status
+ # sudo etcdctl --endpoints=cluster-endpoint.k8s.lab:2379 --key /etc/kubernetes/pki/etcd/healthcheck-client.key --cert /etc/kubernetes/pki/etcd/healthcheck-client.crt --cacert /etc/kubernetes/pki/etcd/ca.crt member remove c7b9a74f4a348e3d
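+ # Assumption: remove-node.sh (its contents are not shown in this diff)
+ # automates the manual drain/delete and etcd member-removal steps
+ # sketched above.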
output "outputs" {
  value = module.control_plane
}
- # output "run" {
- #   value = ssh_resource.control_plane.result
- # }
+ output "run" {
+   value = ssh_resource.control_plane[*].result
+ }