Skip to content

Commit 43b9c00

Browse files
author
sangam14
committed
update
1 parent 1872913 commit 43b9c00

File tree

1 file changed

+289
-32
lines changed

1 file changed

+289
-32
lines changed

content/docs/kubernetes/HA-cluster.md

Lines changed: 289 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -16,56 +16,313 @@ High availability in the control plane is crucial but only part of achieving ove
1616

1717
Kubeadm simplifies the process of expanding your Kubernetes cluster by adding more nodes. To join a new node to the cluster, you first need to generate a join command that includes the API server's address, a unique join token, and the SHA-256 hash of the cluster's certificate authority (CA) certificate. You can generate it by running the following on an existing control plane node:
1818

19-
```bash
20-
19+
#### master node
2120

21+
```
22+
sangam@sangam:~$
2223
kubeadm token create --print-join-command
23-
24-
kubeadm join 192.168.100.100:6443 --token 3ua85a.rl5riytxhvc7fs1e --discovery-token-ca-cert-hash sha256:3d239f1c87cac3549334a91ed24580bea67e96cf78a4a83b20371af1c973922f
24+
kubeadm join 192.168.129.135:6443 --token wahoal.dhn8p2qvkavuq7ge --discovery-token-ca-cert-hash sha256:d401c9e8831da53d4f85bb2b027b1a48f60408d0c241f105677c86e061ab2b4f
2525
2626
```
2727

28-
Run this command on any additional nodes that meet the prerequisites mentioned earlier in this module:
28+
#### restart containerd
29+
30+
```
31+
sangam@sangam:~$ sudo systemctl restart containerd
2932
33+
sangam@sangam:~$ sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes
34+
NAME STATUS ROLES AGE VERSION
35+
sangam Ready control-plane 47m v1.29.4
3036
```
37+
#### set containerd config
3138
```
32-
workernode@workernode:~$ sudo rm /etc/kubernetes/kubelet.conf
33-
sudo rm /etc/kubernetes/bootstrap-kubelet.conf
34-
sudo rm /etc/kubernetes/pki/ca.crt
35-
workernode@workernode:~$ sudo ss -ltnp | grep :10250
36-
LISTEN 0 4096 *:10250 *:* users:(("kubelet",pid=23209,fd=20))
37-
workernode@workernode:~$ sudo systemctl stop kubelet
38-
sudo systemctl disable kubelet
39-
Removed /etc/systemd/system/multi-user.target.wants/kubelet.service.
40-
workernode@workernode:~$ sudo kubeadm reset
41-
W0418 18:46:14.265856 23698 preflight.go:56] [reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
42-
[reset] Are you sure you want to proceed? [y/N]: y
43-
[preflight] Running pre-flight checks
44-
W0418 18:46:15.832641 23698 removeetcdmember.go:106] [reset] No kubeadm config, using etcd pod spec to get data directory
45-
[reset] Deleted contents of the etcd data directory: /var/lib/etcd
46-
[reset] Stopping the kubelet service
47-
[reset] Unmounting mounted directories in "/var/lib/kubelet"
48-
[reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
49-
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/super-admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
39+
sangam@sangam:~$ sudo mkdir -p /etc/containerd
40+
sudo containerd config default | sudo tee /etc/containerd/config.toml
41+
disabled_plugins = []
42+
imports = []
43+
oom_score = 0
44+
plugin_dir = ""
45+
required_plugins = []
46+
root = "/var/lib/containerd"
47+
state = "/run/containerd"
48+
temp = ""
49+
version = 2
50+
51+
[cgroup]
52+
path = ""
53+
54+
[debug]
55+
address = ""
56+
format = ""
57+
gid = 0
58+
level = ""
59+
uid = 0
60+
61+
[grpc]
62+
address = "/run/containerd/containerd.sock"
63+
gid = 0
64+
max_recv_message_size = 16777216
65+
max_send_message_size = 16777216
66+
tcp_address = ""
67+
tcp_tls_ca = ""
68+
tcp_tls_cert = ""
69+
tcp_tls_key = ""
70+
uid = 0
71+
72+
[metrics]
73+
address = ""
74+
grpc_histogram = false
75+
76+
[plugins]
77+
78+
[plugins."io.containerd.gc.v1.scheduler"]
79+
deletion_threshold = 0
80+
mutation_threshold = 100
81+
pause_threshold = 0.02
82+
schedule_delay = "0s"
83+
startup_delay = "100ms"
84+
85+
[plugins."io.containerd.grpc.v1.cri"]
86+
device_ownership_from_security_context = false
87+
disable_apparmor = false
88+
disable_cgroup = false
89+
disable_hugetlb_controller = true
90+
disable_proc_mount = false
91+
disable_tcp_service = true
92+
drain_exec_sync_io_timeout = "0s"
93+
enable_selinux = false
94+
enable_tls_streaming = false
95+
enable_unprivileged_icmp = false
96+
enable_unprivileged_ports = false
97+
ignore_deprecation_warnings = []
98+
ignore_image_defined_volumes = false
99+
max_concurrent_downloads = 3
100+
max_container_log_line_size = 16384
101+
netns_mounts_under_state_dir = false
102+
restrict_oom_score_adj = false
103+
sandbox_image = "registry.k8s.io/pause:3.6"
104+
selinux_category_range = 1024
105+
stats_collect_period = 10
106+
stream_idle_timeout = "4h0m0s"
107+
stream_server_address = "127.0.0.1"
108+
stream_server_port = "0"
109+
systemd_cgroup = false
110+
tolerate_missing_hugetlb_controller = true
111+
unset_seccomp_profile = ""
112+
113+
[plugins."io.containerd.grpc.v1.cri".cni]
114+
bin_dir = "/opt/cni/bin"
115+
conf_dir = "/etc/cni/net.d"
116+
conf_template = ""
117+
ip_pref = ""
118+
max_conf_num = 1
119+
120+
[plugins."io.containerd.grpc.v1.cri".containerd]
121+
default_runtime_name = "runc"
122+
disable_snapshot_annotations = true
123+
discard_unpacked_layers = false
124+
ignore_rdt_not_enabled_errors = false
125+
no_pivot = false
126+
snapshotter = "overlayfs"
127+
128+
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
129+
base_runtime_spec = ""
130+
cni_conf_dir = ""
131+
cni_max_conf_num = 0
132+
container_annotations = []
133+
pod_annotations = []
134+
privileged_without_host_devices = false
135+
runtime_engine = ""
136+
runtime_path = ""
137+
runtime_root = ""
138+
runtime_type = ""
139+
140+
[plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]
141+
142+
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
143+
144+
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
145+
base_runtime_spec = ""
146+
cni_conf_dir = ""
147+
cni_max_conf_num = 0
148+
container_annotations = []
149+
pod_annotations = []
150+
privileged_without_host_devices = false
151+
runtime_engine = ""
152+
runtime_path = ""
153+
runtime_root = ""
154+
runtime_type = "io.containerd.runc.v2"
155+
156+
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
157+
BinaryName = ""
158+
CriuImagePath = ""
159+
CriuPath = ""
160+
CriuWorkPath = ""
161+
IoGid = 0
162+
IoUid = 0
163+
NoNewKeyring = false
164+
NoPivotRoot = false
165+
Root = ""
166+
ShimCgroup = ""
167+
SystemdCgroup = false
168+
169+
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
170+
base_runtime_spec = ""
171+
cni_conf_dir = ""
172+
cni_max_conf_num = 0
173+
container_annotations = []
174+
pod_annotations = []
175+
privileged_without_host_devices = false
176+
runtime_engine = ""
177+
runtime_path = ""
178+
runtime_root = ""
179+
runtime_type = ""
180+
181+
[plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]
182+
183+
[plugins."io.containerd.grpc.v1.cri".image_decryption]
184+
key_model = "node"
185+
186+
[plugins."io.containerd.grpc.v1.cri".registry]
187+
config_path = ""
188+
189+
[plugins."io.containerd.grpc.v1.cri".registry.auths]
190+
191+
[plugins."io.containerd.grpc.v1.cri".registry.configs]
192+
193+
[plugins."io.containerd.grpc.v1.cri".registry.headers]
194+
195+
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
196+
197+
[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
198+
tls_cert_file = ""
199+
tls_key_file = ""
50200
51-
The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d
201+
[plugins."io.containerd.internal.v1.opt"]
202+
path = "/opt/containerd"
52203
53-
The reset process does not reset or clean up iptables rules or IPVS tables.
54-
If you wish to reset iptables, you must do so manually by using the "iptables" command.
204+
[plugins."io.containerd.internal.v1.restart"]
205+
interval = "10s"
55206
56-
If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
57-
to reset your system's IPVS tables.
207+
[plugins."io.containerd.internal.v1.tracing"]
208+
sampling_ratio = 1.0
209+
service_name = "containerd"
58210
59-
The reset process does not clean your kubeconfig files and you must remove them manually.
60-
Please, check the contents of the $HOME/.kube/config file.
61-
workernode@workernode:~$ sudo kubeadm join 192.168.129.135:6443 --token dfs0h9.pru6ez9v84qbw98k --discovery-token-ca-cert-hash sha256:27e8c63c7355d79dd2b0dc98dadcd46e87b3ef05ab181caaddf2c1b2488ae474
211+
[plugins."io.containerd.metadata.v1.bolt"]
212+
content_sharing_policy = "shared"
213+
214+
[plugins."io.containerd.monitor.v1.cgroups"]
215+
no_prometheus = false
216+
217+
[plugins."io.containerd.runtime.v1.linux"]
218+
no_shim = false
219+
runtime = "runc"
220+
runtime_root = ""
221+
shim = "containerd-shim"
222+
shim_debug = false
223+
224+
[plugins."io.containerd.runtime.v2.task"]
225+
platforms = ["linux/arm64/v8"]
226+
sched_core = false
227+
228+
[plugins."io.containerd.service.v1.diff-service"]
229+
default = ["walking"]
230+
231+
[plugins."io.containerd.service.v1.tasks-service"]
232+
rdt_config_file = ""
233+
234+
[plugins."io.containerd.snapshotter.v1.aufs"]
235+
root_path = ""
236+
237+
[plugins."io.containerd.snapshotter.v1.btrfs"]
238+
root_path = ""
239+
240+
[plugins."io.containerd.snapshotter.v1.devmapper"]
241+
async_remove = false
242+
base_image_size = ""
243+
discard_blocks = false
244+
fs_options = ""
245+
fs_type = ""
246+
pool_name = ""
247+
root_path = ""
248+
249+
[plugins."io.containerd.snapshotter.v1.native"]
250+
root_path = ""
251+
252+
[plugins."io.containerd.snapshotter.v1.overlayfs"]
253+
mount_options = []
254+
root_path = ""
255+
sync_remove = false
256+
upperdir_label = false
257+
258+
[plugins."io.containerd.snapshotter.v1.zfs"]
259+
root_path = ""
260+
261+
[plugins."io.containerd.tracing.processor.v1.otlp"]
262+
endpoint = ""
263+
insecure = false
264+
protocol = ""
265+
266+
[proxy_plugins]
267+
268+
[stream_processors]
269+
270+
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"]
271+
accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"]
272+
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
273+
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
274+
path = "ctd-decoder"
275+
returns = "application/vnd.oci.image.layer.v1.tar"
276+
277+
[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"]
278+
accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"]
279+
args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"]
280+
env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"]
281+
path = "ctd-decoder"
282+
returns = "application/vnd.oci.image.layer.v1.tar+gzip"
283+
284+
[timeouts]
285+
"io.containerd.timeout.bolt.open" = "0s"
286+
"io.containerd.timeout.shim.cleanup" = "5s"
287+
"io.containerd.timeout.shim.load" = "5s"
288+
"io.containerd.timeout.shim.shutdown" = "3s"
289+
"io.containerd.timeout.task.state" = "2s"
290+
291+
[ttrpc]
292+
address = ""
293+
gid = 0
294+
uid = 0
295+
sangam@sangam:~$ sudo sed -i 's/ SystemdCgroup = false/ SystemdCgroup = true/' /etc/containerd/config.toml
296+
sangam@sangam:~$ sudo systemctl restart containerd
297+
```
298+
299+
300+
#### worker node
301+
302+
```
303+
k8s@k8s:~$ sudo kubeadm join 192.168.129.135:6443 --token wahoal.dhn8p2qvkavuq7ge --discovery-token-ca-cert-hash sha256:d401c9e8831da53d4f85bb2b027b1a48f60408d0c241f105677c86e061ab2b4f
62304
[preflight] Running pre-flight checks
63-
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
64305
[preflight] Reading configuration from the cluster...
65306
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
66307
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
67308
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
68309
[kubelet-start] Starting the kubelet
69310
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
70311
312+
This node has joined the cluster:
313+
* Certificate signing request was sent to apiserver and a response was received.
314+
* The Kubelet was informed of the new secure connection details.
315+
316+
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
317+
71318
```
319+
320+
### check master node
321+
322+
```
323+
324+
sangam@sangam:~$ sudo kubectl --kubeconfig /etc/kubernetes/admin.conf get nodes
325+
NAME STATUS ROLES AGE VERSION
326+
k8s Ready <none> 56s v1.29.4
327+
sangam Ready control-plane 67m v1.29.4
328+
```

0 commit comments

Comments
 (0)