Set the hostname on each machine ahead of time. This step must be done before anything else, otherwise the worker nodes will not be able to join the master.

hostnamectl set-hostname MPP.pp.ua   # on the master (192.168.10.18)
hostnamectl set-hostname node19      # on worker 192.168.10.19
hostnamectl set-hostname node20      # on worker 192.168.10.20
#!/bin/bash
# environment preparation: run this script on every machine (master and workers)
dnf install chrony -y
systemctl enable --now chronyd && chronyc sources
setenforce 0
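# setenforce only lasts until reboot; also make SELinux permissive persistently
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config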
systemctl stop firewalld
systemctl disable firewalld
swapoff -a
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

echo "192.168.10.18 MPP.pp.ua master.MPP.pp.ua kubeapi.MPP.pp.ua
192.168.10.19 node19
192.168.10.20 node20" >> /etc/hosts
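getent hosts kubeapi.MPP.pp.ua node19 node20   # quick check that the names above resolve before continuing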

modprobe br_netfilter
echo "net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1" >>/etc/sysctl.conf
sysctl -p 

dnf install ipvsadm ipset sysstat conntrack libseccomp \
yum-utils device-mapper-persistent-data lvm2   -y

echo "net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
fs.file-max=1000000
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.max_map_count=262144
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.ip_conntrack_max = 65536
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
net.netfilter.nf_conntrack_max=2097152
kernel.pid_max=4194303" > /etc/sysctl.d/k8s.conf


echo "ip_vs
ip_vs_lc
ip_vs_lblc
ip_vs_lblcr
ip_vs_rr
ip_vs_wrr
ip_vs_sh
ip_vs_dh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
ip_tables
ip_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
xt_set
br_netfilter
nf_conntrack
verlay" >/etc/modules-load.d/modules.conf 

sysctl --system
sysctl -p

yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install containerd.io -y   # the docker-ce repo packages containerd as "containerd.io"
mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml
sed -i "s/SystemdCgroup = false/SystemdCgroup = true/g" /etc/containerd/config.toml   # kubelet expects the systemd cgroup driver; the generated default is SystemdCgroup = false
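# optional: the default sandbox (pause) image lives on registry.k8s.io, which may be unreachable;
# pointing it at the same Aliyun mirror used for the cluster images below is an assumption, adjust the tag to your containerd version
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml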
systemctl restart containerd && systemctl enable containerd


echo "runtime-endpoint: "unix:///run/containerd/containerd.sock"
image-endpoint: "unix:///run/containerd/containerd.sock"
timeout: 10
debug: false" >/etc/crictl.yaml


wget https://github.com/containerd/nerdctl/releases/download/v1.7.2/nerdctl-1.7.2-linux-amd64.tar.gz
tar xvf nerdctl-1.7.2-linux-amd64.tar.gz -C /usr/local/bin/
nerdctl version 
mkdir /etc/nerdctl/

echo "namespace = "k8s.io"
debug = false
debug_full = false
insecure_registry = true" >/etc/nerdctl/nerdctl.tom
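nerdctl -n k8s.io images   # sanity check: nerdctl can reach containerd in the k8s.io namespace (an empty list is expected at this point)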



echo "[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/rpm/
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.28/rpm/repodata/repomd.xml.key" > /etc/yum.repos.d/kubernetes.repo


dnf  install -y kubelet kubeadm kubectl
systemctl enable --now  kubelet

kubeadm config images pull --image-repository="registry.aliyuncs.com/google_containers" --kubernetes-version=v1.28.2
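crictl images   # the registry.aliyuncs.com/google_containers images just pulled should be listed (crictl is installed as a dependency of kubeadm above)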
echo "master 和nodes 环境初始化OK,继续在master节点操作"
#

That completes the steps for all machines; everything below is run on the master only.

kubeadm init --kubernetes-version=v1.28.2 --control-plane-endpoint="kubeapi.MPP.pp.ua"  --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12  --token-ttl=0 --image-repository=registry.aliyuncs.com/google_containers

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

echo "source <(kubectl completion bash)" >> ~/.bashrc
echo "source <(kubeadm completion bash)" >> ~/.bashrc
source ~/.bashrc
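At this point kubectl works on the master, but the node will report NotReady until a network add-on is installed:

kubectl get nodes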

Join the other worker nodes to the k8s cluster (run this on each worker):

kubeadm join kubeapi.MPP.pp.ua:6443 --token qqi5po.sbybgnyufwlmaosm \
    --discovery-token-ca-cert-hash sha256:9e63db27b513958c00ffe152d32c5506b863f48f45a9ec7dd1ab79bf53d35ebe
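The token and hash above are specific to this cluster; if the join command has been lost or the token has expired, a fresh one can be printed on the master:

kubeadm token create --print-join-command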

Install a network add-on (run on the master).

Flannel:

kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml

Install the Tigera Calico operator and custom resource definitions (this command creates a tigera-operator namespace, which you can check with kubectl get namespaces):

kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/tigera-operator.yaml

Check whether the pods in tigera-operator were created successfully:

kubectl get pods -n tigera-operator

Install Calico by creating the necessary custom resources (make sure to change the default IP pool CIDR to match your container network CIDR):

wget -c https://raw.githubusercontent.com/projectcalico/calico/v3.26.4/manifests/custom-resources.yaml

Edit the file so the CIDR matches the Pod network used at kubeadm init ("--pod-network-cidr=10.244.0.0/16", so use 10.244.0.0/16):

vim custom-resources.yaml
cidr: 10.244.0.0/16
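If you prefer a non-interactive edit, a substitution like the following also works (assuming the manifest still ships Calico's default cidr of 192.168.0.0/16):

sed -i 's#cidr: 192.168.0.0/16#cidr: 10.244.0.0/16#' custom-resources.yaml
grep cidr custom-resources.yaml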

Apply it:

kubectl apply -f custom-resources.yaml

Verify that the calico-system namespace has been created (if it has not, the CIDR was probably not changed correctly):

kubectl get ns

After a short wait, check whether the pods have come up; how long this takes depends on your network:

kubectl get pod -n calico-system
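Instead of polling manually, you can also wait for the pods with a timeout, for example:

kubectl wait --namespace calico-system --for=condition=Ready pods --all --timeout=300s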

Note: if a calico-node pod's READY column shows 0/1, run kubectl describe pod <pod-name> -n calico-system to see the specific cause.

Verify that the cluster nodes are Ready:

kubectl get nodes

Switch kube-proxy to IPVS (run on the master).

Edit the kube-proxy ConfigMap and, around line 54, change the mode value to ipvs:

kubectl edit configmaps kube-proxy  -n kube-system
mode: "ipvs"

Delete all kube-proxy pods so Kubernetes recreates them with the new mode:

kubectl delete pod -l k8s-app=kube-proxy  -n kube-system

Verify that IPVS is in use:

ipvsadm -ln
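There should be one virtual server entry per Service IP; for example the kubernetes API Service, which is the first address of the 10.96.0.0/12 service CIDR:

ipvsadm -ln | grep -A2 "10.96.0.1:443"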

