一,集群規(guī)劃
master01 4C8G 10.0.0.121
master02 4C8G 10.0.0.122
master03 4C8G 10.0.0.123
node01 4C8G 10.0.0.124
node02 4C8G 10.0.0.125
node03 4C8G 10.0.0.126
etcd01 4C8G 10.0.0.127
etcd02 4C8G 10.0.0.128
etcd03 4C8G 10.0.0.129
ha01 2C2G 10.0.0.130
ha02 2C2G 10.0.0.131
10.0.0.135(vip)
規(guī)劃:三臺master節(jié)點,三臺node節(jié)點,三臺etcd,兩臺ha
軟件版本:
操作系統(tǒng): ubuntu:20.04.5
kubernetes v1.25.4
containerd v1.6.10
etcd v3.5.3
cni-plugins v1.1.1
cfssl v1.6.2/v1.6.3
二,基礎(chǔ)環(huán)境配置
2.1,所有環(huán)境配置hosts
root@etcd01:~# cat /etc/hosts
10.0.0.121 master01
10.0.0.122 master02
10.0.0.123 master03
10.0.0.124 node01
10.0.0.125 node02
10.0.0.126 node03
10.0.0.127 etcd01
10.0.0.128 etcd02
10.0.0.129 etcd03
10.0.0.130 ha01
10.0.0.131 ha02
10.0.0.135 vip
2.2,所有節(jié)點關(guān)閉防火墻,selinux,swap
ufw disable
ufw status (ufw status命令查看當前的防火墻狀態(tài):inactive狀態(tài)是防火墻關(guān)閉狀態(tài) active是開啟狀態(tài)。)
#關(guān)閉selinux
sed -ri 's/(^SELINUX=).*/\1disabled/' /etc/selinux/config
setenforce 0
#關(guān)閉swap分區(qū)
sed -ri 's@(^.*swap *swap.*0 0$)@#\1@' /etc/fstab
swapoff -a
2.3,所有時間同步
#方法1使用ntpdate
apt install ntpdate -y
ntpdate time2.aliyun.com
#寫入定時任務(wù)
crontab -e
*/5 * * * * ntpdate time2.aliyun.com
#方法2使用chrony(推薦使用)
#安裝chrony
apt install chrony -y
#在其中一臺主機配置為時間服務(wù)器(Ubuntu下chrony配置文件為/etc/chrony/chrony.conf)
cat /etc/chrony/chrony.conf
server time2.aliyun.com iburst #從哪同步時間
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 10.0.0.0/16 #允許的ntp客戶端網(wǎng)段
local stratum 10
logdir /var/log/chrony
#重啟服務(wù)
systemctl restart chronyd
#配置其他節(jié)點從服務(wù)端獲取時間進行同步
cat /etc/chrony/chrony.conf
server 10.0.0.121 iburst
#重啟驗證
systemctl restart chronyd
chronyc sources -v
^* master01 3 6 17 5 -10us[ -109us] +/- 28ms #這樣表示正常
2.4,所有節(jié)點優(yōu)化內(nèi)核參數(shù)
#修改內(nèi)核參數(shù)
cat >/etc/sysctl.conf<<EOF
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv4.neigh.default.gc_stale_time=120
# rp_filter默認為1,系統(tǒng)會嚴格校驗數(shù)據(jù)包的反向路徑,可能導(dǎo)致丟包(sysctl.conf不支持行尾注釋,注釋需單獨成行)
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1
net.ipv4.tcp_max_tw_buckets=6000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.netfilter.nf_conntrack_max=2310720
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
# netdev_max_backlog: 每CPU網(wǎng)絡(luò)設(shè)備積壓隊列長度
net.core.netdev_max_backlog=16384
# rmem_max/wmem_max: 所有協(xié)議類型讀寫的緩存區(qū)大小
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
# tcp_max_syn_backlog: 第一個積壓隊列長度
net.ipv4.tcp_max_syn_backlog = 8096
# somaxconn: 第二個積壓隊列長度
net.core.somaxconn = 32768
# max_user_instances: 每一個real user ID可創(chuàng)建的inotify instances的數(shù)量上限,默認128
fs.inotify.max_user_instances=8192
# max_user_watches: 同一用戶同時可以添加的watch數(shù)目,默認8192
fs.inotify.max_user_watches=524288
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
# swappiness=0: 禁止使用swap空間,只有當系統(tǒng)OOM時才允許使用它
vm.swappiness=0
# overcommit_memory=1: 不檢查物理內(nèi)存是否夠用
vm.overcommit_memory=1
# panic_on_oom=0: 開啟OOM
vm.panic_on_oom=0
vm.max_map_count = 262144
EOF
#加載ipvs模塊
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_vs_sh
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
EOF
systemctl enable --now systemd-modules-load.service
#重啟
reboot
#重啟服務(wù)器執(zhí)行檢查
lsmod | grep -e ip_vs -e nf
2.5,使用master1節(jié)點做免秘鑰
ssh-keygen -t rsa
all="master01 master02 master03 node01 node02 node03 etcd01 etcd02 etcd03"
for i in $all;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
2.6,安裝所需軟件
apt install curl conntrack ipvsadm ipset iptables jq sysstat libseccomp rsync wget psmisc vim net-tools telnet lrzsz bash-completion -y
2.7,軟件包準備
#下載kubernetesv1.25.4的二進制軟件包
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#downloads-for-v1254
#etcdctl二進制包
https://github.com/etcd-io/etcd/releases
#containerd二進制包
https://github.com/containerd/containerd/releases
#cfssl二進制包
https://github.com/cloudflare/cfssl/releases
#cni插件二進制包
https://github.com/containernetworking/plugins/releases
#crictl二進制包
https://github.com/kubernetes-sigs/cri-tools/releases
#docker-containerd二進制包
https://download.docker.com/linux/static/stable/x86_64/
#cri-docker二進制包
https://github.com/Mirantis/cri-dockerd/releases
三,安裝docker && 安裝容器運行時 containerd(所有master節(jié)點和node節(jié)點)
安裝容器運行時
以下操作docker-ce和containerd選擇安裝一個就可以,需要在安裝kubelet的節(jié)點都需要安裝。在kubernetesv1.24版本后如果需要使用docker-ce作為容器運行時需要額外安裝cri-docker
3.1,安裝容器運行時containerd
#先安裝cni插件:
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
mkdir -p /opt/cni/bin /etc/cni/net.d
tar xf cni-plugins-linux-amd64-v1.1.1.tgz -C /opt/cni/bin/
#解壓containerd安裝
wget https://github.com/containerd/containerd/releases/download/v1.6.10/cri-containerd-cni-1.6.10-linux-amd64.tar.gz
tar xf cri-containerd-cni-1.6.10-linux-amd64.tar.gz -C /
#創(chuàng)建服務(wù)啟動文件
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
#創(chuàng)建配置文件
mkdir /etc/containerd
/usr/local/bin/containerd config default > /etc/containerd/config.toml
#修改配置
首先我們修改默認的 pause 鏡像為國內(nèi)的地址,替換 [plugins."io.containerd.grpc.v1.cri"] 下面的 sandbox_image:
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.aliyuncs.com/k8sxio/pause:3.2"
配置鏡像倉庫加速地址
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://bqr1dr1n.mirror.aliyuncs.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
endpoint = ["https://registry.aliyuncs.com/k8sxio"]
#啟動
systemctl enable --now containerd.service
#安裝crictl客戶端工具
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.25.0/crictl-v1.25.0-linux-amd64.tar.gz
tar xf crictl-v1.25.0-linux-amd64.tar.gz -C /usr/bin/
#生成配置文件
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
#測試
crictl info
3.2,安裝docker
tar xf docker-20.10.15.tgz
#拷貝二進制文件
cp docker/* /usr/bin/
#創(chuàng)建containerd的service文件,并且啟動
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service
#準備docker的service文件
cat > /etc/systemd/system/docker.service <<EOF
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
#準備docker的socket文件
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
#創(chuàng)建docker組
groupadd docker
#啟動docker
systemctl enable --now docker.socket && systemctl enable --now docker.service
#驗證
docker info
#創(chuàng)建docker配置文件
mkdir /etc/docker/ -p
cat >/etc/docker/daemon.json << EOF
{"registry-mirrors":["https://b9pmyelo.mirror.aliyuncs.com"]}
EOF
systemctl restart docker
#安裝cri-Docker
#解壓安裝包
tar xf cri-dockerd-0.2.3.amd64.tgz
#拷貝二進制文件
cp cri-dockerd/* /usr/bin/
#生成service文件
cat >/etc/systemd/system/cri-docker.socket<<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
cat >/etc/systemd/system/cri-docker.service<<EOF
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni
ExecReload=/bin/kill -s HUP \$MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
#啟動
systemctl enable --now cri-docker.socket
systemctl enable --now cri-docker
四,生成集群相關(guān)證書(都在master01上操作)
4.1,安裝cfssl工具并分發(fā)二進制文件
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.2/cfssljson_1.6.2_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.2/cfssl-certinfo_1.6.2_linux_amd64
mv cfssl_1.6.3_linux_amd64 cfssl
mv cfssljson_1.6.2_linux_amd64 cfssljson
mv cfssl-certinfo_1.6.2_linux_amd64 cfssl-certinfo
chmod +x cfssl* && mv cfssl* /usr/bin/
#分發(fā)二進制文件
master="master01 master02 master03"
node="node01 node02 node03"
#分發(fā)master組件
for i in $master;do
scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy,kubelet,kubectl} $i:/usr/bin
done
#分發(fā)node組件
for i in $node;do
scp kubernetes/server/bin/{kube-proxy,kubelet} $i:/usr/bin
done
4.2,創(chuàng)建etcd集群證書
mkdir /opt/pki/etcd/ -p
cd /opt/pki/etcd/
#創(chuàng)建etcd的ca證書
mkdir ca && cd ca
#生成配置文件
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"etcd": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
#生成申請文件
cat > ca-csr.json <<EOF
{
"CA":{"expiry":"87600h"},
"CN": "etcd-cluster",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd-cluster",
"OU": "System"
}
]
}
EOF
#生成ca證書
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#etcd服務(wù)端
#生成etcd證書申請文件
cd /opt/pki/etcd/
cat > etcd-server-csr.json << EOF
{
"CN": "etcd-server",
"hosts": [
"10.0.0.127",
"10.0.0.128",
"10.0.0.129",
"127.0.0.1"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd-server",
"OU": "System"
}
]
}
EOF
#生成證書
cfssl gencert \
-ca=ca/ca.pem \
-ca-key=ca/ca-key.pem \
-config=ca/ca-config.json \
-profile=etcd \
etcd-server-csr.json | cfssljson -bare etcd-server
#etcd客戶端
#生成etcd證書申請文件
cd /opt/pki/etcd/
cat > etcd-client-csr.json << EOF
{
"CN": "etcd-client",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "etcd-client",
"OU": "System"
}
]
}
EOF
#生成證書
cfssl gencert \
-ca=ca/ca.pem \
-ca-key=ca/ca-key.pem \
-config=ca/ca-config.json \
-profile=etcd \
etcd-client-csr.json | cfssljson -bare etcd-client
驗證
pwd
/opt/pki/etcd
#tree命令驗證生成的文件
[root@k8s-master01 etcd]# tree
.
├── ca
│ ├── ca-config.json
│ ├── ca.csr
│ ├── ca-csr.json
│ ├── ca-key.pem
│ └── ca.pem
├── etcd-client.csr
├── etcd-client-csr.json
├── etcd-client-key.pem #客戶端私鑰
├── etcd-client.pem #客戶端公鑰
├── etcd-server.csr
├── etcd-server-csr.json
├── etcd-server-key.pem #服務(wù)端私鑰
└── etcd-server.pem #服務(wù)端公鑰
#拷貝到master節(jié)點和etcd節(jié)點
master_etcd="master01 master02 master03 etcd01 etcd02 etcd03"
for i in $master_etcd;do
ssh $i "mkdir /etc/etcd/ssl -p"
scp /opt/pki/etcd/ca/ca.pem /opt/pki/etcd/{etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem} $i:/etc/etcd/ssl/
done
4.3,創(chuàng)建kubernetes集群各組件證書
生成kubernetes ca證書
mkdir /opt/pki/kubernetes/ -p
cd /opt/pki/kubernetes/
mkdir ca
cd ca
#創(chuàng)建ca配置文件與申請文件
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
#生成申請文件
cat > ca-csr.json <<EOF
{
"CA":{"expiry":"87600h"},
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubernetes",
"OU": "System"
}
]
}
EOF
#生成ca證書
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
生成kube-apiserver證書
#創(chuàng)建目錄
mkdir /opt/pki/kubernetes/kube-apiserver -p
cd /opt/pki/kubernetes/kube-apiserver
#生成證書申請文件
cat > kube-apiserver-csr.json <<EOF
{
"CN": "kube-apiserver",
"hosts": [
"127.0.0.1",
"10.0.0.121",
"10.0.0.122",
"10.0.0.123",
"10.0.0.135",
"10.200.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kube-apiserver",
"OU": "System"
}
]
}
EOF
#生成證書
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-apiserver-csr.json | cfssljson -bare kube-apiserver
#拷貝kube-apiserver到master節(jié)點
master="master01 master02 master03"
for i in $master;do
ssh $i "mkdir /etc/kubernetes/pki -p"
scp /opt/pki/kubernetes/ca/{ca.pem,ca-key.pem} /opt/pki/kubernetes/kube-apiserver/{kube-apiserver-key.pem,kube-apiserver.pem} $i:/etc/kubernetes/pki
done
#拷貝證書到node節(jié)點
node="node01 node02 node03"
for i in $node;do
ssh $i "mkdir /etc/kubernetes/pki -p"
scp /opt/pki/kubernetes/ca/ca.pem $i:/etc/kubernetes/pki
done
生成proxy-client和ca證書
#創(chuàng)建目錄
mkdir /opt/pki/proxy-client
cd /opt/pki/proxy-client
#生成ca配置文件
cat > front-proxy-ca-csr.json <<EOF
{
"CA":{"expiry":"87600h"},
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
#生成ca文件
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
#生成客戶端證書申請文件
cat > front-proxy-client-csr.json <<EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
#生成證書
cfssl gencert \
-ca=front-proxy-ca.pem \
-ca-key=front-proxy-ca-key.pem \
-config=../kubernetes/ca/ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client
#拷貝證書到節(jié)點
master="master01 master02 master03"
node="node01 node02 node03"
for i in $master;do
scp /opt/pki/proxy-client/{front-proxy-ca.pem,front-proxy-client.pem,front-proxy-client-key.pem} $i:/etc/kubernetes/pki
done
for i in $node;do
scp /opt/pki/proxy-client/front-proxy-ca.pem $i:/etc/kubernetes/pki
done
生成kube-controller-manager證書和文件
mkdir /opt/pki/kubernetes/kube-controller-manager -p
cd /opt/pki/kubernetes//kube-controller-manager
#生成證書請求文件
cat > kube-controller-manager-csr.json <<EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "System"
}
]
}
EOF
#生成證書文件
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#生成配置文件
export KUBE_APISERVER="https://10.0.0.135:8443"
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
拷貝證書到master節(jié)點
master="master01 master02 master03"
for i in $master;do
scp /opt/pki/kubernetes/kube-controller-manager/kube-controller-manager.kubeconfig $i:/etc/kubernetes/
done
生成kube-scheduler證書
#創(chuàng)建目錄
mkdir /opt/pki/kubernetes/kube-scheduler
cd /opt/pki/kubernetes/kube-scheduler
#生成證書申請文件
cat > kube-scheduler-csr.json <<EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "System"
}
]
}
EOF
#生成證書
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#生成配置文件
export KUBE_APISERVER="https://10.0.0.135:8443"
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
#拷貝文件到master
master="master01 master02 master03"
for i in $master;do
scp /opt/pki/kubernetes/kube-scheduler/kube-scheduler.kubeconfig $i:/etc/kubernetes
done
生成集群管理員證書
#創(chuàng)建目錄
mkdir /opt/pki/kubernetes/admin
cd /opt/pki/kubernetes/admin
#生成證書申請文件
cat > admin-csr.json <<EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
#生成證書
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare admin
#生成配置文件
export KUBE_APISERVER="https://10.0.0.135:8443"
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
#拷貝文件到master
master="master01 master02 master03"
for i in $master;do
scp /opt/pki/kubernetes/admin/admin.kubeconfig $i:/etc/kubernetes
done
五,etcd集群安裝
5.1,etcd節(jié)點 1
tar xf etcd-v3.5.3-linux-amd64.tar.gz
cp etcd-v3.5.3-linux-amd64/etcd* /usr/bin/
#創(chuàng)建etcd配置文件
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.0.0.127:2380'
listen-client-urls: 'https://10.0.0.127:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.0.0.127:2380'
advertise-client-urls: 'https://10.0.0.127:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.0.127:2380,etcd-2=https://10.0.0.128:2380,etcd-3=https://10.0.0.129:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
5.2,etcd節(jié)點2
tar xf etcd-v3.5.3-linux-amd64.tar.gz
cp etcd-v3.5.3-linux-amd64/etcd* /usr/bin/
#創(chuàng)建etcd配置文件
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-2'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.0.0.128:2380'
listen-client-urls: 'https://10.0.0.128:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.0.0.128:2380'
advertise-client-urls: 'https://10.0.0.128:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.0.127:2380,etcd-2=https://10.0.0.128:2380,etcd-3=https://10.0.0.129:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
5.3,etcd節(jié)點3
tar xf etcd-v3.5.3-linux-amd64.tar.gz
cp etcd-v3.5.3-linux-amd64/etcd* /usr/bin/
#創(chuàng)建etcd配置文件
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-3'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.0.0.129:2380'
listen-client-urls: 'https://10.0.0.129:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.0.0.129:2380'
advertise-client-urls: 'https://10.0.0.129:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.0.127:2380,etcd-2=https://10.0.0.128:2380,etcd-3=https://10.0.0.129:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
5.4,創(chuàng)建service文件并啟動服務(wù)(etcd所有節(jié)點操作)
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
#啟動服務(wù)
systemctl daemon-reload
systemctl enable --now etcd
##配置etcdctl使用v3 api
cat > /etc/profile.d/etcdctl.sh <<EOF
#!/bin/bash
export ETCDCTL_API=3
export ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
export ETCDCTL_CACERT=/etc/etcd/ssl/ca.pem
export ETCDCTL_CERT=/etc/etcd/ssl/etcd-client.pem
export ETCDCTL_KEY=/etc/etcd/ssl/etcd-client-key.pem
EOF
#生效
source /etc/profile
#驗證集群狀態(tài)
root@etcd01:~# etcdctl member list --write-out='table'
+------------------+---------+--------+-------------------------+-------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+--------+-------------------------+-------------------------+------------+
| f1480aba887b1eb | started | etcd-3 | https://10.0.0.129:2380 | https://10.0.0.129:2379 | false |
| 358a0a2775d7a771 | started | etcd-1 | https://10.0.0.127:2380 | https://10.0.0.127:2379 | false |
| d6b17c4efb47744b | started | etcd-2 | https://10.0.0.128:2380 | https://10.0.0.128:2379 | false |
+------------------+---------+--------+-------------------------+-------------------------+------------+
六,安裝kubernetes集群
6.1,master節(jié)點安裝kube-apiserver
#創(chuàng)建ServiceAccount Key
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
master="master01 master02 master03"
#分發(fā)master組件
for i in $master;do
scp /etc/kubernetes/pki/{sa.pub,sa.key} $i:/etc/kubernetes/pki/
done
#創(chuàng)建service文件
a=$(ifconfig ens33 | awk 'NR==2{print $2}')
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-apiserver \\
--v=2 \\
--logtostderr=true \\
--allow-privileged=true \\
--bind-address=$a \\
--secure-port=6443 \\
--advertise-address=$a \\
--service-cluster-ip-range=10.200.0.0/16 \\
--service-node-port-range=30000-42767 \\
--etcd-servers=https://10.0.0.127:2379,https://10.0.0.128:2379,https://10.0.0.129:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd-client.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-client-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
#啟動服務(wù)
systemctl enable --now kube-apiserver.service
apiserver 高可用配置
#安裝keepalived haproxy
apt install -y keepalived haproxy
#ha01配置文件
cat >/etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 10.0.0.140
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens33
garp_master_delay 10
smtp_alert
virtual_router_id 56 # 修改對應(yīng)的id
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.135 dev ens33 label ens33:0 # 添加vip
}
track_script {
chk_apiserver
}
}
EOF
#ha02 配置文件
cat >/etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 10.0.0.140
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
garp_master_delay 10
smtp_alert
virtual_router_id 56 # 修改對應(yīng)的id
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.135 dev ens33 label ens33:0 # 添加vip,需與ha01保持一致
}
track_script {
chk_apiserver
}
}
EOF
#ha配置文件
cat >/etc/haproxy/haproxy.cfg <<EOF
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats timeout 30s
user haproxy
group haproxy
daemon
# Default SSL material locations
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
listen k8s-6443 # 添加監(jiān)聽
bind 10.0.0.135:8443
mode tcp
server k8s1 10.0.0.121:6443 check inter 3s fall 3 rise 5 # 每三秒一次健康檢查
server k8s2 10.0.0.122:6443 check inter 3s fall 3 rise 5
server k8s3 10.0.0.123:6443 check inter 3s fall 3 rise 5
EOF
## 檢查ha進程腳本
cat > /etc/keepalived/check_apiserver.sh <<'EOF'
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
#給腳本授權(quán)
chmod +x /etc/keepalived/check_apiserver.sh
#啟動服務(wù)
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
6.2,所有master節(jié)點安裝kube-controller-manager
# Generate the kube-controller-manager systemd unit file.
# FIX: the heredoc delimiter is quoted ('EOF') so the trailing backslashes are
# written literally into the unit file. With an unquoted delimiter the shell
# treats "\<newline>" as line continuation and collapses ExecStart onto one line.
# NOTE(review): --cluster-cidr=10.200.0.0/16 looks like the *service* CIDR
# (clusterDNS is 10.200.0.2) while the pod CIDR used by calico/kube-proxy is
# 10.100.0.0/16 — verify which network this flag is meant to carry.
cat > /etc/systemd/system/kube-controller-manager.service <<'EOF'
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-controller-manager \
  --v=2 \
  --logtostderr=true \
  --root-ca-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/pki/sa.key \
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
  --leader-elect=true \
  --use-service-account-credentials=true \
  --node-monitor-grace-period=40s \
  --node-monitor-period=5s \
  --pod-eviction-timeout=2m0s \
  --controllers=*,bootstrapsigner,tokencleaner \
  --allocate-node-cidrs=true \
  --cluster-cidr=10.200.0.0/16 \
  --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
  --node-cidr-mask-size=24
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
# Enable and start the service
systemctl enable --now kube-controller-manager.service
6.3,所有master節(jié)點安裝kube-scheduler
# Generate the kube-scheduler systemd unit file.
# FIX: the heredoc delimiter is quoted ('EOF') so the trailing backslashes are
# written literally; an unquoted delimiter would let the shell consume
# "\<newline>" as line continuation and join the ExecStart lines.
cat > /etc/systemd/system/kube-scheduler.service <<'EOF'
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-scheduler \
  --v=2 \
  --logtostderr=true \
  --leader-elect=true \
  --kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
# Enable and start the service
systemctl enable --now kube-scheduler.service
在master01上配置kubectl工具
#拷貝admin.kubeconfig到~/.kube/config
mkdir /root/.kube/ -p
cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
#驗證集群狀態(tài),以下顯示信息表示master節(jié)點的所有組件運行正常
[root@k8s-master01 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-2 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
6.4,所有節(jié)點安裝kubelet
# Create the working directory for the kubelet bootstrap material
mkdir /opt/pki/kubernetes/kubelet -p
cd /opt/pki/kubernetes/kubelet
# Generate a random bootstrap token: a 6-hex-char id ($a) and a 16-hex-char secret ($b)
a=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c6)
b=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c16)
# Generate the RBAC/bootstrap-token manifest.
# FIX: the YAML in this document had lost its indentation (keys under
# metadata:/stringData:/roleRef:/subjects: were at column 0), which is invalid
# YAML that kubectl would reject. Indentation restored below.
# The delimiter stays UNQUOTED on purpose so $a and $b expand into the manifest.
cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: bootstrap-token-$a
  namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
  description: "The default bootstrap token generated by 'kubelet '."
  token-id: $a
  token-secret: $b
  usage-bootstrap-authentication: "true"
  usage-bootstrap-signing: "true"
  auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubelet-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-bootstrap
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-autoapprove-certificate-rotation
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
  - ""
  resources:
  - nodes/proxy
  - nodes/stats
  - nodes/log
  - nodes/spec
  - nodes/metrics
  verbs:
  - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
  namespace: ""
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: kube-apiserver
EOF
# Generate the bootstrap kubeconfig used for kubelet TLS bootstrapping
# (points at the haproxy VIP in front of the apiservers, token = $a.$b)
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=https://10.0.0.135:8443 \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
--token=$a.$b \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl apply -f bootstrap.secret.yaml
# Copy the bootstrap kubeconfig to every master and node
all="master01 master02 master03 node01 node02 node03"
for i in $all;do scp /opt/pki/kubernetes/kubelet/bootstrap-kubelet.kubeconfig $i:/etc/kubernetes;done
# Generate the kubelet startup-options file.
# FIX: "awk -rn" is not a valid awk invocation (neither gawk nor mawk accepts
# -rn) — plain awk with the NR==2 program is what is intended.
# NOTE(review): the interface name ens33 is machine-specific — confirm per host.
name=$(ifconfig ens33 | awk 'NR==2{print $2}')
hostname=$(hostname)
kubernetes_ssl_dir="/etc/kubernetes/pki"
# Delimiter is unquoted on purpose: ${hostname}/${kubernetes_ssl_dir} expand,
# and "\\" is written out as a single literal backslash for the options file.
cat > /etc/kubernetes/kubelet.conf << EOF
KUBELET_OPTS="--hostname-override=${hostname} \\
  --container-runtime=remote \\
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
  --config=/etc/kubernetes/kubelet-config.yml \\
  --cert-dir=${kubernetes_ssl_dir} "
EOF
## Generate the kubelet-config.yml file.
# FIX: the YAML had lost its indentation (keys under authentication:,
# authorization:, evictionHard: were at column 0) — invalid YAML restored.
# Delimiter stays unquoted so ${name} and ${kubernetes_ssl_dir} expand.
cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${name}
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.200.0.2  # cluster DNS IP — keep in sync with the service CIDR
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: ${kubernetes_ssl_dir}/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
evictionHard:
  imagefs.available: 15%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
## Generate the kubelet systemd unit file (delimiter is correctly quoted here,
## so $KUBELET_OPTS is written literally for systemd to expand at runtime).
# NOTE(review): After=docker.service, but this cluster uses containerd —
# consider After=containerd.service. Also this unit goes to
# /usr/lib/systemd/system while the other units in this guide use
# /etc/systemd/system; verify which location is intended.
cat > /usr/lib/systemd/system/kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.conf
ExecStart=/usr/bin/kubelet $KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
## Reload systemd and enable/start kubelet
systemctl daemon-reload
systemctl enable --now kubelet
6.5,所有節(jié)點安裝kube-proxy
# Run on a master node
# Create the working directory
mkdir /opt/pki/kubernetes/kube-proxy/ -p
cd /opt/pki/kubernetes/kube-proxy/
# Create the kube-proxy service account and bind it to the node-proxier role
kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
# Service-account-token secret.
# FIX 1: filename typo "kube-proxy-scret.yml" corrected (both write and apply).
# FIX 2: YAML indentation restored (name/namespace/annotations must be nested
# under metadata:, and the annotation key under annotations:).
cat >kube-proxy-secret.yml<<EOF
apiVersion: v1
kind: Secret
metadata:
  name: kube-proxy
  namespace: kube-system
  annotations:
    kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF
kubectl apply -f kube-proxy-secret.yml
# Extract the token and build the kube-proxy kubeconfig against the VIP
JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
--output=jsonpath='{.data.token}' | base64 -d)
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://10.0.0.135:8443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context kubernetes \
--kubeconfig=kube-proxy.kubeconfig
# Copy the kubeconfig to every master and node
node="master01 master02 master03 node01 node02 node03"
for i in $node;do
  scp /opt/pki/kubernetes/kube-proxy/kube-proxy.kubeconfig $i:/etc/kubernetes
done
# Generate the kube-proxy systemd unit on every node.
# FIX: heredoc delimiter quoted ('EOF') so the trailing backslashes are written
# literally; unquoted, the shell consumes "\<newline>" and joins the lines.
cat > /etc/systemd/system/kube-proxy.service <<'EOF'
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
ExecStart=/usr/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.conf \
  --v=2
Restart=always
RestartSec=10s

[Install]
WantedBy=multi-user.target
EOF
# Generate the kube-proxy configuration on every node.
# FIX 1: "awk -rn" is not a valid awk invocation — plain awk is intended.
# FIX 2: YAML indentation restored (sub-keys of clientConnection:, conntrack:,
# iptables:, ipvs: must be nested) — the flattened form is invalid YAML.
# NOTE(review): ens33 is machine-specific — confirm per host.
a=$(ifconfig ens33 | awk 'NR==2{print $2}')
# Delimiter stays unquoted so $a expands into the config.
cat > /etc/kubernetes/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: $a
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.100.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "$a"
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
# Reload systemd, then enable and start kube-proxy
systemctl daemon-reload
systemctl enable --now kube-proxy.service
root@master01:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
10.0.0.121 Ready <none> 27m v1.25.4
10.0.0.122 Ready <none> 27m v1.25.4
10.0.0.123 Ready <none> 27m v1.25.4
10.0.0.124 Ready <none> 27m v1.25.4
10.0.0.125 Ready <none> 27m v1.25.4
10.0.0.126 Ready <none> 27m v1.25.4
# Label each node with its cluster role (master / node)
kubectl label nodes 10.0.0.121 node-role.kubernetes.io/master=master01
kubectl label nodes 10.0.0.122 node-role.kubernetes.io/master=master02
kubectl label nodes 10.0.0.123 node-role.kubernetes.io/master=master03
kubectl label nodes 10.0.0.124 node-role.kubernetes.io/node=node01
kubectl label nodes 10.0.0.125 node-role.kubernetes.io/node=node02
kubectl label nodes 10.0.0.126 node-role.kubernetes.io/node=node03
七,安裝網(wǎng)絡(luò)插件calico
https://github.com/projectcalico/calico/tree/v3.24.5/manifests
vim calico.yaml
#修改配置
- name: CALICO_IPV4POOL_CIDR
value: "10.100.0.0/16"
kubectl apply -f calico.yaml
root@master01:~# kubectl get pods -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system calico-kube-controllers-798cc86c47-7msbp 1/1 Running 0 8m11s 10.100.178.65 10.0.0.124 <none> <none>
kube-system calico-node-bt4hm 1/1 Running 0 8m11s 10.0.0.123 10.0.0.123 <none> <none>
kube-system calico-node-cng8w 1/1 Running 0 8m11s 10.0.0.125 10.0.0.125 <none> <none>
kube-system calico-node-f8cvd 1/1 Running 0 8m11s 10.0.0.126 10.0.0.126 <none> <none>
kube-system calico-node-frmhz 1/1 Running 0 8m11s 10.0.0.124 10.0.0.124 <none> <none>
kube-system calico-node-hbslc 1/1 Running 0 8m11s 10.0.0.121 10.0.0.121 <none> <none>
kube-system calico-node-zszns 1/1 Running 0 8m11s 10.0.0.122 10.0.0.122 <none> <none>
# Install the calicoctl client tool
# Create its configuration file.
# FIX: datastoreType/kubeconfig must be indented under spec: — the flattened
# form in the original is invalid YAML.
mkdir /etc/calico -p
cat >/etc/calico/calicoctl.cfg <<EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "/root/.kube/config"
EOF
# NOTE(review): calicoctl v3.21.5 does not match the calico v3.24.5 manifests
# deployed above — consider downloading the matching v3.24.x release.
wget https://github.com/projectcalico/calicoctl/releases/download/v3.21.5/calicoctl-linux-amd64
chmod +x calicoctl-linux-amd64 && mv calicoctl-linux-amd64 calicoctl && mv calicoctl /usr/bin
#驗證
root@master01:~# calicoctl node status
Calico process is running.
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+--------------+-------------------+-------+----------+-------------+
| 10.0.0.123 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.124 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.125 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.126 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.122 | node-to-node mesh | up | 06:49:36 | Established |
+--------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.
八,安裝coredns
#下載地址:https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed
vim coredns.yaml
#修改配置
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa { #這里修改
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . 114.114.114.114 { #外部dns解析服務(wù)器
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
clusterIP: 10.200.0.2 #這里改為這個地址
#部署
kubectl apply -f coredns.yaml
root@master01:~# kubectl get pod -A |grep core
kube-system coredns-54b8c69d54-2rnms 1/1 Running 0 3h8m
九,安裝metrics-server
#下載地址
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.1/components.yaml
#修改配置
vim components.yaml
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp-dir
- mountPath: /etc/kubernetes/pki
name: ca-ssl
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
#創(chuàng)建
kubectl apply -f components.yaml
#驗證:
root@master01:~# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
10.0.0.121 390m 9% 1791Mi 22%
10.0.0.122 374m 9% 1158Mi 14%
10.0.0.123 317m 7% 1108Mi 14%
10.0.0.124 234m 5% 798Mi 10%
10.0.0.125 202m 5% 711Mi 9%
10.0.0.126 200m 5% 659Mi 8%