apiVersion: v1
kind: ReplicationController
metadata:
  name: mytomcat
spec:
  replicas: 2
  selector:
    app: mytomcat
  template:
    metadata:
      labels:
        app: mytomcat
    spec:
      containers:
      - name: mytomcat
        image: tomcat
        ports:
        - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: mytomcat
spec:
  type: NodePort
  ports:
  - port: 8080
    nodePort: 30001
  selector:
    app: mytomcat
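A minimal way to apply and verify the two manifests above (a sketch; the file name mytomcat.yaml is illustrative):
kubectl create -f mytomcat.yaml
kubectl get rc,pods -l app=mytomcat
kubectl get svc mytomcat
# Tomcat should then answer on port 30001 of any node, e.g.:
curl http://192.168.158.151:30001/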
Master
vi /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=simple
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
ExecStart=/usr/bin/etcd
Restart=on-failure
[Install]
WantedBy=multi-user.target
Start etcd:
systemctl daemon-reload
systemctl enable etcd.service
mkdir -p /var/lib/etcd/
systemctl start etcd.service
etcdctl cluster-health
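As a further check, the member list should show the single local member (a sketch, using the same local endpoint):
etcdctl member list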
vi /usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=etcd.service
Wants=etcd.service
[Service]
EnvironmentFile=/etc/kubernetes/apiserver
ExecStart=/usr/bin/kube-apiserver $KUBE_API_ARGS
Restart=on-failure
Type=notify
[Install]
WantedBy=multi-user.target
mkdir /etc/kubernetes
vi /etc/kubernetes/apiserver
KUBE_API_ARGS="--storage-backend=etcd3 --etcd-servers=http://127.0.0.1:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --service-cluster-ip-range=169.169.0.0/16 --service-node-port-range=1-65535 --admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,DefaultStorageClass,ResourceQuota --logtostderr=true --log-dir=/var/log/kubernetes --v=2"
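Once kube-apiserver is started (see the start commands below), the insecure port configured above allows a quick smoke test (a sketch):
curl http://127.0.0.1:8080/version
curl http://127.0.0.1:8080/healthz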
vi /usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vi /etc/kubernetes/controller-manager
KUBE_CONTROLLER_MANAGER_ARGS="--master=http://192.168.158.150:8080 --logtostderr=true --log-dir=/var/log/kubernetes --v=2"
vi /usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=kube-apiserver.service
Requires=kube-apiserver.service
[Service]
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vi /etc/kubernetes/scheduler
KUBE_SCHEDULER_ARGS="--master=http://192.168.158.150:8080 --logtostderr=true --log-dir=/var/log/kubernetes --v=2"
Start the services:
systemctl daemon-reload
systemctl enable kube-apiserver.service
systemctl start kube-apiserver.service
systemctl enable kube-controller-manager.service
systemctl start kube-controller-manager.service
systemctl enable kube-scheduler.service
systemctl start kube-scheduler.service
Check the health of each service:
systemctl status kube-apiserver.service
systemctl status kube-controller-manager.service
systemctl status kube-scheduler.service
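A further sanity check against the insecure port (a sketch; assumes the kubectl binary is also installed on the master):
kubectl -s http://127.0.0.1:8080 get componentstatuses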
Node1
vi /usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
            $KUBE_LOG_LEVEL \
            $KUBELET_ADDRESS \
            $KUBELET_HOSTNAME \
            $KUBELET_POD_INFRA_CONTAINER \
            $KUBELET_ARGS
Restart=on-failure
[Install]
WantedBy=multi-user.target
mkdir -p /var/lib/kubelet
vi /etc/kubernetes/kubelet
KUBELET_ARGS="--kubeconfig=/etc/kubernetes/kubeconfig --hostname-override=192.168.158.151 --logtostderr=false --log-dir=/var/log/kubernetes --v=2"
vi /etc/kubernetes/kubeconfig
apiVersion: v1
kind: Config
clusters:
- cluster:
    server: http://192.168.158.150:8080
  name: local
contexts:
- context:
    cluster: local
  name: mycontext
current-context: mycontext
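This kubeconfig can be smoke-tested with kubectl once the master's insecure port is reachable (a sketch):
kubectl --kubeconfig=/etc/kubernetes/kubeconfig cluster-info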
vi /usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \
            $KUBE_LOG_LEVEL \
            $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
vi /etc/kubernetes/proxy
KUBE_PROXY_ARGS="--master=http://192.168.158.150:8080 --hostname-override=192.168.158.151 --logtostderr=true --log-dir=/var/log/kubernetes --v=2"
Restart
systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl restart kubelet
systemctl status kubelet
systemctl enable kube-proxy
systemctl start kube-proxy
systemctl restart kube-proxy
systemctl status kube-proxy
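With kubelet and kube-proxy running, the node should register itself with the master; check from the master (a sketch, using the insecure port configured earlier):
kubectl -s http://192.168.158.150:8080 get nodes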
View logs:
journalctl _PID=XXX
Newer versions disable swap; the --swap option can no longer be used.
Turn off swap (this does not survive a reboot):
swapoff -a
Edit /etc/fstab to disable swap permanently:
cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S')
CentOS
sed -i "s/\/dev\/mapper\/centos-swap/\#\/dev\/mapper\/centos-swap/g" /etc/fstab
Red Hat
sed -i "s/\/dev\/mapper\/rhel-swap/\#\/dev\/mapper\/rhel-swap/g" /etc/fstab
After editing, remount all mount points:
mount -a
Check swap:
free -m
cat /proc/swaps
vi /etc/ansible/hosts
[k8s]
192.168.158.150
192.168.158.151
192.168.158.152
[master]
192.168.158.150
[node]
192.168.158.151
192.168.158.152
ssh-keygen -t rsa
ssh-copy-id -i /root/.ssh/id_rsa.pub 192.168.158.150
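To prepare all three machines at once, the key can be copied in a loop and the ansible inventory above verified (a sketch; assumes ansible is installed):
for ip in 192.168.158.150 192.168.158.151 192.168.158.152; do ssh-copy-id -i /root/.ssh/id_rsa.pub $ip; done
ansible k8s -m ping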
hostnamectl --static set-hostname k8s-node-1
echo '192.168.158.150 k8s-master
192.168.158.150 etcd
192.168.158.150 registry
192.168.158.151 k8s-node-1
192.168.158.152 k8s-node-2' >> /etc/hosts
yum install etcd -y
vi /etc/etcd/etcd.conf
An alternative approach:
vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum list docker-ce --showduplicates
yum makecache fast
yum install docker-ce -y
vi /etc/docker/daemon.json
{"registry-mirrors":["https://w6pxadsb.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn"],"exec-opts": ["native.cgroupdriver=systemd"]
}
systemctl daemon-reload
systemctl enable docker
systemctl start docker
systemctl status docker
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubelet kubeadm kubectl
systemctl enable --now kubelet
Master
kubeadm init --apiserver-advertise-address=192.168.158.150 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl cluster-info
Node
kubeadm join 192.168.158.150:6443 --token tke6ck.o49skr479bfy2dy4 --discovery-token-ca-cert-hash sha256:e675d752e521e5c5c43ecfee4b0c0b51d7bb9bae87ee83410715329654432e76
kubeadm join 192.168.158.150:6443 --token hpxl8v.jc6szqhkvkwf8s7z --discovery-token-ca-cert-hash sha256:bb4ecc9004703bb18fb8bd11c4c1a3ba347acb60bf7470a39f326c44ab738aad
Advantages of k8s
1. Failover: when a node shuts down or crashes, the services on that node are automatically moved to another node, with no service interruption during the process. Plain Docker or ordinary cloud hosts cannot do this.
2. Resource scheduling: when a node runs short of CPU or memory, you can add nodes, and new pods are scheduled onto the newly added nodes by kube-scheduler.
3. Resource isolation: create separate namespaces for development, operations, and testing; after switching context, developers see only the pods in the development namespace and not those in the operations namespace, so teams do not interfere with each other (see the sketch after this list). On a traditional host, or with Docker alone, anyone who logs in sees all services and containers.
4. Process isolation: because Docker containers are used, processes do not affect one another.
5. Security: different roles have different permissions for operations such as viewing or deleting pods; RBAC authentication further strengthens Kubernetes security.
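A minimal sketch of the namespace isolation in point 3 (namespace names are illustrative; set-context --current requires kubectl 1.12+):
kubectl create namespace dev
kubectl create namespace ops
# switch the current context to the dev namespace; from here on, developers see only dev pods
kubectl config set-context --current --namespace=dev
kubectl get pods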
Kubernetes basic architecture and common terminology
Kubernetes is made up of the following core components:
etcd stores the state of the entire cluster;
apiserver is the single entry point for operations on resources and provides authentication, authorization, access control, API registration, and discovery;
controller manager maintains the cluster state, e.g. fault detection, automatic scaling, and rolling updates;
scheduler handles resource scheduling, placing Pods onto the appropriate machines according to the configured scheduling policy;
kubelet maintains the container lifecycle and also manages volumes (CVI) and networking (CNI);
Container runtime manages images and actually runs Pods and containers (CRI);
kube-proxy provides in-cluster service discovery and load balancing for Services;
Besides the core components, there are several recommended add-ons:
kube-dns provides DNS for the entire cluster
Ingress Controller provides an external entry point for services
Heapster provides resource monitoring
Dashboard provides a GUI
Federation provides clusters spanning availability zones
Fluentd-elasticsearch provides cluster log collection, storage, and querying
Kubernetes' design philosophy and functionality form a layered architecture similar to Linux's:
Core layer: Kubernetes' most essential functionality; it exposes APIs for building higher-level applications and provides a plugin-style application execution environment internally
Application layer: deployment (stateless applications, stateful applications, batch jobs, clustered applications, etc.) and routing (service discovery, DNS resolution, etc.)
Management layer: system metrics (infrastructure, container, and network metrics), automation (automatic scaling, dynamic provisioning, etc.), and policy management (RBAC, Quota, PSP, NetworkPolicy, etc.)
Interface layer: the kubectl command-line tool, client SDKs, and cluster federation
Ecosystem: the large ecosystem for container cluster management and scheduling above the interface layer, which can be divided into two categories
Outside Kubernetes: logging, monitoring, configuration management, CI, CD, workflow, FaaS, OTS applications, ChatOps, etc.
Inside Kubernetes: CRI, CNI, CVI, image registry, Cloud Provider, and the configuration and management of the cluster itself
1.2.1 Cluster
A cluster is a collection of compute, storage, and network resources that Kubernetes uses to run container-based applications. A Kubernetes cluster consists of a Master and Nodes, on which a number of Kubernetes services run.
1.2.2 Master
The Master's main responsibility is scheduling, i.e. deciding where applications run. The Master runs Linux and can be a physical or virtual machine. The Master is the brain of the Kubernetes cluster; the daemons it runs include kube-apiserver, kube-scheduler, kube-controller-manager, etcd, and the Pod network.
API Server (kube-apiserver)
The API Server exposes an HTTP/HTTPS RESTful API, the Kubernetes API. It is the single entry point for CRUD operations on all Kubernetes resources, and the entry-point process for cluster control.
Scheduler (kube-scheduler)
The Scheduler is the process responsible for resource scheduling; simply put, it decides which Node each Pod runs on.
Controller Manager (kube-controller-manager)
The automated control center for all resource objects. The Controller Manager manages the cluster's resources and keeps them in the desired state. There are many controllers, such as the replication controller, endpoints controller, namespace controller, and serviceaccounts controller.
Different controllers manage different resources; for example, the namespace controller manages Namespace resources, while workload controllers manage the lifecycle of Deployments, StatefulSets, and DaemonSets.
etcd
etcd stores the Kubernetes cluster's configuration information and the state of all its resources. When the data changes, etcd quickly notifies the relevant Kubernetes components.
Pod network
For Pods to communicate with each other, a Pod network must be deployed in the Kubernetes cluster; flannel is one of the available options.
1.2.3 Node
Apart from the Master, the other machines in a Kubernetes cluster are called Nodes. A Node's job is to run container applications; Nodes are managed by the Master, monitor and report container status, and manage the container lifecycle according to the Master's instructions. Nodes also run Linux and can be physical or virtual machines.
Each Node runs the following set of key processes:
kubelet: handles creating and starting the containers for Pods and works closely with the Master to implement the basic functions of cluster management
kube-proxy: a key component implementing the communication and load-balancing mechanism for Kubernetes Services
Docker Engine: the Docker engine, responsible for creating and managing containers on the local machine
1.2.4 Pod
A Pod is the smallest unit in Kubernetes and its most important and fundamental concept. Each Pod contains one or more containers, and the containers in a Pod are scheduled to a Node by the Master as a single unit. Kubernetes assigns every Pod a unique IP address, the Pod IP, which the containers in the Pod share. Within Kubernetes, a container in one Pod can communicate directly with the containers of Pods on other hosts.
1.2.5 Service
A Kubernetes Service defines how the outside world accesses a specific set of Pods. A Service has its own IP and port and load-balances across its Pods. It is also one of Kubernetes' most central resource objects: each Service is effectively one of the "microservices" of a microservice architecture.
1.2.6 Replication Controller
The Replication Controller (RC) is one of the core concepts in Kubernetes. It defines a desired scenario: a declaration that the number of replicas of a given Pod matches an expected value at all times. An RC definition therefore has the following parts:
The expected number of Pod replicas (replicas)
The Label Selector used to select the target Pods
The Pod template (template) used to create new Pods when the number of replicas falls below the expected value
Key characteristics and uses of RCs are summarized below (see the sketch after this list):
In most cases, we define an RC to create Pods and automatically control the replica count
An RC contains a complete Pod definition template
An RC controls Pod replicas automatically through its Label Selector
Changing the replica count in an RC scales the Pods out or in; changing the image version in the RC's Pod template triggers a rolling upgrade of the Pods
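A brief sketch of the last point, using the mytomcat RC from the manifests at the top of these notes (the tomcat:9 tag is illustrative; kubectl rolling-update applies to RCs and was removed in kubectl 1.18):
# scale from 2 to 3 replicas
kubectl scale rc mytomcat --replicas=3
# rolling upgrade by changing the image
kubectl rolling-update mytomcat --image=tomcat:9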
(1) Environment
IP address        OS           Role
192.168.158.150   CentOS 7.4   Master
192.168.158.151   CentOS 7.4   node1
192.168.158.152   CentOS 7.4   node2
(2) Base environment setup (run on every server)
1. Disable the firewall
[root@DEV004021 ~]# systemctl stop firewalld
[root@DEV004021 ~]# systemctl disable firewalld
2. Create the /etc/sysctl.d/k8s.conf file
[root@DEV004021 ~]# vim /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
3. Apply the configuration changes.
[root@DEV004021 ~]#modprobe br_netfilter
[root@DEV004021 ~]#sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
4. Disable swap (virtual memory)
[root@DEV004021 ~]#sudo sed -i '/swap/s/^/#/' /etc/fstab
[root@DEV004021 ~]#sudo swapoff -a
5. Install Docker
5.1. Remove old versions of Docker
[root@DEV004021 ~]# sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
5.2. Install the required tools
[root@DEV004021 ~]# yum install -y yum-utils device-mapper-persistent-data lvm2
5.3. Add the yum repository and refresh the cache
[root@DEV004021 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
5.4. Update and install Docker
Check which versions are available:
[root@DEV004021 ~]#yum list docker-ce --showduplicates
[root@DEV004021 ~]# yum makecache fast
[root@DEV004021 ~]# yum install docker-ce -y
5.5. Configure a registry mirror
[root@localhost ~]# vi /etc/docker/daemon.json
{"registry-mirrors":["https://w6pxadsb.mirror.aliyuncs.com","https://docker.mirrors.ustc.edu.cn"],"registry-mirrors": ["http://hub-mirror.c.163.com"]}
5.6. Enable the Docker service to start on boot
systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
systemctl start docker
6. Install kubelet, kubeadm, and kubectl
[root@DEV004021 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
[root@DEV004021 ~]# yum install -y kubelet kubeadm kubectl
systemctl enable --now kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
(3) Building the Kubernetes cluster
1. Initialize the master node (run only on the master).
[root@DEV004021 ~]# kubeadm init --apiserver-advertise-address=192.168.158.150 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.0 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16
--pod-network-cidr: a prerequisite for installing flannel later; the value must be 10.244.0.0/16.
--image-repository: specifies the image repository; here it is the Aliyun mirror.
2. Review the output; the log below indicates that initialization succeeded.
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.4.21:6443 --token dmzz6x.t864anv0btkyxjwi --discovery-token-ca-cert-hash sha256:2a8bbdd54dcc01435be1a3b443d33d0ce932c8d81c6d9ae8b3c248325977ceb1
3. Run the following commands in order:
[root@DEV004021 ~]# mkdir -p $HOME/.kube
[root@DEV004021 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@DEV004021 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
4. Deploy a Pod network to the cluster
[root@otrs004021 ~]# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.extensions/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created
5. The master node is now initialized. View cluster information:
[root@otrs004021 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.4.21:6443
KubeDNS is running at https://192.168.4.21:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
View node information:
[root@otrs004021 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
otrs004097 Ready master 6m27s v1.15.2
View pod information:
[root@otrs004021 ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-bccdc95cf-f5wtc 1/1 Running 0 6m32s
kube-system coredns-bccdc95cf-lnp2j 1/1 Running 0 6m32s
kube-system etcd-otrs004097 1/1 Running 0 5m56s
kube-system kube-apiserver-otrs004097 1/1 Running 0 5m38s
kube-system kube-controller-manager-otrs004097 1/1 Running 0 5m40s
kube-system kube-flannel-ds-amd64-xqdcf 1/1 Running 0 2m10s
kube-system kube-proxy-2lz96 1/1 Running 0 6m33s
kube-system kube-scheduler-otrs004097 1/1 Running 0 5m45s
If initialization runs into problems, reset with the following commands:
[root@DEV004021 ~]# kubeadm reset
[root@DEV004021 ~]# rm -rf /var/lib/cni/
[root@DEV004021 ~]# rm -f $HOME/.kube/config
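kubeadm reset does not flush iptables or IPVS rules; if needed, clean them up manually (a sketch; run ipvsadm only if IPVS mode is in use):
[root@DEV004021 ~]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
[root@DEV004021 ~]# ipvsadm -C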
(4) Add the other Kubernetes nodes; there are two methods.
Method 1: join using the token from the master initialization
[root@DEV004021 ~]# kubeadm join 192.168.158.150:6443 --token hpxl8v.jc6szqhkvkwf8s7z --discovery-token-ca-cert-hash sha256:bb4ecc9004703bb18fb8bd11c4c1a3ba347acb60bf7470a39f326c44ab738aad
Method 2: generate a new token and join with it
[root@otrs004021 ~]# kubeadm token generate
3o7wop.z2kxzhy7p0zwnb3v
[root@otrs004021 ~]# kubeadm token create 3o7wop.z2kxzhy7p0zwnb3v --print-join-command --ttl=24h
kubeadm join 192.168.4.21:6443 --token 3o7wop.z2kxzhy7p0zwnb3v --discovery-token-ca-cert-hash sha256:2a8bbdd54dcc01435be1a3b443d33d0ce932c8d81c6d9ae8b3c248325977ceb1
2. Run the following command on each remaining node to join the K8S cluster
[root@DEV004021 ~]# kubeadm join 192.168.4.21:6443 --token 3o7wop.z2kxzhy7p0zwnb3v --discovery-token-ca-cert-hash sha256:2a8bbdd54dcc01435be1a3b443d33d0ce932c8d81c6d9ae8b3c248325977ceb1
kubeadm join 192.168.158.150:6443 --token hpxl8v.jc6szqhkvkwf8s7z --discovery-token-ca-cert-hash sha256:bb4ecc9004703bb18fb8bd11c4c1a3ba347acb60bf7470a39f326c44ab738aad
[root@DEV004021 yum.repos.d]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
dev004019 Ready <none> 3d v1.15.2
dev004020 Ready <none> 3d v1.15.2
dev004021 Ready master 3d v1.15.2
At this point, a K8S cluster with 1 master and 2 nodes has been created successfully.
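Optionally, label the worker nodes so that ROLES no longer shows <none> (a cosmetic sketch; node names as listed above):
[root@DEV004021 yum.repos.d]# kubectl label node dev004019 node-role.kubernetes.io/worker=worker
[root@DEV004021 yum.repos.d]# kubectl label node dev004020 node-role.kubernetes.io/worker=worker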
http://m.mamicode.com/info-detail-2749150.html
View logs:
journalctl -f -u kubelet
Reset:
kubeadm reset
kubeadm join 192.168.158.150:6443 --token tke6ck.o49skr479bfy2dy4 --discovery-token-ca-cert-hash sha256:e675d752e521e5c5c43ecfee4b0c0b51d7bb9bae87ee83410715329654432e76
vi /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--runtime-cgroups=/systemd/system.slice --kubelet-cgroups=/systemd/system.slice"
sudo systemctl restart kubelet
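To confirm that Docker's cgroup driver matches what kubelet expects (the daemon.json earlier sets native.cgroupdriver=systemd), a quick check:
docker info | grep -i 'cgroup driver'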
docker pull quay.io/coreos/flannel:v0.11.0-amd64
kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d
eyJhbGciOiJSUzI1NiIsImtpZCI6ImRCSzRmazBmRFpybm5WNXJCVnoxck51bm1meEk3T3VOTXFIdVNCd1JCeFEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJvYXJkLXVzZXItdG9rZW4tMjc3OWgiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoia3Vib2FyZC11c2VyIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQudWlkIjoiMWJiYTY4YmEtMjliOC00OGUyLTk4NzItNmNhYWIwMzFkNWFmIiwic3ViIjoic3lzdGVtOnNlcnZpY2VhY2NvdW50Omt1YmUtc3lzdGVtOmt1Ym9hcmQtdXNlciJ9.IlKUsZDZLFRmyheOlRcL_YgcTQf-vDIq9cCJsyob_Tdm9YnKl-r2Hy5u4w9FfKTxpvUqjjDnIdmYi5Ck24gjnR7xFFPrC715Vac52v_AEecxAsgM8DLa-cV_NQ2uzJfvFgoWevKoC1YkSKeIymkWm1NcR60A49h445pLdQxmRuUegP-AowR6VEEXLUhbrwKyp6FcjZEKnE7PLTrnHY4MJ30jztKHg1TR7r6MhZxanCAacfaDz57TXHE-b80FtiOAzz9FuEK25w7kmz6Pdjoputdd3Dixj6GJKzjEzWArll7y0G8AtuzLshsUNxcN5ikuKLGqI2_ZctZFuevZ1_Tkzw
kubectl get deployment -n kube-system
kubectl delete deployment metrics-server -n kube-system
Dashboard
eyJhbGciOiJSUzI1NiIsImtpZCI6ImRCSzRmazBmRFpybm5WNXJCVnoxck51bm1meEk3T3VOTXFIdVNCd1JCeFEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTQ5djl6Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIxYmU1MzMyOC05MGNhLTQzN2EtOGFmNS0xY2FjMGJmNmMxODQiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.HD0lanYG35m2lGsI-L_mMLwXT55BWONypmnb2RhTdnJhO2Y5Xaa7wR7Q_pVsS6TU-CM6T13muoKOgbdJzf_ShQcDj9ElO6UiURYFSR_kasODRVqPhUCVqANN2ErLzgoX9Kpcy10E8qCn8x2r99X_Qxogoo1ncAL2JPZMXQcXOHE9JKFBLS6jX8K2FIGu74qSW7sztMcHC_WCNKcFpX3LDF_1KL5fYyoe2xMswnxa-K4cXrkPQo9Wkdu-NpUaZ9eUqVQX_8L3lP4luM6hXFR_5aSxIWGMXYVxyezdIZS3pbmtaD4zOUlHShJiQAn5SAHUprTvaqecn520G3k_peSHXA