1.kubernetes 集群升級(jí);? 2.總結(jié)yaml文件? 3.etcd客戶端使用、數(shù)據(jù)備份和恢復(fù);? 4.kubernetes集群維護(hù)常用命令; 5.資源對(duì)象? ? rc/rs/deployment、? ? service、? ? volume、? ? ? emptyDir、? ? ? hostpath、? ? ? NFS
1.kubernetes 集群升級(jí)
1.master升級(jí):一個(gè)一個(gè)升級(jí),先在node節(jié)點(diǎn)將master從配置文件中刪掉,然后重啟kube-lb服務(wù),接著將master以替換二進(jìn)制的方式升級(jí)master,最后再修改node節(jié)點(diǎn)的配置,將master加入負(fù)載均衡節(jié)點(diǎn),重啟node節(jié)點(diǎn)服務(wù),完成升級(jí)。
2.node升級(jí):也是一個(gè)node升級(jí)完再升級(jí)下一個(gè),升級(jí)一個(gè)node時(shí),需要停掉kubelet和kube-proxy服務(wù),然后將node的二進(jìn)制替換掉,升級(jí)node,最后將kubelet和kube-proxy啟動(dòng)。
master升級(jí):在master1,先下載升級(jí)的二進(jìn)制包,到github上下載:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md
root@k8s-master1:/etc/kubeasz# cd /usr/local/src/
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes.tar.gz
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-client-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-server-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-node-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes-client-linux-amd64.tar.gz root@k8s-master1:/usr/local/src# tar xf kubernetes-node-linux-amd64.tar.gz root@k8s-master1:/usr/local/src# tar xf kubernetes-server-linux-amd64.tar.gz root@k8s-master1:/usr/local/src# tar xf kubernetes.tar.gz
###先升級(jí)master1,修改node上的lb配置將master1注掉-->重啟lb服務(wù)-->拷貝二進(jìn)制包到master-->放開注釋-->重啟lb服務(wù)
root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.51:6443/#server 192.168.241.51:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done   #注意:restart必須放在ssh的引號(hào)內(nèi),否則是在本機(jī)(master1)重啟,而不是在各node上重啟kube-lb
###因?yàn)閙aster1就是部署機(jī)器,因此,先停止服務(wù),直接拷貝服務(wù)就可以了
root@k8s-master1:/usr/local/src/kubernetes# systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service
root@k8s-master1:/usr/local/src/kubernetes# \cp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} /usr/local/bin/
啟動(dòng)服務(wù)
root@k8s-master1:/usr/local/src/kubernetes# systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service
#查看master1的版本應(yīng)該是1.21.5
root@k8s-master1:/usr/local/src/kubernetes# kubectl get nodeNAME STATUS ROLES AGE VERSION192.168.241.51 Ready,SchedulingDisabled master 102m v1.21.5192.168.241.52 Ready,SchedulingDisabled master 102m v1.21.0192.168.241.53 Ready,SchedulingDisabled master 96m v1.21.0192.168.241.57 Ready node 19m v1.21.0192.168.241.58 Ready node 101m v1.21.0192.168.241.59 Ready node 101m v1.21.0
將node節(jié)點(diǎn)的lb上的master重新添加回去,并重啟lb的服務(wù)
root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.51:6443/server 192.168.241.51:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
依次添加master2
root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.52:6443/#server 192.168.241.52:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:~#ssh? k8s-master2? "systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/etc/kubeasz# cd /usr/local/src/kubernetes/root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} k8s-master2:/usr/local/bin/
root@k8s-master1:~#ssh? k8s-master2? "systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.52:6443/server 192.168.241.52:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:/usr/local/src/kubernetes# kubectl get nodeNAME STATUS ROLES AGE VERSION192.168.241.51 Ready,SchedulingDisabled master 112m v1.21.5192.168.241.52 Ready,SchedulingDisabled master 112m v1.21.5
升級(jí)master3
root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.53:6443/#server 192.168.241.53:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:~#ssh? k8s-master3? "systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/etc/kubeasz# cd /usr/local/src/kubernetes/?
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} k8s-master3:/usr/local/bin/
root@k8s-master1:~#ssh? k8s-master3? "systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"
root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.53:6443/server 192.168.241.53:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done
root@k8s-master1:/usr/local/src/kubernetes# kubectl get nodeNAME STATUS ROLES AGE VERSION192.168.241.51 Ready,SchedulingDisabled master 117m v1.21.5192.168.241.52 Ready,SchedulingDisabled master 117m v1.21.5192.168.241.53 Ready,SchedulingDisabled master 111m v1.21.5192.168.241.57 Ready node 34m v1.21.0192.168.241.58 Ready node 116m v1.21.0192.168.241.59 Ready node 116m v1.21.0
master升級(jí)完畢!
node升級(jí):
##只需要停止kubelet和kube-proxy服務(wù)后將二進(jìn)制文件拷貝到執(zhí)行目錄,然后啟動(dòng)這兩服務(wù)即可
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node1 "systemctl stop kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node1:/usr/local/bin
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node1 "systemctl start kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node2 "systemctl stop kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node2:/usr/local/bin
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node2 "systemctl start kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node3 "systemctl stop kubelet kube-proxy"root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node3:/usr/local/bin
root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node3 "systemctl start kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# kubectl get nodeNAME STATUS ROLES AGE VERSION192.168.241.51 Ready,SchedulingDisabled master 123m v1.21.5192.168.241.52 Ready,SchedulingDisabled master 123m v1.21.5192.168.241.53 Ready,SchedulingDisabled master 117m v1.21.5192.168.241.57 NotReady node 40m v1.21.5192.168.241.58 Ready node 123m v1.21.5192.168.241.59 Ready node 123m v1.21.5
至此,master和node升級(jí)完畢!
2.總結(jié)yaml文件
yaml更適用于配置文件,json更適用于API數(shù)據(jù)返回,json也可以用作配置文件,json不能使用注釋。yaml和json可以互相轉(zhuǎn)換。
yaml格式:
大小寫敏感
縮進(jìn)表示層級(jí)關(guān)系
縮進(jìn)不能使用tab,縮進(jìn)一般是兩個(gè)空格,同級(jí)縮進(jìn)應(yīng)該對(duì)齊
可以加注釋,# 注釋
比json更適用于配置文件
列表用短橫線表示? -
3.etcd客戶端使用、數(shù)據(jù)備份和恢復(fù)
etcd是kv分布式存儲(chǔ)系統(tǒng)
到etcd任意節(jié)點(diǎn),執(zhí)行以下操作:
##etcd命令客戶端工具etcdctl,命令使用幫助
root@k8s-etcd1:~# etcdctl member -h
root@k8s-etcd1:~# etcdctl -h
etcd健康狀態(tài)查詢:
root@k8s-etcd2:~# export NODE_IPS="192.168.241.54 192.168.241.55 192.168.241.56"?
root@k8s-etcd2:~# for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health;done
顯示集群成員信息: member list
root@k8s-etcd1:~# etcdctl member list
root@k8s-etcd1:~# etcdctl --write-out=table member list #etcdctl 3版本以上可以不加證書,但是建議加上
ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://192.168.241.56:2379 --write-out=table member list --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem
以表格方式顯示節(jié)點(diǎn)詳細(xì)狀態(tài): endpoint status
export NODE_IPS="192.168.241.54 192.168.241.55 192.168.241.56"
for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --write-out=table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status;done
可以看到leader是etcd3,現(xiàn)在可以停止etcd3的服務(wù),
root@k8s-etcd3:/usr/local/bin# systemctl stop etcd
再到etcd1查看,leader不再是etcd3,會(huì)自動(dòng)重新選舉一個(gè)新的leader
root@k8s-etcd1:~# for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --write-out=table --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint status;done
etcd數(shù)據(jù)的增刪改查
? ? 增,改: put
? ? 查:get
? ? 刪: del
寫入數(shù)據(jù): etcdctl put /huahualin linux??
查看數(shù)據(jù):?etcdctl get? /huahualin?
修改數(shù)據(jù):?etcdctl put /huahualin centos
刪除數(shù)據(jù):?etcdctl del? /huahualin?
查看有多少key
/usr/local/bin/etcdctl get / --prefix --keys-only
獲取key的值
root@k8s-etcd1:~# /usr/local/bin/etcdctl get /registry/services/endpoints/default/kubernetes
如果想查看多少個(gè)pod,但是也不準(zhǔn),可以過濾
/usr/local/bin/etcdctl get / --prefix --keys-only|grep pod |wc -l
etcd獲取calico相關(guān)
/usr/local/bin/etcdctl get / --prefix --keys-only|grep calico
root@k8s-etcd1:~# /usr/local/bin/etcdctl get /calico/ipam/v2/assignment/ipv4/block/10.200.169.128-26
etcd V3的watch機(jī)制:
基于不斷監(jiān)控?cái)?shù)據(jù),發(fā)生變化就主動(dòng)發(fā)通知客戶端,保證數(shù)據(jù)的快速同步,ETCD V3版本的watch機(jī)制支持watch某個(gè)固定的key,也支持一個(gè)范圍 相比如v2,內(nèi)存只存key,值放在磁盤里,因此對(duì)磁盤的io很高了,
watch機(jī)制更穩(wěn)定,基本上可以實(shí)現(xiàn)數(shù)據(jù)完全同步
通過Grpc實(shí)現(xiàn)遠(yuǎn)程調(diào)用,長(zhǎng)鏈接效率提升明顯
放棄目錄結(jié)構(gòu),純粹kv
watch的使用:可以實(shí)時(shí)監(jiān)控?cái)?shù)據(jù)的變化,這些都在etcdctl自動(dòng)實(shí)現(xiàn)了,不需要我們?nèi)为?dú)watch
    etcdctl watch /huahualin
備份和恢復(fù):使用快照來進(jìn)行備份和恢復(fù)?
????WAL機(jī)制:預(yù)寫日志,可以用來恢復(fù)數(shù)據(jù),記住數(shù)據(jù)變化的全部歷程
? ??etcd是鏡像集群,在每個(gè)節(jié)點(diǎn)正常同步數(shù)據(jù)的情況下,每個(gè)節(jié)點(diǎn)數(shù)據(jù)都是一樣的,因此備份只備份一份就行,還原也是只還原一份就可以了。集群壞一個(gè)不需要恢復(fù)數(shù)據(jù),極端情況是所有節(jié)點(diǎn)都被刪了,才恢復(fù)。
? ?etcd數(shù)據(jù)備份:??ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot save etcd-2021-1014.db
? ? etcd數(shù)據(jù)恢復(fù):?ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore etcd-2021-1014.db --data-dir=/tmp/etcd? ?##還原的目標(biāo)目錄不能存在,否則會(huì)報(bào)錯(cuò),恢復(fù)數(shù)據(jù)需要恢復(fù)到etcd的數(shù)據(jù)目錄,目錄一般都是在 /var/lib/etcd ,先停etcd服務(wù),然后把數(shù)據(jù)目錄刪除,進(jìn)行數(shù)據(jù)恢復(fù),最后啟動(dòng)etcd服務(wù)。
使用kubeasz項(xiàng)目對(duì)生產(chǎn)環(huán)境的etcd數(shù)據(jù)進(jìn)行備份和恢復(fù):
? ??cd /etc/kubeasz/
? ??./ezctl backup k8s-01? ?#其實(shí)也是連接到其中一臺(tái)etcd然后將備份好的文件拷貝到master上的集群下的backup目錄下
? ? ?恢復(fù):./ezctl restore k8s-01
? ? ? ? 假設(shè)刪除了一個(gè)pod: ??
? ???????????方法一:kubectl delete pod net-test1 -n default
? ? ? ? ? ? ?方法二:也可以到etcd下刪除key,? ?etcdctl del /registry/x/x/net-test1,刪除key很快
? ? ? ?數(shù)據(jù)恢復(fù): 過程也是到etcd先停止服務(wù)避免寫入,然后刪除目錄進(jìn)行恢復(fù)
? ? ? ? ?./ezctl restore k8s-01
4.kubernetes集群維護(hù)常用命令
kubectl get pod -A -o wide? ?查看所有pod
kubectl get service -A? ?查看所有service
kubectl get nodes   查看所有node節(jié)點(diǎn)(node不屬于namespace,無需-A)
kubectl describe pod pod_name -n ns_name??如果不是默認(rèn)namespace,需要指定ns,-n后面就是制定namespace的名稱
如果pod創(chuàng)建失敗,可以使用kubectl logs pod_name -n ns 查看容器日志
如果還看不到日志,可以到node節(jié)點(diǎn)查看syslog,
? ? ? ? ?cat /var/log/syslog 看有沒有報(bào)錯(cuò)
? ? ? ? ?cat /var/log/kern.log
利用yaml創(chuàng)建資源: kubectl create -f file.yaml --save-config --record? ?這條命令類似于? kubectl apply -f file.yaml? ?,
kubectl create命令,是先刪除所有現(xiàn)有的東西,重新根據(jù)yaml文件生成新的。所以要求yaml文件中的配置必須是完整的.用同一個(gè)yaml 文件執(zhí)行替換replace命令,將會(huì)不成功,fail掉。 kubectl apply命令,根據(jù)配置文件里面列出來的內(nèi)容,升級(jí)現(xiàn)有的。所以yaml文件的內(nèi)容可以只寫需要升級(jí)的屬性,就是說apply只會(huì)修改資源變化的部分,而create是需要?jiǎng)h掉服務(wù),重新創(chuàng)建
用kubectl? apply -f file.yaml? ?比較多,
獲取token:
root@k8s-master2:~# kubectl get secrets?
NAME TYPE DATA AGE?
default-token-7mcjc kubernetes.io/service-account-token 3 10h 2l80Vwzag
root@k8s-master2:~# kubectl describe secret default-token-7mcjc HGRR0U
5.資源對(duì)象? ? rc/rs/deployment、? ? service、? ? volume、? ? ? emptyDir、? ? ? hostpath、? ? ? NFS
1.controller,總有三代:
? ? ?Replication Controller:副本控制器RC,只支持 (selector = !=),第一代pod副本控制器,主要控制副本,簡(jiǎn)稱rc,現(xiàn)在很少用了
? ? ?Replicaset:服務(wù)控制集,除了支持rc的selector,還支持使用正則匹配,比如支持in,not in,匹配的范圍更大,第二代pod副本控制器RS,也是控制副本,簡(jiǎn)稱rs
? ? ?Deployment:第三代pod副本控制器,其實(shí)也是調(diào)用的replicaset,優(yōu)點(diǎn)是有更多的高級(jí)功能,除了擁有replicaset的功能外,還有別的功能,比如滾動(dòng)升級(jí)、回滾等。用此方法創(chuàng)建的pod名稱有三段組成: deploymentname-Replicasetname-podname
2.service:ipvsadm -Ln 可以查看service的服務(wù)映射類型:分為集群內(nèi)的service和集群外的訪問k8s集群內(nèi)的service:使用ClusterIP?訪問K8S集群外的service: 使用nodePort ,這樣可以通過宿主機(jī)去訪問
ClusterIP:
cat nginx.yaml
# Deployment: 1副本nginx,selector用matchExpressions(In)匹配兩個(gè)label取值
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    # In 運(yùn)算符:pod label app 取 ng-deploy-80 或 ng-rs-81 均被本控制器管理
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
執(zhí)行創(chuàng)建deployment控制器:? kubectl apply -f nginx.yaml
root@k8s-master1:~/yaml/service# cat service.yaml
# ClusterIP Service:集群內(nèi)訪問nginx deployment
# 修復(fù):原文缺少 selector,Service 不會(huì)生成任何 endpoint,無法轉(zhuǎn)發(fā)到pod
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 80          # service端口
    targetPort: 80    # 容器端口
    protocol: TCP
  type: ClusterIP
  selector:
    app: ng-deploy-80 # 必須與pod template的label一致
執(zhí)行創(chuàng)建service:? kubectl apply -f service.yaml
查看: 在k8s的web頁(yè)面上查看,登錄任意一個(gè)節(jié)點(diǎn)的30002端口 https://192.168.133.59:30002,找到pods,可以看到對(duì)應(yīng)的pod,
Nodeport:
還是使用上面那個(gè)nginx.yaml
root@k8s-master1:~/yaml/service# cat nodePort-svc.yaml
# NodePort Service:通過任意node的30012端口從集群外訪問
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 90           # service(ClusterIP)端口
    targetPort: 80     # 容器端口
    nodePort: 30012    # 宿主機(jī)暴露端口(默認(rèn)范圍30000-32767)
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
執(zhí)行創(chuàng)建service:  kubectl apply -f nodePort-svc.yaml
在瀏覽器訪問任何一個(gè)node節(jié)點(diǎn)的30012端口? http://192.168.133.59:30012/
通過負(fù)載均衡器訪問:因?yàn)橥ㄟ^node的30012訪問不太方便,所以可以在ha1和ha2上面配置多個(gè)
root@k8s-ha1:~# vim /etc/haproxy//haproxy.cfg
listen huahualin-nginx-80
? bind 192.168.241.62:80
? mode tcp
? server node1 192.168.241.57:30012 check inter 3s fall 3 rise 3
? server node2 192.168.241.58:30012 check inter 3s fall 3 rise 3
? server node3 192.168.241.59:30012 check inter 3s fall 3 rise 3
root@k8s-ha1:~# systemctl restart haproxy.service
瀏覽器訪問服務(wù)? http://192.168.241.62:80/? ,也可以把域名解析到本地,訪問域名
3.Volume:支持多種類型:比如nfs,?hostpath,emptyDir,? cinder,rdb等
舉例:hostpath,emptyDir,nfs的掛載方式
emptyDir:本地臨時(shí)卷,就是個(gè)空的目錄,還是臨時(shí)的,容器被刪除時(shí),emptyDir中的數(shù)據(jù)也被刪除,掛載的容器里的/cache目錄不存在也會(huì)自動(dòng)創(chuàng)建,在這個(gè)目錄下創(chuàng)建目錄,可以在使用kubectl get pods -o wide找到在哪個(gè)節(jié)點(diǎn)上,去那個(gè)節(jié)點(diǎn)下的/var/lib/kubelet/pods/目錄下查找這個(gè)pod的這個(gè)volumes就可以看到和cache映射的文件
root@k8s-master1:~/yaml/service# cat nginx.yaml
# emptyDir示例:容器內(nèi)/cache掛載一個(gè)節(jié)點(diǎn)本地臨時(shí)卷,pod刪除時(shí)數(shù)據(jù)一起刪除
# 修復(fù):原文此處漏掉了volumeMounts/volumes,與下文在節(jié)點(diǎn)上查找
# kubernetes.io~empty-dir/cache-volume 的演示對(duì)不上
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache    # 容器內(nèi)路徑,不存在會(huì)自動(dòng)創(chuàng)建
          name: cache-volume
      volumes:
      - name: cache-volume
        emptyDir: {}           # 節(jié)點(diǎn)上位于 /var/lib/kubelet/pods/<uid>/volumes/kubernetes.io~empty-dir/
kubectl? apply -f?nginx.yaml
hostPath:容器刪除時(shí),數(shù)據(jù)不會(huì)刪除
root@k8s-master1:~/yaml/volume# kubectl exec -it nginx-deployment-98f46f4cc-2kbjd bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-98f46f4cc-2kbjd:/# cd? /cache/
root@nginx-deployment-98f46f4cc-2kbjd:/cache# echo 333 >> test
##查看pod在哪個(gè)節(jié)點(diǎn)上,在59上,可以去59查看這個(gè)pod掛載emptyDir下面是否生成了test文件
root@k8s-master1:~/yaml/service# kubectl get pods -o wide
NAME? ? ? ? ? ? ? ? ? ? ? ? ? ? ? READY? STATUS? ? RESTARTS? AGE? IP? ? ? ? ? ? ? NODE? ? ? ? ? ? NOMINATED NODE? READINESS GATES
nginx-deployment-98f46f4cc-2kbjd? 1/1? ? Running? 0? ? ? ? ? 11h? 10.200.107.195? 192.168.241.59? <none>? ? ? ? ? <none>
##去59查看這個(gè)pod掛載emptyDir下面是否生成了test文件,
root@k8s-node3# find /var/lib/kubelet/* -name cache*
/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/plugins/kubernetes.io~empty-dir/cache-volume
/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume
cd /var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume
root@k8s-node3:/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume# echo ddd > test
現(xiàn)在去容器里查看就會(huì)有新的字串了
hostPath:可以持久化,但是不能共享,只能在當(dāng)前主機(jī)使用,主機(jī)刪除以后可能就會(huì)被重新調(diào)度,很可能會(huì)分配到別的主機(jī),別的主機(jī)沒有這個(gè)hostPath容器再創(chuàng)建后如果調(diào)度到別的主機(jī)那么自己的數(shù)據(jù)就看不到了,就丟了
root@k8s-master1:~/yaml/volume# cat hostpath.yaml
# hostPath示例:容器/cache映射到所在節(jié)點(diǎn)的/tmp/cache,容器刪除后數(shù)據(jù)保留,
# 但數(shù)據(jù)只在該節(jié)點(diǎn)本地,pod被調(diào)度到其他節(jié)點(diǎn)后看不到原數(shù)據(jù)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume
      volumes:
      - name: cache-volume
        hostPath:
          path: /tmp/cache   # 節(jié)點(diǎn)上不存在時(shí)會(huì)自動(dòng)創(chuàng)建
查看創(chuàng)建在哪個(gè)node上了,hostPath下的/tmp/cache路徑會(huì)自動(dòng)創(chuàng)建,
root@k8s-master1:~/yaml/volume# kubectl get pods -o wide
NAME? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? READY? STATUS? ? RESTARTS? AGE? ? IP? ? ? ? ? ? ? NODE? ? ? ? ? ? NOMINATED NODE? READINESS GATES
nginx-deployment-5cc98d6c56-sdtvc? 1/1? ? Running? 0? ? ? ? ? 4m43s? 10.200.169.131? 192.168.241.58? <none>? ? ? ? ? <none>
然后去58這個(gè)node上查看/tmp/cache有沒有,有的
root@k8s-node2:~# ls /tmp/cache/
到master1節(jié)點(diǎn)進(jìn)入容器創(chuàng)建文件
root@k8s-master1:~/yaml/volume# kubectl exec -it nginx-deployment-5cc98d6c56-sdtvc? bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nginx-deployment-5cc98d6c56-sdtvc:/# echo 123 > /cache/nginx.log
再到58節(jié)點(diǎn)查看,會(huì)有文件nginx.log生成
root@k8s-node2:~# ls /tmp/cache/
nginx.log
nfs:? 網(wǎng)絡(luò)文件系統(tǒng)共享存儲(chǔ),多個(gè)pod可以同時(shí)掛載同一個(gè)nfs
##ha01上操作:
? 先安裝nfs,在ha01上面安裝nfs
apt update
apt install nfs-server
mkdir /data/nfs -p
vi /etc/exports
/data/nfs *(rw,no_root_squash)   #導(dǎo)出目錄與上面mkdir及yaml中的path保持一致;授權(quán)地址和權(quán)限括號(hào)之間不能有空格
systemctl restart nfs-server.service
systemctl enable nfs-server.service
showmount -e? 如果有目錄說明可以掛載了
###到master1上,暴露node的30016端口
root@k8s-master1:~/yaml/volume# cat nfs.yaml
# NFS示例:把 192.168.241.62:/data/nfs 掛到容器 /usr/share/nginx/html/mysite,
# 并通過NodePort 30016對(duì)外暴露;多個(gè)pod可同時(shí)掛同一個(gè)nfs共享
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 192.168.241.62   # nfs服務(wù)端(ha01)
          path: /data/nfs
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
showmount -e? ha_ip? #檢查是否可以掛載的共享目錄,如果可以看到就可以掛載
##到ha01做負(fù)載均衡,修改端口號(hào)
root@k8s-ha1:~# cat /etc/haproxy/haproxy.cfg
listen huahualin-nginx-80
? bind 192.168.241.62:80
? mode tcp
? server node1 192.168.241.57:30016 check inter 3s fall 3 rise 3
? server node2 192.168.241.58:30016 check inter 3s fall 3 rise 3
? server node3 192.168.241.59:30016 check inter 3s fall 3 rise 3
重啟服務(wù): systemctl restart haproxy
#在瀏覽器訪問: 192.168.241.62:80
訪問dashboard: https://192.168.241.58:30002
? 進(jìn)入剛剛創(chuàng)建的容器,可以看到掛載的目錄 /usr/share/nginx/html/mysite
root@nginx-deployment-7964d774d9-ntz6g:/# df -h? ?
Filesystem? ? ? ? ? ? ? ? Size? Used Avail Use% Mounted on
overlay? ? ? ? ? ? ? ? ? ? 29G? 12G? 16G? 42% /
tmpfs? ? ? ? ? ? ? ? ? ? ? 64M? ? 0? 64M? 0% /dev
tmpfs? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /sys/fs/cgroup
/dev/sda5? ? ? ? ? ? ? ? ? 29G? 12G? 16G? 42% /etc/hosts
shm? ? ? ? ? ? ? ? ? ? ? ? 64M? ? 0? 64M? 0% /dev/shm
192.168.241.62:/data/nfs? 29G? 9.1G? 19G? 34% /usr/share/nginx/html/mysite
tmpfs? ? ? ? ? ? ? ? ? ? 975M? 12K? 975M? 1% /run/secrets/kubernetes.io/serviceaccount
tmpfs? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /proc/acpi
tmpfs? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /proc/scsi
tmpfs? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /sys/firmware ?
在ha1的/data/nfs放圖片flowers1.jpg
訪問 192.168.241.62:80/mysite/flowers1.jpg
容器的掛載其實(shí)不是掛載到pod中的,容器沒有內(nèi)核,其實(shí)是掛載到node節(jié)點(diǎn)上的,然后映射給容器,查看pod創(chuàng)建在哪個(gè)node上
root@k8s-master1:~/yaml/volume# kubectl get pod -o wide
NAME? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? READY? STATUS? ? RESTARTS? AGE? IP? ? ? ? ? ? ? NODE? ? ? ? ? ? NOMINATED NODE? READINESS GATES
nginx-deployment-7964d774d9-ntz6g? 1/1? ? Running? 1? ? ? ? ? 47m? 10.200.107.201? 192.168.241.59? <none>? ? ? ? ? <none>
到192.168.241.59上查看
root@k8s-node3:~# df -Th
192.168.241.62:/data/nfs nfs4? ? ? 29G? 9.1G? 19G? 34% /var/lib/kubelet/pods/0cdfdfa7-c8e5-4cad-b5a3-747f931a6a59/volumes/kubernetes.io~nfs/my-nfs-volume
如果要掛載多個(gè)nfs怎么弄?
? ##到master1上面,添加新的掛載用來掛載js文件,把nfs的/data/nfs/js掛載到/usr/share/nginx/html/js
vi? nfs.yaml
# 掛載多個(gè)nfs卷:mysite和js兩個(gè)掛載點(diǎn)分別對(duì)應(yīng)兩個(gè)nfs導(dǎo)出路徑
# 修復(fù):原文volumes:鍵重復(fù)寫了兩次(重復(fù)key是非法YAML),且多了一個(gè)多余的---分隔符
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
        - mountPath: /usr/share/nginx/html/js
          name: my-nfs-js
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 192.168.241.62
          path: /data/nfs
      - name: my-nfs-js
        nfs:
          server: 192.168.241.62
          path: /data/nfs/js   # 需先在nfs服務(wù)端mkdir /data/nfs/js
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
? ? kubectl apply -f nfs.yaml
? ? 到ha01創(chuàng)建? /data/nfs/js目錄
? ? mkdir /data/nfs/js
? ? 任意編寫個(gè)靜態(tài)文件,假裝是js,? vi 1.js
? ? 到dashboard進(jìn)入容器查看:兩個(gè)掛載目錄/usr/share/nginx/html/js和/usr/share/nginx/html/mysite
root@nginx-deployment-79454b55b8-jbh4s:/# df -h
Filesystem? ? ? ? ? ? ? ? ? Size? Used Avail Use% Mounted on
overlay? ? ? ? ? ? ? ? ? ? ? 29G? 12G? 16G? 42% /
tmpfs? ? ? ? ? ? ? ? ? ? ? ? 64M? ? 0? 64M? 0% /dev
tmpfs? ? ? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /sys/fs/cgroup
/dev/sda5? ? ? ? ? ? ? ? ? ? 29G? 12G? 16G? 42% /etc/hosts
shm? ? ? ? ? ? ? ? ? ? ? ? ? 64M? ? 0? 64M? 0% /dev/shm
192.168.241.62:/data/nfs/js? 29G? 9.1G? 19G? 34% /usr/share/nginx/html/js
tmpfs? ? ? ? ? ? ? ? ? ? ? ? 975M? 12K? 975M? 1% /run/secrets/kubernetes.io/serviceaccount
192.168.241.62:/data/nfs? ? ? 29G? 9.1G? 19G? 34% /usr/share/nginx/html/mysite
tmpfs? ? ? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /proc/acpi
tmpfs? ? ? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /proc/scsi
tmpfs? ? ? ? ? ? ? ? ? ? ? ? 975M? ? 0? 975M? 0% /sys/firmware?
到瀏覽器訪問
http://192.168.241.62/js/1.js