Cloud Computing Day 11 - Kubernetes (K8s)

1. Health Checks

1.1.1 Types of probes

livenessProbe: liveness check. Periodically checks whether the service is still alive; if the check fails, the container is restarted.

readinessProbe: availability check. Periodically checks whether the service is ready to serve traffic; if not, the Pod is removed from the Service's endpoints.

1.1.2 Probe check methods

exec: run a command inside the container
httpGet: check the status code returned by an HTTP request
tcpSocket: test whether a TCP port accepts connections

1.1.3 Using the exec method with a liveness probe

[root@k8s-master k8s_yaml]# mkdir healthy
[root@k8s-master k8s_yaml]# cd healthy
[root@k8s-master healthy]# cat  nginx_pod_exec.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: exec
spec:
  containers:
    - name: nginx
      image: 10.0.0.11:5000/nginx:1.13
      ports:
        - containerPort: 80
      args:
        - /bin/sh
        - -c
        - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        initialDelaySeconds: 5   
        periodSeconds: 5

[root@k8s-master healthy]# kubectl create -f nginx_pod_exec.yaml
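
The container creates /tmp/healthy, removes it after 30 seconds, and then sleeps; once the file is gone, `cat /tmp/healthy` fails and the probe restarts the container. An illustrative way to observe this (exact timing and counts will vary):

kubectl get pod exec -w      # watch the RESTARTS column increase after roughly 30s
kubectl describe pod exec    # probe failures appear under Events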

1.1.4 Using the httpGet method with a liveness probe

[root@k8s-master healthy]# vim  nginx_pod_httpGet.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: httpget
spec:
  containers:
    - name: nginx
      image: 10.0.0.11:5000/nginx:1.13
      ports:
        - containerPort: 80
      livenessProbe:
        httpGet:
          path: /index.html
          port: 80
        initialDelaySeconds: 3
        periodSeconds: 3
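
This manifest omits the create step; to run it and watch the probe in action, you can create the Pod and then remove the probed page (an illustrative test, assuming the image uses the default nginx docroot /usr/share/nginx/html):

kubectl create -f nginx_pod_httpGet.yaml
kubectl exec httpget -- rm /usr/share/nginx/html/index.html   # probe now gets a 404
kubectl get pod httpget -w                                    # the container restarts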

1.1.5 Using the tcpSocket method with a liveness probe

[root@k8s-master healthy]# vim   nginx_pod_tcpSocket.yaml
apiVersion: v1
kind: Pod
metadata:
  name: tcpsocket
spec:
  containers:
    - name: nginx
      image: 10.0.0.11:5000/nginx:1.13
      ports:
        - containerPort: 80
      args:
        - /bin/sh
        - -c
        - tailf  /etc/hosts
      livenessProbe:
        tcpSocket:
          port: 80
        initialDelaySeconds: 60
        periodSeconds: 3

# Because args replaces the image's default command, nginx never starts and nothing
# listens on port 80, so the TCP probe fails once the 60s initial delay expires.
# Check the pod: it was restarted once after about a minute.
[root@k8s-master healthy]# kubectl create -f nginx_pod_tcpSocket.yaml
[root@k8s-master healthy]# kubectl get pod
NAME                    READY     STATUS    RESTARTS   AGE
tcpsocket               1/1       Running   1          4m

1.1.6 Using the httpGet method with a readiness probe

Availability check: readinessProbe

[root@k8s-master healthy]# vim  nginx-rc-httpGet.yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: readiness
spec:
  replicas: 2
  selector:
    app: readiness
  template:
    metadata:
      labels:
        app: readiness
    spec:
      containers:
      - name: readiness
        image: 10.0.0.11:5000/nginx:1.13
        ports:
        - containerPort: 80
        readinessProbe:
          httpGet:
            path: /lcx.html
            port: 80
          initialDelaySeconds: 3
          periodSeconds: 3

[root@k8s-master healthy]# kubectl create -f nginx-rc-httpGet.yaml
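
Since /lcx.html does not exist in the image, the probe fails and the pods never become Ready, so they are excluded from any matching Service's endpoints. A sketch of verifying this and then satisfying the probe (the pod name is a placeholder, and the nginx docroot is assumed):

kubectl get pod -l app=readiness    # READY shows 0/1 while the probe fails
kubectl exec <readiness-pod-name> -- /bin/sh -c 'echo ok > /usr/share/nginx/html/lcx.html'
kubectl get pod -l app=readiness    # READY becomes 1/1 shortly after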

1.2 The dashboard service

1: Upload and import the image, then tag it

2: Create the dashboard Deployment and Service

3: Visit http://10.0.0.11:8080/ui/


Upload the image on the master.
Official config file download link
Image download link: (extraction code: qjb7)

docker load -i kubernetes-dashboard-amd64_v1.4.1.tar.gz
# Upload the image on k8s-node2 as well
[root@k8s-node2 ~]# docker load -i kubernetes-dashboard-amd64_v1.4.1.tar.gz 
5f70bf18a086: Loading layer 1.024 kB/1.024 kB
2e350fa8cbdf: Loading layer 86.96 MB/86.96 MB
Loaded image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1

dashboard.yaml

[root@k8s-master dashboard]# cat dashboard.yaml 
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
# Keep the name in sync with image version and
# gce/coreos/kube-manifests/addons/dashboard counterparts
  name: kubernetes-dashboard-latest
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
        version: latest
        kubernetes.io/cluster-service: "true"
    spec:
      nodeName: k8s-node2
      containers:
      - name: kubernetes-dashboard
        image: index.tenxcloud.com/google_containers/kubernetes-dashboard-amd64:v1.4.1
        imagePullPolicy: IfNotPresent
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 100m
            memory: 50Mi
          requests:
            cpu: 100m
            memory: 50Mi
        ports:
        - containerPort: 9090
        args:
         -  --apiserver-host=http://10.0.0.11:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30

dashboard-svc.yaml

[root@k8s-master dashboard]# vim dashboard-svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  labels:
    k8s-app: kubernetes-dashboard
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - port: 80
    targetPort: 9090

Create the resources

[root@k8s-master dashboard]# kubectl create -f .
service "kubernetes-dashboard" created
deployment "kubernetes-dashboard-latest" created

# Check that everything is Running
[root@k8s-master dashboard]# kubectl get all -n kube-system
NAME                                 DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
deploy/kube-dns                      1         1         1            1           17h
deploy/kubernetes-dashboard-latest   1         1         1            1           20s

NAME                       CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
svc/kube-dns               10.254.230.254   <none>        53/UDP,53/TCP   17h
svc/kubernetes-dashboard   10.254.216.169   <none>        80/TCP          20s

NAME                                        DESIRED   CURRENT   READY     AGE
rs/kube-dns-2622810276                      1         1         1         17h
rs/kubernetes-dashboard-latest-3233121221   1         1         1         20s

NAME                                              READY     STATUS    RESTARTS   AGE
po/kube-dns-2622810276-wvh5m                      4/4       Running   4          17h
po/kubernetes-dashboard-latest-3233121221-km08b   1/1       Running   0          20s

1.3 Accessing a Service through the apiserver reverse proxy

Option 1: NodePort type

  type: NodePort
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30008

Option 2: ClusterIP type

  type: ClusterIP
  ports:
  - port: 80
    targetPort: 80

http://10.0.0.11:8080/api/v1/proxy/namespaces/<namespace>/services/<service-name>/

http://10.0.0.11:8080/api/v1/proxy/namespaces/default/services/myweb/
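
For context, a complete Service manifest for the NodePort variant might look like this (a sketch assuming a myweb controller whose pods carry the label app: myweb):

apiVersion: v1
kind: Service
metadata:
  name: myweb
spec:
  type: NodePort
  selector:
    app: myweb
  ports:
  - port: 80
    targetPort: 80
    nodePort: 30008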


2. k8s Elastic Scaling

Elastic scaling in k8s requires the heapster monitoring add-on.


2.1 Install heapster monitoring

1: Upload and import the images, then tag them
On k8s-node2:

[root@k8s-node2 opt]# ll
total 1492076
-rw-r--r-- 1 root root 275096576 Sep 17 11:42 docker_heapster_grafana.tar.gz
-rw-r--r-- 1 root root 260942336 Sep 17 11:43 docker_heapster_influxdb.tar.gz
-rw-r--r-- 1 root root 991839232 Sep 17 11:44 docker_heapster.tar.gz


for n in *.tar.gz; do docker load -i $n; done
docker tag docker.io/kubernetes/heapster_grafana:v2.6.0 10.0.0.11:5000/heapster_grafana:v2.6.0
docker tag  docker.io/kubernetes/heapster_influxdb:v0.5 10.0.0.11:5000/heapster_influxdb:v0.5
docker tag docker.io/kubernetes/heapster:canary 10.0.0.11:5000/heapster:canary
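
The controllers below are pinned to k8s-node2 with imagePullPolicy: IfNotPresent, so the locally loaded images are enough; if the pods could be scheduled elsewhere, you would also push the tags to the local registry (assuming the registry at 10.0.0.11:5000 accepts pushes):

docker push 10.0.0.11:5000/heapster_grafana:v2.6.0
docker push 10.0.0.11:5000/heapster_influxdb:v0.5
docker push 10.0.0.11:5000/heapster:canary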

2: Upload the config files and run kubectl create -f .

influxdb-grafana-controller.yaml

mkdir heapster
cd heapster/

[root@k8s-master heapster]# cat influxdb-grafana-controller.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: influxGrafana
  name: influxdb-grafana
  namespace: kube-system
spec:
  replicas: 1
  selector:
    name: influxGrafana
  template:
    metadata:
      labels:
        name: influxGrafana
    spec:
      nodeName: k8s-node2
      containers:
      - name: influxdb
        image: 10.0.0.11:5000/heapster_influxdb:v0.5
        volumeMounts:
        - mountPath: /data
          name: influxdb-storage
      - name: grafana
        image: 10.0.0.11:5000/heapster_grafana:v2.6.0
        env:
          - name: INFLUXDB_SERVICE_URL
            value: http://monitoring-influxdb:8086
            # The following env variables are required to make Grafana accessible via
            # the kubernetes api-server proxy. On production clusters, we recommend
            # removing these env variables, setup auth for grafana, and expose the grafana
            # service using a LoadBalancer or a public IP.
          - name: GF_AUTH_BASIC_ENABLED
            value: "false"
          - name: GF_AUTH_ANONYMOUS_ENABLED
            value: "true"
          - name: GF_AUTH_ANONYMOUS_ORG_ROLE
            value: Admin
          - name: GF_SERVER_ROOT_URL
            value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
        volumeMounts:
        - mountPath: /var
          name: grafana-storage
      volumes:
      - name: influxdb-storage
        emptyDir: {}
      - name: grafana-storage
        emptyDir: {}

grafana-service.yaml

[root@k8s-master heapster]# cat grafana-service.yaml 
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: monitoring-grafana
  name: monitoring-grafana
  namespace: kube-system
spec:
  # In a production setup, we recommend accessing Grafana through an external Loadbalancer
  # or through a public IP. 
  # type: LoadBalancer
  ports:
  - port: 80
    targetPort: 3000
  selector:
    name: influxGrafana

influxdb-service.yaml

[root@k8s-master heapster]# vim influxdb-service.yaml 
apiVersion: v1
kind: Service
metadata:
  labels: null
  name: monitoring-influxdb
  namespace: kube-system
spec:
  ports:
  - name: http
    port: 8083
    targetPort: 8083
  - name: api
    port: 8086
    targetPort: 8086
  selector:
    name: influxGrafana
    

heapster-service.yaml

[root@k8s-master heapster]# cat heapster-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster

heapster-controller.yaml

[root@k8s-master heapster]# cat heapster-controller.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    k8s-app: heapster
    name: heapster
    version: v6
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  selector:
    k8s-app: heapster
    version: v6
  template:
    metadata:
      labels:
        k8s-app: heapster
        version: v6
    spec:
      nodeName: k8s-node2
      containers:
      - name: heapster
        image: 10.0.0.11:5000/heapster:canary
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:http://10.0.0.11:8080?inClusterConfig=false
        - --sink=influxdb:http://monitoring-influxdb:8086
Modify the config files (pin both controllers to the node that has the images loaded):
#heapster-controller.yaml
    spec:
      nodeName: 10.0.0.13
      containers:
      - name: heapster
        image: 10.0.0.11:5000/heapster:canary
        imagePullPolicy: IfNotPresent
#influxdb-grafana-controller.yaml
    spec:
      nodeName: 10.0.0.13
      containers:
[root@k8s-master heapster]# kubectl create -f .

3: Open the dashboard to verify
http://10.0.0.11:8080/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard


2.2 Elastic scaling


1: Modify the controller's config file: the HPA computes CPU utilization against the container's CPU request, so requests/limits must be set

  containers:
  - name: myweb
    image: 10.0.0.11:5000/nginx:1.13
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
      requests:
        cpu: 100m

2: Create the autoscaling rule

kubectl autoscale deploy nginx-deployment --max=8 --min=1 --cpu-percent=10

kubectl get hpa
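
The autoscale command creates an HPA object roughly equivalent to the following manifest (a sketch using the autoscaling/v1 API of this Kubernetes generation):

apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: nginx-deployment
spec:
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: nginx-deployment
  minReplicas: 1
  maxReplicas: 8
  targetCPUUtilizationPercentage: 10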

3: Test

yum install httpd-tools -y

 ab -n 1000000 -c 40 http://172.16.28.6/index.html

(Scale-up screenshot omitted.)

(Scale-down screenshot omitted.)

3. Persistent Storage

Types of data persistence:

3.1 emptyDir

(For awareness only.) An emptyDir volume is created empty when the Pod is scheduled onto a node and is deleted together with the Pod, so it survives container restarts but not Pod deletion; a minimal sketch follows.
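
A minimal emptyDir sketch (the Pod and volume names are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: emptydir-demo
spec:
  containers:
  - name: nginx
    image: 10.0.0.11:5000/nginx:1.13
    volumeMounts:
    - mountPath: /cache            # data here survives container restarts
      name: cache-volume
  volumes:
  - name: cache-volume
    emptyDir: {}                   # removed when the Pod is deleted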

3.2 hostPath:

spec:
  nodeName: 10.0.0.13
  volumes:
  - name: mysql
    hostPath:
      path: /data/wp_mysql
  containers:
    - name: wp-mysql
      image: 10.0.0.11:5000/mysql:5.7
      imagePullPolicy: IfNotPresent
      ports:
      - containerPort: 3306
      volumeMounts:
      - mountPath: /var/lib/mysql
        name: mysql

3.3 nfs: ☆☆☆

# Install NFS on all nodes
yum install nfs-utils -y
===========================================

On the master node:
# Create the directory
mkdir -p /data/tomcat-db

# Edit the NFS exports file
[root@k8s-master tomcat-db]# vim /etc/exports
/data 10.0.0.0/24(rw,sync,no_root_squash,no_all_squash)

# Restart the services
[root@k8s-master tomcat-db]# systemctl restart rpcbind
[root@k8s-master tomcat-db]# systemctl restart nfs

# Check
[root@k8s-master tomcat-db]# showmount -e 10.0.0.11
Export list for 10.0.0.11:
/data 10.0.0.0/24

Add the config file mysql-rc-nfs.yaml

# The part that needs to change:
volumes:
- name: mysql
  nfs:
    path: /data/tomcat-db
    server: 10.0.0.11
================================================

[root@k8s-master tomcat_demo]# pwd
/root/k8s_yaml/tomcat_demo
[root@k8s-master tomcat_demo]# cat mysql-rc-nfs.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  name: mysql
spec:
  replicas: 1
  selector:
    app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      volumes: 
      - name: mysql 
        nfs:
          path: /data/tomcat-db
          server: 10.0.0.11
      containers:
        - name: mysql
          volumeMounts:
          - mountPath: /var/lib/mysql
            name: mysql
          image: 10.0.0.11:5000/mysql:5.7
          ports:
          - containerPort: 3306
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: '123456'


kubectl delete -f mysql-rc-nfs.yaml
kubectl create -f mysql-rc-nfs.yaml
kubectl get pod


# Check whether the /data directory is shared successfully
[root@k8s-master tomcat_demo]# ls /data/tomcat-db/
auto.cnf  ib_buffer_pool  ib_logfile0  ibtmp1  performance_schema
HPE_APP   ibdata1         ib_logfile1  mysql   sys

Check whether the shared directory is mounted

# On node1
[root@k8s-node1 ~]# df -h|grep nfs
10.0.0.11:/data/tomcat-db   48G  6.8G   42G  15% /var/lib/kubelet/pods/8675fe7e-d927-11e9-a65f-000c29b2785a/volumes/kubernetes.io~nfs/mysql

# Restart kubelet
[root@k8s-node1 ~]# systemctl restart kubelet.service 


# On the master, check node status
[root@k8s-master tomcat_demo]# kubectl get nodes
NAME        STATUS    AGE
k8s-node1   Ready     5d
k8s-node2   Ready     6d


# The current mysql pod is running on node1
[root@k8s-master ~]# kubectl get pods -o wide
NAME                                READY     STATUS    RESTARTS   AGE       IP            NODE
mysql-kld7f                         1/1       Running   0          1m        172.18.19.5   k8s-node1
myweb-38hgv                         1/1       Running   1          23h       172.18.19.4   k8s-node1
nginx-847814248-hq268               1/1       Running   0          4h        172.18.19.2   k8s-node1
   
   
# Delete the mysql pod; the regenerated replacement is scheduled onto node2
[root@k8s-master ~]# kubectl delete pod mysql-kld7f 
pod "mysql-kld7f" deleted
[root@k8s-master ~]# kubectl get pods -o wide
NAME                                READY     STATUS              RESTARTS   AGE       IP            NODE
mysql-14kj0                         0/1       ContainerCreating   0          1s        <none>        k8s-node2
mysql-kld7f                         1/1       Terminating         0          2m        172.18.19.5   k8s-node1
myweb-38hgv                         1/1       Running             1          23h       172.18.19.4   k8s-node1
nginx-847814248-hq268               1/1       Running             0          4h        172.18.19.2   k8s-node1
nginx-deployment-2807576163-c9g0n   1/1       Running             0          4h        172.18.53.4   k8s-node2

# Check the mount on node2
[root@k8s-node2 ~]# df -h|grep nfs
10.0.0.11:/data/tomcat-db   48G  6.8G   42G  15% /var/lib/kubelet/pods/ed09eb26-d929-11e9-a65f-000c29b2785a/volumes/kubernetes.io~nfs/mysql

Refresh the web page: the data added earlier is still there, confirming that NFS persistence works.


3.4 pv / pvc:

(Reference diagram from the web omitted.)
pv: persistent volume; a global resource, scoped to the whole k8s cluster

pvc: persistent volume claim; a local resource belonging to one namespace
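
The notes give no manifests here; as a sketch, a PV/PVC pair backed by the NFS export above might look like this (names and size are illustrative):

apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-nfs
spec:
  capacity:
    storage: 5Gi
  accessModes:
  - ReadWriteMany
  nfs:
    path: /data/tomcat-db
    server: 10.0.0.11
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 5Gi

A pod then consumes the claim through a persistentVolumeClaim volume (claimName: pvc-nfs).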


3.5 Distributed storage: glusterfs ☆☆☆☆☆

a: What is glusterfs

GlusterFS is an open-source distributed file system with strong scale-out capability: it can support petabytes of storage and thousands of clients, aggregating storage across the network into a single parallel network file system. Its key features are scalability, high performance, and high availability.


b: Install glusterfs

1. Add two disks to each of the three nodes

This is a test environment, so any size will do. (Screenshot omitted.)

2. Hot-add the disks on all three nodes without rebooting (rescan the SCSI hosts)

echo "- - -" > /sys/class/scsi_host/host0/scan
echo "- - -" > /sys/class/scsi_host/host1/scan
echo "- - -" > /sys/class/scsi_host/host2/scan

# Be sure to add hosts entries on every node
cat /etc/hosts
    10.0.0.11 k8s-master
    10.0.0.12 k8s-node1
    10.0.0.13 k8s-node2

3. On all three nodes, check that the disks are recognized, then format them

fdisk -l
mkfs.xfs /dev/sdb
mkfs.xfs /dev/sdc

4. Create the brick directories on all nodes

mkdir -p /gfs/test1
mkdir -p /gfs/test2

5. To keep mounts stable if device names change across reboots, mount by UUID

On the master node:

# blkid shows each disk's UUID

[root@k8s-master ~]# blkid 
/dev/sda1: UUID="72aabc10-44b8-4c05-86bd-049157d771f8" TYPE="swap" 
/dev/sda2: UUID="35076632-0a8a-4234-bd8a-45dc7df0fdb3" TYPE="xfs" 
/dev/sdb: UUID="577ef260-533b-45f5-94c6-60e73b17d1fe" TYPE="xfs" 
/dev/sdc: UUID="5a907588-80a1-476b-8805-d458e22dd763" TYPE="xfs" 

[root@k8s-master ~]# vim /etc/fstab 
UUID=35076632-0a8a-4234-bd8a-45dc7df0fdb3 /                       xfs     defaults        0 0
UUID=72aabc10-44b8-4c05-86bd-049157d771f8 swap                    swap    defaults        0 0
UUID=577ef260-533b-45f5-94c6-60e73b17d1fe /gfs/test1              xfs     defaults        0 0
UUID=5a907588-80a1-476b-8805-d458e22dd763 /gfs/test2              xfs     defaults        0 0

# Mount and verify
[root@k8s-master ~]# mount -a
[root@k8s-master ~]# df -h
.....
/dev/sdb         10G   33M   10G   1% /gfs/test1
/dev/sdc         10G   33M   10G   1% /gfs/test2

On node1:

[root@k8s-node1 ~]# blkid 
/dev/sda1: UUID="72aabc10-44b8-4c05-86bd-049157d771f8" TYPE="swap" 
/dev/sda2: UUID="35076632-0a8a-4234-bd8a-45dc7df0fdb3" TYPE="xfs" 
/dev/sdb: UUID="c9a47468-ce5c-4aac-bffc-05e731e28f5b" TYPE="xfs" 
/dev/sdc: UUID="7340cc1b-2c83-40be-a031-1aad8bdd5474" TYPE="xfs" 

[root@k8s-node1 ~]# vim /etc/fstab
UUID=35076632-0a8a-4234-bd8a-45dc7df0fdb3 /                       xfs     defaults        0 0
UUID=72aabc10-44b8-4c05-86bd-049157d771f8 swap                    swap    defaults        0 0
UUID=c9a47468-ce5c-4aac-bffc-05e731e28f5b /gfs/test1              xfs     defaults        0 0
UUID=7340cc1b-2c83-40be-a031-1aad8bdd5474 /gfs/test2              xfs     defaults        0 0


[root@k8s-node1 ~]# mount -a
[root@k8s-node1 ~]# df -h
/dev/sdb                    10G   33M   10G   1% /gfs/test1
/dev/sdc                    10G   33M   10G   1% /gfs/test2

On node2:

[root@k8s-node2 ~]# blkid 
/dev/sda1: UUID="72aabc10-44b8-4c05-86bd-049157d771f8" TYPE="swap" 
/dev/sda2: UUID="35076632-0a8a-4234-bd8a-45dc7df0fdb3" TYPE="xfs" 
/dev/sdb: UUID="6a2f2bbb-9011-41b6-b62b-37f05e167283" TYPE="xfs" 
/dev/sdc: UUID="3a259ad4-7738-4fb8-925c-eb6251e8dd18" TYPE="xfs" 


[root@k8s-node2 ~]# vim /etc/fstab 
UUID=35076632-0a8a-4234-bd8a-45dc7df0fdb3 /                       xfs     defaults        0 0
UUID=72aabc10-44b8-4c05-86bd-049157d771f8 swap                    swap    defaults        0 0
UUID=6a2f2bbb-9011-41b6-b62b-37f05e167283 /gfs/test1              xfs     defaults        0 0
UUID=3a259ad4-7738-4fb8-925c-eb6251e8dd18 /gfs/test2              xfs     defaults        0 0

[root@k8s-node2 ~]# mount -a
[root@k8s-node2 ~]# df -h
/dev/sdb         10G   33M   10G   1% /gfs/test1
/dev/sdc         10G   33M   10G   1% /gfs/test2

6. On the master, install the packages and start the service

# To save bandwidth, enable the yum cache before downloading
[root@k8s-master volume]# vim /etc/yum.conf 
keepcache=1

yum install centos-release-gluster -y
yum install glusterfs-server -y

systemctl start glusterd.service
systemctl enable glusterd.service

Then install the packages and start the service on the two worker nodes

yum install centos-release-gluster -y
yum install glusterfs-server -y

systemctl start glusterd.service
systemctl enable glusterd.service

7. View the gluster pool

# At this point only the local node is visible
[root@k8s-master volume]# bash
[root@k8s-master volume]# gluster pool list 
UUID                    Hostname    State
a335ea83-fcf9-4b7d-ba3d-43968aa8facf    localhost   Connected 


# Probe the other two nodes into the pool
[root@k8s-master volume]# gluster peer probe k8s-node1 
peer probe: success. 
[root@k8s-master volume]# gluster peer probe k8s-node2 
peer probe: success. 
[root@k8s-master volume]# gluster pool list 
UUID                    Hostname    State
ebf5838a-4de2-447b-b559-475799551895    k8s-node1   Connected 
78678387-cc5b-4577-b0fe-b11b4ca80a67    k8s-node2   Connected 
a335ea83-fcf9-4b7d-ba3d-43968aa8facf    localhost   Connected 

8. Create a volume from the pool, inspect it, then delete it

# wahaha is the volume name
[root@k8s-master volume]# gluster volume create wahaha k8s-master:/gfs/test1 k8s-master:/gfs/test2 k8s-node1:/gfs/test1 k8s-node1:/gfs/test2 force
volume create: wahaha: success: please start the volume to access data

# View the newly created volume's properties
[root@k8s-master volume]# gluster volume info wahaha
# Delete the volume
[root@k8s-master volume]# gluster volume delete wahaha 
Deleting volume will erase all information about the volume. Do you want to continue? (y/n) y
volume delete: wahaha: success

9. Re-create it as a distributed-replicated volume ☆☆☆

(Diagram of the distributed-replicated volume omitted.)

# Command to check the syntax
[root@k8s-master volume]# gluster volume create --help

# Create the volume again, specifying the replica count <replica 2> on top of the previous command
[root@k8s-master volume]# gluster volume create wahaha replica 2 k8s-master:/gfs/test1 k8s-master:/gfs/test2 k8s-node1:/gfs/test1 k8s-node1:/gfs/test2 force
volume create: wahaha: success: please start the volume to access data

# The volume must be started before the data can be accessed
[root@k8s-master volume]# gluster volume start wahaha 
volume start: wahaha: success

10. Mount the volume

# Mounted on node2, the volume shows 20G: four 10G bricks with replica 2 gives 40G/2
[root@k8s-node2 ~]# mount -t glusterfs 10.0.0.11:/wahaha /mnt
[root@k8s-node2 ~]# df -h
/dev/sdb            10G   33M   10G   1% /gfs/test1
/dev/sdc            10G   33M   10G   1% /gfs/test2
10.0.0.11:/wahaha   20G  270M   20G   2% /mnt

11. Test that data is shared

# On node2, copy a file into /mnt
[root@k8s-node2 ~]# cp -a /etc/hosts /mnt/
[root@k8s-node2 ~]# ll /mnt/
total 1
-rw-r--r-- 1 root root 253 Sep 11 10:19 hosts


# Check on the master: the file appears on both bricks of the replica pair
[root@k8s-master volume]# ll /gfs/test1/
total 4
-rw-r--r-- 2 root root 253 Sep 11 10:19 hosts
[root@k8s-master volume]# ll /gfs/test2/
total 4
-rw-r--r-- 2 root root 253 Sep 11 10:19 hosts

12. Expand the volume

# On the master, add node2's bricks to the volume
[root@k8s-master volume]# gluster volume add-brick wahaha  k8s-node2:/gfs/test1 k8s-node2:/gfs/test2 force
volume add-brick: success

# Checking on node2, the expansion succeeded: the mount now shows 30G
[root@k8s-node2 ~]# df -h
10.0.0.11:/wahaha   30G  404M   30G   2% /mnt

13. Extension: how to add nodes and rebalance data

# After adding new bricks/nodes, rebalance the data; best done during low-traffic periods
[root@k8s-master ~]# gluster volume rebalance wahaha start force
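
The notes stop before wiring glusterfs into Kubernetes; as a hypothetical sketch, the wahaha volume could be consumed through the in-tree glusterfs volume plugin, which needs an Endpoints object listing the gluster peers (the object names, Pod, and port value below are illustrative):

apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
- addresses:
  - ip: 10.0.0.11
  - ip: 10.0.0.12
  - ip: 10.0.0.13
  ports:
  - port: 49152
---
apiVersion: v1
kind: Pod
metadata:
  name: glusterfs-demo
spec:
  containers:
  - name: nginx
    image: 10.0.0.11:5000/nginx:1.13
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: gfs
  volumes:
  - name: gfs
    glusterfs:
      endpoints: glusterfs-cluster   # the Endpoints object above
      path: wahaha                   # the gluster volume name
      readOnly: false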