[TOC]
常用命令
kubectl -h
#獲取所有pod信息
kubectl get pods --all-namespaces -o wide
kubectl get cs,node,svc,pods,ingress --all-namespaces -o wide
kubectl get pods -n kube-system -o wide
watch kubectl get pod -n kube-system -o wide
kubectl describe pod calico-node-qxfrt -n kube-system
kubeadm config view
kubectl cluster-info
kubectl create -f manifest.yaml
kubectl delete -f manifest.yaml
kubectl logs -f manifest...
kubectl get pod pod_name
kubectl run b1 -it --rm --image=alpine /bin/sh
kubectl run b1 -it --rm --image=harbor.lisea.cn/k8s/alpine-base:2.0 /bin/sh
curl -s microservice-cloud-config-service.default.svc.cluster.local:8888/future-dev.yml
安裝
簡(jiǎn)單部署:kubeasz
https://github.com/gjmzj/kubeasz
快速部署
https://github.com/gjmzj/kubeasz/blob/master/docs/setup/quickStart.md
升級(jí)內(nèi)核
CentOS7.x系統(tǒng)自帶的3.10.x內(nèi)核存在一些Bug,Docker運(yùn)行不穩(wěn)定,建議升級(jí)內(nèi)核
#下載內(nèi)核源
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# 安裝最新版本內(nèi)核
yum --enablerepo=elrepo-kernel install -y kernel-lt
# 查看可用內(nèi)核
cat /boot/grub2/grub.cfg |grep menuentry
# 設(shè)置開機(jī)從新內(nèi)核啟動(dòng)
grub2-set-default "CentOS Linux (4.4.230-1.el7.elrepo.x86_64) 7 (Core)"
# 查看內(nèi)核啟動(dòng)項(xiàng)
grub2-editenv list
# 重啟系統(tǒng)使內(nèi)核生效
reboot
# 查看內(nèi)核版本是否生效
uname -r
安裝kubeadm,kubectl,kubelet
#添加yum源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#卸載
yum remove -y kubelet kubeadm kubectl
#安裝
yum install -y kubelet kubeadm kubectl
# 設(shè)置kubelet開機(jī)啟動(dòng)
systemctl enable kubelet && systemctl start kubelet
參考腳本
# 在 master 節(jié)點(diǎn)和 worker 節(jié)點(diǎn)都要執(zhí)行
# 最后一個(gè)參數(shù) 1.18.9 用于指定 kubenetes 版本,支持所有 1.18.x 版本的安裝
# 騰訊云 docker hub 鏡像
# export REGISTRY_MIRROR="https://mirror.ccs.tencentyun.com"
# DaoCloud 鏡像
# export REGISTRY_MIRROR="http://f1361db2.m.daocloud.io"
# 華為云鏡像
# export REGISTRY_MIRROR="https://05f073ad3c0010ea0f4bc00b7105ec20.mirror.swr.myhuaweicloud.com"
# 阿里云 docker hub 鏡像
export REGISTRY_MIRROR=https://registry.cn-hangzhou.aliyuncs.com
curl -sSL https://kuboard.cn/install-script/v1.18.x/install_kubelet.sh | sh -s 1.18.9
install_kubelet.sh
#!/bin/bash
# 在 master 節(jié)點(diǎn)和 worker 節(jié)點(diǎn)都要執(zhí)行
# 安裝 docker
# 參考文檔如下
# https://docs.docker.com/install/linux/docker-ce/centos/
# https://docs.docker.com/install/linux/linux-postinstall/
# 卸載舊版本
yum remove -y docker \
docker-client \
docker-client-latest \
docker-ce-cli \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
# 設(shè)置 yum repository
yum install -y yum-utils \
device-mapper-persistent-data \
lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# 安裝并啟動(dòng) docker
yum install -y docker-ce-19.03.8 docker-ce-cli-19.03.8 containerd.io
systemctl enable docker
systemctl start docker
# 安裝 nfs-utils
# 必須先安裝 nfs-utils 才能掛載 nfs 網(wǎng)絡(luò)存儲(chǔ)
yum install -y nfs-utils
yum install -y wget
# 關(guān)閉 防火墻
systemctl stop firewalld
systemctl disable firewalld
# 關(guān)閉 SeLinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Disable swap (kubelet refuses to start with swap enabled).
swapoff -a
# Back up fstab, then rewrite it without swap entries so swap stays
# disabled after reboot. (grep reads the backup directly; piping
# `cat` into grep was redundant.)
yes | cp /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab
# 修改 /etc/sysctl.conf
# 如果有配置,則修改
sed -i "s#^net.ipv4.ip_forward.*#net.ipv4.ip_forward=1#g" /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-ip6tables.*#net.bridge.bridge-nf-call-ip6tables=1#g" /etc/sysctl.conf
sed -i "s#^net.bridge.bridge-nf-call-iptables.*#net.bridge.bridge-nf-call-iptables=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.disable_ipv6.*#net.ipv6.conf.all.disable_ipv6=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.default.disable_ipv6.*#net.ipv6.conf.default.disable_ipv6=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.lo.disable_ipv6.*#net.ipv6.conf.lo.disable_ipv6=1#g" /etc/sysctl.conf
sed -i "s#^net.ipv6.conf.all.forwarding.*#net.ipv6.conf.all.forwarding=1#g" /etc/sysctl.conf
# 可能沒有,追加
echo "net.ipv4.ip_forward = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.conf
echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.conf
# 執(zhí)行命令以應(yīng)用
sysctl -p
# Configure the Kubernetes yum repository (Aliyun mirror).
# NOTE: in a .repo file a multi-line value must have its continuation
# lines indented with whitespace; the original put the second gpgkey URL
# on an unindented line of its own, which yum cannot parse. Both URLs
# now sit on one line (same form as the repo block earlier in this doc).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# 卸載舊版本
yum remove -y kubelet kubeadm kubectl
# 安裝kubelet、kubeadm、kubectl
# 將 ${1} 替換為 kubernetes 版本號(hào),例如 1.17.2
yum install -y kubelet-${1} kubeadm-${1} kubectl-${1}
# 修改docker Cgroup Driver為systemd
# # 將/usr/lib/systemd/system/docker.service文件中的這一行 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
# # 修改為 ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd
# 如果不修改,在添加 worker 節(jié)點(diǎn)時(shí)可能會(huì)碰到如下錯(cuò)誤
# [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd".
# Please follow the guide at https://kubernetes.io/docs/setup/cri/
sed -i "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service
# 設(shè)置 docker 鏡像,提高 docker 鏡像下載速度和穩(wěn)定性
# 如果您訪問 https://hub.docker.com 速度非常穩(wěn)定,亦可以跳過這個(gè)步驟
curl -sSL https://kuboard.cn/install-script/set_mirror.sh | sh -s ${REGISTRY_MIRROR}
# 重啟 docker,并啟動(dòng) kubelet
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet
docker version
初始化master
#設(shè)置hostname
hostnamectl set-hostname master
kubeadm init --kubernetes-version=1.18.9 --apiserver-advertise-address=[master節(jié)點(diǎn)IP] --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16 --image-repository=registry.aliyuncs.com/google_containers
kubernetes-version:為控制平面選擇一個(gè)特定的 Kubernetes 版本。
apiserver-advertise-address:API 服務(wù)器所公布的其正在監(jiān)聽的 IP 地址。如果未設(shè)置,則使用默認(rèn)網(wǎng)絡(luò)接口。
service-cidr:為服務(wù)的虛擬 IP 地址另外指定 IP 地址段。
pod-network-cidr:指明 pod 網(wǎng)絡(luò)可以使用的 IP 地址段。如果設(shè)置了這個(gè)參數(shù),控制平面將會為每一個(gè)節(jié)點(diǎn)自動(dòng)分配 CIDRs。
image-repository:選擇用於拉取控制平面鏡像的容器倉庫 默認(rèn) k8s.gcr.io。
部署阿里云如果公網(wǎng)IP沒有綁定到網(wǎng)卡需要修改ETCD
解決阿里云ECS下kubeadm部署k8s無法指定公網(wǎng)IP(作廢)
內(nèi)網(wǎng)不互通的云服務(wù)器安裝k8s
當(dāng)卡在
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
的時(shí)候新開窗口修改以下文件
vim /etc/kubernetes/manifests/etcd.yaml
修改成以下參數(shù)
- --listen-client-urls=https://127.0.0.1:2379
- --listen-peer-urls=https://127.0.0.1:2380
配置用戶證書
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
禁用非安全端口
kubectl get cs,node,svc,pods,ingress --all-namespaces -o wide
vim /etc/kubernetes/manifests/kube-scheduler.yaml
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
#去掉port=0
systemctl restart kubelet
查看集群狀態(tài)
#master現(xiàn)在狀態(tài)是notready因?yàn)闆]有安裝pod網(wǎng)絡(luò)
kubectl get node
kubectl -h
#獲取所有pod信息
kubectl get pods --all-namespaces -o wide
kubectl get cs,node,svc,pods,ingress --all-namespaces -o wide
kubectl get pods -n kube-system -o wide
watch kubectl get pod --all-namespaces -o wide
kubectl describe pod calico-node-qxfrt -n kube-system
kubeadm config view
kubectl cluster-info
安裝pod網(wǎng)絡(luò)
如果要使用外網(wǎng)IP
#每個(gè)節(jié)點(diǎn)都需要改成外網(wǎng)IP
kubectl annotate nodes master flannel.alpha.coreos.com/public-ip-overwrite=47.103.63.71
安裝flannel
#flannel
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
selector:
matchLabels:
app: flannel
template:
metadata:
labels:
tier: node
app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
hostNetwork: true
priorityClassName: system-node-critical
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: quay.io/coreos/flannel:v0.14.0-rc1
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.14.0-rc1
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN", "NET_RAW"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run/flannel
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run/flannel
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
添加Node節(jié)點(diǎn)
kubeadm token create --print-join-command
# kubeadm token create 命令的輸出
kubeadm join apiserver.demo:6443 --token mpfjma.4vjjg8flqihor4vt --discovery-token-ca-cert-hash sha256:6f7a8e40a810323672de5eee6f4d19aa2dbdb38411845a1bf5dd63485c43d303
創(chuàng)建nginx
kubectl run --image=nginx nginx-app --port=80
kubectl delete pod nginx-app
允許master節(jié)點(diǎn)部署pod
kubectl taint nodes --all node-role.kubernetes.io/master-
不允許調(diào)度
kubectl taint nodes master1 node-role.kubernetes.io/master=:NoSchedule
使用云服務(wù)器可能存在的問題
日志鏈接不上的問題
如果內(nèi)網(wǎng)ip也不在一個(gè)網(wǎng)段不能相互訪問,那訪問部署在master以外的節(jié)點(diǎn)時(shí)看不到運(yùn)行日誌以及連接不上。
注意是在master執(zhí)行以下命令
#注意是在master執(zhí)行以下命令,將其他節(jié)點(diǎn)的內(nèi)網(wǎng)ip轉(zhuǎn)變成外網(wǎng)ip
iptables -t nat -A OUTPUT -d [其他節(jié)點(diǎn)內(nèi)網(wǎng)IP] -j DNAT --to-destination [其他節(jié)點(diǎn)公網(wǎng)IP]
修改與刪除
#查看
iptables -t nat -vnL OUTPUT --line-number
#刪除,[num]是某條記錄的序號(hào)最左邊
iptables -t nat -D OUTPUT [num]
不能使用cluster ip的問題
配置kube-proxy 基于 ipvs 模式工作
#修改mode為ipvs
kubectl edit cm kube-proxy -n kube-system
---
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
刪除然后會(huì)自動(dòng)重新創(chuàng)建
kubectl delete pod -l k8s-app=kube-proxy -n kube-system

卸載
# 卸載服務(wù)
kubeadm reset
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
#刪除網(wǎng)卡
ifconfig cni0 down
ip link delete cni0
ifconfig flannel.1 down
ip link delete flannel.1
ifconfig kube-ipvs0 down
ip link delete kube-ipvs0
rm -rf /var/lib/cni/
rm -rf /var/lib/etcd
rm -rf /var/lib/kubelet
rm -rf /var/run/kubernetes
rm -rf /etc/cni/*
# 刪除node
kubectl delete node node-01
# 刪除rpm包
rpm -qa|grep kube*|xargs rpm --nodeps -e
# Stop, then remove, all containers.
# NOTE: the original used a single "&", which backgrounded `docker stop`
# and raced it against `docker rm`. Run the two steps sequentially, and
# skip each step when there are no containers (an empty ID list would
# otherwise make docker error out).
running=$(docker ps -q)
[ -n "$running" ] && docker stop $running
all_containers=$(docker ps -aq)
[ -n "$all_containers" ] && docker rm $all_containers
# 刪除容器及鏡像
docker images -qa|xargs docker rmi -f
remove.sh
echo "----------清理開始----------"
kubeadm reset
# Stop all running containers.
# `docker ps -q` prints container IDs directly -- no need to scrape the
# table output with backticks + awk + `grep -v CONTAINER` as the
# original did.
docker stop $(docker ps -q)
# Remove all containers (running or stopped).
docker rm -f $(docker ps -qa)
# Remove all container volumes.
docker volume rm $(docker volume ls -q)
# Remove ALL images -- destructive, deliberately left commented out.
#docker rmi -f $(docker images -q)
#停止服務(wù)
systemctl disable kubelet.service
systemctl disable kube-scheduler.service
systemctl disable kube-proxy.service
systemctl disable kube-controller-manager.service
systemctl disable kube-apiserver.service
systemctl stop kubelet.service
systemctl stop kube-scheduler.service
systemctl stop kube-proxy.service
systemctl stop kube-controller-manager.service
systemctl stop kube-apiserver.service
#卸載mount目錄
for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done
#刪除殘留路徑
rm -rf /etc/ceph
rm -rf /etc/cni
rm -rf /etc/kubernetes
rm -rf /run/secrets/kubernetes.io
rm -rf /run/calico
rm -rf /run/flannel
rm -rf /var/lib/calico
rm -rf /var/lib/cni
rm -rf /var/lib/kubelet
rm -rf /var/lib/etcd
rm -rf /var/log/containers
rm -rf /var/log/pods
rm -rf /var/run/calico
rm -rf /var/run/kubernetes
rm -rf /opt/cni
rm -rf ~/.kube/config
# Clean up leftover CNI/overlay network interfaces, keeping loopback,
# docker0 and the physical NICs (eth*/ens*).
# NOTE: the original pattern 'lo|docker0|eth*|ens*' was a regex, where
# "eth*" means "et" followed by zero-or-more "h" -- it matched far more
# names than intended. The pattern is now anchored with literal
# prefixes. Iterating the /sys/class/net/* glob also avoids parsing
# `ls` output.
for iface_path in /sys/class/net/*; do
  iface=${iface_path##*/}
  if ! echo "$iface" | grep -qiE '^(lo|docker0|eth|ens)'; then
    ip link delete "$iface"
  fi
done
#清理Iptables表
## 注意:如果節(jié)點(diǎn)Iptables有特殊配置,以下命令請(qǐng)謹(jǐn)慎操作
sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
systemctl restart docker
echo "----------清理完成----------"
網(wǎng)絡(luò)調(diào)試工具
busybox.yaml
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28.4
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
安裝Dashboard
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml
- 修改kubernetes-dashboard的service類型為NodePort類型
[root@k8s-master dashboard]# vim recommended.yaml
...
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort # 新增
ports:
- port: 443
targetPort: 8443
nodePort: 30443 # 新增
selector:
- 安裝
kubectl create -f recommended.yaml
- 創(chuàng)建serviceaccount和clusterrolebinding資源YAML文件:默認(rèn)Dashboard為最小RBAC權(quán)限,添加集群管理員權(quán)限以便從Dashboard操作集群資源
[root@k8s-master dashboard]# vim adminuser.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
- 創(chuàng)建
kubectl create -f adminuser.yaml
- 獲取token
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
- 瀏覽器訪問https://IP:30443
安裝traefik
-
traefik-ingress.yaml:根據(jù)端口限制設(shè)置開放端口,默認(rèn)端口:23456
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: kube-system
labels:
k8s-app: traefik-ingress-lb
spec:
replicas: 1
selector:
matchLabels:
k8s-app: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
containers:
- image: traefik:v1.7.20
imagePullPolicy: IfNotPresent
name: traefik-ingress-lb
args:
- --api
- --kubernetes
- --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
# 該端口為 traefik ingress-controller的服務(wù)端口
port: 80
# 集群hosts文件中設(shè)置的 NODE_PORT_RANGE 作為 NodePort的可用范圍
# 從默認(rèn)20000~40000之間選一個(gè)可用端口,讓ingress-controller暴露給外部的訪問
nodePort: 23456
name: web
- protocol: TCP
# 該端口為 traefik 的管理WEB界面
port: 8080
name: admin
type: NodePort
traefik-ui.ing.yaml
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: traefik-web-ui
namespace: kube-system
spec:
rules:
- host: traefik-ui.test.com
http:
paths:
- path: /
backend:
serviceName: traefik-ingress-service
servicePort: 8080
- 添加hosts
39.103.232.193 traefik-ui.test.com
訪問:http://traefik-ui.test.com:30000
安裝harbor
每次重啟需要確保harbor的容器都啟動(dòng)起來,可以執(zhí)行
docker-compose up -d
- 安裝docker-compose
curl -L https://github.com/docker/compose/releases/download/1.29.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
- 下載離線文件:harbor
- 復(fù)制配置文件:
cp harbor.yml.tmpl harbor.yml
- 修改hostname可以是ip地址
- 可以注釋掉https相關(guān)配置
- 檢查配置是否正確:
./prepare
- 安裝:
./install.sh
- harbor的控制
# 啟動(dòng)
docker-compose up -d
# 停止
docker-compose stop
# 重新啟動(dòng)
docker-compose restart
docker使用
下面端口是改成9090的,默認(rèn)是80端口
如果是使用ip這需要配置信任
[root@master harbor]# vim /etc/docker/daemon.json
{
"registry-mirrors": ["https://registry.cn-hangzhou.aliyuncs.com"],
"insecure-registries": ["192.168.1.20:9090"]
}
#重啟
systemctl daemon-reload
systemctl restart docker
需要在harbor先創(chuàng)建一個(gè)demo的項(xiàng)目
登錄harbor倉(cāng)庫(kù)
[root@master]# docker login -u admin -p Harbor12345 192.168.1.20:9090
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
#上傳鏡像到私有倉(cāng)庫(kù)
docker pull nginx:latest
docker tag nginx:latest 192.168.1.20:9090/demo/nginx:v1.0
docker push 192.168.1.20:9090/demo/nginx:v1.0
組件介紹
Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.15-alpine
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
---
spec:
...
spec:
containers:
...
volumeMounts:
- mountPath: /nginx
name: nginx
volumes:
- name: nginx
persistentVolumeClaim:
claimName: nginx
labels:是打個(gè)標(biāo)簽,參數(shù)隨意 app: nginx 或者 name: nginx
subPath: 將/nginx/config目錄下文件掛載到 pv 的 config 目錄下。
spec:
...
volumeMounts:
- mountPath: /nginx/config
name: nginx
subPath: config
volumes:
- name: nginx
persistentVolumeClaim:
claimName: nginx
指定部署到node
先給node打標(biāo)簽
#查看標(biāo)簽
kubectl get node --show-labels
#打標(biāo)簽
kubectl label nodes node-1 tag=node-1
#修改便簽
kubectl label nodes node-1 tag=node-1 --overwrite
#刪除標(biāo)簽
kubectl label nodes node-1 tag-
修改部署文件
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
spec:
replicas: 1
template:
metadata:
labels:
app: nginx
spec:
nodeSelector:
tag: node
containers:
- name: nginx
image: nginx:1.15-alpine
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
Service
apiVersion: v1
kind: Service
metadata:
name: nginx-service
labels:
name: nginx
spec:
type: NodePort
ports:
- port: 80
nodePort: 30080
protocol: TCP
name: first-port
selector:
app: nginx
---
spec:
type: ClusterIP
ports:
- port: 80
targetPort: 80
protocol: TCP
name: first-port
selector:
app: nginx
- selector:選取的標(biāo)簽要和 Deployment 的 labels 一樣
Service 有四種類型:
ClusterIP:默認(rèn)類型,自動(dòng)分配一個(gè)僅 cluster 內(nèi)部可以訪問的虛擬 IP
NodePort:在 ClusterIP 基礎(chǔ)上為 Service 在每臺(tái)機(jī)器上綁定一個(gè)端口,這樣就可 以通過 <NodeIP>:NodePort 來訪問該服務(wù)。如果 kube-proxy 設(shè)置了 --
nodeport-addresses=10.240.0.0/16 (v1.10 支持),那么僅該 NodePort 僅對(duì) 設(shè)置在范圍內(nèi)的 IP 有效。
LoadBalancer:在 NodePort 的基礎(chǔ)上,借助 cloud provider 創(chuàng)建一個(gè)外部的負(fù)載 均衡器,并將請(qǐng)求轉(zhuǎn)發(fā)到 <NodeIP>:NodePort
ExternalName:將服務(wù)通過 DNS CNAME 記錄方式轉(zhuǎn)發(fā)到指定的域名(通過 spec.externlName 設(shè)定)。需要 kube-dns 版本在 1.7 以上。
Service、Endpoints 和 Pod 支持三種類型的協(xié)議:
TCP(Transmission Control Protocol,傳輸控制協(xié)議)是一種面向連接的、可靠 的、基于字節(jié)流的傳輸層通信協(xié)議。
UDP(User Datagram Protocol,用戶數(shù)據(jù)報(bào)協(xié)議)是一種無連接的傳輸層協(xié)議, 用于不可靠信息傳送服務(wù)。
SCTP(Stream Control Transmission Protocol,流控制傳輸協(xié)議),用于通過IP網(wǎng) 傳輸SCN(Signaling Communication Network,信令通信網(wǎng))窄帶信令消息。
PV
apiVersion: v1
kind: PersistentVolume
metadata:
name: nginx
spec:
accessModes:
- ReadWriteMany
capacity:
storage: 50Gi
nfs:
path: /data/nfs/nginx
server: 192.168.0.200
volumeMode: Filesystem
注意:配置目錄權(quán)限,不知道哪個(gè)有用
chmod 777 /data/nfs/nginx
chown nfsnobody.nfsnobody /data/nfs/nginx
chown -R 200 /data/nfs/nginx
PersistentVolume(PV)是集群之中的一塊網(wǎng)絡(luò)存儲(chǔ)。跟 Node 一樣,也是集群的資 源。PV 跟 Volume (卷) 類似,不過會(huì)有獨(dú)立于 Pod 的生命周期。
PV 的訪問模式(accessModes)有三種:
ReadWriteOnce(RWO):是最基本的方式,可讀可寫,但只支持被單個(gè)節(jié)點(diǎn)掛 載。
ReadOnlyMany(ROX):可以以只讀的方式被多個(gè)節(jié)點(diǎn)掛載。
ReadWriteMany(RWX):這種存儲(chǔ)可以以讀寫的方式被多個(gè)節(jié)點(diǎn)共享。不是每一 種存儲(chǔ)都支持這三種方式,像共享方式,目前支持的還比較少,比較常用的是 NFS。在 PVC 綁定 PV 時(shí)通常根據(jù)兩個(gè)條件來綁定,一個(gè)是存儲(chǔ)的大小,另一個(gè)就 是訪問模式。
PV 的回收策略(persistentVolumeReclaimPolicy,即 PVC 釋放卷的時(shí)候 PV 該如何操作)也有三種
Retain,不清理, 保留 Volume(需要手動(dòng)清理)
Recycle,刪除數(shù)據(jù),即 rm -rf /thevolume/* (只有 NFS 和 HostPath 支持)
Delete,刪除存儲(chǔ)資源,比如刪除 AWS EBS 卷(只有 AWS EBS, GCE PD, AzureDisk 和 Cinder 支持)
PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: nginx
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 5Gi
volumeName: nginx
可視化工具
- Kubernetic
安裝windows版
創(chuàng)建
C:\Users\10711\.kube目錄
#打開cmd
mkdir .kube
- 查看config文件
cat ~/.kube/config
- 將config文件復(fù)制到windows下的
C:\Users\10711\.kube目錄,就能訪問了
- lens
https://github.com/lensapp/lens
kubectl
地址:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG.md#client-binaries-1
配置文件
/etc/kubernetes/admin.conf
下載二進(jìn)制文件
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
配置~/.kube/config文件
shell自動(dòng)補(bǔ)全
# 安裝bash-completion
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
# 應(yīng)用kubectl的completion到系統(tǒng)環(huán)境
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
常用
kubectl -h
#獲取所有pod信息
kubectl get pods --all-namespaces
kubectl create -f manifest.yaml
kubectl delete -f manifest.yaml
kubectl logs -f manifest...
kubectl get pod pod_name
kubectl run b1 -it --rm --image=alpine /bin/sh
kubectl run b1 -it --rm --image=harbor.lisea.cn/k8s/alpine-base:2.0 /bin/sh
curl -s microservice-cloud-config-service.default.svc.cluster.local:8888/future-dev.yml
NFS
安裝服務(wù)端
yum install nfs-utils rpcbind
systemctl start rpcbind.service
systemctl start nfs.service
#開機(jī)啟動(dòng)
chkconfig rpcbind on
chkconfig nfs on
#配置
#1、創(chuàng)建文件夾
mkdir -p /data/nfs-share
mkdir -p /data/tmp
#2、配置權(quán)限,都試下不知道哪個(gè)有用........
chmod 777 /data/nfs-share
chown nfsnobody.nfsnobody /data/nfs-share
chown -R 200 /data/nfs-share
#添加掛載信息,在新增目錄也要添加以下操作
vi /etc/exports
#添加以下
/data/nfs-share *(rw,no_root_squash)
#可能有權(quán)限問題
/data/nfs-share *(rw,async,root_squash)
#重載配置
exportfs -a
#此時(shí)可用showmount -e 服務(wù)端ip來查看可mount目錄
showmount -e 192.168.90.128
#測(cè)試
#1、掛載配置
mount -t nfs 192.168.90.128:/data/nfs-share /data/tmp
#2、卸載掛載
umount /data/tmp
redis
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
name: redis
name: redis
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- image: redis:latest
imagePullPolicy: IfNotPresent
name: redis
ports:
- containerPort: 6379
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: redis
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis