▲單獨架構(gòu)的小伙伴看這里:(學(xué)習(xí)杰哥視頻的作業(yè)第19-20天)
1、使用docker-compose制作nginx+php-fpm,mysql,wordpress鏡像,并管理啟動,實現(xiàn)wordpress的訪問
一、環(huán)境準備:
Centos 7.8.2003 系統(tǒng)已經(jīng)安裝完docker服務(wù)并啟動
二、安裝docker-compose
[root@docker~]# yum install docker-compose
[root@docker~]# docker-compose version
docker-compose version 1.18.0, build 8dd22a9
docker-py version: 2.6.1
CPython version: 3.6.8
OpenSSL version: OpenSSL 1.0.2k-fips  26 Jan 2017
三、編寫docker-compose.yml文件
[root@docker~]# mkdir /wordpress
[root@docker~]# cd /wordpress
[root@docker wordpress]# vim docker-compose.yml
version: '3.3'
services:
  db:
    image: mysql:5.7
    volumes:
      - db_data:/var/lib/mysql
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: somewordpress
      MYSQL_DATABASE: wordpress
      MYSQL_USER: wordpress
      MYSQL_PASSWORD: wordpress
  wordpress:
    depends_on:
      - db
    image: wordpress:latest
    ports:
      - "8000:80"
    restart: always
    environment:
      WORDPRESS_DB_HOST: db:3306
      WORDPRESS_DB_USER: wordpress
      WORDPRESS_DB_PASSWORD: wordpress
      WORDPRESS_DB_NAME: wordpress
volumes:
  db_data: {}
四、運行docker-compose
[root@docker wordpress]# docker-compose up -d


2、使用ansible進行K8s初始化安裝配置。
一、環(huán)境準備
1 Ubuntu 1804
(1)基礎(chǔ)軟件包
# apt purge ufw lxd lxd-client lxcfs lxc-common
# apt install -y iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump telnet traceroute gcc openssh-server lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump telnet traceroute iotop unzip zip
(2)系統(tǒng)資源限制優(yōu)化
# vim /etc/security/limits.conf
#root賬戶的資源軟限制和硬限制
root soft core unlimited
root hard core unlimited
root soft nproc 1000000
root hard nproc 1000000
root soft nofile 1000000
root hard nofile 1000000
root soft memlock 32000
root hard memlock 32000
root soft msgqueue 8192000
root hard msgqueue 8192000
#其他賬戶的資源軟限制和硬限制
* soft core unlimited
* hard core unlimited
* soft nproc 1000000
* hard nproc 1000000
* soft nofile 1000000
* hard nofile 1000000
* soft memlock 32000
* hard memlock 32000
* soft msgqueue 8192000
* hard msgqueue 8192000
(3)內(nèi)核參數(shù)優(yōu)化
# vim /etc/sysctl.conf
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
net.ipv4.ip_nonlocal_bind = 1
net.ipv4.ip_forward = 1
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
# Disable netfilter on bridges.
net.bridge.bridge-nf-call-ip6tables = 0
net.bridge.bridge-nf-call-iptables = 0
net.bridge.bridge-nf-call-arptables = 0
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296
# TCP kernel paramater
net.ipv4.tcp_mem = 786432 1048576 1572864
net.ipv4.tcp_rmem = 4096 87380 4194304
net.ipv4.tcp_wmem = 4096 16384 4194304
net.ipv4.tcp_window_scaling = 1
net.ipv4.tcp_sack = 1
# socket buffer
net.core.wmem_default = 8388608
net.core.rmem_default = 8388608
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
net.core.netdev_max_backlog = 262144
net.core.somaxconn = 20480
net.core.optmem_max = 81920
# TCP conn
net.ipv4.tcp_max_syn_backlog = 262144
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_retries1 = 3
net.ipv4.tcp_retries2 = 15
# tcp conn reuse
net.ipv4.tcp_timestamps = 0
net.ipv4.tcp_tw_reuse = 0
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_fin_timeout = 1
net.ipv4.tcp_max_tw_buckets = 20000
net.ipv4.tcp_max_orphans = 3276800
net.ipv4.tcp_synack_retries = 1
net.ipv4.tcp_syncookies = 1
# keepalive conn
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_intvl = 30
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.ip_local_port_range = 10001 65000
# swap
vm.overcommit_memory = 0
vm.swappiness = 10
#net.ipv4.conf.eth1.rp_filter = 0
#net.ipv4.conf.lo.arp_ignore = 1
#net.ipv4.conf.lo.arp_announce = 2
#net.ipv4.conf.all.arp_ignore = 1
#net.ipv4.conf.all.arp_announce = 2
2 主機IP規(guī)劃
(1)Master 節(jié)點
192.168.7.101  master1.magedu.net  vip:192.168.7.248
192.168.7.102  master2.magedu.net  vip:192.168.7.248
(2)Harbor 節(jié)點
192.168.7.103  harbor.magedu.net
(3)etcd 節(jié)點
192.168.7.105  etcd1.magedu.net
192.168.7.106  etcd2.magedu.net
192.168.7.107  etcd3.magedu.net
(4)Haproxy+keepalive節(jié)點
192.168.7.108  ha.magedu.net
(5)Node 節(jié)點
192.168.7.110  node1.magedu.net
192.168.7.111  node2.magedu.net
3 高可用負載均衡(操作主機:7.108)
(1)keepalived
# apt-get install libssl-dev
# apt-get install openssl
# apt-get install libpopt-dev
# apt-get install keepalived
# vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 1
priority 100
advert_int 3
unicast_src_ip 192.168.7.108
unicast_peer {
192.168.7.109
}
authentication {
auth_type PASS
auth_pass 123abc
}
virtual_ipaddress {
192.168.7.248 dev eth0 label eth0:1
}
}
# systemctl start keepalived
# systemctl enable keepalived
# systemctl status keepalived
(2)haproxy
# vim /etc/haproxy/haproxy.cfg
listen k8s_api_nodes_6443
bind 192.168.7.248:6443
mode tcp
#balance leastconn
server 192.168.7.101 192.168.7.101:6443 check inter 2000 fall 3 rise 5
server 192.168.7.102 192.168.7.102:6443 check inter 2000 fall 3 rise 5
4 各節(jié)點都需要安裝(master/etcd/node)
(1)docker-ce服務(wù);或參考 https://developer.aliyun.com/article/110806 使用官方安裝腳本自動安裝(僅適用于公網(wǎng)環(huán)境)
# curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun
(2)
二、Harbor 安裝配置
1 將 harbor-offline-installer-v1.7.5 上傳至 /usr/local/src
[root@docker-server1 src]# tar xvf harbor-offline-installer-v1.7.5.tgz
[root@docker-server1 src]# ln -sv /usr/local/src/harbor /usr/local/
[root@docker-server1 harbor]# cd /usr/local/harbor/
2 harbor 之 https 配置
root@k8s-harbor1:/usr/local/src/harbor# pwd
/usr/local/src/harbor
root@k8s-harbor1:/usr/local/src/harbor# mkdir certs/
# openssl genrsa -out /usr/local/src/harbor/certs/harbor-ca.key #生成私有key
# openssl req -x509 -new -nodes -key /usr/local/src/harbor/certs/harbor-ca.key -subj "/CN=harbor.magedu.net" -days 7120 -out /usr/local/src/harbor/certs/harbor-ca.crt #簽發(fā)證書
# vim harbor.cfg
hostname = harbor.magedu.net
ui_url_protocol = https
ssl_cert = /usr/local/src/harbor/certs/harbor-ca.crt
ssl_cert_key = /usr/local/src/harbor/certs/harbor-ca.key
harbor_admin_password = 123456
# apt-get install docker-compose
# ./install.sh
# docker-compose start
3 client 同步crt證書
# mkdir /etc/docker/certs.d/harbor.magedu.net -p          #所有節(jié)點
harbor1:~# scp /usr/local/src/harbor/certs/harbor-ca.crt 192.168.7.101:/etc/docker/certs.d/harbor.magedu.net
# systemctl restart docker                                #所有節(jié)點重啟docker
4 登錄harbor
root@k8s-master1:~# docker login harbor.magedu.net
Username: admin
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json. Configure a credential helper to remove this warning. See https://docs.docker.com/engine/reference/commandline/login/#credentials-store Login Succeeded
5 測試push鏡像到harbor
master1:~# docker pull alpine
root@k8s-master1:~# docker tag alpine harbor.magedu.net/library/alpine:linux36
root@k8s-master1:~# docker push harbor.magedu.net/library/alpine:linux36
The push refers to repository [harbor.magedu.net/library/alpine]
256a7af3acb1: Pushed
linux36: digest: sha256:97a042bf09f1bf78c8cf3dcebef94614f2b95fa2f988a5c07314031bc2570c7a size: 528
三、ansible自動化部署K8S的前期工作
(1) 基礎(chǔ)環(huán)境
# apt-get install python2.7
# ln -s /usr/bin/python2.7 /usr/bin/python
# apt-get install git ansible -y
# ssh-keygen                     #生成密鑰對
# ssh-copy-id root@192.168.7.xxx
# apt-get install sshpass        #利用ssh同步公鑰到各k8s服務(wù)器
(2) clone項目
# git clone -b 0.6.1 https://github.com/easzlab/kubeasz.git
root@k8s-master1:~# mv /etc/ansible/* /opt/
root@k8s-master1:~# mv kubeasz/* /etc/ansible/
root@k8s-master1:~# cd /etc/ansible/
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts #復(fù)制hosts模板文件
(3) 準備hosts文件
root@k8s-master1:/etc/ansible# pwd
/etc/ansible
root@k8s-master1:/etc/ansible# cp example/hosts.m-masters.example ./hosts
root@k8s-master1:/etc/ansible# cat hosts
# 集群部署節(jié)點:一般為運行ansible 腳本的節(jié)點
# 變量 NTP_ENABLED (=yes/no) 設(shè)置集群是否安裝 chrony 時間同步
[deploy]
192.168.7.101 NTP_ENABLED=no
# etcd集群請?zhí)峁┤缦翹ODE_NAME,注意etcd集群必須是1,3,5,7...奇數(shù)個節(jié)點
[etcd]
192.168.7.105 NODE_NAME=etcd1
192.168.7.106 NODE_NAME=etcd2
192.168.7.107 NODE_NAME=etcd3
[new-etcd] # 預(yù)留組,后續(xù)添加etcd節(jié)點使用
#192.168.7.x NODE_NAME=etcdx
[kube-master]
192.168.7.101
[new-master] # 預(yù)留組,后續(xù)添加master節(jié)點使用
#192.168.7.5
[kube-node]
192.168.7.110
[new-node] # 預(yù)留組,后續(xù)添加node節(jié)點使用
#192.168.7.xx
# 參數(shù) NEW_INSTALL:yes表示新建,no表示使用已有harbor服務(wù)器
# 如果不使用域名,可以設(shè)置 HARBOR_DOMAIN=""
[harbor]
#192.168.7.8 HARBOR_DOMAIN="harbor.yourdomain.com" NEW_INSTALL=no
# 負載均衡(目前已支持多于2節(jié)點,一般2節(jié)點就夠了) 安裝 haproxy+keepalived
[lb]
192.168.7.1 LB_ROLE=backup
192.168.7.2 LB_ROLE=master
#【可選】外部負載均衡,用于自有環(huán)境負載轉(zhuǎn)發(fā) NodePort 暴露的服務(wù)等
[ex-lb]
#192.168.7.6 LB_ROLE=backup EX_VIP=192.168.7.250
#192.168.7.7 LB_ROLE=master EX_VIP=192.168.7.250
[all:vars]
# ---------集群主要參數(shù)---------------
#集群部署模式:allinone, single-master, multi-master
DEPLOY_MODE=multi-master
#集群主版本號,目前支持: v1.8, v1.9, v1.10,v1.11, v1.12, v1.13
K8S_VER="v1.13"
# 集群 MASTER IP即 LB節(jié)點VIP地址,為區(qū)別與默認apiserver端口,設(shè)置VIP監(jiān)聽的服務(wù)端口8443
# 公有云上請使用云負載均衡內(nèi)網(wǎng)地址和監(jiān)聽端口
MASTER_IP="192.168.7.248"
KUBE_APISERVER="https://{{ MASTER_IP }}:6443"
# 集群網(wǎng)絡(luò)插件,目前支持calico, flannel, kube-router, cilium
CLUSTER_NETWORK="calico"
# 服務(wù)網(wǎng)段 (Service CIDR),注意不要與內(nèi)網(wǎng)已有網(wǎng)段沖突
SERVICE_CIDR="10.20.0.0/16"
# POD 網(wǎng)段 (Cluster CIDR),注意不要與內(nèi)網(wǎng)已有網(wǎng)段沖突
CLUSTER_CIDR="172.31.0.0/16"
# 服務(wù)端口范圍 (NodePort Range)
NODE_PORT_RANGE="20000-60000"
# kubernetes 服務(wù) IP (預(yù)分配,一般是 SERVICE_CIDR 中第一個IP)
CLUSTER_KUBERNETES_SVC_IP="10.20.0.1"
# 集群 DNS 服務(wù) IP (從 SERVICE_CIDR 中預(yù)分配)
CLUSTER_DNS_SVC_IP="10.20.254.254"
# 集群 DNS 域名
CLUSTER_DNS_DOMAIN="linux36.local."
# 集群basic auth 使用的用戶名和密碼
BASIC_AUTH_USER="admin"
BASIC_AUTH_PASS="123456"
# ---------附加參數(shù)--------------------
#默認二進制文件目錄
bin_dir="/usr/bin"
#證書目錄
ca_dir="/etc/kubernetes/ssl"
#部署目錄,即 ansible 工作目錄,建議不要修改
base_dir="/etc/ansible"
(4) 準備二進制文件
k8s-master1:/etc/ansible/bin# pwd
/etc/ansible/bin
k8s-master1:/etc/ansible/bin# tar xvf k8s.1-13-5.tar.gz
k8s-master1:/etc/ansible/bin# mv bin/* .
四、開始部署(通過ansible腳本初始化環(huán)境及部署k8s 高可用集群)
1:環(huán)境初始化
root@k8s-master1:/etc/ansible# pwd
/etc/ansible
root@k8s-master1:/etc/ansible# ansible-playbook 01.prepare.yml
2:部署etcd集群
root@k8s-master1:/etc/ansible# ansible-playbook 02.etcd.yml
3:部署docker:
可選更改啟動腳本路徑,但是docker已經(jīng)提前安裝,因此不需要重新執(zhí)行
4:部署master
root@k8s-master1:/etc/ansible# ansible-playbook 04.kube-master.yml
5:部署node
root@k8s-master1:/etc/ansible# vim roles/kube-node/defaults/main.yml
# 基礎(chǔ)容器鏡像
SANDBOX_IMAGE: "harbor.magedu.net/baseimages/pause-amd64:3.1"
root@k8s-master1:/etc/ansible# ansible-playbook 05.kube-node.yml
6:部署網(wǎng)絡(luò)服務(wù)calico
# docker load -i calico-cni.tar
# docker tag calico/cni:v3.4.4 harbor.magedu.net/baseimages/cni:v3.4.4
# docker push harbor.magedu.net/baseimages/cni:v3.4.4
# docker load -i calico-node.tar
# docker tag calico/node:v3.4.4 harbor.magedu.net/baseimages/node:v3.4.4
# docker push harbor.magedu.net/baseimages/node:v3.4.4
# docker load -i calico-kube-controllers.tar
# docker tag calico/kube-controllers:v3.4.4 harbor.magedu.net/baseimages/kube-controllers:v3.4.4
# docker push harbor.magedu.net/baseimages/kube-controllers:v3.4.4
執(zhí)行部署網(wǎng)絡(luò):
root@k8s-master1:/etc/ansible# ansible-playbook 06.network.yml
7:驗證當前狀態(tài)
root@k8s-master1:/etc/ansible# calicoctl node status