搭建K8S高可用集群(二进制方式)教学

Windows Windows 2个月前 (08-15) 6次浏览 未收录 0个评论 扫描二维码

———————————————

1、系统概述

2、自签Etcd SSL证书

3、Etcd数据库集群部署

4、Node安装Docker

5、Flannel容器集群网络部署

6、自签APIServer SSL证书

7、部署Master组件

8、生成Node kubeconfig文件

9、部署Node组件

10、安装nginx

11、安装keepalived

12、节点发现

13、运行一个测试示例

———————————————

1、系统概述

操作系统版本:CentOS7.5

k8s版本:1.12

系统要求:关闭swap、selinux、iptables

具体信息:

搭建K8S高可用集群(二进制方式)教学

拓扑图:

搭建K8S高可用集群(二进制方式)教学

二进制包下载地址

etcd:

https://github.com/coreos/etcd/releases/tag/v3.2.12

flannel:

https://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz

k8s:

https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.12.md

2、自签Etcd SSL证书

master01操作:

# cat cfssl.sh
#!/bin/bash
# Download the cfssl toolchain (cfssl, cfssljson, cfssl-certinfo)
# and install the binaries onto the PATH.
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
mv cfssl-certinfo_linux-amd64 /usr/bin/cfssl-certinfo

自签Etcd SSL证书

# cat cert-etcd.sh
#!/bin/bash
# Self-sign the etcd CA and server certificate with cfssl.
# NOTE(review): the JSON heredoc bodies were lost when this page was scraped;
# reconstructed below from the standard cfssl etcd recipe — verify the hosts
# list matches your three etcd node IPs.
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": ["signing", "key encipherment", "server auth", "client auth"]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "Beijing", "ST": "Beijing"}]
}
EOF

# Generate ca.pem / ca-key.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.247.161",
    "192.168.247.162",
    "192.168.247.163"
  ],
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "Beijing", "ST": "Beijing"}]
}
EOF

# Generate server.pem / server-key.pem signed by the CA above
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
#ll*.pem
-rw-------1rootroot1675Jan1115:50ca-key.pem
-rw-r--r--1rootroot1265Jan1115:50ca.pem
-rw-------1rootroot1679Jan1115:50server-key.pem
-rw-r--r--1rootroot1338Jan1115:50server.pem

3、Etcd数据库集群部署

master01 02 03操作:

# mkdir -pv /opt/etcd/{bin,cfg,ssl}
# tar zxvf etcd-v3.2.12-linux-amd64.tar.gz
# mv etcd-v3.2.12-linux-amd64/{etcd,etcdctl} /opt/etcd/bin/

master01操作:

#cdcert-etcd/
[root@master01cert-etcd]#ll
total40
-rw-r--r--1rootroot287Jan1115:50ca-config.json
-rw-r--r--1rootroot956Jan1115:50ca.csr
-rw-r--r--1rootroot209Jan1115:50ca-csr.json
-rw-------1rootroot1675Jan1115:50ca-key.pem
-rw-r--r--1rootroot1265Jan1115:50ca.pem
-rw-r--r--1rootroot1013Jan1115:50server.csr
-rw-r--r--1rootroot296Jan1115:50server-csr.json
-rw-------1rootroot1679Jan1115:50server-key.pem
-rw-r--r--1rootroot1338Jan1115:50server.pem
-rwxr-xr-x1rootroot1076Jan1115:50ssl-etcd.sh
[root@master01cert-etcd]#cp*.pem/opt/etcd/ssl/
#scp-r/opt/etcdmaster02:/opt/
#scp-r/opt/etcdmaster03:/opt/

分别在master01 02 03操作:

# cat etcd.sh
#!/bin/bash
# Render /opt/etcd/cfg/etcd and the etcd systemd unit, then start etcd.
# example: ./etcd.sh etcd01 192.168.1.10 etcd02=https://192.168.1.11:2380,etcd03=https://192.168.1.12:2380

ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3

WORK_DIR=/opt/etcd

cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# \$VAR stays literal so systemd expands it from the EnvironmentFile;
# ${WORK_DIR} is expanded now, while rendering the unit.
cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \\
--name=\${ETCD_NAME} \\
--data-dir=\${ETCD_DATA_DIR} \\
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \\
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \\
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \\
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \\
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \\
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \\
--initial-cluster-state=new \\
--cert-file=${WORK_DIR}/ssl/server.pem \\
--key-file=${WORK_DIR}/ssl/server-key.pem \\
--peer-cert-file=${WORK_DIR}/ssl/server.pem \\
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \\
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \\
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl start etcd
systemctl enable etcd
# ./etcd.sh etcd01 192.168.247.161 etcd02=https://192.168.247.162:2380,etcd03=https://192.168.247.163:2380
# scp etcd.sh master02:/root/
# scp etcd.sh master03:/root/
[root@master02 ~]# ./etcd.sh etcd02 192.168.247.162 etcd01=https://192.168.247.161:2380,etcd03=https://192.168.247.163:2380
[root@master03 ~]# ./etcd.sh etcd03 192.168.247.163 etcd01=https://192.168.247.161:2380,etcd02=https://192.168.247.162:2380
[root@master01 ~]# systemctl restart etcd
# cd /opt/etcd/ssl
# /opt/etcd/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
--endpoints="https://192.168.247.161:2379,https://192.168.247.162:2379,https://192.168.247.163:2379" \
cluster-health
member 1afd7ff8f95cf93 is healthy: got healthy result from https://192.168.247.161:2379
member 8f4e6ce663f0d49a is healthy: got healthy result from https://192.168.247.162:2379
member b6230d9c6f20feeb is healthy: got healthy result from https://192.168.247.163:2379
cluster is healthy

如有报错,查看/var/log/message日志

4、node节点安装docker

可以放到脚本内执行

# cat docker.sh
#!/bin/bash
# Remove legacy docker packages, add the docker-ce repo (switched to the
# Tsinghua mirror), then install and start docker-ce.
yum remove -y docker docker-common docker-selinux docker-engine
yum install -y yum-utils device-mapper-persistent-data lvm2
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast
yum install -y docker-ce
systemctl enable docker
systemctl start docker
docker version

如果拉取镜像较慢,可以配置daocloud提供的docker加速器

5、Flannel网络部署

master01执行:

# pwd
/opt/etcd/ssl
# /opt/etcd/bin/etcdctl \
--ca-file=ca.pem --cert-file=server.pem --key-file=server-key.pem \
--endpoints="https://192.168.247.161:2379,https://192.168.247.162:2379,https://192.168.247.163:2379" \
set /coreos.com/network/config '{"Network":"172.17.0.0/16","Backend":{"Type":"vxlan"}}'

node01执行:

#wgethttps://github.com/coreos/flannel/releases/download/v0.10.0/flannel-v0.10.0-linux-amd64.tar.gz
#tarzxvfflannel-v0.10.0-linux-amd64.tar.gz
#mkdir-pv/opt/kubernetes/{bin,cfg,ssl}
#mvflanneldmk-docker-opts.sh/opt/kubernetes/bin/
# cat /opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.247.161:2379,https://192.168.247.162:2379,https://192.168.247.163:2379 -etcd-cafile=/opt/etcd/ssl/ca.pem -etcd-certfile=/opt/etcd/ssl/server.pem -etcd-keyfile=/opt/etcd/ssl/server-key.pem"

将master节点的/opt/etcd/ssl/*拷贝到node节点

[root@master01~]#scp-r/opt/etcd/sslnode01:/opt/etcd/
# cat /usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service

[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq $FLANNEL_OPTIONS
# Writes /run/flannel/subnet.env so docker.service can pick up the subnet
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure

[Install]
WantedBy=multi-user.target
# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target

[Service]
Type=notify
# subnet.env is produced by flanneld's ExecStartPost (mk-docker-opts.sh)
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s

[Install]
WantedBy=multi-user.target

重启flannel和docker:

#systemctldaemon-reload
#systemctlstartflanneld
#systemctlenableflanneld
#systemctlrestartdocker
#systemctlenabledocker
#cat/run/flannel/subnet.env
DOCKER_OPT_BIP="--bip=172.17.12.1/24"
DOCKER_OPT_IPMASQ="--ip-masq=false"
DOCKER_OPT_MTU="--mtu=1450"
DOCKER_NETWORK_OPTIONS="--bip=172.17.12.1/24--ip-masq=false--mtu=1450"
#ipa
5:docker0:
     
      mtu1500qdiscnoqueuestateDOWNgroupdefault
link/ether02:42:f0:62:07:73brdff:ff:ff:ff:ff:ff
inet172.17.12.1/24brd172.17.12.255scopeglobaldocker0
valid_lftforeverpreferred_lftforever
6:flannel.1:
      
       mtu1450qdiscnoqueuestateUNKNOWNgroupdefault link/etherca:e9:e0:d4:05:bebrdff:ff:ff:ff:ff:ff inet172.17.12.0/32scopeglobalflannel.1 valid_lftforeverpreferred_lftforever inet6fe80::c8e9:e0ff:fed4:5be/64scopelink valid_lftforeverpreferred_lftforever
      
     

将介质及配置文件拷贝至node02节点

#scp-r/opt/kubernetesnode02:/opt/
#cd/usr/lib/systemd/system/
#scpflanneld.servicedocker.servicenode02:/usr/lib/systemd/system/
#scp-r/opt/etcd/ssl/node02:/opt/etcd/

node02执行:

#mkdir/opt/etcd
#systemctldaemon-reload
#systemctlstartflanneld
#systemctlenableflanneld
#systemctlrestartdocker
#ipa
5:docker0:
     
      mtu1500qdiscnoqueuestateDOWNgroupdefault
link/ether02:42:ca:2c:48:dfbrdff:ff:ff:ff:ff:ff
inet172.17.16.1/24brd172.17.16.255scopeglobaldocker0
valid_lftforeverpreferred_lftforever
6:flannel.1:
      
       mtu1450qdiscnoqueuestateUNKNOWNgroupdefault link/etheree:73:b2:e8:46:c1brdff:ff:ff:ff:ff:ff inet172.17.16.0/32scopeglobalflannel.1 valid_lftforeverpreferred_lftforever inet6fe80::ec73:b2ff:fee8:46c1/64scopelink valid_lftforeverpreferred_lftforever
      
     

网络测试:

[root@node02opt]#ping172.17.12.1
PING172.17.12.1(172.17.12.1)56(84)bytesofdata.
64bytesfrom172.17.12.1:icmp_seq=1ttl=64time=1.07ms
64bytesfrom172.17.12.1:icmp_seq=2ttl=64time=0.300ms
[root@node01system]#ping172.17.16.1
PING172.17.16.1(172.17.16.1)56(84)bytesofdata.
64bytesfrom172.17.16.1:icmp_seq=1ttl=64time=1.13ms

6、自签APIServer SSL证书

在master01执行:

# cat cert-k8s.sh
#!/bin/bash
# Self-sign the Kubernetes CA plus apiserver, kube-proxy and admin certs.
# NOTE(review): the JSON heredoc bodies were lost when this page was scraped;
# reconstructed from the standard cfssl recipe for a k8s 1.12 binary install —
# confirm the server-csr hosts list covers every master IP plus the VIP.

# CA
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {"expiry": "87600h"},
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": ["signing", "key encipherment", "server auth", "client auth"]
      }
    }
  }
}
EOF

cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "Beijing", "ST": "Beijing", "O": "k8s", "OU": "System"}]
}
EOF

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

# apiserver server certificate: masters, LB VIP and in-cluster service names
cat > server-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.247.160",
    "192.168.247.161",
    "192.168.247.162",
    "192.168.247.163",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "Beijing", "ST": "Beijing", "O": "k8s", "OU": "System"}]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server

# kube-proxy client certificate
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "Beijing", "ST": "Beijing", "O": "k8s", "OU": "System"}]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy

# admin (kubectl) client certificate
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {"algo": "rsa", "size": 2048},
  "names": [{"C": "CN", "L": "Beijing", "ST": "Beijing", "O": "system:masters", "OU": "System"}]
}
EOF

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#ll*.pem
-rw-------1rootroot1679Jan1122:06admin-key.pem
-rw-r--r--1rootroot1399Jan1122:06admin.pem
-rw-------1rootroot1679Jan1122:06ca-key.pem
-rw-r--r--1rootroot1359Jan1122:06ca.pem
-rw-------1rootroot1675Jan1122:06kube-proxy-key.pem
-rw-r--r--1rootroot1403Jan1122:06kube-proxy.pem
-rw-------1rootroot1679Jan1122:06server-key.pem
-rw-r--r--1rootroot1651Jan1122:06server.pem

7、部署Master组件

master01、02、03执行:

#mkdir-pv/opt/kubernetes/{bin,cfg,ssl}
#tarzxvfkubernetes-server-linux-amd64.tar.gz
#cdkubernetes/server/bin
#cpkube-apiserverkube-schedulerkube-controller-managerkubectl/opt/kubernetes/bin/

#pwd
/root/cert-k8s
#cp*.pem/opt/kubernetes/ssl/

# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
1c96cf8a12d4555a52e89bf3925a5c87

# cat /opt/kubernetes/cfg/token.csv
1c96cf8a12d4555a52e89bf3925a5c87,kubelet-bootstrap,10001,"system:kubelet-bootstrap"

1)、api-server:

# cat api-server.sh
#!/bin/bash
# Render /opt/kubernetes/cfg/kube-apiserver and its systemd unit, then start it.
# example: ./api-server.sh 192.168.247.161 https://192.168.247.161:2379,https://192.168.247.162:2379,https://192.168.247.163:2379
MASTER_IP=$1
ETCD_SERVERS=$2

cat <<EOF >/opt/kubernetes/cfg/kube-apiserver

KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_IP} \\
--secure-port=6443 \\
--advertise-address=${MASTER_IP} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver

# ./api-server.sh 192.168.247.161 https://192.168.247.161:2379,https://192.168.247.162:2379,https://192.168.247.163:2379

2)、scheduler组件

# cat scheduler.sh
#!/bin/bash
# Render /opt/kubernetes/cfg/kube-scheduler and its systemd unit, then start it.
# Talks to the local apiserver over the insecure port (127.0.0.1:8080).
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=127.0.0.1:8080 \\
--leader-elect"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler

# ./scheduler.sh

部署controller-manager组件

# cat controller-manager.sh
#!/bin/bash
# Render /opt/kubernetes/cfg/kube-controller-manager and its systemd unit,
# then start it. Signs kubelet certificates with the cluster CA.
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=127.0.0.1:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager

# sh controller-manager.sh

添加环境变量

K8S_HOME=/opt/kubernetes
PATH=$K8S_HOME/bin:$PATH
[root@master01~]#kubectlgetcs
#kubectlgetcs
NAMESTATUSMESSAGEERROR
schedulerHealthyok
controller-managerHealthyok
etcd-1Healthy{"health":"true"}
etcd-2Healthy{"health":"true"}
etcd-0Healthy{"health":"true"}

[root@master02~]#kubectlgetcs
NAMESTATUSMESSAGEERROR
schedulerHealthyok
controller-managerHealthyok
etcd-2Healthy{"health":"true"}
etcd-0Healthy{"health":"true"}
etcd-1Healthy{"health":"true"}

[root@master03~]#kubectlgetcs
NAMESTATUSMESSAGEERROR
schedulerHealthyok
controller-managerHealthyok
etcd-1Healthy{"health":"true"}
etcd-0Healthy{"health":"true"}
etcd-2Healthy{"health":"true"}

8、生成Node kubeconfig文件

[root@master01~]#scpkubernetes/server/bin/{kubelet,kube-proxy}node01:/opt/kubernetes/bin/
[root@master01~]#scpkubernetes/server/bin/{kubelet,kube-proxy}node02:/opt/kubernetes/bin/
master01执行:
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap

在master01执行:

cat kubeconfig.sh
#!/bin/bash
# Generate bootstrap.kubeconfig (kubelet TLS bootstrapping) and
# kube-proxy.kubeconfig, both pointing at the load-balanced apiserver.
# Usage: ./kubeconfig.sh <APISERVER_IP> <SSL_DIR>
APISERVER=$1
SSL_DIR=$2

# First field of token.csv is the bootstrap token generated earlier.
export BOOTSTRAP_TOKEN=$(awk -F',' 'NR==1 {print $1}' /opt/kubernetes/cfg/token.csv)
export KUBE_APISERVER="https://$APISERVER:6443"

# Cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=bootstrap.kubeconfig

# Client credentials (token-based, for bootstrapping)
kubectl config set-credentials kubelet-bootstrap \
  --token=${BOOTSTRAP_TOKEN} \
  --kubeconfig=bootstrap.kubeconfig

# Context
kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig

# Default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

#----------------------

# kube-proxy kubeconfig (client-certificate based)

kubectl config set-cluster kubernetes \
  --certificate-authority=$SSL_DIR/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=$SSL_DIR/kube-proxy.pem \
  --client-key=$SSL_DIR/kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

# ./kubeconfig.sh 192.168.247.160 /opt/kubernetes/ssl
#ll
total16
-rw-------1rootroot2169Jan1208:09bootstrap.kubeconfig
-rwxr-xr-x1rootroot1419Jan1208:07kubeconfig.sh
-rw-------1rootroot6271Jan1208:09kube-proxy.kubeconfig
#scpbootstrap.kubeconfigkube-proxy.kubeconfignode01:/opt/kubernetes/cfg/
#scpbootstrap.kubeconfigkube-proxy.kubeconfignode02:/opt/kubernetes/cfg/

9、部署Node组件

在node01、02执行:

1)、部署kubelet组件

cat kubelet.sh
#!/bin/bash
# Render the kubelet flag file, the KubeletConfiguration YAML and the
# systemd unit for one node, then start kubelet.
# Usage: ./kubelet.sh <NODE_IP>
NODE_IP=$1

cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_IP} \\
--hostname-override=${NODE_IP} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF

cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_IP}
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS: ["10.0.0.2"]
clusterDomain: cluster.local.
failSwapOn: false
authentication:
  anonymous:
    enabled: true
EOF

cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet

# ./kubelet.sh 192.168.247.171
# ./kubelet.sh 192.168.247.172

2)、部署kube-proxy组件:

cat kube-proxy.sh
#!/bin/bash
# Render the kube-proxy flag file and systemd unit for one node, then start it.
# Usage: ./kube-proxy.sh <NODE_IP>

NODE_IP=$1

cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_IP} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF

cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF

systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy

# ./kube-proxy.sh 192.168.247.171
# ./kube-proxy.sh 192.168.247.172

10、安装nginx

使用nginx四层进行转发

# cat nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
# NOTE(review): gpgcheck=0 skips package signature verification; consider
# enabling it with nginx's signing key in production.
gpgcheck=0
enabled=1
#yuminstallnginx

1) LB01和LB02配置:

nginx配置文件添加以下内容:

# cat /etc/nginx/nginx.conf
# Layer-4 (TCP) load balancing of the three kube-apiservers.
# Runs on the LB nodes, so listening on 6443 does not clash with the masters.
stream {
    log_format main "$remote_addr $upstream_addr $time_local $status";
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.247.161:6443;
        server 192.168.247.162:6443;
        server 192.168.247.163:6443;
    }
    server {
        listen 0.0.0.0:6443;
        proxy_pass k8s-apiserver;
    }
}

11、安装keepalived

#yuminstallkeepalived
#yuminstalllibnl3-develipset-devel
# cat /etc/keepalived/check_nginx.sh
#!/bin/bash
# keepalived health check: when no nginx process is running, stop keepalived
# so the VIP fails over to the backup load balancer.
count=$(ps -ef | grep nginx | egrep -cv "grep|$$")
if [ "$count" -eq 0 ]; then
    # fix: original said "keeplived" (typo), which silently broke failover
    systemctl stop keepalived
fi
# chmod 755 check_nginx.sh

LB01配置:

# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    ! NOTE(review): vrrp_strict can drop traffic to the VIP on some setups --
    ! remove it if the VIP is unreachable after failover.
    vrrp_strict
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.247.160/24
    }
    track_script {
        check_nginx
    }
}

LB02配置:

# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! Backup LB: identical to LB01 except state BACKUP and lower priority.

global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 192.168.200.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
    vrrp_skip_check_adv_addr
    vrrp_strict
    vrrp_garp_interval 0
    vrrp_gna_interval 0
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.247.160/24
    }
    track_script {
        check_nginx
    }
}
# systemctl enable nginx
# systemctl start nginx
# systemctl enable keepalived
# systemctl start keepalived

12、节点发现

#kubectlgetcsr
NAMEAGEREQUESTORCONDITION
node-csr-gvRm9pzQJCj4cp_hGYp5qwW93LLdAbVPtz7AaztlGv817mkubelet-bootstrapPending
node-csr-luowueA4U43ca96d-Ff64X7o8p9BW6MGIxWfASUPukE20mkubelet-bootstrapPending
#kubectlcertificateapprovenode-csr-gvRm9pzQJCj4cp_hGYp5qwW93LLdAbVPtz7AaztlGv8
certificatesigningrequest.certificates.k8s.io/node-csr-gvRm9pzQJCj4cp_hGYp5qwW93LLdAbVPtz7AaztlGv8approved
#kubectlcertificateapprovenode-csr-luowueA4U43ca96d-Ff64X7o8p9BW6MGIxWfASUPukE
certificatesigningrequest.certificates.k8s.io/node-csr-luowueA4U43ca96d-Ff64X7o8p9BW6MGIxWfASUPukEapproved
#kubectlgetnode
NAMESTATUSROLESAGEVERSION
192.168.247.171Ready
           
            12sv1.12.4
192.168.247.172Ready
            
             9m41sv1.12.4
            
           

13、运行一个测试示例

# kubectl run nginx --image=nginx --replicas=3
#kubectlgetpod-owide
NAMEREADYSTATUSRESTARTSAGEIPNODENOMINATEDNODE
nginx-dbddb74b8-dkhcw1/1Running038m172.17.35.2192.168.247.172
           
            
nginx-dbddb74b8-rdf2v1/1Running038m172.17.17.2192.168.247.171
            
              nginx-dbddb74b8-rn9l61/1Running038m172.17.35.3192.168.247.172
             
               #kubectlexposedeploymentnginx--port=88--target-port=80--type=NodePort service/nginxexposed #kubectlgetsvc NAMETYPECLUSTER-IPEXTERNAL-IPPORT(S)AGE kubernetesClusterIP10.0.0.1
              
               443/TCP12h nginxNodePort10.0.0.30
               
                88:48363/TCP6s
               
              
             
            
           
喜欢 (0)
[]
分享 (0)
关于作者:
发表我的评论
取消评论
表情 贴图 加粗 删除线 居中 斜体 签到

Hi,您需要填写昵称和邮箱!

  • 昵称 (必填)
  • 邮箱 (必填)
  • 网址