- I. Environment preparation
- 1. Set a hostname on every machine
- 2. Configure IP-to-host mappings
- 3. Make sure the machines can reach the external network
- 4. Install the dependencies; note: every machine needs them
- 5. Install iptables
- 6. Disable SELinux and permanently turn off the swap partition (virtual memory)
- 7. Upgrade the Linux kernel to the 4.4 series
- 8. Tune kernel parameters
- 9. Set the system timezone
- 10. Stop services the cluster does not need
- 11. Configure log persistence
- 12. Raise the open-file limit
- 13. Prerequisites for enabling ipvs in kube-proxy
- II. Install Docker
- 1. Install Docker
- 2. Configure the Docker daemon
- 3. Restart the Docker service
- III. Install kubeadm
- IV. Configure kubeadm
- 1. List the required images
- 2. Image download script
- 3. Generate the k8s configuration file
- V. Deploy k8s
- 1. Install the Kubernetes master node
- 2. Commands to run after a successful init
- 3. Verify the installation
- 4. Join the worker nodes
- 5. Verify the nodes joined
- VI. Install the flannel network plugin
- 1. Download the flannel network plugin
- 2. Deploy flannel
- VII. Uninstall
I. Environment preparation
It is recommended to prepare at least three hosts or Alibaba Cloud servers:
k8s-master01: environment for the k8s master
k8s-node01: environment for a k8s worker node
k8s-node02: environment for a k8s worker node
1. Set a hostname on every machine
hostnamectl set-hostname k8s-master01
hostnamectl set-hostname k8s-node01
hostnamectl set-hostname k8s-node02
# check the hostname
hostname
2. Configure IP-to-host mappings
cat >> /etc/hosts << EOF
192.168.231.20 k8s-master01
192.168.231.21 k8s-node01
192.168.231.22 k8s-node02
EOF
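A quick sanity check that the mappings resolve (an optional extra, not part of the original steps):
# every hostname should answer from its mapped address
for h in k8s-master01 k8s-node01 k8s-node02; do ping -c 1 $h; done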
3. Make sure the machines can reach the external network
# For a VM this means two settings: DNS (resolv.conf) and the gateway
cat >> /etc/resolv.conf <<EOF
# Generated by NetworkManager
nameserver 114.114.114.114
EOF
# Inspect the NIC configuration, then set the static IP address and gateway
[root@master01 ~]# cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static # static IP
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=f24a01bc-8e4d-4478-a5ee-8f9f5f1afdda
DEVICE=ens33
ONBOOT=yes
IPV6_PRIVACY=no
IPADDR=192.168.231.20 # matches the k8s-master01 entry in /etc/hosts
PREFIX=24
GATEWAY=192.168.231.2 # gateway
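After editing the file, restart networking so the static address takes effect:
systemctl restart network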
# If the host cannot reach the Internet, configure a (local) yum repository
(omitted)
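A common workaround is to point yum at the Aliyun CentOS 7 mirror (an illustrative sketch; substitute your own local repository if you maintain one):
# back up the stock repo definition, then fetch the Aliyun one
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
yum clean all && yum makecache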
4. Install the dependencies; note: every machine needs them
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git iproute lrzsz bash-completion tree bridge-utils unzip bind-utils gcc
5. Install iptables
Start iptables, enable it at boot, flush its rules, and save the (now empty) ruleset as the default
# stop the built-in firewalld first
systemctl stop firewalld && systemctl disable firewalld
# install iptables-services and flush the rules
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save
6. Disable SELinux and permanently turn off the swap partition (virtual memory)
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
#disable selinux
setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
7. Upgrade the Linux kernel to the 4.4 series
If you are installing a Kubernetes version below v1.19, the kernel upgrade can be skipped
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
#install the long-term-support kernel
yum --enablerepo=elrepo-kernel install -y kernel-lt
#boot from the new kernel by default
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
#Note: the new kernel only takes effect after rebooting the server.
#check the kernel version
uname -r
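If the menu entry name passed to grub2-set-default above does not match your machine, list the installed entries first and copy the exact title (a helper step, not part of the original guide):
# print every grub menu entry title
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg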
8. Tune kernel parameters
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
#copy the tuning file into /etc/sysctl.d/ so it is applied at every boot
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
#apply it immediately
sysctl -p /etc/sysctl.d/kubernetes.conf
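A quick spot-check that the settings are live (note: the net.bridge.* keys only exist once the br_netfilter module is loaded, which step 13 below takes care of):
sysctl net.ipv4.ip_forward
modprobe br_netfilter && sysctl net.bridge.bridge-nf-call-iptables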
9. Set the system timezone
Skip this step if the timezone is already set
#set the timezone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
#write the current UTC time to the hardware clock
timedatectl set-local-rtc 0
#restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
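To confirm the timezone change took effect:
timedatectl | grep -i 'time zone'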
10. Stop services the cluster does not need
systemctl stop postfix && systemctl disable postfix
11. Configure log persistence
#1) create the directory for persistent logs
mkdir /var/log/journal
#2) create the directory for journald drop-in config files
mkdir /etc/systemd/journald.conf.d
#3) create the config file
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
SystemMaxUse=10G
SystemMaxFileSize=200M
MaxRetentionSec=2week
ForwardToSyslog=no
EOF
#4) restart systemd-journald to pick up the new config
systemctl restart systemd-journald
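journald should now be writing to /var/log/journal; this can be confirmed with:
journalctl --disk-usage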
12. Raise the open-file limit (takes effect on the next login)
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
13. Prerequisites for enabling ipvs in kube-proxy
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
##use lsmod to confirm these modules were loaded
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
II. Install Docker
Again using the online installation method
1. Install Docker
yum install -y yum-utils device-mapper-persistent-data lvm2
#then add a stable repository; the repo definition is saved to /etc/yum.repos.d/docker-ce.repo
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#update the yum packages, then install Docker CE
yum update -y && yum install -y docker-ce
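To pin Docker to a specific release instead of taking the latest (an optional sketch; 19.03 was the newest Docker release validated for Kubernetes 1.17):
# list the versions available in the repo, newest first
yum list docker-ce --showduplicates | sort -r
# then install an explicit version, for example
yum install -y docker-ce-19.03.9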
2. Configure the Docker daemon
#create the /etc/docker directory
mkdir /etc/docker
#write daemon.json: systemd cgroup driver, log rotation, and a registry mirror
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"},
  "registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
#Note: watch the file encoding; if Docker fails to start, journalctl -amu docker will show the error
#create the drop-in directory for docker service configuration
mkdir -p /etc/systemd/system/docker.service.d
3. Restart the Docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
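Verify that Docker came back up with the systemd cgroup driver from daemon.json (the kubelet expects the two to match):
docker info | grep -i 'cgroup driver'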
III. Install kubeadm
Installing Kubernetes requires the kubelet, kubeadm and related packages, but the yum source on the official site is packages.cloud.google.com, which is unreachable from mainland China, so we use the Aliyun mirror instead.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
(1) Install kubeadm, kubelet and kubectl (note: their version must match the Kubernetes version being deployed)
(2) --setopt=obsoletes=0 lets yum install these older package versions
yum -y install kubeadm-1.17.5 kubectl-1.17.5 kubelet-1.17.5 --setopt=obsoletes=0
# create the kubelet config file with the following two lines
cat > /etc/sysconfig/kubelet << EOF
KUBELET_EXTRA_ARGS="--fail-swap-on=false"
# make kube-proxy use ipvs rules; if the kernel has not loaded the ipvs modules it automatically falls back to iptables
KUBE_PROXY_MODE=ipvs
EOF
# start kubelet
systemctl enable kubelet && systemctl start kubelet
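Note: until kubeadm init writes /var/lib/kubelet/config.yaml, the kubelet will keep restarting in a loop; that is expected at this stage and can be observed with:
systemctl status kubelet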
IV. Configure kubeadm
1. List the required images
[root@k8s-master01 ~]# kubeadm config images list
I0807 01:08:24.971978 4087 version.go:251] remote version is much newer: v1.18.6; falling back to: stable-1.17
W0807 01:08:34.972414 4087 version.go:101] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.17.txt": Get https://dl.k8s.io/release/stable-1.17.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
W0807 01:08:34.972450 4087 version.go:102] falling back to the local client version: v1.17.5
W0807 01:08:34.972579 4087 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0807 01:08:34.972586 4087 validation.go:28] Cannot validate kubelet config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.5
k8s.gcr.io/kube-controller-manager:v1.17.5
k8s.gcr.io/kube-scheduler:v1.17.5
k8s.gcr.io/kube-proxy:v1.17.5
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5
2. Image download script
Use the script below to batch-download the images.
The versions in it match the list printed above.
[root@k8s-master01 k8s]# cat kubeadm.sh
#!/bin/bash
KUBE_VERSION=v1.17.5
KUBE_PAUSE_VERSION=3.1
ETCD_VERSION=3.4.3-0
CORE_DNS_VERSION=1.6.5
GCR_URL=k8s.gcr.io
ALIYUN_URL=registry.cn-hangzhou.aliyuncs.com/google_containers
images=(
kube-apiserver:${KUBE_VERSION}
kube-controller-manager:${KUBE_VERSION}
kube-scheduler:${KUBE_VERSION}
kube-proxy:${KUBE_VERSION}
pause:${KUBE_PAUSE_VERSION}
etcd:${ETCD_VERSION}
coredns:${CORE_DNS_VERSION}
)
for imageName in ${images[@]} ; do
  docker pull $ALIYUN_URL/$imageName
  docker tag $ALIYUN_URL/$imageName $GCR_URL/$imageName
  docker rmi $ALIYUN_URL/$imageName
done
[root@k8s-master01 k8s]#
# run the script
[root@k8s-master01 k8s]# sh kubeadm.sh
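Once the script finishes, confirm that all seven images now carry the k8s.gcr.io tags kubeadm expects:
docker images | grep k8s.gcr.io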
3. Generate the k8s configuration file
[root@k8s-master01 k8s]# kubeadm config print init-defaults > kubeadm-config.yaml
W0807 01:21:07.816266 5073 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0807 01:21:07.816365 5073 validation.go:28] Cannot validate kubelet config - no validator is available
[root@k8s-master01 k8s]#
[root@k8s-master01 k8s]# ll
total 12
-rw-r--r-- 1 root root 832 Aug 7 01:21 kubeadm-config.yaml
## edit the generated file; the final version:
[root@k8s-master01 k8s]# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.231.20 # change to your own master IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # switch to the Aliyun image repository
kind: ClusterConfiguration
kubernetesVersion: v1.17.5 # match the installed kubeadm/kubelet version
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16" # flannel network range; be sure to set this, otherwise the flannel pods will keep restarting later
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
# route service traffic with ipvs
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
V. Deploy k8s
1. Install the Kubernetes master node
Run the command below to initialize the master node. It points kubeadm at the configuration file we just prepared; the --experimental-upload-certs flag uploads the certificates so they are distributed automatically when more control-plane nodes join later. The trailing tee kubeadm-init.log captures the output for later reference.
# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
# from v1.15 on, --experimental-upload-certs has been renamed --upload-certs, so use:
kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
# on success, the output ends like this
[apiclient] All control plane components are healthy after 37.510224 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
843ab78fea52bcaf2d366f37530161ed90dbec783cf695649bb42f1b3166eba1
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
# run the following:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
# worker nodes will join later with this command
kubeadm join 192.168.231.20:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:4e56e92f97ee6fd5300c8cef06280f71eed3308f65a1bfa8f5842fb40e6a0613
2. Commands to run after a successful init
Following the instructions kubeadm printed, run:
#create the directory that holds the cluster connection config and credentials
mkdir -p $HOME/.kube
#copy the cluster admin kubeconfig
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
#give the current user ownership of the config file
chown $(id -u):$(id -g) $HOME/.kube/config
3. Verify the installation
kubectl get node
# seeing the node listed means the control plane is up
NAME STATUS ROLES AGE VERSION
kubernetes-master NotReady master 8m40s v1.14.1
The node query now succeeds, but the node status is NotReady rather than Ready.
The reason is that we use ipvs + flannel for networking, and the flannel plugin has not been deployed yet, so for now the node stays NotReady.
The phases kubeadm init runs through:
• init: initialize using the requested version
• preflight: run pre-flight checks and download the required Docker images
• kubelet-start: generate the kubelet config file /var/lib/kubelet/config.yaml; without it the kubelet cannot start, which is why the kubelet could not start successfully before initialization
• certificates: generate the certificates Kubernetes uses, stored under /etc/kubernetes/pki
• kubeconfig: generate the kubeconfig files under /etc/kubernetes that the components use to talk to each other
• control-plane: install the master components from the YAML files under /etc/kubernetes/manifests
• etcd: install etcd from /etc/kubernetes/manifests/etcd.yaml
• wait-control-plane: wait for the master components brought up by the control-plane phase
• apiclient: check the health of the master components
• uploadconfig: upload the configuration that was used
• kubelet: configure the kubelet via a ConfigMap
• patchnode: record CNI information on the Node object via annotations
• mark-control-plane: label the current node with the master role and add the NoSchedule taint so Pods are not scheduled onto the master by default
• bootstrap-token: generate and record the token that kubeadm join uses later to add nodes to the cluster
• addons: install the CoreDNS and kube-proxy add-ons
4. Join the worker nodes
Take the join command from the master's init log and run it on every worker node.
[root@k8s-node01 ~]# kubeadm join 192.168.231.20:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:4e56e92f97ee6fd5300c8cef06280f71eed3308f65a1bfa8f5842fb40e6a0613
W0807 01:51:17.636954 12956 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Notes:
token
- the token appears in the log from installing the master
- kubeadm token list prints the current tokens
- if the token has expired, create a new one with kubeadm token create
discovery-token-ca-cert-hash
- the sha256 hash appears in the log from installing the master
- it can also be recomputed with:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
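If both the token and the hash have been lost, kubeadm can also print a complete, fresh join command in one step:
kubeadm token create --print-join-command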
5. Verify the nodes joined
Back on the master:
kubectl get nodes
# the worker has joined the cluster
NAME STATUS ROLES AGE VERSION
kubernetes-master NotReady master 9h v1.14.1
kubernetes-slave1 NotReady <none> 22s v1.14.1
If a worker node was mis-configured when it joined, run
kubeadm reset
on that node to reset it, then run the kubeadm join command again.
To remove a node from the cluster, run kubectl delete node <NAME> on the master.
# check pod status
kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-8686dcc4fd-gwrmb 0/1 Pending 0 9h <none> <none> <none> <none>
coredns-8686dcc4fd-j6gfk 0/1 Pending 0 9h <none> <none> <none> <none>
etcd-kubernetes-master 1/1 Running 1 9h 192.168.141.130 kubernetes-master <none> <none>
kube-apiserver-kubernetes-master 1/1 Running 1 9h 192.168.141.130 kubernetes-master <none> <none>
kube-controller-manager-kubernetes-master 1/1 Running 1 9h 192.168.141.130 kubernetes-master <none> <none>
kube-proxy-496dr 1/1 Running 0 17m 192.168.141.131 kubernetes-slave1 <none> <none>
kube-proxy-rsnb6 1/1 Running 1 9h 192.168.141.130 kubernetes-master <none> <none>
kube-scheduler-kubernetes-master 1/1 Running 1 9h 192.168.141.130 kubernetes-master <none> <none>
As this shows, coredns is not running yet; we still need to install a network plugin.
Kubernetes is made up of the following core components:
- etcd stores the state of the entire cluster;
- apiserver is the single entry point for operating on resources, and provides authentication, authorization, access control, API registration and discovery;
- controller manager maintains the cluster state: failure detection, auto-scaling, rolling updates, and so on;
- scheduler handles resource scheduling, placing Pods onto machines according to the configured scheduling policies;
- kubelet manages the container lifecycle, as well as volumes (CVI) and networking (CNI);
- the container runtime manages images and actually runs Pods and containers (CRI);
- kube-proxy provides in-cluster service discovery and load balancing for Services
VI. Install the flannel network plugin
Deploy the flannel network plugin (this only needs to be done on the master node)
1. Download the flannel network plugin
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
2. Deploy flannel
kubectl create -f kube-flannel.yml
#alternatively, deploy straight from the URL
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
The yml may fail to download; if so, copy it over from another machine.
[root@k8s-master01 k8s]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
Because the mirror registry is unreachable, installing flannel fails with an **ImagePullBackOff** error
[root@k8s-master01 k8s]# kubectl get pod -A | grep flannel
kube-system kube-flannel-ds-amd64-dj8bt 0/1 Init:ImagePullBackOff 0 7m37s
kube-system kube-flannel-ds-amd64-gzbz7 0/1 Init:ImagePullBackOff 0 7m37s
kube-system kube-flannel-ds-amd64-j28bs 0/1 Init:ImagePullBackOff 0 7m37s
[root@k8s-master01 k8s]# kubectl describe pod kube-flannel-ds-amd64-dj8bt -n kube-system
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 2m31s default-scheduler Successfully assigned kube-system/kube-flannel-ds-amd64-dj8bt to k8s-master01
Normal Pulling 69s (x4 over 2m30s) kubelet, k8s-master01 Pulling image "quay-mirror.qiniu.com/coreos/flannel:v0.12.0-amd64"
Warning Failed 68s (x4 over 2m30s) kubelet, k8s-master01 Failed to pull image "quay-mirror.qiniu.com/coreos/flannel:v0.12.0-amd64": rpc error: code = Unknown desc = Error response from daemon: Get https://quay-mirror.qiniu.com/v2/: x509: certificate has expired or is not yet valid
Warning Failed 68s (x4 over 2m30s) kubelet, k8s-master01 Error: ErrImagePull
Normal BackOff 45s (x6 over 2m30s) kubelet, k8s-master01 Back-off pulling image "quay-mirror.qiniu.com/coreos/flannel:v0.12.0-amd64"
Warning Failed 34s (x7 over 2m30s) kubelet, k8s-master01 Error: ImagePullBackOff
Fix:
Pull the image manually from another registry and re-tag it.
# run on every master and node
### check the exact version and tag in the describe output above
docker pull easzlab/flannel:v0.12.0-amd64
docker tag easzlab/flannel:v0.12.0-amd64 quay-mirror.qiniu.com/coreos/flannel:v0.12.0-amd64
The flannel pods keep retrying, so once the image is re-tagged they start on their own.
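To watch them come up (this assumes the stock kube-flannel.yml, which labels the DaemonSet pods app=flannel):
kubectl -n kube-system get pod -l app=flannel -w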
Check the node status again: the nodes where flannel is already up are now Ready.
[root@k8s-master01 k8s]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master01 Ready master 50m v1.17.5
k8s-node01 NotReady <none> 45m v1.17.5
k8s-node02 Ready <none> 45m v1.17.5
Flannel keeps restarting:
this happens when the podSubnet option for flannel was not set in kubeadm-config.yaml
Reference: https://my.oschina.net/jianming/blog/2354157
VII. Uninstall
kubeadm reset -f
modprobe -r ipip
lsmod
rm -rf ~/.kube/
rm -rf /etc/kubernetes/
rm -rf /etc/systemd/system/kubelet.service.d
rm -rf /etc/systemd/system/kubelet.service
rm -rf /usr/bin/kube*
rm -rf /etc/cni
rm -rf /opt/cni
rm -rf /var/lib/etcd
rm -rf /var/etcd
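If ipvs rules or iptables chains from the old cluster linger after the reset, clear them too (an extra step beyond the original list):
ipvsadm --clear
iptables -F && iptables -t nat -F && iptables -X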