Environment

Server specs:

  • CentOS Linux release 7.9.2009 (Core)
  • 4 vCPUs / 8 GB RAM

Firewall: disabled
SELinux: SELINUX=disabled
Software versions:

  • docker: 20.10.22
  • docker-compose: 2.15.1
  • kubeadm: 1.26.2; kubelet: 1.26.2; kubectl: 1.26.2
  • containerd: 1.6.18
  • flannel: v0.20.0

I. Environment setup

1. hostname

```bash
hostnamectl set-hostname tenxun-jing

vim /etc/hosts
127.0.0.1 tenxun-jing
```
2. Firewall and system settings

```bash
### 1. Disable the firewall and SELinux
systemctl stop firewalld && \
systemctl disable firewalld && \
setenforce 0 && \
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

### 2. Verify
getenforce && \
grep "^SELINUX=" /etc/selinux/config && \
systemctl status firewalld |grep -B 1 'Active'

### 3. Disable swap
swapoff -a
sed -i '/swap/s/^\(.*\)$/#\1/g' /etc/fstab

### 4. Kernel parameters
modprobe br_netfilter
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf

### 5. Enable ipvs
# A Kubernetes Service can be proxied in one of two modes, iptables or ipvs.
# ipvs performs noticeably better, but its kernel modules must be loaded manually.

# 5.1 Install ipset and ipvsadm
yum install ipset ipvsadm -y
# 5.2 Write the modules to load into a script
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# 5.3 Make the script executable
chmod +x /etc/sysconfig/modules/ipvs.modules
# 5.4 Run it
/bin/bash /etc/sysconfig/modules/ipvs.modules
# 5.5 Check that the modules loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4

### 6. Time sync (optional)
yum -y install ntpdate
# Try a manual sync first. If ntpdate is missing, install it; if the address is
# unreachable, open the route or switch to an internal NTP server.
ntpdate ntp.aliyun.com

# Schedule a periodic sync
echo '*/15 * * * * ntpdate ntp.aliyun.com > /dev/null 2>&1' >> /var/spool/cron/root
crontab -l
```
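
A quick sanity check, assuming the steps above all ran on this node: br_netfilter should be loaded and the three sysctls should read back as 1.

```bash
# Verify br_netfilter is loaded and the k8s sysctls took effect
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables \
       net.bridge.bridge-nf-call-ip6tables \
       net.ipv4.ip_forward
# Expected: all three values print as 1
```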
3. Dependencies

```bash
yum -y update
yum -y install lrzsz device-mapper-persistent-data lvm2 wget net-tools nfs-utils gcc gcc-c++ make cmake libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack telnet nc
```
4. Docker installation (optional)

```bash
# Step 1: install the required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: add the repo
sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: point the repo at the Aliyun mirror
sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: refresh the cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce
# Step 5: start the Docker service
sudo service docker start
```
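
Not required for this containerd-based setup, but if you do run Docker alongside it, one common tweak is to align Docker's cgroup driver with the systemd driver kubelet uses below; a minimal sketch:

```bash
# Switch Docker to the systemd cgroup driver so it matches kubelet
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker
docker info | grep -i cgroup   # should report: Cgroup Driver: systemd
```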

II. k8s installation

1. Install kubeadm, kubelet, kubectl

```bash
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum makecache fast
yum install -y kubelet-1.26.2 kubeadm-1.26.2 kubectl-1.26.2

systemctl enable --now kubelet
systemctl is-active kubelet
```
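
A quick check that all three binaries landed at the pinned version:

```bash
kubeadm version -o short              # expect v1.26.2
kubelet --version                     # expect Kubernetes v1.26.2
kubectl version --client -o yaml | grep gitVersion
# Note: kubelet will crash-loop until kubeadm init generates its config; that is expected.
```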
2. Install containerd and configure crictl
2.1 Install containerd
```bash
### 1. Install
# Skip adding the repo if it is already present
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

yum list available |grep containerd
yum install -y containerd.io-1.6.18

### 2. Generate the default config
containerd config default > /etc/containerd/config.toml

### 3. Edit the config
# Switch cgroups to systemd
sed -i 's#SystemdCgroup = false#SystemdCgroup = true#' /etc/containerd/config.toml

# Change the pause image address (sandbox_image=)
# containerd.io-1.6.18 defaults to: registry.k8s.io
# containerd.io-1.6.6 defaults to: k8s.gcr.io
sed -i 's#registry.k8s.io#registry.aliyuncs.com/google_containers#' /etc/containerd/config.toml

# Bump the version tag:
# check the pause tag first; containerd defaults to 3.6, kubeadm 1.26 needs 3.9
sed -i 's#pause:3.6#pause:3.9#' /etc/containerd/config.toml

# Move the container storage root to a path with more free space if needed
# default: root = "/var/lib/containerd"

################## Supplement [systemd driver] BEGIN ##################

### 4. Check the k8s cgroup driver
# k8s 1.26.2 defaults to the systemd driver
# List the ConfigMaps and find kubelet-config
kubectl get cm -n kube-system
# Inspect the cgroupDriver value; expect cgroupDriver: systemd
kubectl edit cm kubelet-config -n kube-system

### 5. Check kubelet's default driver
# Locate the kubelet config file
# kubelet 1.26.2 from yum defaults to /var/lib/kubelet/config.yaml
systemctl status kubelet.service |grep 'config'
# Inspect the setting; the default is systemd
grep "cgroupDriver" /var/lib/kubelet/config.yaml
# output:
# cgroupDriver: systemd

### 6. The driver can also be set at kubeadm init time
# Print the init defaults and check the default cgroupDriver
kubeadm config print init-defaults --component-configs KubeletConfiguration
# The default is systemd; to change it, generate kubeadm.yml and add
# cgroupDriver under kind: KubeletConfiguration

# For example, append the following to kubeadm.yml:
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd

### Note: all of the drivers above must agree

################## Supplement [systemd driver] END ##################
```

containerd registry mirror configuration

  • Method 1: separate host config files

    ```shell
    # 1. Edit /etc/containerd/config.toml
    [plugins."io.containerd.grpc.v1.cri".registry]
      config_path = "/etc/containerd/certs.d" # directory of per-registry host files

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

    # 2. Create the matching directory
    mkdir /etc/containerd/certs.d/docker.io -pv

    # 3. Configure the mirror
    cat > /etc/containerd/certs.d/docker.io/hosts.toml << EOF
    server = "https://docker.io"
    [host."https://xxxxxxxx.mirror.aliyuncs.com"]
      capabilities = ["pull", "resolve"]
    EOF

    # 4. Restart containerd
    systemctl restart containerd

    # 5. Pull an image to test
    ctr i pull docker.io/library/nginx:latest
    ```
  • Method 2: inline in /etc/containerd/config.toml

    ```shell
    # 1. Edit /etc/containerd/config.toml
    [plugins."io.containerd.grpc.v1.cri".registry]

      [plugins."io.containerd.grpc.v1.cri".registry.auths]

      [plugins."io.containerd.grpc.v1.cri".registry.configs]

      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        # add the mirror entries here
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
          endpoint = ["https://registry.cn-hangzhou.aliyuncs.com"]

    # 2. Restart containerd
    systemctl restart containerd
    ```

    containerd proxy configuration, for reference:

  • Prerequisite: you have a proxy; if you don't, skip this

    ```bash
    # 1. Add the proxy
    vim /lib/systemd/system/containerd.service

    # Under [Service], add:
    Environment="http_proxy=http://127.0.0.1:7890"
    Environment="https_proxy=http://127.0.0.1:7890"
    Environment="ALL_PROXY=socks5://127.0.0.1:7891"
    Environment="all_proxy=socks5://127.0.0.1:7891"

    # 2. Restart
    systemctl daemon-reload && \
    systemctl restart containerd
    ```
2.2 Configure crictl

```bash
# Config file: /etc/crictl.yaml; point the sock addresses at containerd
cat <<EOF> /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
EOF
```
2.3 Start the service

```bash
systemctl enable containerd && \
systemctl daemon-reload && \
systemctl restart containerd

systemctl status containerd
```
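
A quick sanity check, assuming the config above is in place: crictl should reach containerd over the configured socket, and the edited settings should show up in containerd's live config.

```bash
# crictl should now talk to containerd over the configured socket
crictl version
crictl images
# confirm the systemd cgroup and pause image edits took effect
containerd config dump | grep -E 'SystemdCgroup|sandbox_image'
```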
3. Install k8s

Initializing from the command line:

```bash
# 1. Official registry
kubeadm init \
--apiserver-advertise-address=10.0.4.12 \
--image-repository registry.k8s.io \
--kubernetes-version v1.26.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--cri-socket /var/run/containerd/containerd.sock \
--ignore-preflight-errors=all

# 2. Aliyun registry
kubeadm init \
--apiserver-advertise-address=10.0.4.12 \
--image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
--kubernetes-version v1.26.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--cri-socket /var/run/containerd/containerd.sock \
--ignore-preflight-errors=all
```
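
Optionally, pre-pull the control-plane images before running either form above, so the init itself is faster and pull failures surface early; the flags mirror the init command:

```bash
kubeadm config images pull \
  --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers \
  --kubernetes-version v1.26.2
# confirm they landed in containerd's k8s.io namespace
ctr -n k8s.io images ls -q | head
```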
3.1 Generate kubeadm.yml

```bash
kubeadm config print init-defaults > kubeadm.yml

vim kubeadm.yml
```

Change the following settings: the node name, the host IP, the pod CIDR, the service CIDR, the k8s version, and the image repository.

```yaml
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: tenxun-jing
  taints: null
....
localAPIEndpoint:
  advertiseAddress: 172.22.109.126
  bindPort: 6443
....
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.26.2
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
```

The modified config:

```yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.99
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: tenxun-jing
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.26.2
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
```
3.2 Initialize with kubeadm.yml

```bash
# List the required images
kubeadm config images list --config ./kubeadm.yml
# Pull the images
kubeadm config images pull --config ./kubeadm.yml
# Preflight checks
kubeadm init phase preflight --config=./kubeadm.yml
# Initialize k8s from the config file
kubeadm init --config=./kubeadm.yml --upload-certs --v=6
```

```bash
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf

cat >> /etc/profile << EOF
export KUBECONFIG=/etc/kubernetes/admin.conf
EOF
source /etc/profile
```
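
At this point kubectl should reach the API server; a quick check (the node stays NotReady until the CNI plugin is installed in section 4):

```bash
kubectl cluster-info
kubectl get nodes -o wide          # NotReady is expected before flannel is applied
kubectl get pods -n kube-system    # coredns stays Pending until the CNI is up
```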
3.3 kubectl completion

```bash
# 1. Install the bash-completion package
yum install bash-completion -y
# otherwise you get:
# -bash: _get_comp_words_by_ref: command not found

# 2. Load bash_completion
source /usr/share/bash-completion/bash_completion

# 3. Load kubectl completion
# enable autocompletion in the current shell (bash-completion must be installed first)
source <(kubectl completion bash)
# enable autocompletion permanently for your bash shell
echo "source <(kubectl completion bash)" >> ~/.bashrc
```
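
Optionally, if you alias kubectl to k, the completion can be attached to the alias as well:

```bash
echo 'alias k=kubectl' >> ~/.bashrc
echo 'complete -o default -F __start_kubectl k' >> ~/.bashrc
source ~/.bashrc
```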
4. Install the flannel network plugin

```bash
# 1. Download
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# 2. Edit
# Change the images if necessary
# Note: newer versions of kube-flannel.yaml default to the docker.io registry
```

```yaml
      - name: install-cni-plugin
       #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
       #image: flannelcni/flannel:v0.20.0 for ppc64le and mips64le (dockerhub limitations may apply)
        image: docker.io/rancher/mirrored-flannelcni-flannel:v0.20.0
```

```bash
# 3. Make sure the network matches the podSubnet set at kubeadm init
# Check: kubectl get configmap kubeadm-config -n kube-system -o yaml |grep podSubnet
# output:
#   podSubnet: 10.244.0.0/16
# Check kube-flannel.yml
grep -A 3 "net-conf.json" kube-flannel.yml|grep "Network"
# output:
#   "Network": "10.244.0.0/16",
```

```yaml
# 4. Optionally switch the backend to host-gw (the default is vxlan, MTU 1450)
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "host-gw"
      }
    }
```

```bash
# 5. Apply
kubectl apply -f kube-flannel.yml
```
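
After applying, the flannel pods should come up and the node should flip to Ready; a quick check (recent manifests use the kube-flannel namespace, older ones kube-system):

```bash
kubectl get pods -A -o wide | grep flannel
kubectl get nodes    # should report Ready once the CNI is up
```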
5. Add nodes

```bash
# 1. Get the join command; run this on the master node
# Tokens expire (default TTL is 24h); run this on the master node
kubeadm token create --print-join-command

# 2. Join (run on the worker node)
kubeadm join 172.16.8.31:6443 --token whihg6.utknhvj4dg3ndsv1 --discovery-token-ca-cert-hash sha256:5d2939c6d23cde6507e621cf21d550a7e083efd4331a245c2250209bdb110b89

# 3. Check
# Verify the node joined successfully (run on the master node)
kubectl get nodes -o wide
```
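
To see which bootstrap tokens are still valid before reusing one, or to mint one with a custom lifetime:

```bash
kubeadm token list
# create a token with a shorter TTL if you prefer
kubeadm token create --ttl 2h --print-join-command
```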

III. Troubleshooting

1. Fixing "Error registering network: failed to acquire lease: node 'master' pod cidr not assigned"

Symptom:
When deploying the flannel network plugin, the flannel pod stays in CrashLoopBackOff, and its logs report that no CIDR has been assigned.

```bash
# 1. Edit
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
# add the flags:
--allocate-node-cidrs=true
--cluster-cidr=10.244.0.0/16

# 2. Restart
systemctl restart kubelet
```
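
Once the controller-manager comes back, every node should have a podCIDR assigned; one way to confirm:

```bash
# print each node name with its assigned pod CIDR
kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.podCIDR}{"\n"}{end}'
```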
2. "container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized"

```shell
# 1. Trigger:
# After a reinstall, a worker node was rejoined; its network looked fine and
# kube-proxy and flannel were both healthy, but describing the worker showed:
#   container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized

# 2. Fix: restart the worker's container runtime:
# check kubelet state first:
# systemctl status kubelet
# journalctl -f -u kubelet
systemctl restart containerd.service
```


IV. Miscellaneous

1. Taints

```bash
# 1. View
kubectl describe nodes k8s-master |grep Taints

# 2. Remove
kubectl taint node k8s-master gameble-
kubectl taint node k8s-master node-role.kubernetes.io/control-plane:NoSchedule-
# remove all taints in one shot
kubectl taint node tenxun-jing $(kubectl describe node tenxun-jing |grep Taints|awk '{print $2}')-

# 3. Add (a taint needs an effect, e.g. NoSchedule)
kubectl taint node k8s-master gameble=true:NoSchedule
```
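
To review taints across all nodes at once, custom-columns is handier than describe:

```bash
kubectl get nodes -o custom-columns=NAME:.metadata.name,TAINTS:.spec.taints
```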
2. Reset script

```bash
#!/bin/bash
# premise: touch k8s_reset_init.sh && chmod +x k8s_reset_init.sh
# implement: source k8s_reset_init.sh && [init1|init2]

function init1(){
    kubeadm reset -f && \
    kubeadm init \
    --apiserver-advertise-address=10.0.4.12 \
    --image-repository registry.k8s.io \
    --kubernetes-version v1.26.2 \
    --service-cidr=10.96.0.0/12 \
    --pod-network-cidr=10.244.0.0/16 \
    --cri-socket /var/run/containerd/containerd.sock \
    --ignore-preflight-errors=all
}

function init2(){
    kubeadm reset -f && \
    kubeadm init --config=./kubeadm.yml --upload-certs --v=6
}
```
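
Usage sketch: source the file so the functions land in the current shell, then call whichever form you want.

```bash
source k8s_reset_init.sh && init2   # reset, then re-init from kubeadm.yml
```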
3. Proxy script

  • Prerequisite: you have a proxy; if you don't, skip this

    ```bash
    #!/usr/bin/env bash
    # premise: touch start_containerd_env.sh && chmod +x start_containerd_env.sh
    # implement: source start_containerd_env.sh && [env_start|env_stop|env_status]

    containerd_file="/lib/systemd/system/containerd.service"
    proxy_port="7890"
    socks5_port="7891"
    proxy_ip="127.0.0.1"

    # proxy lines to inject under [Service]
    proxy_str_list=(
    'Environment="http_proxy=http:\/\/'${proxy_ip}':'${proxy_port}'"' \
    'Environment="https_proxy=http:\/\/'${proxy_ip}':'${proxy_port}'"' \
    'Environment="ALL_PROXY=socks5:\/\/'${proxy_ip}':'${socks5_port}'"' \
    'Environment="all_proxy=socks5:\/\/'${proxy_ip}':'${socks5_port}'"' \
    )
    list_len=$((${#proxy_str_list[@]} - 1))

    function env_create(){
        [[ ! -f ${containerd_file} ]] && echo "[error] ${containerd_file} not exist" && return
        for ((i=0;i <= ${list_len};i++));do
            grep -on "^${proxy_str_list[${i}]}" ${containerd_file} &>/dev/null
            [[ $? != "0" ]] && sed -ri "/${proxy_str_list[${i}]}/d" ${containerd_file} && sed -ri "/\[Service\]/a${proxy_str_list[${i}]}" ${containerd_file}
        done
        proxy_str_num=$(grep -o "http://${proxy_ip}:${proxy_port}\|socks5://${proxy_ip}:${socks5_port}" ${containerd_file}|wc -l)
        [[ "${proxy_str_num}" != "${#proxy_str_list[@]}" ]] && echo "[error] not create containerd proxy in ${containerd_file}" && return
    }

    function env_delete(){
        [[ ! -f ${containerd_file} ]] && echo "[error] ${containerd_file} not exist" && return
        for ((i=0;i <= ${list_len};i++));do
            grep -on "^${proxy_str_list[${i}]}" ${containerd_file} &>/dev/null && sed -ri "s/(^${proxy_str_list[${i}]})/#\1/g" ${containerd_file}
            grep -on "^${proxy_str_list[${i}]}" ${containerd_file} &>/dev/null && echo "[error] failed to comment out ${proxy_str_list[${i}]}" && return
        done
    }

    function env_start(){
        echo "==[env_start]== BEGIN"

        env_create
        systemctl daemon-reload && systemctl restart containerd
        [[ "$(systemctl is-active containerd)" != "active" ]] && echo "[error] containerd restart error" && return
        [[ $(systemctl show --property=Environment containerd|grep -o "${proxy_ip}"|wc -l) == "4" ]] && echo "[success] start containerd proxy" && systemctl show --property=Environment containerd |grep -o "http://${proxy_ip}:${proxy_port}\|socks5://${proxy_ip}:${socks5_port}" || echo "[error] not set containerd proxy env"

        echo "==[env_start]== END"
    }

    function env_stop(){
        echo "==[env_stop]== BEGIN"

        grep "^Environment=" ${containerd_file}|grep "${proxy_ip}" &>/dev/null
        if [[ $? == "0" ]];then
            env_delete
            systemctl daemon-reload && systemctl restart containerd
            [[ "$(systemctl is-active containerd)" != "active" ]] && echo "[error] containerd restart error" && return
        else
            echo "[warning] nothing to do, containerd proxy not set"
        fi
        systemctl show --property=Environment containerd | grep "Environment="
        [[ $(systemctl show --property=Environment containerd|grep -o "${proxy_ip}"|wc -l) != "4" ]] && echo "[success] stop containerd proxy"

        echo "==[env_stop]== END"
    }

    function env_status(){
        systemctl show --property=Environment containerd | grep -o "http://${proxy_ip}:${proxy_port}\|socks5://${proxy_ip}:${socks5_port}"
        [[ "$(systemctl show --property=Environment containerd|grep -o "${proxy_ip}"|wc -l)" != "4" ]] && echo "[error] not set containerd proxy env"
    }

    msg="==[error]==input error, please try: source xx.sh && [env_start|env_stop|env_status]"
    [[ ! "$1" ]] || echo ${msg}
    ```
4. Changing the default nodePort port range
  • Docs: https://kubernetes.io/zh-cn/docs/concepts/services-networking/service/
  • In nodePort mode the default range is 30000-32767
  • NodePort type
    If the type field is set to NodePort, the Kubernetes control plane allocates a port from the range given by the --service-node-port-range flag (default: 30000-32767). Every node proxies that port (the same port number on every node) into your Service. The Service reports the allocated port in its .spec.ports[*].nodePort field.
  • Edit /etc/kubernetes/manifests/kube-apiserver.yaml

    ```shell
    [root@node-1 manifests]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
    apiVersion: v1
    kind: Pod
    metadata:
      creationTimestamp: null
      labels:
        component: kube-apiserver
        tier: control-plane
      name: kube-apiserver
      namespace: kube-system
    spec:
      containers:
      - command:
        - kube-apiserver
        - --advertise-address=192.168.235.21
        - --allow-privileged=true
        - --authorization-mode=Node,RBAC
        - --client-ca-file=/etc/kubernetes/pki/ca.crt
        - --enable-admission-plugins=NodeRestriction
        - --enable-admission-plugins=PodPreset
        - --runtime-config=settings.k8s.io/v1alpha1=true
        - --service-node-port-range=1-65535 # the flag to add
    ...
    ```
    After the change, wait roughly 10 seconds: editing kube-apiserver.yaml restarts the apiserver while it reloads the config. You can run kubectl get pod in the meantime; once pod information comes back normally, the restart is done. The new port range may still not take effect, in which case continue with:
    ```shell
    [root@node-0 manifests]# systemctl daemon-reload
    [root@node-0 manifests]# systemctl restart kubelet
    ```
    Then recreate the Service; a Service with the desired nodePort can now be created successfully.
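
A quick way to confirm the wider range took effect is to request a port below 30000; a throwaway example (the service name test-np is arbitrary):

```bash
kubectl create service nodeport test-np --tcp=80:80 --node-port=8080
kubectl get svc test-np          # should show 80:8080/TCP
kubectl delete svc test-np       # clean up
```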
5. Supplement: initializing a public-IP deployment on cloud servers (testing showed that pods on different nodes cannot reach each other; **not recommended**)
  • Method 1: add a virtual NIC bound to the public IP

    ```shell
    # 1. Temporary
    ifconfig eth0:1 <public-ip>

    # 2. Permanent
    cat > /etc/sysconfig/network-scripts/ifcfg-eth0:1 <<EOF
    BOOTPROTO=static
    DEVICE=eth0:1
    IPADDR=<public-ip>
    PREFIX=32
    TYPE=Ethernet
    USERCTL=no
    ONBOOT=yes
    EOF

    # 3. Choose <public-ip> during kubeadm init

    # 4. To remove the NIC again:
    ifconfig eth0:1 down
    ```

Method 2:

```shell
# 1. Initialize with the public IP

# 2. Edit /etc/kubernetes/manifests/etcd.yaml
#    change
- --listen-client-urls=https://127.0.0.1:2379,https://101.34.112.190:2379
- --listen-peer-urls=https://101.34.112.190:2380
#    to
- --listen-client-urls=https://127.0.0.1:2379
- --listen-peer-urls=https://127.0.0.1:2380

# 3. Stop the running processes by hand
# stop kubelet first
$ systemctl stop kubelet
# find and kill all kube processes
$ netstat -anp |grep kube
# Note: do NOT run kubeadm reset. First systemctl stop kubelet, then find the
# pids with netstat -anp |grep kube and kill -9 them. Otherwise the broken etcd
# config gets regenerated; this step is critical!

# 4. Re-initialize, skipping the checks for the existing etcd files
# restart kubelet
$ systemctl start kubelet
# re-init, skipping config generation so the etcd edits are not overwritten
$ kubeadm init --config=kubeadm-config.yaml --skip-phases=preflight,certs,kubeconfig,kubelet-start,control-plane,etcd
```

6. Verify the cluster works:

```shell
cat > test.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: test
  template:
    metadata:
      labels:
        app: test
      annotations:
        md-update: '20200517104741'
    spec:
      containers:
      - name: test
        image: centos:7.9.2009
        command:
        - sh
        - -c
        - |
          echo \$(hostname) > hostname.txt
          python -m SimpleHTTPServer
        resources:
          limits:
            memory: 512Mi
            cpu: 1
          requests:
            memory: 64Mi
            cpu: 0.01
        volumeMounts:
        - name: tz-config
          mountPath: /etc/localtime
      volumes:
      - name: tz-config
        hostPath:
          path: /usr/share/zoneinfo/Etc/GMT-8

---

apiVersion: v1
kind: Service
metadata:
  name: test
  namespace: default
spec:
  selector:
    app: test
  ports:
  - name: external-test
    port: 8000
    targetPort: 8000
    nodePort: 30001
  type: NodePort
EOF

kubectl apply -f test.yaml
```
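
Then check the rollout and hit the NodePort from any node (replace <node-ip> with a real node address):

```bash
kubectl get pods -l app=test -o wide
curl http://<node-ip>:30001/hostname.txt   # each request is served by one of the replicas
```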
7. Image pull script (tested)

  • This script also covers the coredns plugin image; it is best used for k8s 1.11+ (coredns became the default DNS plugin in 1.11)

    ```shell
    #!/bin/bash
    # Author:jing
    # premise: touch k8s_img_pull.sh && chmod +x k8s_img_pull.sh
    # implement: bash k8s_img_pull.sh

    china_img_url="registry.cn-hangzhou.aliyuncs.com/google_containers"
    k8s_img_url="k8s.gcr.io"
    version="v1.26.2"
    images=($(kubeadm config images list --kubernetes-version=${version} | awk -F "/" '{if ($3 != "") {print $2"/"$3}else{print $2}}'))

    for imagename in ${images[@]}
    do
        echo ${imagename}|grep "/" &> /dev/null
        if [[ $? == 0 ]];then
            coredns_img=$(echo ${imagename}|grep "/"|awk -F'/' '{print $2}')
            ctr -n k8s.io images pull ${china_img_url}/${coredns_img}
            ctr -n k8s.io images tag ${china_img_url}/${coredns_img} ${k8s_img_url}/${imagename}
            ctr -n k8s.io images rm ${china_img_url}/${coredns_img}
        else
            ctr -n k8s.io images pull ${china_img_url}/${imagename}
            ctr -n k8s.io images tag ${china_img_url}/${imagename} ${k8s_img_url}/${imagename}
            ctr -n k8s.io images rm ${china_img_url}/${imagename}
        fi

        # export
        # [[ ! -d "/root/kube-images/" ]] && mkdir -p /root/kube-images/
        # ctr -n k8s.io images export /root/kube-images/${imagename}.tar.gz ${k8s_img_url}/${imagename}
        # ctr -n k8s.io images rm ${k8s_img_url}/${imagename}
    done
    ```
    Docker variant:
    ```shell
    #!/bin/bash
    # Author:jing
    # premise: touch k8s_img_pull.sh && chmod +x k8s_img_pull.sh
    # implement: bash k8s_img_pull.sh

    china_img_url="registry.cn-hangzhou.aliyuncs.com/google_containers"
    k8s_img_url="k8s.gcr.io"
    version="v1.18.20"
    images=($(kubeadm config images list --kubernetes-version=${version} | awk -F "/" '{if ($3 != "") {print $2"/"$3}else{print $2}}'))

    for imagename in ${images[@]}
    do
        echo ${imagename}|grep "/" &> /dev/null
        if [[ $? == 0 ]];then
            coredns_img=$(echo ${imagename}|grep "/"|awk -F'/' '{print $2}')
            docker pull ${china_img_url}/${coredns_img}
            docker tag ${china_img_url}/${coredns_img} ${k8s_img_url}/${imagename}
            docker rmi ${china_img_url}/${coredns_img}
        else
            docker pull ${china_img_url}/${imagename}
            docker tag ${china_img_url}/${imagename} ${k8s_img_url}/${imagename}
            docker rmi ${china_img_url}/${imagename}
        fi

        # export
        # [[ ! -d "/root/kube-images/" ]] && mkdir -p /root/kube-images/
        # docker save -o /root/kube-images/${imagename}.tar.gz ${k8s_img_url}/${imagename}
        # docker rmi ${k8s_img_url}/${imagename}
    done
    ```
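
After running either variant, confirm the retagged images are present:

```bash
# containerd variant
ctr -n k8s.io images ls | grep k8s.gcr.io
# docker variant
docker images | grep k8s.gcr.io
```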

8. Cleaning up the flannel network

```shell
sudo ifconfig cni0 down
sudo ip link delete cni0

sudo ifconfig flannel.1 down
sudo ip link delete flannel.1

# as kubeadm reset suggests, also delete /etc/cni/net.d
```
9. Enabling ipvs

```bash
# The ipvs kernel modules must be installed for this mode, otherwise kube-proxy falls back to iptables
# Enable ipvs
[root@k8s-master01 ~]# kubectl edit cm kube-proxy -n kube-system
# set mode: "ipvs"
[root@k8s-master01 ~]# kubectl delete pod -l k8s-app=kube-proxy -n kube-system
[root@node1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  10.97.97.97:80 rr
  -> 10.244.1.39:80               Masq    1      0          0
  -> 10.244.1.40:80               Masq    1      0          0
  -> 10.244.2.33:80               Masq    1      0          0
```
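
kube-proxy also reports its active mode over its metrics endpoint, which is a quick way to confirm the switch (assuming the default metrics port 10249):

```bash
curl -s 127.0.0.1:10249/proxyMode   # should print: ipvs
```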