# Disable swap (required by kubelet) and comment it out in /etc/fstab so it stays off after reboot
sudo swapoff -a
sudo sed -i '/swap/s/^/#/' /etc/fstab
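A quick way to confirm swap really is off (standard commands, not part of the original session):
swapon --show          # no output means swap is disabled
free -h | grep -i swap # the Swap line should show 0B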
sudo cp /etc/docker/daemon.json /etc/docker/daemon.json.bf
shitou@shitou:~$ cat /etc/docker/daemon.json
{
  "registry-mirrors": [
    "https://docker.211678.top",
    "https://docker.lpanel.live",
    "https://hub.rat.dev",
    "https://docker.m.daocloud.io",
    "https://do.nark.eu.org",
    "https://dockerpull.com",
    "https://dockerproxy.cn",
    "https://docker.aws19527.cn",
    "https://docker.erduoya.top"
  ],
  "insecure-registries": [
    "localhost:5000"
  ],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
# The "exec-opts" entry sets Docker's cgroup driver to systemd (recommended for Kubernetes)
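If daemon.json was just edited, Docker has to be restarted for these settings to take effect; the driver can then be verified (standard commands, not shown in the original session):
sudo systemctl daemon-reload
sudo systemctl restart docker
docker info | grep -i "cgroup driver"   # should report: systemd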
Add the Aliyun APT source so that kubeadm, kubelet, kubectl and the other Kubernetes components can be installed quickly from a domestic mirror:
shitou@shitou:~$ curl -fsSL https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
shitou@shitou:~$ echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://mirrors.aliyun.com/kubernetes/apt kubernetes-xenial main
Alternatively, switch to the Huawei Cloud mirror:
shitou@shitou:~$ sudo rm /etc/apt/sources.list.d/kubernetes.list
shitou@shitou:~$ curl -fsSL https://mirrors.huaweicloud.com/kubernetes/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://mirrors.huaweicloud.com/kubernetes/apt kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
# Check whether version 1.28.2 is available from the configured source
apt-cache madison kubelet | grep 1.28.2
Install the specified version:
sudo apt-get install -y \
kubelet=1.28.2-00 \
kubeadm=1.28.2-00 \
kubectl=1.28.2-00
# Pin the versions to prevent accidental upgrades later
sudo apt-mark hold kubelet kubeadm kubectl
shitou@shitou:~$ sudo apt-mark hold kubelet kubeadm kubectl
kubelet set on hold.
kubeadm set on hold.
kubectl set on hold.
shitou@shitou:~$ kubelet --version
kubectl version --client
kubeadm version
Kubernetes v1.28.2
Client Version: v1.28.2
Kustomize Version: v5.0.4-0.20230601165947-6ce0bf390ce3
kubeadm version: &version.Info{Major:"1", Minor:"28", GitVersion:"v1.28.2", GitCommit:"89a4ea3e1e4ddd7f7572286090359983e0387b2f", GitTreeState:"clean", BuildDate:"2023-09-13T09:34:32Z", GoVersion:"go1.20.8", Compiler:"gc", Platform:"linux/amd64"}
Next, install the build dependencies in order to compile cri-dockerd:
================================================================
The source build failed; the prebuilt .deb package was used instead (see further below). The attempt is kept here for reference:
shitou@shitou:~$ sudo apt install -y git gcc make
Reading package lists... Done
Building dependency tree... Done
Reading state information... Done
git is already the newest version (1:2.43.0-1ubuntu7.3).
git set to manually installed.
gcc is already the newest version (4:13.2.0-7ubuntu1).
gcc set to manually installed.
The following packages were automatically installed and are no longer required:
libdrm-nouveau2 libdrm-radeon1 libgl1-amber-dri libglapi-mesa libllvm17t64
libltdl7 libxcb-dri2-0
Use 'sudo apt autoremove' to remove them.
Suggested packages:
make-doc
The following NEW packages will be installed:
make
0 upgraded, 1 newly installed, 0 to remove and 25 not upgraded.
Need to get 180 kB of archives.
After this operation, 414 kB of additional disk space will be used.
Get:1 http://mirrors.aliyun.com/ubuntu noble/main amd64 make amd64 4.3-4.1build2 [180 kB]
Fetched 180 kB in 0s (625 kB/s)
Selecting previously unselected package make.
(Reading database ... 161464 files and directories currently installed.)
Preparing to unpack .../make_4.3-4.1build2_amd64.deb ...
Unpacking make (4.3-4.1build2) ...
Setting up make (4.3-4.1build2) ...
Processing triggers for man-db (2.12.0-4build2) ...
Scanning processes...
Scanning processor microcode...
Scanning linux images...
Running kernel seems to be up-to-date.
The processor microcode seems to be up-to-date.
No services need to be restarted.
No containers need to be restarted.
No user sessions are running outdated binaries.
No VM guests are running outdated hypervisor (qemu) binaries on this host.
shitou@shitou:~$
Clone the source code:
shitou@shitou:~$ git clone https://github.com/Mirantis/cri-dockerd.git
Cloning into 'cri-dockerd'...
remote: Enumerating objects: 26193, done.
remote: Counting objects: 100% (1765/1765), done.
remote: Compressing objects: 100% (492/492), done.
remote: Total 26193 (delta 1411), reused 1273 (delta 1273), pack-reused 24428 (from 1)
Receiving objects: 100% (26193/26193), 50.32 MiB | 8.18 MiB/s, done.
Resolving deltas: 100% (13771/13771), done.
Updating files: 100% (7154/71
shitou@shitou:~$ cd cri-dockerd
shitou@shitou:~/cri-dockerd$ mkdir -p bin
shitou@shitou:~/cri-dockerd$ VERSION=$((git describe --abbrev=0 --tags | sed -e 's/v//') || echo 0.3.0)
================================================================
shitou@shitou:~$ sudo systemctl status cri-dockerd
Unit cri-dockerd.service could not be found.
shitou@shitou:~$ ls
adminhome go kickstart.sh ub
amprobe halo mysql_8.1.0.tar ubuntu
bk halohub.2.21.6.tar Netdata ubuntu.sources.list
cri-dockerd_0.3.19.3-0.ubuntu-bionic_amd64.deb hub registry usr.sbin.libvirtd
data installer registry.sh
docker jx shitou
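The cri-dockerd .deb visible in the listing above was presumably downloaded beforehand from the project's GitHub releases page; a hedged example of fetching it (the release tag and URL pattern below are assumptions, the asset name matches the file shown above, so verify against https://github.com/Mirantis/cri-dockerd/releases):
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.19/cri-dockerd_0.3.19.3-0.ubuntu-bionic_amd64.deb   # check the releases page for the exact URL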
shitou@shitou:~$ sudo dpkg -i cri-dockerd_0.3.19.3-0.ubuntu-bionic_amd64.deb
[sudo] password for shitou:
Selecting previously unselected package cri-dockerd.
(Reading database ... 161482 files and directories currently installed.)
Preparing to unpack cri-dockerd_0.3.19.3-0.ubuntu-bionic_amd64.deb ...
Unpacking cri-dockerd (0.3.19~3-0~ubuntu-bionic) ...
Setting up cri-dockerd (0.3.19~3-0~ubuntu-bionic) ...
Created symlink /etc/systemd/system/multi-user.target.wants/cri-docker.service → /usr/lib/systemd/system/cri-docker.service.
Created symlink /etc/systemd/system/sockets.target.wants/cri-docker.socket → /usr/lib/systemd/system/cri-docker.socket.
shitou@shitou:~$ sudo systemctl status cri-docker
● cri-docker.service - CRI Interface for Docker Application Container Engine
Loaded: loaded (/usr/lib/systemd/system/cri-docker.service; enabled; preset: enabled)
Active: active (running) since Mon 2025-08-18 05:41:11 UTC; 1min 10s ago
TriggeredBy: ● cri-docker.socket
Docs: https://docs.mirantis.com
Main PID: 268056 (cri-dockerd)
Tasks: 8
Memory: 9.0M (peak: 9.2M)
CPU: 130ms
CGroup: /system.slice/cri-docker.service
└─268056 /usr/bin/cri-dockerd --container-runtime-endpoint fd://
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Connecting to do>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Start docker cli>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Hairpin mode is >
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Loaded network p>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Docker cri netwo>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Setting cgroupDr>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Docker cri recei>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Starting the GRP>
Aug 18 05:41:11 shitou cri-dockerd[268056]: time="2025-08-18T05:41:11Z" level=info msg="Start cri-docker>
Aug 18 05:41:11 shitou systemd[1]: Sta
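At this point the CRI socket that kubeadm will talk to should already exist; a quick check (not part of the original session):
ls -l /run/cri-dockerd.sock   # created by cri-docker.socket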
shitou@shitou:~$ sudo modprobe br_netfilter
shitou@shitou:~$ lsmod | grep br_netfilter
br_netfilter 32768 0
bridge 421888 1 br_netfilter
shitou@shitou:~$ sudo sysctl -w net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-iptables = 1
shitou@shitou:~$ sudo sysctl -w net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-ip6tables = 1
shitou@shitou:~$ sudo sysctl -w net.ipv4.ip_forward=1
net.ipv4.ip_forward = 1
shitou@shitou:~$ sudo nano /etc/sysctl.d/k8s.conf
shitou@shitou:~$ sudo cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
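The same file can also be written non-interactively, and the br_netfilter module persisted across reboots; an equivalent sketch of what was done with nano above, plus a modules-load entry that is not in the original notes:
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
echo br_netfilter | sudo tee /etc/modules-load.d/k8s.conf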
------------------------------------------------------------------
shitou@shitou:~$ sudo sysctl --system
* Applying /usr/lib/sysctl.d/10-apparmor.conf ...
* Applying /etc/sysctl.d/10-bufferbloat.conf ...
* Applying /etc/sysctl.d/10-console-messages.conf ...
* Applying /etc/sysctl.d/10-ipv6-privacy.conf ...
* Applying /etc/sysctl.d/10-kernel-hardening.conf ...
* Applying /etc/sysctl.d/10-magic-sysrq.conf ...
* Applying /etc/sysctl.d/10-map-count.conf ...
* Applying /etc/sysctl.d/10-network-security.conf ...
* Applying /etc/sysctl.d/10-ptrace.conf ...
* Applying /etc/sysctl.d/10-zeropage.conf ...
* Applying /usr/lib/sysctl.d/50-pid-max.conf ...
* Applying /usr/lib/sysctl.d/99-protect-links.conf ...
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
kernel.apparmor_restrict_unprivileged_userns = 1
net.core.default_qdisc = fq_codel
kernel.printk = 4 4 1 7
net.ipv6.conf.all.use_tempaddr = 2
net.ipv6.conf.default.use_tempaddr = 2
kernel.kptr_restrict = 1
kernel.sysrq = 176
vm.max_map_count = 1048576
net.ipv4.conf.default.rp_filter = 2
net.ipv4.conf.all.rp_filter = 2
kernel.yama.ptrace_scope = 1
vm.mmap_min_addr = 65536
kernel.pid_max = 4194304
fs.protected_fifos = 1
fs.protected_hardlinks = 1
fs.protected_regular = 2
fs.protected_symlinks = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
Initialize the control plane:
shitou@shitou:~$ sudo kubeadm init --cri-socket unix:///run/cri-dockerd.sock --pod-network-cidr=10.244.0.0/16
I0818 06:06:10.977381 298519 version.go:256] remote version is much newer: v1.33.4; falling back to: stable-1.28
[init] Using Kubernetes version: v1.28.15
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W0818 06:15:22.283919 298519 checks.go:835] detected that the sandbox image "registry.k8s.io/pause:3.10" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.
^C
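The recurring warning about the sandbox ("pause") image can be silenced by pointing cri-dockerd at the same pause image kubeadm uses; a hedged sketch using cri-dockerd's --pod-infra-container-image flag (the unit path and ExecStart line come from the status output above, the edit itself was not done in the original session):
# In /usr/lib/systemd/system/cri-docker.service, extend the ExecStart line, e.g.:
#   ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --pod-infra-container-image registry.aliyuncs.com/google_containers/pause:3.9
sudo systemctl daemon-reload
sudo systemctl restart cri-docker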
shitou@shitou:~$ kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --cri-socket unix:///run/cri-dockerd.sock
I0818 06:17:22.092944 312305 version.go:256] remote version is much newer: v1.33.4; falling back to: stable-1.28
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.28.15
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.28.15
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.28.15
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.28.15
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.9
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.5.9-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:v1.10.1
shitou@shitou:~$ sudo kubeadm init \
--image-repository registry.aliyuncs.com/google_containers \
--cri-socket unix:///run/cri-dockerd.sock \
--kubernetes-version stable-1.28
[init] Using Kubernetes version: v1.28.15
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W0818 06:20:05.975520 315758 checks.go:835] detected that the sandbox image "registry.k8s.io/pause:3.10" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.aliyuncs.com/google_containers/pause:3.9" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local shitou] and IPs [10.96.0.1 192.168.31.19]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost shitou] and IPs [192.168.31.19 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost shitou] and IPs [192.168.31.19 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
sudo kubeadm init \
--cri-socket unix:///var/run/cri-dockerd.sock \
--pod-network-cidr=10.244.0.0/16
I0818 07:08:50.133536 373383 version.go:256] remote version is much newer: v1.33.4; falling back to: stable-1.28
Change the coredns image path from coredns/coredns to just coredns (the Aliyun registry lays its paths out differently), then re-run the following commands:
# Define the image repository and versions (unchanged)
IMAGE_REPO="registry.aliyuncs.com/google_containers"
K8S_VERSION="v1.28.15"
PAUSE_VERSION="3.9"
COREDNS_VERSION="v1.10.1"
# Re-pull all of the images (the key fix is the coredns path)
docker pull $IMAGE_REPO/kube-apiserver:$K8S_VERSION
docker pull $IMAGE_REPO/kube-controller-manager:$K8S_VERSION
docker pull $IMAGE_REPO/kube-scheduler:$K8S_VERSION
docker pull $IMAGE_REPO/kube-proxy:$K8S_VERSION
docker pull $IMAGE_REPO/pause:$PAUSE_VERSION
docker pull $IMAGE_REPO/etcd:3.5.9-0
# Corrected coredns pull path (drop the intermediate coredns/ level)
docker pull $IMAGE_REPO/coredns:$COREDNS_VERSION
# Re-tag the images so the paths match what kubeadm expects
docker tag $IMAGE_REPO/kube-apiserver:$K8S_VERSION registry.k8s.io/kube-apiserver:$K8S_VERSION
docker tag $IMAGE_REPO/kube-controller-manager:$K8S_VERSION registry.k8s.io/kube-controller-manager:$K8S_VERSION
docker tag $IMAGE_REPO/kube-scheduler:$K8S_VERSION registry.k8s.io/kube-scheduler:$K8S_VERSION
docker tag $IMAGE_REPO/kube-proxy:$K8S_VERSION registry.k8s.io/kube-proxy:$K8S_VERSION
docker tag $IMAGE_REPO/pause:$PAUSE_VERSION registry.k8s.io/pause:$PAUSE_VERSION
docker tag $IMAGE_REPO/etcd:3.5.9-0 registry.k8s.io/etcd:3.5.9-0
# Fix the coredns tag (kubeadm expects the path registry.k8s.io/coredns/coredns)
docker tag $IMAGE_REPO/coredns:$COREDNS_VERSION registry.k8s.io/coredns/coredns:$COREDNS_VERSION
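A quick check (not in the original notes) that the re-tagged images are now present locally under the names kubeadm looks for:
docker images | grep '^registry.k8s.io'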
Why this matters: different registries lay out the coredns path differently.
- In the official registry registry.k8s.io, the path is coredns/coredns:<version>.
- In the Aliyun registry registry.aliyuncs.com/google_containers, the path is coredns:<version> (there is no nested coredns directory).
Once the path is corrected, the image can be pulled and tagged normally for kubeadm to use.
After the images have been pulled successfully, run the init command again:
sudo kubeadm init \
--cri-socket unix:///var/run/cri-dockerd.sock \
--pod-network-cidr=10.244.0.0/16
This time it failed with the following preflight errors:
[init] Using Kubernetes version: v1.28.15
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml]: /etc/kubernetes/manifests/kube-apiserver.yaml already exists
[ERROR FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml]: /etc/kubernetes/manifests/kube-controller-manager.yaml already exists
[ERROR FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml]: /etc/kubernetes/manifests/kube-scheduler.yaml already exists
[ERROR FileAvailable--etc-kubernetes-manifests-etcd.yaml]: /etc/kubernetes/manifests/etcd.yaml already exists
[ERROR Port-10250]: Port 10250 is in use
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
shitou@shitou:~$ sudo kubeadm reset -f
Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock
To see the stack trace of this error execute with --v=5 or higher
shitou@shitou:~$ sudo kubeadm reset -f
Found multiple CRI endpoints on the host. Please define which one do you wish to use by setting the 'criSocket' field in the kubeadm configuration file: unix:///var/run/containerd/containerd.sock, unix:///var/run/cri-dockerd.sock
To see the stack trace of this error execute with --v=5 or higher
shitou@shitou:~$ sudo kubeadm reset -f --cri-socket unix:///var/run/cri-dockerd.sock
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
W0818 07:29:37.508500 400805 reset.go:120] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: configmaps "kubeadm-config" not found
[preflight] Running pre-flight checks
W0818 07:29:37.509128 400805 removeetcdmember.go:106] [reset] No kubeadm config, using etcd pod spec to get data directory
[reset] Deleted contents of the etcd data directory: /var/lib/etcd
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of directories: [/etc/kubernetes/manifests /var/lib/kubelet /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
The reset process does not clean CNI configuration. To do so, you must remove /etc/cni/net.d
The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually by using the "iptables" command.
If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.
The reset process does not clean your kubeconfig files and you must remove them manually.
Then clean up most of the leftover configuration:
# 1. Force-reset the kubeadm environment (cleans up most of the configuration); with two CRI endpoints on this host the socket must be given explicitly
sudo kubeadm reset -f --cri-socket unix:///var/run/cri-dockerd.sock
# 2. Remove the leftover key configuration files (the manifests directory mentioned in the errors)
sudo rm -rf /etc/kubernetes/manifests/*
sudo rm -rf /etc/kubernetes/pki/*   # clean up the certificate files (important!)
# 3. Clean up the CNI network configuration (so leftovers from network plugins cannot interfere)
sudo rm -rf /etc/cni/net.d
# 4. Stop and restart kubelet (resolves the port-in-use problem)
sudo systemctl stop kubelet
sudo systemctl daemon-reload
sudo systemctl start kubelet
# 5. Free up port 10250 (kubelet's default port) if it is still occupied
# First find the PID of the process listening on the port
sudo lsof -i :10250 | grep LISTEN | awk '{print $2}'
# Suppose the PID printed is 1234; kill that process
sudo kill -9 1234   # replace with the actual PID; if nothing was printed, skip this step
shitou@shitou:~$ sudo rm -rf /etc/kubernetes/manifests/*
shitou@shitou:~$ sudo rm -rf /etc/kubernetes/pki/*
shitou@shitou:~$ sudo rm -rf /etc/cni/net.d
shitou@shitou:~$ sudo systemctl stop kubelet
shitou@shitou:~$ sudo systemctl daemon-reload
shitou@shitou:~$ sudo systemctl start kubelet
shitou@shitou:~$ sudo lsof -i :10250 | grep LISTEN | awk '{print $2}'
shitou@shitou:~$ sudo kubeadm init \
--cri-socket unix:///var/run/cri-dockerd.sock \
--pod-network-cidr=10.244.0.0/16
I0818 07:32:10.573873 404092 version.go:256] remote version is much newer: v1.33.4; falling back to: stable-1.28
[init] Using Kubernetes version: v1.28.15
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
W0818 07:32:12.865766 404092 checks.go:835] detected that the sandbox image "registry.k8s.io/pause:3.10" of the container runtime is inconsistent with that used by kubeadm. It is recommended that using "registry.k8s.io/pause:3.9" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local shitou] and IPs [10.96.0.1 192.168.31.19]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost shitou] and IPs [192.168.31.19 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost shitou] and IPs [192.168.31.19 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 26.006070 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node shitou as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node shitou as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: h3lm8d.djwtfasfi53kd5jc
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.31.19:6443 --token h3lm8d.djwtfasfi53kd5jc \
--discovery-token-ca-cert-hash sha256:5ee36933b68e3b23bbe55771e3bd75514255e54e816786844a25a051f9ebda70
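The bootstrap token above expires after 24 hours by default; a fresh join command can be generated later on the control plane, and with cri-dockerd the joining worker also needs the CRI socket flag (standard kubeadm commands, not from the original session):
# on the control-plane node
kubeadm token create --print-join-command
# on the worker, append the CRI socket to the printed join command:
#   ... --cri-socket unix:///var/run/cri-dockerd.sock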
shitou@shitou:~$ mkdir -p $HOME/.kube
shitou@shitou:~$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
shitou@shitou:~$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
shitou@shitou:~$ kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/v0.22.2/Documentation/kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
1. mkdir -p $HOME/.kube
Purpose: create the directory that holds the Kubernetes client configuration.
mkdir: the basic command for creating directories.
-p: create parent directories as needed and do not fail if the directory already exists, so there is no need to check for it beforehand.
$HOME/.kube: the target path; $HOME is the current user's home directory (e.g. /home/ubuntu), and .kube is the default (hidden, dot-prefixed) directory kubectl looks in for its configuration.
2. sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
Purpose: copy the cluster's administrator kubeconfig into the current user's configuration directory.
sudo: run with root privileges, because /etc/kubernetes/admin.conf is readable only by root by default.
cp: the basic copy command.
-i: interactive mode; if the target file ($HOME/.kube/config) already exists, ask before overwriting it (protects an existing configuration).
/etc/kubernetes/admin.conf: the source file, generated automatically by cluster initialization (kubeadm init); it contains the API server address and credentials and grants full administrative access to the cluster.
$HOME/.kube/config: the destination; kubectl reads this path by default to locate the cluster and authenticate.
3. sudo chown $(id -u):$(id -g) $HOME/.kube/config
Purpose: change the owner of the copied file so the current user can read and write it.
chown: changes the owner of a file or directory (syntax: chown user:group target).
$(id -u): substitutes the current user's UID, so the username (e.g. ubuntu) does not have to be typed by hand.
$(id -g): substitutes the current user's GID, so the group ownership matches as well.
Why is this step needed? The file copied with sudo in step 2 is owned by root, so a regular user cannot read or write it; after the ownership change the current user can use kubectl normally (otherwise it fails with a permission error).
Verify the status of the network-plugin deployment:
shitou@shitou:~$
shitou@shitou:~$ kubectl get pods -n kube-flannel
NAME READY STATUS RESTARTS AGE
kube-flannel-ds-fkvsf 1/1 Running 0 2m26s
shitou@shitou:~$ kubectl describe node shitou | grep Taint
Taints: <none>
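kubeadm normally leaves a node-role.kubernetes.io/control-plane:NoSchedule taint on the control-plane node (it was applied during init above); showing <none> here means it was removed at some point, although that step is not in these notes. On a single-node cluster it can be removed with the standard command:
kubectl taint nodes shitou node-role.kubernetes.io/control-plane:NoSchedule-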
shitou@shitou:~$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
shitou Ready control-plane 15m v1.28.2
shitou@shitou:~$ kubectl run nginx-test --image=nginx:alpine
pod/nginx-test created
shitou@shitou:~$ kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-test 0/1 ContainerCreating 0 8s
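To confirm the test pod actually reaches Running, and to clean it up afterwards, standard kubectl commands not included in the original session:
kubectl wait --for=condition=Ready pod/nginx-test --timeout=120s
kubectl get pod nginx-test -o wide
kubectl delete pod nginx-test   # optional cleanup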
shitou@shitou:~$ kubectl get nodes
NAME STATUS ROLES AGE VERSION