1. Preparation
1.1. Download the Aliyun repo configuration file
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
1.2. Modify /etc/hosts (run on all three hosts)
cat >> /etc/hosts << EOF
192.168.10.20 k8s-20
192.168.10.21 k8s-21
192.168.10.22 k8s-22
EOF
# Set each host's name with hostnamectl, e.g. on the first node:
hostnamectl set-hostname k8s-20   # use k8s-21 / k8s-22 on the other two hosts
1.3. Configure passwordless SSH login
[root@k8s-20 ~]# ssh-keygen
[root@k8s-20 ~]# ssh-copy-id k8s-20
[root@k8s-20 ~]# ssh-copy-id k8s-21
[root@k8s-20 ~]# ssh-copy-id k8s-22
[root@k8s-20 ~]# ssh k8s-22
Last login: Tue May 27 18:27:50 2025 from 192.168.10.2
# Copy the generated keys to the other two nodes with scp so all nodes can reach each other without a password
scp /root/.ssh/* root@k8s-21:/root/.ssh
scp /root/.ssh/* root@k8s-22:/root/.ssh
1.4. Disable the swap partition
# Disable temporarily (swap comes back after a reboot)
[root@node1 ~]# swapoff -a
# Disable permanently by commenting out the swap line in /etc/fstab
[root@node1 ~]# cat /etc/fstab
# /dev/mapper/centos-swap swap swap defaults 0 0
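To comment out the swap line non-interactively instead, one option (assuming the fstab entry has swap as its mount-point field) is:
# Prefix every uncommented swap entry in /etc/fstab with '#', then confirm swap is off
sed -ri '/\sswap\s/s/^([^#])/#\1/' /etc/fstab
free -h   # the Swap line should show 0B total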
1.5. Disable SELinux
# Run the following command on all nodes:
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
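The sed edit above only takes effect after a reboot; to turn enforcement off immediately as well:
setenforce 0   # switches to permissive mode right away; the config change makes it permanent
getenforce     # verify: should print Permissive (or Disabled after a reboot)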
1.6. Disable the firewall
systemctl disable firewalld
systemctl stop firewalld
1.7. Load the br_netfilter module and set kernel parameters
modprobe br_netfilter
# Load br_netfilter at every boot; a modules-load.d entry is more reliable than
# appending "modprobe br_netfilter" to /etc/profile, which only runs for login shells
echo "br_netfilter" > /etc/modules-load.d/k8s.conf
tee /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
# Kubernetes also needs IP forwarding enabled for pod traffic
net.ipv4.ip_forward = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
# Reboot for the changes to take effect
reboot
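After the reboot, it is worth confirming that the module and parameters survived:
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward   # both should print 1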
1.8. Configure cluster clock synchronization
# Keep the hardware clock in UTC
timedatectl set-local-rtc 0
# Set the local time zone so local time is displayed
timedatectl set-timezone Asia/Shanghai
# Write the system clock to the RTC
hwclock --systohc
# Verify
timedatectl
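timedatectl alone only sets the time zone; it does not keep the three nodes' clocks synchronized. A minimal sketch using chrony (assuming the default NTP pool servers in /etc/chrony.conf are reachable; run on all nodes):
yum install -y chrony
systemctl enable chronyd --now
chronyc sources -v   # at least one source should be selected (marked ^*)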
1.9. Configure the Kubernetes yum repository
# The upstream yum repo downloads slowly because of network issues, so switch to the Aliyun mirror here; it is used to install the Kubernetes components
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
EOF
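To confirm the repository is usable before it is needed:
yum makecache fast
yum list kubeadm --showduplicates | tail -5   # should list package versions from the Aliyun mirror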
1.10. Deploy the NFS shared file system
yum install -y nfs-utils
echo "/nfs/data *(rw,no_subtree_check,no_root_squash)">> /etc/exports
mkdir -p /nfs/data
# Start the services (the NFS service unit on CentOS 7 is nfs-server)
systemctl enable nfs-server --now
systemctl enable rpcbind --now
# Apply the export configuration
exportfs -ra
## On the client nodes (the other two hosts), install nfs-utils and mount the share
yum install nfs-utils -y
mkdir -p /nfs/data
mount -t nfs 192.168.10.20:/nfs/data /nfs/data
# Mount the share at boot
echo "192.168.10.20:/nfs/data /nfs/data nfs defaults 0 0" >> /etc/fstab
2. One-click Kubernetes installation with KubeKey
2.1. Download KubeKey
# Set the download zone to China to speed up the download
export KKZONE=cn
# To pin a specific version:
#curl -sfL https://get-kk.kubesphere.io | VERSION=v3.0.7 sh -
# Or take the latest version:
curl -sfL https://get-kk.kubesphere.io | sh -
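The script unpacks a kk binary into the current directory; verify it before continuing:
chmod +x kk   # normally already executable; harmless to repeat
./kk version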
2.2. Generate the configuration file and edit it
./kk create config -f k8s-v12317.yaml --with-kubesphere
Edit the generated k8s-v12317.yaml so that it looks like the following:
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: sample
spec:
  hosts:
  - {name: k8s-20, address: 192.168.10.20, internalAddress: 192.168.10.20, user: root, password: "xxxx"}
  - {name: k8s-21, address: 192.168.10.21, internalAddress: 192.168.10.21, user: root, password: "xxxx"}
  - {name: k8s-22, address: 192.168.10.22, internalAddress: 192.168.10.22, user: root, password: "xxxx"}
  roleGroups:
    etcd:
    - k8s-20
    - k8s-21
    - k8s-22
    control-plane:
    - k8s-20
    - k8s-21
    - k8s-22
    worker:
    - k8s-20
    - k8s-21
    - k8s-22
  controlPlaneEndpoint:
    ## Internal loadbalancer for apiservers
    internalLoadbalancer: haproxy
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    version: v1.23.17
    clusterName: k8s-chanfi
    autoRenewCerts: true
    containerManager: docker
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
  addons: []
2.3. Run KubeKey
./kk create cluster -f k8s-v12317.yaml
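KubeKey prints its progress and finishes with a success message. Afterwards, confirm the cluster from the first node (KubeKey installs kubectl there):
kubectl get nodes -o wide   # all three nodes should report Ready
kubectl get pods -A         # system pods should settle into Running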
2.4. Create the default StorageClass
Create the nfs-storage.yaml file:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system  # set the namespace for your environment; same for the occurrences below
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storageclass
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: nfs-storage-provisioner  # must match the PROVISIONER_NAME env var in the provisioner Deployment below
parameters:
  # archiveOnDelete: "false"
  archiveOnDelete: "true"
reclaimPolicy: Retain
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system  # must match the namespace used in the RBAC objects above
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          #image: quay.io/external_storage/nfs-client-provisioner:latest
          # Note: on Kubernetes 1.20+ the quay.io image above no longer works (it relies on the
          # removed metadata.selfLink field). This took a long time to debug; a suggestion in the
          # project's GitHub issues was to use the image below instead, downloaded and pushed to
          # a private registry.
          image: k8s.m.daocloud.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          # image: easzlab/nfs-subdir-external-provisioner:v4.0.1
          # image: registry-op.test.cn/nfs-subdir-external-provisioner:v4.0.1
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-storage-provisioner  # must match the provisioner field in the StorageClass above
            - name: NFS_SERVER
              value: 192.168.10.20  # NFS server IP address
            - name: NFS_PATH
              value: "/nfs/data"  # exported NFS path
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.10.20  # NFS server IP address
            path: "/nfs/data"  # exported NFS path
2.5. Test the StorageClass
Create the test.yaml file:
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    # must match metadata.name of the StorageClass in nfs-storage.yaml
    volume.beta.kubernetes.io/storage-class: "nfs-storageclass"
spec:
  storageClassName: "nfs-storageclass"
  accessModes:
    - ReadWriteMany
    #- ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
Delete the PVC and PV after the test is complete!!
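A possible test sequence (the PV name in the last step is generated by the provisioner, so substitute the one shown by kubectl get pv):
kubectl apply -f test.yaml
kubectl get pvc test-claim   # STATUS should become Bound
kubectl delete -f test.yaml
# reclaimPolicy is Retain, so the dynamically created PV must be removed by hand:
kubectl get pv
kubectl delete pv <pv-name>   # <pv-name>: the PV that was bound to test-claim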
3. Install KubeSphere on the existing Kubernetes cluster
helm upgrade --install -n kubesphere-system --create-namespace ks-core \
  https://charts.kubesphere.com.cn/main/ks-core-1.1.4.tgz \
  --debug --wait \
  --set global.imageRegistry=swr.cn-southwest-2.myhuaweicloud.com/ks \
  --set extension.imageRegistry=swr.cn-southwest-2.myhuaweicloud.com/ks
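Once the release is deployed, the chart's NOTES output prints the console address and the initial admin account. As a quick check (assuming the default NodePort 30880 was not overridden):
kubectl -n kubesphere-system get pods   # ks-apiserver, ks-console and ks-controller-manager should be Running
# The console is then reachable at http://<any-node-ip>:30880 with the account shown in the NOTES output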