常用命令
kubectl get ns
查询当前集群内所有namespace
[root@k8s-master-19 k8s]# kubectl get ns
NAME STATUS AGE
default Active 13d
kube-node-lease Active 13d
kube-public Active 13d
kube-system Active 13d
kubernetes-dashboard Active 13d
ops Active 11d
kubectl get pods -n xxx
: 获取指定命名空间的所有pods
[root@k8s-master-19 k8s]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
tomcat6-56fcc999cb-g8zql 1/1 Running 0 13m
tomcat6-56fcc999cb-qp762 1/1 Running 0 13m
tomcat6-56fcc999cb-tng6q 1/1 Running 0 13m
[root@k8s-master-19 k8s]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-f9fd979d6-cfp6r 1/1 Running 1 12d
coredns-f9fd979d6-l7975 1/1 Running 1 4d19h
etcd-k8s-master-19 1/1 Running 1 13d
kube-apiserver-k8s-master-19 1/1 Running 1 13d
kube-controller-manager-k8s-master-19 1/1 Running 4 13d
kube-flannel-ds-99bl2 1/1 Running 1 13d
kube-flannel-ds-j74d9 1/1 Running 1 13d
kube-flannel-ds-kx5vc 1/1 Running 1 13d
kube-flannel-ds-wjjm6 1/1 Running 1 13d
kube-flannel-ds-xpd4g 1/1 Running 2 13d
kube-proxy-jhrsq 1/1 Running 1 13d
kube-proxy-phkgl 1/1 Running 1 13d
kube-proxy-vb8ss 1/1 Running 1 13d
kube-proxy-vvsfc 1/1 Running 1 13d
kube-proxy-zcnp2 1/1 Running 3 13d
kube-scheduler-k8s-master-19 1/1 Running 4 13d
kubectl get pods --all-namespaces
: 获取所有命名空间的所有pods
[root@k8s-master-19 k8s]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
default tomcat6-56fcc999cb-g8zql 1/1 Running 0 14m
default tomcat6-56fcc999cb-qp762 1/1 Running 0 14m
default tomcat6-56fcc999cb-tng6q 1/1 Running 0 14m
kube-system coredns-f9fd979d6-cfp6r 1/1 Running 1 12d
kube-system coredns-f9fd979d6-l7975 1/1 Running 1 4d19h
kube-system etcd-k8s-master-19 1/1 Running 1 13d
kube-system kube-apiserver-k8s-master-19 1/1 Running 1 13d
kube-system kube-controller-manager-k8s-master-19 1/1 Running 4 13d
kube-system kube-flannel-ds-99bl2 1/1 Running 1 13d
kube-system kube-flannel-ds-j74d9 1/1 Running 1 13d
kube-system kube-flannel-ds-kx5vc 1/1 Running 1 13d
kube-system kube-flannel-ds-wjjm6 1/1 Running 1 13d
kube-system kube-flannel-ds-xpd4g 1/1 Running 2 13d
kube-system kube-proxy-jhrsq 1/1 Running 1 13d
kube-system kube-proxy-phkgl 1/1 Running 1 13d
kube-system kube-proxy-vb8ss 1/1 Running 1 13d
kube-system kube-proxy-vvsfc 1/1 Running 1 13d
kube-system kube-proxy-zcnp2 1/1 Running 3 13d
kube-system kube-scheduler-k8s-master-19 1/1 Running 4 13d
kubernetes-dashboard dashboard-metrics-scraper-7b59f7d4df-zq59r 1/1 Running 2 4d19h
kubernetes-dashboard kubernetes-dashboard-7f5754dff6-ctvds 1/1 Running 1 12d
ops nacos-deployment-598cd54f4d-dvn9f 0/2 ContainerCreating 0 4d19h
使用-o [option]
可以输出指定格式的信息,支持的option如下
Output format | Description |
---|---|
-o custom-columns=xxx,xxx | 使用逗号分隔的自定义列列表打印表 |
-o custom-columns-file= | 使用指定文件中的自定义列模板打印表。 |
-o json | 输出 JSON 格式的 API 对象 |
-o jsonpath= | 打印 jsonpath 表达式定义的字段 |
-o jsonpath-file= | 打印指定文件中 jsonpath 表达式定义的字段。 |
-o name | 仅打印资源名称而不打印任何其他内容 |
-o wide | 以纯文本格式输出,包含任何附加信息。对于 pod 包含节点名。 |
-o yaml | 输出 YAML 格式的 API 对象。 |
服务部署
tomcat6配置
# Deployment: run 3 replicas of Tomcat 6.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: tomcat6
  name: tomcat6
spec:
  replicas: 3
  selector:
    matchLabels:
      app: tomcat6
  template:
    metadata:
      labels:
        app: tomcat6
    spec:
      containers:
      - image: tomcat:6.0.53-jre8
        name: tomcat
---
# Service: expose the tomcat6 pods on port 30010 through the listed external IPs.
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: tomcat6
  name: tomcat6
spec:
  ports:
  - port: 30010        # service port reachable on the externalIPs below
    protocol: TCP
    targetPort: 8080   # container port inside the tomcat pods
  selector:
    app: tomcat6
  type: NodePort
  externalIPs:         # node addresses that accept traffic on port 30010
  - 192.168.0.5
  - 192.168.0.12
  - 192.168.0.30
nfs服务的搭建
选择nfs节点: 如选择存储容量较大的192.168.0.12节点。
-
nfs节点(192.168.0.12)执行
yum -y install nfs-utils rpcbind
-
nfs节点创建共享目录
mkdir -p /opt/software/nfs
-
修改共享配置
vim /etc/exports
/opt/software/nfs *(rw,sync,no_root_squash)
-
启动服务并设置开机启动(注意启动顺序)
[root@k8s-node-12 ~]# systemctl start rpcbind.service
[root@k8s-node-12 ~]# systemctl status rpcbind.service
[root@k8s-node-12 ~]# systemctl enable rpcbind.service
[root@k8s-node-12 ~]# systemctl start nfs.service
[root@k8s-node-12 ~]# systemctl enable nfs.service
[root@k8s-node-12 ~]# systemctl status nfs.service
5.查看服务启动结果
[root@k8s-node-12 ~]# systemctl status nfs rpcbind
● nfs-server.service - NFS server and services
Loaded: loaded (/usr/lib/systemd/system/nfs-server.service; enabled; vendor preset: disabled)
Drop-In: /run/systemd/generator/nfs-server.service.d
└─order-with-mounts.conf
Active: active (exited) since Sun 2020-10-04 21:14:54 CST; 1 weeks 1 days ago
Main PID: 12459 (code=exited, status=0/SUCCESS)
Tasks: 0
Memory: 0B
CGroup: /system.slice/nfs-server.service
Oct 04 21:14:54 k8s-node-12 systemd[1]: Starting NFS server and services...
Oct 04 21:14:54 k8s-node-12 systemd[1]: Started NFS server and services.
● rpcbind.service - RPC bind service
Loaded: loaded (/usr/lib/systemd/system/rpcbind.service; enabled; vendor preset: enabled)
Active: active (running) since Sun 2020-10-04 21:14:16 CST; 1 weeks 1 days ago
Main PID: 12166 (rpcbind)
Tasks: 1
Memory: 596.0K
CGroup: /system.slice/rpcbind.service
└─12166 /sbin/rpcbind -w
Oct 04 21:14:16 k8s-node-12 systemd[1]: Starting RPC bind service...
Oct 04 21:14:16 k8s-node-12 systemd[1]: Started RPC bind service.
可以看到服务是成功启动了的
-
其余节点执行
mkdir -p /opt/software/nfs && mount -t nfs -o nolock,nfsvers=3,vers=3 192.168.0.12:/opt/software/nfs /opt/software/nfs
挂载共享目录到本地 -
其余节点执行
yum -y install nfs-utils && showmount -e 192.168.0.12
[root@k8s-node-16 ~]# showmount -e 192.168.0.12
Export list for 192.168.0.12:
/opt/software/nfs *
可以看到绑定成功。
目录挂载-pvc.yaml
# PersistentVolume backed by the NFS share exported from 192.168.0.12.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-node12                # PV name
  # NOTE(review): PVs are cluster-scoped; this namespace field is ignored by the API server.
  namespace: default
  labels:
    app: nfs-node12               # label matched by the PVC selector below
spec:
  capacity:
    storage: 1.5T                 # volume capacity
  accessModes:
  - ReadWriteMany                 # access mode: many nodes may mount read-write
  persistentVolumeReclaimPolicy: Retain   # keep data after the claim is released
  storageClassName: nfs           # only claims with the same storageClassName can bind
  nfs:
    path: /opt/software/nfs       # exported directory on the NFS server
    server: 192.168.0.12
---
# PersistentVolumeClaim that binds to the PV above.
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-node12                # PVC name
  namespace: default
spec:
  storageClassName: nfs           # must match the storageClassName defined on the PV
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1.5T               # amount of storage requested
  selector:
    matchLabels:
      app: nfs-node12             # bind only to PVs carrying this label
执行kubectl apply -f pvc.yaml
jenkins服务配置-jenkins30999.yaml
# Deployment: single Jenkins instance with its home directory persisted on NFS.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: jenkins30999
  name: jenkins30999
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins30999
  template:
    metadata:
      labels:
        app: jenkins30999
    spec:
      containers:
      # NOTE(review): the Docker Hub 'jenkins' image is deprecated and no longer
      # updated; consider jenkins/jenkins:lts for new deployments.
      - image: jenkins:latest
        name: jenkins30999
        volumeMounts:
        - name: nfs-node12
          mountPath: "/var/jenkins_home"
          subPath: "jenkins_home"     # store under a sub-directory of the NFS share
      volumes:
      - name: nfs-node12
        persistentVolumeClaim:
          claimName: nfs-node12       # PVC created by pvc.yaml
---
# Service: expose Jenkins on port 30999 via the external IP below.
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: jenkins30999
  name: jenkins30999
spec:
  ports:
  - port: 30999        # service port reachable on the externalIPs below
    protocol: TCP
    targetPort: 8080   # Jenkins web UI port inside the container
  selector:
    app: jenkins30999
  type: NodePort
  externalIPs:
  - 192.168.0.5
执行kubectl apply -f jenkins30999.yaml
查看运行状态:
[root@k8s-master-19 k8s]# kubectl get pods -n default
NAME READY STATUS RESTARTS AGE
jenkins30999-d8d6b79bb-r85wr 1/1 Running 0 4m28s
tomcat6-56fcc999cb-g8zql 1/1 Running 0 114m
tomcat6-56fcc999cb-qp762 1/1 Running 0 114m
tomcat6-56fcc999cb-tng6q 1/1 Running 0 114m
注意:
1.jenkins部署的例子是将容器内的目录挂载到了nfs的子目录下(当然也可以指定其他的持久化存储方式,详见官网),这里挂载到子目录是通过subPath指定的,可以通过showmount -e 192.168.0.12
来查看nfs共享的目录位置,这里为了区分不同的业务指定了子目录,具体的效果就是会在共享目录下创建指定的子目录并存储数据
2.jenkins部署的例子指定了service的type是NodePort主要是为了实现外网的访问,IP是通过externalIPs指定的,端口是通过spec.port属性指定的,这样的话访问地址就是192.168.0.5:30999(IP和端口请根据具体情况修改)
到此k8s集群部署jenkins服务并挂载指定目录就完成了,后续会针对配置文件进行更深入的学习和分享。