K8s lab environment:

Continuing from the previous note: https://blog.csdn.net/tushanpeipei/article/details/118395393?spm=1001.2014.3001.5502

One master and two worker nodes, all running CentOS 7 and able to communicate with each other.

1. View node information:

[root@vms201 ~]# kubectl get nodes
NAME             STATUS   ROLES                  AGE   VERSION
vms201.rhce.cc   Ready    control-plane,master   15m   v1.21.0
vms202.rhce.cc   Ready    <none>                 11m   v1.21.0
vms203.rhce.cc   Ready    <none>                 11m   v1.21.0

[root@vms201 ~]# kubectl get nodes -o wide
NAME             STATUS   ROLES                  AGE   VERSION   INTERNAL-IP     EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION          CONTAINER-RUNTIME
vms201.rhce.cc   Ready    control-plane,master   16m   v1.21.0   192.168.0.201   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://20.10.7
vms202.rhce.cc   Ready    <none>                 12m   v1.21.0   192.168.0.202   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://20.10.7
vms203.rhce.cc   Ready    <none>                 12m   v1.21.0   192.168.0.203   <none>        CentOS Linux 7 (Core)   3.10.0-693.el7.x86_64   docker://20.10.7
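
For a closer look at any single node beyond what -o wide shows, describe prints its conditions, capacity, allocated resources, and scheduled pods (using worker vms202.rhce.cc from the listing above):

# Detailed state of a single node
kubectl describe node vms202.rhce.cc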

2. View cluster information; this shows the address where the apiserver is running:

[root@vms201 ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.0.201:6443
CoreDNS is running at https://192.168.0.201:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

3. View the Kubernetes version:

[root@vms201 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.0", GitCommit:"cb303e613a121a29364f75cc67d3d580833a7479", GitTreeState:"clean", BuildDate:"2021-04-08T16:31:21Z", GoVersion:"go1.16.1", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"21", GitVersion:"v1.21.0", GitCommit:"cb303e613a121a29364f75cc67d3d580833a7479", GitTreeState:"clean", BuildDate:"2021-04-08T16:25:06Z", GoVersion:"go1.16.1", Compiler:"gc", Platform:"linux/amd64"}

[root@vms201 ~]# kubectl version --short
Client Version: v1.21.0
Server Version: v1.21.0

Here, "Client Version" is the version of the kubectl client tool itself, while "Server Version" is the version the cluster's apiserver is running.
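
If a script needs to parse these versions, kubectl can also emit them in machine-readable form using the standard output flag:

# Version information as JSON, convenient for scripts
kubectl version -o json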

4. View the settings the cluster was initialized with:

[root@vms201 ~]# kubeadm config view
Command "view" is deprecated, This command is deprecated and will be removed in a future release, please use 'kubectl get cm -o yaml -n kube-system kubeadm-config' to get the kubeadm config directly.
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.21.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
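
As the deprecation notice above says, the same configuration can be read directly from the kubeadm-config ConfigMap:

# Recommended replacement for the deprecated 'kubeadm config view'
kubectl get cm -o yaml -n kube-system kubeadm-config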

5. View the structure of the kubeconfig (authentication) file:

[root@vms201 ~]# kubectl config view
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: DATA+OMITTED
    server: https://192.168.0.201:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: REDACTED
    client-key-data: REDACTED
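
The certificate fields are masked as DATA+OMITTED/REDACTED. When the actual base64-encoded data is needed (for example for the multi-cluster setup in section 9 below), the --raw flag reveals it:

# Print the kubeconfig including certificate data
kubectl config view --raw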

6. View pod information. The -n flag specifies a namespace; the built-in Kubernetes component pods run in the kube-system namespace by default.

[root@vms201 ~]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-78d6f96c7b-k5srp   1/1     Running   0          16m
calico-node-hstwf                          1/1     Running   0          16m
calico-node-ldfhc                          1/1     Running   0          16m
calico-node-sdvzv                          1/1     Running   0          16m
coredns-545d6fc579-596hb                   1/1     Running   0          25m
coredns-545d6fc579-7mb9v                   1/1     Running   0          25m
etcd-vms201.rhce.cc                        1/1     Running   0          26m
kube-apiserver-vms201.rhce.cc              1/1     Running   1          26m
kube-controller-manager-vms201.rhce.cc     1/1     Running   0          26m
kube-proxy-28x9h                           1/1     Running   0          25m
kube-proxy-7qzdd                           1/1     Running   0          22m
kube-proxy-m9c2c                           1/1     Running   0          22m
kube-scheduler-vms201.rhce.cc              1/1     Running   0          26m
[root@vms201 ~]# kubectl get pods -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE   IP              NODE             NOMINATED NODE   READINESS GATES
calico-kube-controllers-78d6f96c7b-k5srp   1/1     Running   0          16m   10.244.58.193   vms202.rhce.cc   <none>           <none>
calico-node-hstwf                          1/1     Running   0          16m   192.168.0.201   vms201.rhce.cc   <none>           <none>
calico-node-ldfhc                          1/1     Running   0          16m   192.168.0.203   vms203.rhce.cc   <none>           <none>
calico-node-sdvzv                          1/1     Running   0          16m   192.168.0.202   vms202.rhce.cc   <none>           <none>
coredns-545d6fc579-596hb                   1/1     Running   0          26m   10.244.20.194   vms201.rhce.cc   <none>           <none>
coredns-545d6fc579-7mb9v                   1/1     Running   0          26m   10.244.20.193   vms201.rhce.cc   <none>           <none>
etcd-vms201.rhce.cc                        1/1     Running   0          26m   192.168.0.201   vms201.rhce.cc   <none>           <none>
kube-apiserver-vms201.rhce.cc              1/1     Running   1          26m   192.168.0.201   vms201.rhce.cc   <none>           <none>
kube-controller-manager-vms201.rhce.cc     1/1     Running   0          26m   192.168.0.201   vms201.rhce.cc   <none>           <none>
kube-proxy-28x9h                           1/1     Running   0          26m   192.168.0.201   vms201.rhce.cc   <none>           <none>
kube-proxy-7qzdd                           1/1     Running   0          22m   192.168.0.202   vms202.rhce.cc   <none>           <none>
kube-proxy-m9c2c                           1/1     Running   0          22m   192.168.0.203   vms203.rhce.cc   <none>           <none>
kube-scheduler-vms201.rhce.cc              1/1     Running   0          26m   192.168.0.201   vms201.rhce.cc   <none>           <none>
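
To narrow this listing to the pods scheduled on one particular node, a field selector on spec.nodeName works (vms202.rhce.cc as the example):

# Only the kube-system pods running on vms202.rhce.cc
kubectl get pods -n kube-system -o wide --field-selector spec.nodeName=vms202.rhce.cc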

7. View node and pod resource usage

Prerequisite: install metrics-server. Download the source tarball:

curl -Ls https://api.github.com/repos/kubernetes-sigs/metrics-server/tarball/v0.3.6 -o metrics-server-v0.3.6.tar.gz
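
Note that metrics-img.tar below is not produced by the command above; it is assumed to be a docker save archive of the k8s.gcr.io/metrics-server-amd64:v0.3.6 image referenced in the deployment YAML later, prepared roughly like this on a machine that can reach the registry:

# Assumed preparation of metrics-img.tar (not shown in the original note)
docker pull k8s.gcr.io/metrics-server-amd64:v0.3.6
docker save k8s.gcr.io/metrics-server-amd64:v0.3.6 -o metrics-img.tar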

# Copy the image archive to the two worker nodes
scp metrics-img.tar vms202:~
scp metrics-img.tar vms203:~

Load the image on all three machines:

docker load -i metrics-img.tar

Extract the source tarball on the master:

tar zxvf metrics-server-v0.3.6.tar.gz

Edit the deployment manifest:

cd /root/kubernetes-sigs-metrics-server-d1f4f6f/deploy/1.8+/
vim metrics-server-deployment.yaml

The section that needs modifying:

      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.6
        imagePullPolicy: IfNotPresent
        command:
        - /metrics-server
        - --metric-resolution=30s
        - --kubelet-insecure-tls
        - --kubelet-preferred-address-types=InternalIP

Apply all the YAML files in the current directory to install:

kubectl apply -f .
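
Besides watching the pod come up, one can check that the metrics API was registered with the apiserver; the 1.8+ manifests include an APIService object for this, so a quick sanity check is:

# The metrics.k8s.io API group should be listed once metrics-server is up
kubectl get apiservices | grep metrics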

Check whether the installation succeeded:

[root@vms201 1.8+]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-78d6f96c7b-k5srp   1/1     Running   0          119m
calico-node-hstwf                          1/1     Running   0          119m
calico-node-ldfhc                          1/1     Running   0          119m
calico-node-sdvzv                          1/1     Running   0          119m
coredns-545d6fc579-596hb                   1/1     Running   0          129m
coredns-545d6fc579-7mb9v                   1/1     Running   0          129m
etcd-vms201.rhce.cc                        1/1     Running   0          129m
kube-apiserver-vms201.rhce.cc              1/1     Running   1          129m
kube-controller-manager-vms201.rhce.cc     1/1     Running   0          129m
kube-proxy-28x9h                           1/1     Running   0          129m
kube-proxy-7qzdd                           1/1     Running   0          125m
kube-proxy-m9c2c                           1/1     Running   0          125m
kube-scheduler-vms201.rhce.cc              1/1     Running   0          129m
metrics-server-bcfb98c76-x8c77             1/1     Running   0          96s

The last line shows that metrics-server is now installed and in the Running state.

View node or pod resource usage:

[root@vms201 ~]# kubectl top nodes
W0701 23:55:11.334786   54660 top_node.go:119] Using json format to get metrics. Next release will switch to protocol-buffers, switch early by passing --use-protocol-buffers flag
NAME             CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
vms201.rhce.cc   172m         8%     1442Mi          50%
vms202.rhce.cc   61m          3%     616Mi           21%
vms203.rhce.cc   60m          3%     645Mi           22%

[root@vms201 ~]# kubectl top pods -n kube-system
W0701 23:56:25.946547   55653 top_pod.go:140] Using json format to get metrics. Next release will switch to protocol-buffers, switch early by passing --use-protocol-buffers flag
NAME                                       CPU(cores)   MEMORY(bytes)
calico-kube-controllers-78d6f96c7b-k5srp   2m           20Mi
calico-node-hstwf                          21m          83Mi
calico-node-ldfhc                          32m          96Mi
calico-node-sdvzv                          28m          99Mi
coredns-545d6fc579-596hb                   2m           14Mi
coredns-545d6fc579-7mb9v                   2m           16Mi
etcd-vms201.rhce.cc                        7m           55Mi
kube-apiserver-vms201.rhce.cc              29m          352Mi
kube-controller-manager-vms201.rhce.cc     9m           58Mi
kube-proxy-28x9h                           1m           18Mi
kube-proxy-7qzdd                           1m           19Mi
kube-proxy-m9c2c                           1m           19Mi
kube-scheduler-vms201.rhce.cc              2m           24Mi
metrics-server-bcfb98c76-x8c77             1m           12Mi

In the output above, the m in the CPU(cores) column stands for millicores: one core equals 1000 millicores, so the 172m reported for vms201.rhce.cc is about 0.17 of a core.
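
When hunting for the heaviest consumers, the output can be sorted; the --sort-by flag for kubectl top should be available in this release (verify with kubectl top pods --help):

# Sort pods by CPU usage, highest first
kubectl top pods -n kube-system --sort-by=cpu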

8. Namespace operations

Namespaces are isolated from one another, and each pod belongs to exactly one namespace.

List all namespaces from the master:

[root@vms201 ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   3h3m
kube-node-lease   Active   3h3m
kube-public       Active   3h3m
kube-system       Active   3h3m

Create a namespace named ns1:

[root@vms201 ~]# kubectl create ns ns1
namespace/ns1 created
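
To see the new namespace in use, a test pod can be started in it (the pod name nginx1 and the image are just examples):

# Run a throwaway pod in ns1 and list the pods there
kubectl run nginx1 --image=nginx -n ns1
kubectl get pods -n ns1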

kubens is a plugin for managing namespaces; download it, then install it on the master:

chmod +x kubens
mv kubens /bin/
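
kubens is part of the kubectx project (https://github.com/ahmetb/kubectx); one possible way to fetch the script before the chmod/mv above:

# Download the kubens script from the kubectx repository
curl -LO https://raw.githubusercontent.com/ahmetb/kubectx/master/kubens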

Once installed, kubens can be used to manage namespaces. Check which namespace is currently active:

[root@vms201 bin]# kubens
default  <----- currently here
kube-node-lease
kube-public
kube-system
ns1

Switch into kube-system:

[root@vms201 bin]#  kubens kube-system
Context "kubernetes-admin@kubernetes" modified.
Active namespace is "kube-system".
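
Under the hood, kubens just rewrites the namespace field of the current context, so the same switch can be done with plain kubectl:

# kubectl-native equivalent of 'kubens kube-system'
kubectl config set-context --current --namespace=kube-system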

View the pods in the current namespace; there is no longer any need to pass -n:

[root@vms201 bin]# kubectl get pods
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-78d6f96c7b-k5srp   1/1     Running   0          3h7m
calico-node-hstwf                          1/1     Running   0          3h7m
calico-node-ldfhc                          1/1     Running   0          3h7m
calico-node-sdvzv                          1/1     Running   0          3h7m
coredns-545d6fc579-596hb                   1/1     Running   0          3h16m
coredns-545d6fc579-7mb9v                   1/1     Running   0          3h16m
etcd-vms201.rhce.cc                        1/1     Running   0          3h17m
kube-apiserver-vms201.rhce.cc              1/1     Running   1          3h17m
kube-controller-manager-vms201.rhce.cc     1/1     Running   0          3h17m
kube-proxy-28x9h                           1/1     Running   0          3h16m
kube-proxy-7qzdd                           1/1     Running   0          3h13m
kube-proxy-m9c2c                           1/1     Running   0          3h13m
kube-scheduler-vms201.rhce.cc              1/1     Running   0          3h17m
metrics-server-bcfb98c76-x8c77             1/1     Running   0          69m

9. Managing multiple clusters from one master

When there are multiple clusters and you want to manage a second cluster directly from the first cluster's master, proceed as follows:

On the master of the first cluster:

cd ~/.kube
vim config

Modify it as follows:

apiVersion: v1
clusters:
# First cluster
- cluster:
    certificate-authority-data: xxx1
    server: https://192.168.0.201:6443
  name: kubernetes1
# Second cluster; server is the address of the second cluster's apiserver
- cluster:
    certificate-authority-data: xxx2
    server: https://192.168.0.211:6443
  name: kubernetes2
contexts:
# Context associating the first cluster with its user
- context:
    cluster: kubernetes1
    namespace: kube-system
    user: kubernetes-admin1
  name: kubernetes-admin1@kubernetes
# Second context, associating the second cluster with its user
- context:
    cluster: kubernetes2
    namespace: kube-system
    user: kubernetes-admin2
  name: kubernetes-admin2@kubernetes
current-context: kubernetes-admin1@kubernetes
kind: Config
preferences: {}
users:
# Users for the two clusters
- name: kubernetes-admin1
  user:
    client-certificate-data: yyy1
    client-key-data: xxx1
- name: kubernetes-admin2
  user:
    client-certificate-data: yyy2
    client-key-data: xxx2

The second cluster's credentials can be obtained by logging in to its master and viewing its kubeconfig the same way (see section 5; use --raw to reveal the masked certificate data), then filled into the file above. On the first cluster's master, list the available contexts:

kubectl config get-contexts

Switch to the second cluster to operate on it (note that use-context takes a context name as defined in the file, not a cluster name):

kubectl config use-context kubernetes-admin2@kubernetes
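
Switching the context changes the default for every subsequent command. For a one-off command against another cluster, the global --context flag avoids switching back and forth, using the context names defined in the file above:

# Query the first cluster without changing the current context
kubectl get nodes --context=kubernetes-admin1@kubernetes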

Source material:
《老段CKA课程》 (Lao Duan's CKA course)
