1. Check the k8s version

[root@master1 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"14", GitVersion:"v1.14.2", GitCommit:"66049e3b21efe110454d67df4fa62b08ea79a19b", GitTreeState:"clean", BuildDate:"2019-05-16T16:23:09Z", GoVersion:"go1.12.5", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"14", GitVersion:"v1.14.0", GitCommit:"641856db18352033a0d96dbc99153fa3b27298e5", GitTreeState:"clean", BuildDate:"2019-03-25T15:45:25Z", GoVersion:"go1.12.1", Compiler:"gc", Platform:"linux/amd64"}
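Tip: for just the version numbers without the full build info above, this version of kubectl also accepts a short form (output omitted):

kubectl version --short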

2. View k8s cluster information

[root@master1 ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.3.130:8443
KubeDNS is running at https://192.168.3.130:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy

To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

3. View apiservices

[root@master1 ~]# kubectl get apiservice
NAME                                   SERVICE   AVAILABLE   AGE
v1.                                    Local     True        3h15m
v1.apps                                Local     True        3h15m
v1.authentication.k8s.io               Local     True        3h15m
v1.authorization.k8s.io                Local     True        3h15m
v1.autoscaling                         Local     True        3h15m
v1.batch                               Local     True        3h15m
v1.coordination.k8s.io                 Local     True        3h15m
v1.crd.projectcalico.org               Local     True        67m
v1.networking.k8s.io                   Local     True        3h15m
v1.rbac.authorization.k8s.io           Local     True        3h15m
v1.scheduling.k8s.io                   Local     True        3h15m
v1.storage.k8s.io                      Local     True        3h15m
v1beta1.admissionregistration.k8s.io   Local     True        3h15m
v1beta1.apiextensions.k8s.io           Local     True        3h15m
v1beta1.apps                           Local     True        3h15m
v1beta1.authentication.k8s.io          Local     True        3h15m
v1beta1.authorization.k8s.io           Local     True        3h15m
v1beta1.batch                          Local     True        3h15m
v1beta1.certificates.k8s.io            Local     True        3h15m
v1beta1.coordination.k8s.io            Local     True        3h15m
v1beta1.events.k8s.io                  Local     True        3h15m
v1beta1.extensions                     Local     True        3h15m
v1beta1.networking.k8s.io              Local     True        3h15m
v1beta1.node.k8s.io                    Local     True        3h15m
v1beta1.policy                         Local     True        3h15m
v1beta1.rbac.authorization.k8s.io      Local     True        3h15m
v1beta1.scheduling.k8s.io              Local     True        3h15m
v1beta1.storage.k8s.io                 Local     True        3h15m
v1beta2.apps                           Local     True        3h15m
v2beta1.autoscaling                    Local     True        3h15m
v2beta2.autoscaling                    Local     True        3h15m

4. Check node status

[root@master1 ~]# kubectl get nodes
NAME      STATUS   ROLES    AGE    VERSION
master1   Ready    master   113m   v1.14.2
master2   Ready    master   105m   v1.14.2
master3   Ready    master   102m   v1.14.2
node1     Ready    <none>   101m   v1.14.2
node2     Ready    <none>   43m    v1.14.2
node3     Ready    <none>   43m    v1.14.2
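Tip: adding -o wide to the listing also shows each node's internal IP, OS image, kernel version, and container runtime (output omitted here):

kubectl get nodes -o wide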

5. View detailed node information

[root@master1 ~]# kubectl describe node master1
Name:               master1
Roles:              master
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=master1
                    kubernetes.io/os=linux
                    node-role.kubernetes.io/master=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
                    node.alpha.kubernetes.io/ttl: 0
                    projectcalico.org/IPv4Address: 192.168.3.131/24
                    projectcalico.org/IPv4IPIPTunnelAddr: 10.97.40.64
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Tue, 06 Aug 2019 15:48:06 +0800
Taints:             node-role.kubernetes.io/master:NoSchedule
Unschedulable:      false
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Tue, 06 Aug 2019 17:56:41 +0800   Tue, 06 Aug 2019 17:56:41 +0800   CalicoIsUp                   Calico is running on this node
  MemoryPressure       False   Wed, 07 Aug 2019 11:27:57 +0800   Tue, 06 Aug 2019 17:51:32 +0800   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Wed, 07 Aug 2019 11:27:57 +0800   Tue, 06 Aug 2019 17:51:32 +0800   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Wed, 07 Aug 2019 11:27:57 +0800   Tue, 06 Aug 2019 17:51:32 +0800   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Wed, 07 Aug 2019 11:27:57 +0800   Tue, 06 Aug 2019 17:51:32 +0800   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  192.168.3.131
  Hostname:    master1
Capacity:
 cpu:                2
 ephemeral-storage:  36936068Ki
 hugepages-2Mi:      0
 memory:             1888672Ki
 pods:               110
Allocatable:
 cpu:                2
 ephemeral-storage:  34040280213
 hugepages-2Mi:      0
 memory:             1786272Ki
 pods:               110
System Info:
 Machine ID:                 e7a02cba19ef4e27ab273a5fb3f9efce
 System UUID:                E92A4D56-F769-5618-6BFA-241D5C8BBD7E
 Boot ID:                    f6ff55c0-f47c-4cc3-9e59-f56a4522e5b2
 Kernel Version:             4.4.187-1.el7.elrepo.x86_64
 OS Image:                   CentOS Linux 7 (Core)
 Operating System:           linux
 Architecture:               amd64
 Container Runtime Version:  docker://18.6.3
 Kubelet Version:            v1.14.2
 Kube-Proxy Version:         v1.14.2
Non-terminated Pods:         (6 in total)
  Namespace                  Name                               CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE
  ---------                  ----                               ------------  ----------  ---------------  -------------  ---
  kube-system                calico-node-s2jbl                  250m (12%)    0 (0%)      0 (0%)           0 (0%)         19h
  kube-system                etcd-master1                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         19h
  kube-system                kube-apiserver-master1             250m (12%)    0 (0%)      0 (0%)           0 (0%)         19h
  kube-system                kube-controller-manager-master1    200m (10%)    0 (0%)      0 (0%)           0 (0%)         19h
  kube-system                kube-proxy-qf8ql                   0 (0%)        0 (0%)      0 (0%)           0 (0%)         18h
  kube-system                kube-scheduler-master1             100m (5%)     0 (0%)      0 (0%)           0 (0%)         19h
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                800m (40%)  0 (0%)
  memory             0 (0%)      0 (0%)
  ephemeral-storage  0 (0%)      0 (0%)
Events:              <none>
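The Taints field above (node-role.kubernetes.io/master:NoSchedule) is why ordinary workloads are not scheduled onto the masters. For a lab cluster where you want master1 to accept regular pods, the taint can be removed; a sketch:

kubectl taint nodes master1 node-role.kubernetes.io/master:NoSchedule-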

6. Check the system pods in the kube-system namespace

[root@master1 ~]# kubectl get pods -n kube-system -o wide
NAME                                       READY   STATUS    RESTARTS   AGE    IP              NODE      NOMINATED NODE   READINESS GATES
calico-kube-controllers-67d686764b-qml54   1/1     Running   0          98m    10.101.149.3    node1     <none>           <none>
calico-node-5kzpp                          1/1     Running   0          98m    192.168.3.132   master2   <none>           <none>
calico-node-czwrh                          1/1     Running   0          98m    192.168.3.133   master3   <none>           <none>
calico-node-dsgcf                          1/1     Running   0          45m    192.168.3.142   node2     <none>           <none>
calico-node-hrxbv                          1/1     Running   0          98m    192.168.3.141   node1     <none>           <none>
calico-node-j22pl                          1/1     Running   0          44m    192.168.3.143   node3     <none>           <none>
calico-node-s2jbl                          1/1     Running   0          98m    192.168.3.131   master1   <none>           <none>
coredns-d5947d4b-6t2vs                     1/1     Running   0          114m   10.101.149.1    node1     <none>           <none>
coredns-d5947d4b-sgmfb                     1/1     Running   0          114m   10.101.149.2    node1     <none>           <none>
etcd-master1                               1/1     Running   0          113m   192.168.3.131   master1   <none>           <none>
etcd-master2                               1/1     Running   0          107m   192.168.3.132   master2   <none>           <none>
etcd-master3                               1/1     Running   0          104m   192.168.3.133   master3   <none>           <none>
kube-apiserver-master1                     1/1     Running   0          113m   192.168.3.131   master1   <none>           <none>
kube-apiserver-master2                     1/1     Running   0          107m   192.168.3.132   master2   <none>           <none>
kube-apiserver-master3                     1/1     Running   0          103m   192.168.3.133   master3   <none>           <none>
kube-controller-manager-master1            1/1     Running   2          113m   192.168.3.131   master1   <none>           <none>
kube-controller-manager-master2            1/1     Running   0          107m   192.168.3.132   master2   <none>           <none>
kube-controller-manager-master3            1/1     Running   0          103m   192.168.3.133   master3   <none>           <none>
kube-proxy-6h6gc                           1/1     Running   0          39m    192.168.3.142   node2     <none>           <none>
kube-proxy-hb498                           1/1     Running   0          39m    192.168.3.132   master2   <none>           <none>
kube-proxy-l8csf                           1/1     Running   0          40m    192.168.3.143   node3     <none>           <none>
kube-proxy-p96nn                           1/1     Running   0          39m    192.168.3.141   node1     <none>           <none>
kube-proxy-qf8ql                           1/1     Running   0          39m    192.168.3.131   master1   <none>           <none>
kube-proxy-xhgqw                           1/1     Running   0          39m    192.168.3.133   master3   <none>           <none>
kube-scheduler-master1                     1/1     Running   1          113m   192.168.3.131   master1   <none>           <none>
kube-scheduler-master2                     1/1     Running   0          107m   192.168.3.132   master2   <none>           <none>
kube-scheduler-master3                     1/1     Running   0          103m   192.168.3.133   master3   <none>           <none>

7. Run a pod

[root@master1 ~]# kubectl run nginx --image=nginx --replicas=5
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created

Note: the command above warns that this way of creating pods is deprecated and will be removed in a future k8s version. Also, a pod created this way can only be associated with a svc via the expose command (see the sketch below); you cannot associate it with kubectl create svc. The recommended way to create pods is:

[root@master1 ~]# kubectl create deploy nginx-dev --image=nginx:1.17-alpine
deployment.apps/nginx-dev created
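Either deployment can then be tied to a svc with expose, which reuses the deployment's own selector; a minimal sketch against the nginx deployment created above (port 80 is assumed from the nginx image):

kubectl expose deployment nginx --port=80 --target-port=80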

8. Scale pods up or down

[root@master1 ~]# kubectl  scale deployment nginx --replicas=2           
deployment.extensions/nginx scaled
[root@master1 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP             NODE    NOMINATED NODE   READINESS GATES
nginx-7db9fccd9b-9vqv8   1/1     Running   0          47m   10.101.11.4    node2   <none>           <none>
nginx-7db9fccd9b-jx85k   1/1     Running   0          47m   10.106.135.3   node3   <none>           <none>
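Scaling can also be automated with a HorizontalPodAutoscaler; a sketch, assuming a metrics source such as metrics-server is running in the cluster:

kubectl autoscale deployment nginx --min=2 --max=5 --cpu-percent=80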

9. List pods running in the default namespace

[root@master1 ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP             NODE    NOMINATED NODE   READINESS GATES
nginx-7db9fccd9b-9vqv8   1/1     Running   0          37s   10.101.11.4    node2   <none>           <none>
nginx-7db9fccd9b-jx85k   1/1     Running   0          37s   10.106.135.3   node3   <none>           <none>
nginx-7db9fccd9b-ljthd   1/1     Running   0          37s   10.101.11.5    node2   <none>           <none>
nginx-7db9fccd9b-scwtt   1/1     Running   0          37s   10.101.149.4   node1   <none>           <none>
nginx-7db9fccd9b-ztqmk   1/1     Running   0          37s   10.106.135.4   node3   <none>           <none>

10. View details of a specific pod

[root@master1 ~]# kubectl describe pod nginx-7db9fccd9b-jx85k
Name:               nginx-7db9fccd9b-jx85k
Namespace:          default
Priority:           0
PriorityClassName:  <none>
Node:               node3/192.168.3.143
Start Time:         Tue, 06 Aug 2019 18:06:49 +0800
Labels:             pod-template-hash=7db9fccd9b
                    run=nginx
Annotations:        cni.projectcalico.org/podIP: 10.106.135.3/32
Status:             Running
IP:                 10.106.135.3
Controlled By:      ReplicaSet/nginx-7db9fccd9b
Containers:
  nginx:
    Container ID:   docker://4a21c43681e2a694cd9a0be54bb1928a6774ad75e141c065b62a7e95947e1238
    Image:          nginx
    Image ID:       docker-pullable://nginx@sha256:eb3320e2f9ca409b7c0aa71aea3cf7ce7d018f03a372564dbdb023646958770b
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Tue, 06 Aug 2019 18:06:57 +0800
    Ready:          True
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-tzqsm (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-tzqsm:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-tzqsm
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  60m   default-scheduler  Successfully assigned default/nginx-7db9fccd9b-jx85k to node3
  Normal  Pulling    60m   kubelet, node3     Pulling image "nginx"
  Normal  Pulled     59m   kubelet, node3     Successfully pulled image "nginx"
  Normal  Created    59m   kubelet, node3     Created container nginx
  Normal  Started    59m   kubelet, node3     Started container nginx

11. Delete a pod

[root@master1 ~]# kubectl delete pod nginx-7db9fccd9b-9vqv8
pod "nginx-7db9fccd9b-9vqv8" deleted
[root@master1 ~]# kubectl get pods -o wide                 
NAME                     READY   STATUS    RESTARTS   AGE   IP             NODE    NOMINATED NODE   READINESS GATES
nginx-7db9fccd9b-jx85k   1/1     Running   0          48m   10.106.135.3   node3   <none>           <none>
nginx-7db9fccd9b-mm4rs   1/1     Running   0          6s    10.101.11.6    node2   <none>           <none>
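Note the 6s-old replacement pod nginx-7db9fccd9b-mm4rs: the Deployment's ReplicaSet immediately recreates any pod you delete, so deleting pods one by one never reduces the count. To delete all of the deployment's pods in one command (they will still be recreated), select them by label; a sketch using the run=nginx label visible in section 10:

kubectl delete pods -l run=nginx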

12. View deployments

[root@master1 ~]# kubectl get deploy
NAME    READY   UP-TO-DATE   AVAILABLE   AGE
nginx   2/2     2            2           63m

13. View details of a specific deployment

[root@master1 ~]# kubectl describe deploy nginx 
Name:                   nginx
Namespace:              default
CreationTimestamp:      Tue, 06 Aug 2019 18:06:49 +0800
Labels:                 run=nginx
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               run=nginx
Replicas:               2 desired | 2 updated | 2 total | 2 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  run=nginx
  Containers:
   nginx:
    Image:        nginx
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   nginx-7db9fccd9b (2/2 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  21m   deployment-controller  Scaled down replica set nginx-7db9fccd9b to 2

14. Change the image used by a deployment's pods

[root@master1 ~]# kubectl get deploy -o wide
NAME        READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES              SELECTOR
nginx-dev   2/2     2            2           66m   nginx        nginx:1.16-alpine   app=nginx-dev
[root@master1 ~]# kubectl set image deploy nginx-dev nginx=nginx:1.17-alpine
deployment.extensions/nginx-dev image updated
[root@master1 ~]# kubectl get deploy -o wide                                
NAME        READY   UP-TO-DATE   AVAILABLE   AGE   CONTAINERS   IMAGES              SELECTOR
nginx-dev   2/2     2            2           66m   nginx        nginx:1.17-alpine   app=nginx-dev

15. Show the rollout progress

[root@master1 ~]# kubectl rollout status deploy nginx-dev                   
Waiting for deployment "nginx-dev" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "nginx-dev" rollout to finish: 1 old replicas are pending termination...
deployment "nginx-dev" successfully rolled out

16. Roll back a deployment

Roll back to the previous revision:

[root@master1 ~]# kubectl rollout undo deploy nginx-dev
deployment.extensions/nginx-dev rolled back

Roll back to a specific revision:

[root@master2 ~]# kubectl rollout history deploy nginx-dev                   
deployment.extensions/nginx-dev 
REVISION  CHANGE-CAUSE
4         <none>
5         <none>

[root@master2 ~]# kubectl rollout undo deploy nginx-dev --to-revision=4
deployment.extensions/nginx-dev rolled back

17. Delete a deployment

[root@master1 ~]# kubectl delete deploy nginx
deployment.extensions "nginx" deleted
[root@master1 ~]# kubectl get pods
No resources found.

18. Create a svc

[root@master1 ~]# kubectl create svc clusterip nginx-dev --tcp=80:80
service/nginx-dev created
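An equivalent ClusterIP service can also be created with expose, which copies the selector from the deployment instead of deriving it from the service name; a sketch:

kubectl expose deployment nginx-dev --port=80 --target-port=80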

19. View svc

Brief output:

[root@master1 ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP   24h
nginx-dev    ClusterIP   10.99.155.10   <none>        80/TCP    36m

Detailed output:

[root@master1 ~]# kubectl get svc -o wide
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)   AGE   SELECTOR
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP   24h   <none>
nginx-dev    ClusterIP   10.99.155.10   <none>        80/TCP    38m   app=nginx-dev

YAML format:

[root@master1 ~]# kubectl get svc nginx-dev -o yaml
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2019-08-07T07:45:43Z"
  labels:
    app: nginx-dev
  name: nginx-dev
  namespace: default
  resourceVersion: "64369"
  selfLink: /api/v1/namespaces/default/services/nginx-dev
  uid: 5c2851e5-b8e7-11e9-8039-000c298bbd7e
spec:
  clusterIP: 10.99.155.10
  ports:
  - name: 80-80
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: nginx-dev
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}

JSON format:

[root@master1 ~]# kubectl get svc nginx-dev -o json
{
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {
        "creationTimestamp": "2019-08-07T07:45:43Z",
        "labels": {
            "app": "nginx-dev"
        },
        "name": "nginx-dev",
        "namespace": "default",
        "resourceVersion": "64369",
        "selfLink": "/api/v1/namespaces/default/services/nginx-dev",
        "uid": "5c2851e5-b8e7-11e9-8039-000c298bbd7e"
    },
    "spec": {
        "clusterIP": "10.99.155.10",
        "ports": [
            {
                "name": "80-80",
                "port": 80,
                "protocol": "TCP",
                "targetPort": 80
            }
        ],
        "selector": {
            "app": "nginx-dev"
        },
        "sessionAffinity": "None",
        "type": "ClusterIP"
    },
    "status": {
        "loadBalancer": {}
    }
}

20. Access a svc from inside the k8s cluster

svcname.default.svc.cluster.local.
# <svc name>.<namespace of the svc>.svc.<fixed suffix>
# cluster.local. is the default fixed suffix set when the cluster is initialized with kubeadm
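A quick way to verify the name resolves inside the cluster is a throwaway pod; a sketch (the busybox image and its wget are assumptions, not part of the original setup):

kubectl run dns-test --image=busybox --rm -it --restart=Never -- wget -qO- http://nginx-dev.default.svc.cluster.local.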

21. Edit a svc

kubectl edit svc svcname
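edit opens the live object in the default editor (vi); this can be overridden per invocation with the KUBE_EDITOR environment variable, e.g. for the nginx-dev service from above:

KUBE_EDITOR=nano kubectl edit svc nginx-dev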

22. Patch a resource

[root@master1 ~]# kubectl patch deploy nginx-dev -p '{"spec":{"replicas":3}}' 
deployment.extensions/nginx-dev patched
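patch accepts any strategic-merge fragment of the object, not just replicas; for example, the container image can be changed the same way (a sketch, equivalent to the set image command in section 14 — containers are merged by name):

kubectl patch deploy nginx-dev -p '{"spec":{"template":{"spec":{"containers":[{"name":"nginx","image":"nginx:1.17-alpine"}]}}}}'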

23. View a deployment's rollout history

[root@master1 ~]# kubectl rollout history deploy nginx-dev
deployment.extensions/nginx-dev 
REVISION  CHANGE-CAUSE
4         <none>
5         <none>
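CHANGE-CAUSE reads <none> above because the updates were applied without --record; passing that flag stores the command in the revision's change-cause annotation (a sketch):

kubectl set image deploy nginx-dev nginx=nginx:1.17-alpine --record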

24. Watch pod updates in real time

[root@master1 ~]# kubectl get pods -l app=nginx-dev -w
NAME                         READY   STATUS    RESTARTS   AGE
nginx-dev-545b7cc9bc-d49h6   1/1     Running   0          41h
nginx-dev-545b7cc9bc-g2mpm   1/1     Running   0          41h

25. View pod revision history

[root@master1 ~]# kubectl get rs -o wide
NAME                   DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES              SELECTOR
nginx-dev-545b7cc9bc   0         0         0       43h   nginx        nginx:1.17-alpine   app=nginx-dev,pod-template-hash=545b7cc9bc
nginx-dev-7445f56c6d   3         3         3       42h   nginx        nginx:1.16-alpine   app=nginx-dev,pod-template-hash=7445f56c6d

 
