Install Kubernetes v1.20.8 in Three Minutes

1. On all machines: disable the firewall and SELinux, and configure /etc/hosts.
Set the hostname (shown here for the master):

cat /etc/hostname
k8s-master
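
On CentOS 7 you can also set the hostname in one step with hostnamectl (run the matching command on each node):

hostnamectl set-hostname k8s-master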

cat /etc/hosts
192.168.1.15 k8s-master
192.168.1.16 k8s-node1
192.168.1.17 k8s-node2

systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i 's/=enforcing/=disabled/g' /etc/selinux/config
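
To confirm SELinux is now off for the running system:

getenforce  # prints Permissive now, Disabled after a reboot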

2. Disable the swap partition

swapoff -a  # temporary
sed -i '/swap/s/^/#/' /etc/fstab  # permanent
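
You can confirm swap is off:

free -m  # the Swap line should show 0 total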

3. Adjust kernel parameters

vi /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0

4. Apply the kernel parameters

modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf
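
You can verify that the module is loaded and the values took effect:

lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables  # should print 1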

5. Enable the IPVS kernel modules

vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
# Load every IPVS module shipped with the running kernel.
ipvs_mods_dir="/usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs"
for i in $(ls $ipvs_mods_dir | grep -o "^[^.]*"); do
    # Only modprobe modules that modinfo can actually resolve.
    /sbin/modinfo -F filename $i &>/dev/null
    if [ $? -eq 0 ]; then
        /sbin/modprobe $i
    fi
done

chmod +x /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
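
You can verify the IPVS modules are loaded:

lsmod | grep -E 'ip_vs|nf_conntrack'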

6. Add the Docker yum repository

cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
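
Optionally rebuild the yum metadata cache so the new repo is picked up right away:

yum makecache fast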

7. Add the Kubernetes yum repository

cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes Repo
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
enabled=1
EOF

8. List the available versions and install.

yum list | grep kubeadm
yum install -y docker-ce kubelet-1.20.8 kubeadm-1.20.8 kubectl-1.20.8

9. Start the services

systemctl start docker && systemctl enable docker
systemctl start kubelet && systemctl enable kubelet
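
Note that until kubeadm init runs, kubelet has no cluster configuration and restarts in a loop; this is expected:

systemctl status kubelet  # shows "activating (auto-restart)" until the cluster is initialized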

10. Initialize the cluster (run the following on the master node only; everything above runs on every machine in the cluster).

kubeadm init --kubernetes-version=v1.20.8 --apiserver-advertise-address=192.168.1.15 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --image-repository registry.aliyuncs.com/google_containers  # change apiserver-advertise-address to your own master IP; leave the rest as-is
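
To shorten the init step, you can pre-pull the control-plane images first:

kubeadm config images pull --kubernetes-version=v1.20.8 --image-repository registry.aliyuncs.com/google_containers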

11. If the deployment fails, fix the underlying problem, then reset with kubeadm before re-running the init command:

kubeadm reset -f
kubeadm init --kubernetes-version=v1.20.8 --apiserver-advertise-address=192.168.1.15 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --image-repository registry.aliyuncs.com/google_containers

12. On success you will see:

....
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:
kubeadm join
 ...

13. Run the commands shown above:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

14. On each node, run the kubeadm join ... command printed above to join it to the cluster.

kubeadm join 192.168.1.15:6443 --token 27kdvx.asd123453534sdf123 \
    --discovery-token-ca-cert-hash sha256:45656756sdfsdfsd89165156465165451651
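
If the token has expired (tokens last 24 hours by default), generate a fresh join command on the master:

kubeadm token create --print-join-command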

15. Check the nodes and other components:

# kubectl get nodes
NAME         STATUS     ROLES                  AGE     VERSION
k8s-master   NotReady   control-plane,master   14m     v1.20.8
k8s-node1    NotReady   <none>                 8m46s   v1.20.8
k8s-node2    NotReady   <none>                 8m30s   v1.20.8
# kubectl get pod -n kube-system

The nodes stay NotReady until the flannel pods are up; they then change to Ready.
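
You can watch the kube-system pods come up:

kubectl get pods -n kube-system -w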

16. Install the flannel network plugin

cat kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.14.0
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

17. Apply the kube-flannel.yml manifest

kubectl apply -f kube-flannel.yml  # the image pull can be slow; pull it manually or copy it from another machine if needed
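
If pulls are slow, you can fetch the image by hand on every node (the image name comes from the manifest above) and then wait for the DaemonSet rollout:

docker pull quay.io/coreos/flannel:v0.14.0
kubectl -n kube-system rollout status ds/kube-flannel-ds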

18. Once the flannel pods are running, the installation has succeeded.
19. A common side issue:
On clusters installed with kubeadm, kubectl get cs reports the kube-scheduler and kube-controller-manager components as Unhealthy. This is typical out of the box.

# kubectl get cs
NAME                 STATUS      MESSAGE                                                                                     ERROR
controller-manager   Unhealthy   Get http://127.0.0.1:10252/healthz: dial tcp 127.0.0.1:10252: connect: connection refused
scheduler            Unhealthy   Get http://127.0.0.1:10251/healthz: dial tcp 127.0.0.1:10251: connect: connection refused
etcd-0               Healthy     {"health":"true"}

20. Edit the static pod manifests at /etc/kubernetes/manifests/kube-scheduler.yaml and /etc/kubernetes/manifests/kube-controller-manager.yaml:

cd /etc/kubernetes/manifests/
vim kube-controller-manager.yaml  # comment out the --port=0 line
vim kube-scheduler.yaml           # comment out the --port=0 line
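
After the edit, the relevant section looks like this (a sketch of kube-scheduler.yaml; the surrounding flags on your system may differ):

spec:
  containers:
  - command:
    - kube-scheduler
    ...
    - --leader-elect=true
    #- --port=0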

21. Restart kubelet and check the component status again.

systemctl restart kubelet
kubectl get cs

All three components should now report Healthy.
22. Tab completion:

yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

23. A few basic kubectl commands:

kubectl create ns test  # create a namespace
kubectl get deployment -n test  # -n selects the namespace
kubectl get pods
kubectl get pods -o wide
kubectl get pods --show-labels  # show pod labels
kubectl describe pods myapp-7c468db58f-2dqrh -n test  # describe shows full details
kubectl delete pods nginx-deplog-5476b95b8-tdsjs -n test
kubectl get service
kubectl edit svc nginx  # edit a resource in place
kubectl delete svc nginx
kubectl scale --replicas=5 deployment myapp  # scale the replica count
kubectl rollout undo deployment myapp  # roll back to the previous version
kubectl get nodes  # show the status of all nodes
kubectl get ns  # list all namespaces
kubectl get pods -n {$nameSpace}  # list the pods in a given namespace
kubectl describe pod <pod-name> -n {$nameSpace}
kubectl create -f xxx.yml  # create resources from a manifest
kubectl delete -f xxx.yml  # delete the resources defined in a manifest
kubectl delete pod <pod-name> -n {$nameSpace}  # delete a pod
kubectl exec -it podname -n test -- bash  # open a shell inside a pod
kubectl logs podname -n test  # view logs
kubectl exec -it ds-admin-5475c5bb75-xf7jr -c php-fpm -n test1 -- bash  # exec into a specific container
kubectl get pod jenkins-6f4bf9f868-xx9wr -n demon -o yaml  # dump the pod spec as YAML
kubectl cp vendor android-cms-68b84fd69b-9vh9s:/app/ -c php-fpm -n test  # copy files into a container
kubectl port-forward es-cluster-0 9200:9200 --namespace=kube-logging  # forward a local port to a pod
kubectl --kubeconfig /root/.kube/huiseconfig get pod -n huise  # query a cluster with a specific kubeconfig

If kubeadm or kubelet errors out because Docker's cgroup driver is incompatible (Docker defaults to cgroupfs while kubelet expects systemd), configure Docker as follows:


cat >/etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts":{
    "max-size": "100m"
}
}
EOF
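
After changing daemon.json, restart Docker (and kubelet) so the new cgroup driver takes effect:

systemctl daemon-reload
systemctl restart docker
systemctl restart kubelet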

That's all for now!
................ END ................
