Preparation

For configuring a static IP in VMware, see https://blog.csdn.net/Su_Levi_Wei/article/details/85958004

Set the system host name and mutual resolution in the hosts file

hostnamectl set-hostname <hostname>
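For mutual resolution, add every node's IP and host name to /etc/hosts on each machine. A minimal sketch, assuming hypothetical addresses and host names (replace with your own; 192.168.70.110 is the master IP used later in this guide):

cat >> /etc/hosts <<EOF
192.168.70.110 k8s-master01
192.168.70.111 k8s-node01
192.168.70.112 k8s-node02
EOF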

 

Install dependency packages

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

 

Switch the firewall to iptables and set empty rules

systemctl stop firewalld && systemctl disable firewalld

 

yum -y install iptables-services && systemctl start iptables && systemctl enable iptables   && iptables -F && service iptables save

 

Disable swap and SELinux

Disable swap: Kubernetes (K8S) checks at startup whether swap is enabled; if it is, Pods may end up running in swap, which greatly degrades performance.

swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

 

setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
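Optional check to confirm the changes took effect:

# Swap total should be 0; getenforce should report Permissive (Disabled after a reboot)
free -m
getenforce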

 

Adjust kernel parameters for Kubernetes

cat > kubernetes.conf <<EOF

net.bridge.bridge-nf-call-iptables=1

net.bridge.bridge-nf-call-ip6tables=1

net.ipv4.ip_forward=1

net.ipv4.tcp_tw_recycle=0

# Avoid using swap; only allow it when the system is OOM
vm.swappiness=0

# Do not check whether enough physical memory is available
vm.overcommit_memory=1

# Do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0

fs.inotify.max_user_instances=8192

fs.inotify.max_user_watches=1048576

fs.file-max=52706963

fs.nr_open=52706963

net.ipv6.conf.all.disable_ipv6=1

net.netfilter.nf_conntrack_max=2310720

EOF

 

cp kubernetes.conf /etc/sysctl.d/kubernetes.conf

 

sysctl -p /etc/sysctl.d/kubernetes.conf
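If sysctl reports "No such file or directory" for the net.bridge.* keys, the br_netfilter module is not loaded yet (it is loaded explicitly in the IPVS section further below); load it and re-apply:

modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf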

 

Adjust the system time zone

# Set the system time zone to Asia/Shanghai

timedatectl set-timezone Asia/Shanghai

 

# Write the current UTC time to the hardware clock

timedatectl set-local-rtc 0

 

# Restart services that depend on the system time

systemctl restart rsyslog

systemctl restart crond

 

Stop services the system does not need

systemctl stop postfix && systemctl disable postfix

 

Configure rsyslogd and systemd journald

mkdir /var/log/journal # directory for persistent log storage

mkdir /etc/systemd/journald.conf.d

 

cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF

[Journal]

# Persist logs to disk

Storage=persistent

 

# Compress historical logs

Compress=yes

SyncIntervalSec=5m

RateLimitInterval=30s

RateLimitBurst=1000

 

# Maximum disk usage: 10G

SystemMaxUse=10G

 

# Maximum size of a single log file: 200M

SystemMaxFileSize=200M

 

# Log retention: 2 weeks

MaxRetentionSec=2week

 

# Do not forward logs to syslog

ForwardToSyslog=no

EOF

 

systemctl restart systemd-journald

 

Upgrade the system kernel to 4.4

The 3.10.x kernel shipped with CentOS 7.x has bugs that make Docker and Kubernetes unstable, so upgrade to the long-term 4.4 kernel from ELRepo:

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

 

# After installation, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if not, install it again!

yum --enablerepo=elrepo-kernel install -y kernel-lt
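The exact menu entry title depends on the kernel version that kernel-lt installs; as a quick check (assuming the standard BIOS path /boot/grub2/grub.cfg), list the entries before setting the default:

grep "^menuentry" /boot/grub2/grub.cfg | cut -d "'" -f 2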

 

# Make the new kernel the default boot entry

grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'

 

# After rebooting, install the kernel devel/header packages

yum --enablerepo=elrepo-kernel install kernel-lt-devel-$(uname -r) kernel-lt-headers-$(uname -r)

 

Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2

yum-config-manager \

--add-repo \

http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

 

yum update -y && yum install -y docker-ce

 

## Create the /etc/docker directory

mkdir /etc/docker

 

# Configure the daemon

cat > /etc/docker/daemon.json <<EOF

{

"exec-opts": ["native.cgroupdriver=systemd"],

"log-driver": "json-file",

"log-opts": {

"max-size": "100m"

},

"registry-mirrors": ["阿里云镜像加速地址"]

}

EOF

 

mkdir -p /etc/systemd/system/docker.service.d

 

# Restart the Docker service

systemctl daemon-reload && systemctl restart docker && systemctl enable docker
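Optionally verify that Docker picked up the systemd cgroup driver:

docker info | grep -i "cgroup driver"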

 

Pull the Kubernetes images

Because of the network firewall in mainland China, the images cannot be pulled from Google directly; mirrored copies of Google's containers are available on docker.io.

# Adjust the version numbers as needed

docker pull mirrorgooglecontainers/kube-apiserver-amd64:v1.15.1

docker pull mirrorgooglecontainers/kube-controller-manager-amd64:v1.15.1

docker pull mirrorgooglecontainers/kube-scheduler-amd64:v1.15.1

docker pull mirrorgooglecontainers/kube-proxy-amd64:v1.15.1

docker pull mirrorgooglecontainers/pause:3.1

docker pull mirrorgooglecontainers/etcd-amd64:3.3.10

docker pull coredns/coredns:1.3.1

docker pull quay-mirror.qiniu.com/coreos/flannel:v0.11.0-amd64

 

# Retag the images

docker tag docker.io/mirrorgooglecontainers/kube-proxy-amd64:v1.15.1 k8s.gcr.io/kube-proxy:v1.15.1

docker tag docker.io/mirrorgooglecontainers/kube-scheduler-amd64:v1.15.1 k8s.gcr.io/kube-scheduler:v1.15.1

docker tag docker.io/mirrorgooglecontainers/kube-apiserver-amd64:v1.15.1 k8s.gcr.io/kube-apiserver:v1.15.1

docker tag docker.io/mirrorgooglecontainers/kube-controller-manager-amd64:v1.15.1 k8s.gcr.io/kube-controller-manager:v1.15.1

docker tag docker.io/mirrorgooglecontainers/etcd-amd64:3.3.10  k8s.gcr.io/etcd:3.3.10

docker tag docker.io/mirrorgooglecontainers/pause:3.1  k8s.gcr.io/pause:3.1

docker tag docker.io/coredns/coredns:1.3.1  k8s.gcr.io/coredns:1.3.1

docker tag quay-mirror.qiniu.com/coreos/flannel:v0.11.0-amd64 quay.io/coreos/flannel:v0.11.0-amd64
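As an alternative to the one-by-one commands above, a minimal loop sketch that pulls and retags the four control-plane images (same mirror prefix and v1.15.1 version as above):

for img in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
  docker pull mirrorgooglecontainers/${img}-amd64:v1.15.1
  docker tag mirrorgooglecontainers/${img}-amd64:v1.15.1 k8s.gcr.io/${img}:v1.15.1
done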

 

# List the images

docker images

 

Install Kubernetes (K8S)

Prerequisites for enabling IPVS in kube-proxy

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF

#!/bin/bash

modprobe -- ip_vs

modprobe -- ip_vs_rr

modprobe -- ip_vs_wrr

modprobe -- ip_vs_sh

modprobe -- nf_conntrack_ipv4

EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules &&

lsmod | grep -e ip_vs -e nf_conntrack_ipv4
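Once kube-proxy is running in IPVS mode (after the cluster has been initialized below), the IPVS virtual-server table can be inspected with:

ipvsadm -Ln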

 

Install kubeadm (master/worker setup)

cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=0

repo_gpgcheck=0

gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

EOF

 

yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1

 

systemctl enable kubelet.service

 

View nodes (this only returns results after the cluster has been initialized and kubectl configured below)

kubectl get node

 

Initialize the master node

# Dump the default kubeadm configuration into a file

kubeadm config print init-defaults > kubeadm-config.yaml

 

vim kubeadm-config.yaml
# Relevant excerpt of kubeadm-config.yaml after editing:

localAPIEndpoint:
  advertiseAddress: 192.168.70.110   # change to the master node's IP
kubernetesVersion: v1.15.1           # change
networking:
  podSubnet: "10.244.0.0/16"         # add
  serviceSubnet: 10.96.0.0/12
---
# append the following block
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

 

# kubeadm has only supported high availability since v1.13; --experimental-upload-certs is only meaningful in an HA setup, where it lets the other control-plane nodes fetch the certificates automatically

kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
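The tail of kubeadm-init.log also prints the commands for configuring kubectl on the master; they are typically:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config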

 

Join additional master nodes and the worker nodes

# Just run the join command from the installation log; search for the keyword kubeadm join

cat kubeadm-init.log

 

Add worker nodes to the cluster (for nodes added later)

#============ On the master node: Start

 

# Get the token; if it has expired, regenerate one with kubeadm token create

kubeadm token list

 

# Get the CA certificate hash

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'

 

#============ On the master node: End

 

#============ On the worker node: Start

 

kubeadm join <MasterIP>:6443 --token <TOKEN> --discovery-token-ca-cert-hash sha256:<CA cert hash>

 

#============ On the worker node: End

 

# Check on the master node

kubectl get node

 

# On the master, watch the details of the worker nodes' network initialization

kubectl get pod -n kube-system -o wide

kubectl get pod -n kube-system -w

 

# If a node fails to join, run the following on that worker node

kubeadm reset

 

Deploy the network (on the master node)

# If the download fails, copy the kube-flannel.yml source at the end of this page (or search this page for it)

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

 

# Install flannel; run only on the master node

kubectl create -f kube-flannel.yml

 

# Startup may be a bit slow

kubectl get pod -n kube-system

 

# The flannel network interface should now be visible

ifconfig

 

kubectl get node

 

It is recommended to organize the related files into one directory.

mkdir /usr/local/kubernetes

mkdir /usr/local/kubernetes/core

mkdir /usr/local/kubernetes/plugin

mkdir /usr/local/kubernetes/logs

mv kube-flannel.yml /usr/local/kubernetes/plugin/flannel

mv kubeadm-config.yaml kubernetes.conf /usr/local/kubernetes/core

mv kubeadm-init.log /usr/local/kubernetes/logs

mv anaconda-ks.cfg initial-setup-ks.cfg original-ks.cfg /usr/local/kubernetes/core

 

Install Harbor

Prerequisites

Python 2.7 or higher

Docker Engine 1.10 or higher

Docker Compose 1.6.0 or higher

Download Harbor from the official releases page: https://github.com/vmware/harbor/releases

 

Installation

First install Docker following the Docker installation steps above

 

Install docker-compose

curl -L https://github.com/docker/compose/releases/download/1.9.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
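The downloaded binary also needs the execute bit before it can be used; then verify the version:

chmod +x /usr/local/bin/docker-compose
docker-compose --version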

 

Extract the Harbor archive

 

Configure harbor.cfg

hostname: the host name or fully qualified domain name; in this guide levi.harbor.com (accessed as https://levi.harbor.com/)

ui_url_protocol: http by default; either http or https (use https if you configure certificates)

db_password: the root password of the MySQL database used for db_auth (leave unchanged unless this is production or you have a specific need)

max_job_workers (default 3): the maximum number of replication workers in the job service. For each image replication job, a worker synchronizes all tags of a repository to the remote target. Increasing this number allows more concurrent replication jobs, but since each worker consumes a certain amount of network/CPU/IO resources, choose the value carefully based on the host's hardware (leave unchanged unless this is production or you have a specific need)

customize_crt (on or off, default on): when enabled, the prepare script creates a private key and root certificate for generating/verifying the registry's tokens

ssl_cert: path to the SSL certificate; only used when the protocol is set to https

ssl_cert_key: path to the SSL key; only used when the protocol is set to https

secretkey_path: path to the key used to encrypt or decrypt the remote registry password in replication policies
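A minimal sketch of the values changed in harbor.cfg for this guide, assuming the certificate files generated in the next step are placed under /data/cert:

hostname = levi.harbor.com
ui_url_protocol = https
ssl_cert = /data/cert/server.crt
ssl_cert_key = /data/cert/server.key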

 

Generate certificates

mkdir /data/cert

 

cd /data/cert

 

openssl genrsa -des3 -out server.key 2048

 

openssl req -new -key server.key -out server.csr

 

cp server.key server.key.org

 

openssl rsa -in server.key.org -out server.key

 

openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt

 

chmod -R 777 /data/cert

 

On the other Kubernetes machines (and on Windows clients), add the hosts entry and the Docker insecure-registry configuration

# Configure hosts

echo "192.168.70.113 levi.harbor.com" >> /etc/hosts

 

# Add the Docker insecure-registry configuration (merge into /etc/docker/daemon.json)

{

"insecure-registries": ["https://levi.harbor.com/"]

}
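A sketch of the merged /etc/docker/daemon.json on the Linux nodes, assuming the earlier cgroup/log/mirror settings are kept; restart Docker afterwards:

cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"registry-mirrors": ["<Aliyun registry mirror URL>"],
"insecure-registries": ["https://levi.harbor.com/"]
}
EOF

systemctl daemon-reload && systemctl restart docker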

 

Run the installation script

# In the harbor directory

./install.sh

 

Open https://levi.harbor.com/ in a browser

 

Default credentials: admin / Harbor12345

 

Test Harbor

# Log in and enter the password

docker login levi.harbor.com

docker pull nginx:1.9.1

docker tag nginx:1.9.1 levi.harbor.com/library/nginx:1.9.1

docker push levi.harbor.com/library/nginx:1.9.1

 

Test Harbor together with Kubernetes

kubectl run nginx-deployment --image=levi.harbor.com/library/nginx:1.9.1  --port=80 --replicas=1

 

kubectl get deployment

 

kubectl get rs

 

kubectl get pod

 

# The detailed view shows which worker node the pod is running on

kubectl get pod -o wide

 

# Access the pod via the IP found with the previous command

curl 10.244.1.2

 

# Check on the worker node

docker ps -a | grep nginx

 

# Delete the pod; a pod will still exist, just with a different name, because replicas was set to 1 at startup and the Deployment starts a new pod to keep the replica count at 1.

kubectl get pod

kubectl delete pod <podName>

 

# Scale up the replicas

kubectl scale --replicas=3 deployment/nginx-deployment

 

kubectl get pod

 

kubectl get deployment

 

# Expose the replicas behind a single access address

kubectl expose deployment nginx-deployment --port=8080 --target-port=80   # --port: the Service port; --target-port: the port inside the container

 

# View the Service created in front of the 3 pods

kubectl get svc

 

# Access it using the ClusterIP from the previous command

curl 10.109.159.229:8080

 

# External access

# Note that the Service type is currently ClusterIP

kubectl get svc

 

# Make it externally accessible by changing the type from ClusterIP to NodePort

kubectl edit svc nginx-deployment
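A non-interactive alternative sketch to kubectl edit, patching the Service type directly:

kubectl patch svc nginx-deployment -p '{"spec":{"type":"NodePort"}}'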

 

# Check the externally mapped port

kubectl get svc

NAME        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)          AGE

nginx-deployment   NodePort  10.109.159.229  <none>  8080:32506/TCP   101s

 

# Check the port

netstat -anpt | grep :32506

 

# Access in a browser

http://<server IP>:32506/

 

kube-flannel.yml source

---

apiVersion: policy/v1beta1

kind: PodSecurityPolicy

metadata:

  name: psp.flannel.unprivileged

  annotations:

    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default

    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default

    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default

    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default

spec:

  privileged: false

  volumes:

    - configMap

    - secret

    - emptyDir

    - hostPath

  allowedHostPaths:

    - pathPrefix: "/etc/cni/net.d"

    - pathPrefix: "/etc/kube-flannel"

    - pathPrefix: "/run/flannel"

  readOnlyRootFilesystem: false

  # Users and groups

  runAsUser:

    rule: RunAsAny

  supplementalGroups:

    rule: RunAsAny

  fsGroup:

    rule: RunAsAny

  # Privilege Escalation

  allowPrivilegeEscalation: false

  defaultAllowPrivilegeEscalation: false

  # Capabilities

  allowedCapabilities: ['NET_ADMIN']

  defaultAddCapabilities: []

  requiredDropCapabilities: []

  # Host namespaces

  hostPID: false

  hostIPC: false

  hostNetwork: true

  hostPorts:

  - min: 0

    max: 65535

  # SELinux

  seLinux:

    # SELinux is unused in CaaSP

    rule: 'RunAsAny'

---

kind: ClusterRole

apiVersion: rbac.authorization.k8s.io/v1beta1

metadata:

  name: flannel

rules:

  - apiGroups: ['extensions']

    resources: ['podsecuritypolicies']

    verbs: ['use']

    resourceNames: ['psp.flannel.unprivileged']

  - apiGroups:

      - ""

    resources:

      - pods

    verbs:

      - get

  - apiGroups:

      - ""

    resources:

      - nodes

    verbs:

      - list

      - watch

  - apiGroups:

      - ""

    resources:

      - nodes/status

    verbs:

      - patch

---

kind: ClusterRoleBinding

apiVersion: rbac.authorization.k8s.io/v1beta1

metadata:

  name: flannel

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: flannel

subjects:

- kind: ServiceAccount

  name: flannel

  namespace: kube-system

---

apiVersion: v1

kind: ServiceAccount

metadata:

  name: flannel

  namespace: kube-system

---

kind: ConfigMap

apiVersion: v1

metadata:

  name: kube-flannel-cfg

  namespace: kube-system

  labels:

    tier: node

    app: flannel

data:

  cni-conf.json: |

    {

      "name": "cbr0",

      "cniVersion": "0.3.1",

      "plugins": [

        {

          "type": "flannel",

          "delegate": {

            "hairpinMode": true,

            "isDefaultGateway": true

          }

        },

        {

          "type": "portmap",

          "capabilities": {

            "portMappings": true

          }

        }

      ]

    }

  net-conf.json: |

    {

      "Network": "10.244.0.0/16",

      "Backend": {

        "Type": "vxlan"

      }

    }

---

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: kube-flannel-ds-amd64

  namespace: kube-system

  labels:

    tier: node

    app: flannel

spec:

  selector:

    matchLabels:

      app: flannel

  template:

    metadata:

      labels:

        tier: node

        app: flannel

    spec:

      affinity:

        nodeAffinity:

          requiredDuringSchedulingIgnoredDuringExecution:

            nodeSelectorTerms:

              - matchExpressions:

                  - key: beta.kubernetes.io/os

                    operator: In

                    values:

                      - linux

                  - key: beta.kubernetes.io/arch

                    operator: In

                    values:

                      - amd64

      hostNetwork: true

      tolerations:

      - operator: Exists

        effect: NoSchedule

      serviceAccountName: flannel

      initContainers:

      - name: install-cni

        image: quay.io/coreos/flannel:v0.11.0-amd64

        command:

        - cp

        args:

        - -f

        - /etc/kube-flannel/cni-conf.json

        - /etc/cni/net.d/10-flannel.conflist

        volumeMounts:

        - name: cni

          mountPath: /etc/cni/net.d

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      containers:

      - name: kube-flannel

        image: quay.io/coreos/flannel:v0.11.0-amd64

        command:

        - /opt/bin/flanneld

        args:

        - --ip-masq

        - --kube-subnet-mgr

        resources:

          requests:

            cpu: "100m"

            memory: "50Mi"

          limits:

            cpu: "100m"

            memory: "50Mi"

        securityContext:

          privileged: false

          capabilities:

            add: ["NET_ADMIN"]

        env:

        - name: POD_NAME

          valueFrom:

            fieldRef:

              fieldPath: metadata.name

        - name: POD_NAMESPACE

          valueFrom:

            fieldRef:

              fieldPath: metadata.namespace

        volumeMounts:

        - name: run

          mountPath: /run/flannel

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      volumes:

        - name: run

          hostPath:

            path: /run/flannel

        - name: cni

          hostPath:

            path: /etc/cni/net.d

        - name: flannel-cfg

          configMap:

            name: kube-flannel-cfg

---

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: kube-flannel-ds-arm64

  namespace: kube-system

  labels:

    tier: node

    app: flannel

spec:

  selector:

    matchLabels:

      app: flannel

  template:

    metadata:

      labels:

        tier: node

        app: flannel

    spec:

      affinity:

        nodeAffinity:

          requiredDuringSchedulingIgnoredDuringExecution:

            nodeSelectorTerms:

              - matchExpressions:

                  - key: beta.kubernetes.io/os

                    operator: In

                    values:

                      - linux

                  - key: beta.kubernetes.io/arch

                    operator: In

                    values:

                      - arm64

      hostNetwork: true

      tolerations:

      - operator: Exists

        effect: NoSchedule

      serviceAccountName: flannel

      initContainers:

      - name: install-cni

        image: quay.io/coreos/flannel:v0.11.0-arm64

        command:

        - cp

        args:

        - -f

        - /etc/kube-flannel/cni-conf.json

        - /etc/cni/net.d/10-flannel.conflist

        volumeMounts:

        - name: cni

          mountPath: /etc/cni/net.d

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      containers:

      - name: kube-flannel

        image: quay.io/coreos/flannel:v0.11.0-arm64

        command:

        - /opt/bin/flanneld

        args:

        - --ip-masq

        - --kube-subnet-mgr

        resources:

          requests:

            cpu: "100m"

            memory: "50Mi"

          limits:

            cpu: "100m"

            memory: "50Mi"

        securityContext:

          privileged: false

          capabilities:

             add: ["NET_ADMIN"]

        env:

        - name: POD_NAME

          valueFrom:

            fieldRef:

              fieldPath: metadata.name

        - name: POD_NAMESPACE

          valueFrom:

            fieldRef:

              fieldPath: metadata.namespace

        volumeMounts:

        - name: run

          mountPath: /run/flannel

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      volumes:

        - name: run

          hostPath:

            path: /run/flannel

        - name: cni

          hostPath:

            path: /etc/cni/net.d

        - name: flannel-cfg

          configMap:

            name: kube-flannel-cfg

---

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: kube-flannel-ds-arm

  namespace: kube-system

  labels:

    tier: node

    app: flannel

spec:

  selector:

    matchLabels:

      app: flannel

  template:

    metadata:

      labels:

        tier: node

        app: flannel

    spec:

      affinity:

        nodeAffinity:

          requiredDuringSchedulingIgnoredDuringExecution:

            nodeSelectorTerms:

              - matchExpressions:

                  - key: beta.kubernetes.io/os

                    operator: In

                    values:

                      - linux

                  - key: beta.kubernetes.io/arch

                    operator: In

                    values:

                      - arm

      hostNetwork: true

      tolerations:

      - operator: Exists

        effect: NoSchedule

      serviceAccountName: flannel

      initContainers:

      - name: install-cni

        image: quay.io/coreos/flannel:v0.11.0-arm

        command:

        - cp

        args:

        - -f

        - /etc/kube-flannel/cni-conf.json

        - /etc/cni/net.d/10-flannel.conflist

        volumeMounts:

        - name: cni

          mountPath: /etc/cni/net.d

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      containers:

      - name: kube-flannel

        image: quay.io/coreos/flannel:v0.11.0-arm

        command:

        - /opt/bin/flanneld

        args:

        - --ip-masq

        - --kube-subnet-mgr

        resources:

          requests:

            cpu: "100m"

            memory: "50Mi"

          limits:

            cpu: "100m"

            memory: "50Mi"

        securityContext:

          privileged: false

          capabilities:

             add: ["NET_ADMIN"]

        env:

        - name: POD_NAME

          valueFrom:

            fieldRef:

              fieldPath: metadata.name

        - name: POD_NAMESPACE

          valueFrom:

            fieldRef:

              fieldPath: metadata.namespace

        volumeMounts:

        - name: run

          mountPath: /run/flannel

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      volumes:

        - name: run

          hostPath:

            path: /run/flannel

        - name: cni

          hostPath:

            path: /etc/cni/net.d

        - name: flannel-cfg

          configMap:

            name: kube-flannel-cfg

---

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: kube-flannel-ds-ppc64le

  namespace: kube-system

  labels:

    tier: node

    app: flannel

spec:

  selector:

    matchLabels:

      app: flannel

  template:

    metadata:

      labels:

        tier: node

        app: flannel

    spec:

      affinity:

        nodeAffinity:

          requiredDuringSchedulingIgnoredDuringExecution:

            nodeSelectorTerms:

              - matchExpressions:

                  - key: beta.kubernetes.io/os

                    operator: In

                    values:

                      - linux

                  - key: beta.kubernetes.io/arch

                    operator: In

                    values:

                      - ppc64le

      hostNetwork: true

      tolerations:

      - operator: Exists

        effect: NoSchedule

      serviceAccountName: flannel

      initContainers:

      - name: install-cni

        image: quay.io/coreos/flannel:v0.11.0-ppc64le

        command:

        - cp

        args:

        - -f

        - /etc/kube-flannel/cni-conf.json

        - /etc/cni/net.d/10-flannel.conflist

        volumeMounts:

        - name: cni

          mountPath: /etc/cni/net.d

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      containers:

      - name: kube-flannel

        image: quay.io/coreos/flannel:v0.11.0-ppc64le

        command:

        - /opt/bin/flanneld

        args:

        - --ip-masq

        - --kube-subnet-mgr

        resources:

          requests:

            cpu: "100m"

            memory: "50Mi"

          limits:

            cpu: "100m"

            memory: "50Mi"

        securityContext:

          privileged: false

          capabilities:

             add: ["NET_ADMIN"]

        env:

        - name: POD_NAME

          valueFrom:

            fieldRef:

              fieldPath: metadata.name

        - name: POD_NAMESPACE

          valueFrom:

            fieldRef:

              fieldPath: metadata.namespace

        volumeMounts:

        - name: run

          mountPath: /run/flannel

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      volumes:

        - name: run

          hostPath:

            path: /run/flannel

        - name: cni

          hostPath:

            path: /etc/cni/net.d

        - name: flannel-cfg

          configMap:

            name: kube-flannel-cfg

---

apiVersion: apps/v1

kind: DaemonSet

metadata:

  name: kube-flannel-ds-s390x

  namespace: kube-system

  labels:

    tier: node

    app: flannel

spec:

  selector:

    matchLabels:

      app: flannel

  template:

    metadata:

      labels:

        tier: node

        app: flannel

    spec:

      affinity:

        nodeAffinity:

          requiredDuringSchedulingIgnoredDuringExecution:

            nodeSelectorTerms:

              - matchExpressions:

                  - key: beta.kubernetes.io/os

                    operator: In

                    values:

                      - linux

                  - key: beta.kubernetes.io/arch

                    operator: In

                    values:

                      - s390x

      hostNetwork: true

      tolerations:

      - operator: Exists

        effect: NoSchedule

      serviceAccountName: flannel

      initContainers:

      - name: install-cni

        image: quay.io/coreos/flannel:v0.11.0-s390x

        command:

        - cp

        args:

        - -f

        - /etc/kube-flannel/cni-conf.json

        - /etc/cni/net.d/10-flannel.conflist

        volumeMounts:

        - name: cni

          mountPath: /etc/cni/net.d

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      containers:

      - name: kube-flannel

        image: quay.io/coreos/flannel:v0.11.0-s390x

        command:

        - /opt/bin/flanneld

        args:

        - --ip-masq

        - --kube-subnet-mgr

        resources:

          requests:

            cpu: "100m"

            memory: "50Mi"

          limits:

            cpu: "100m"

            memory: "50Mi"

        securityContext:

          privileged: false

          capabilities:

             add: ["NET_ADMIN"]

        env:

        - name: POD_NAME

          valueFrom:

            fieldRef:

              fieldPath: metadata.name

        - name: POD_NAMESPACE

          valueFrom:

            fieldRef:

              fieldPath: metadata.namespace

        volumeMounts:

        - name: run

          mountPath: /run/flannel

        - name: flannel-cfg

          mountPath: /etc/kube-flannel/

      volumes:

        - name: run

          hostPath:

            path: /run/flannel

        - name: cni

          hostPath:

            path: /etc/cni/net.d

        - name: flannel-cfg

          configMap:

            name: kube-flannel-cfg

 

 

 

 

 

 

 

 
