Setting up a Kubernetes (k8s) environment
master: 192.168.73.152
node1: 192.168.73.153
node2: 192.168.73.155
1. Disable firewalld and SELinux (all hosts)
vi /etc/selinux/config
systemctl stop firewalld
systemctl disable firewalld
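Editing /etc/selinux/config (set SELINUX=disabled) only takes effect after a reboot. To switch enforcement off immediately as well, the same step can be scripted; a minimal sketch:
setenforce 0                                                          # permissive for the current boot
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config  # persistent across reboots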
2. Configure name resolution in /etc/hosts (all hosts)
vim /etc/hosts
192.168.73.152 kubernetes
192.168.73.153 kubernetes-node1
192.168.73.155 kubernetes-node2
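A quick way to confirm that each name resolves to the intended address (run on any host):
getent hosts kubernetes kubernetes-node1 kubernetes-node2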
3. Add a kernel-parameter file /etc/sysctl.d/k8s.conf (all hosts)
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness = 0
4. Load the bridge module and apply the settings (all hosts)
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf
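Note that modprobe does not survive a reboot. To load br_netfilter automatically at boot and to verify the parameters took effect, something like the following works (a sketch using the standard systemd modules-load mechanism):
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward   # both should print 1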
5. Install Docker (all hosts)
yum install -y yum-utils device-mapper-persistent-data lvm2
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker.repo
List the available versions:
yum list docker-ce.x86_64 --showduplicates |sort -r
Install the specified Docker version (all hosts):
yum makecache fast
yum install docker-ce-18.06.1.ce-3.el7 -y
Check the Docker version:
docker -v
6. Create the Kubernetes yum repository file /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0
enabled=1
Install (all hosts):
yum makecache fast
yum install -y kubelet kubeadm kubectl
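Without a version pin, yum installs the latest packages in the repo, which may be newer than the v1.13.3 images used below. To reproduce this walkthrough exactly, the versions can be pinned (a sketch; the name-version syntax assumes the Aliyun mirror's package layout):
yum install -y kubelet-1.13.3 kubeadm-1.13.3 kubectl-1.13.3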
7. Disable swap (all hosts)
swapoff -a
sysctl -p /etc/sysctl.d/k8s.conf
Comment out the swap entry in /etc/fstab:
mount -a
echo "KUBELET_EXTRA_ARGS=--fail-swap-on=false" > /etc/sysconfig/kubelet
8. Initialize the cluster with kubeadm init; kubeadm version shows the Kubernetes version (on the master)
systemctl restart docker
systemctl enable kubelet.service docker
kubeadm version
kubeadm init --kubernetes-version=v1.13.3 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
Configure a registry mirror to speed up image pulls (all hosts):
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://45203393.m.daocloud.io
systemctl daemon-reload
systemctl restart docker
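The set_mirror.sh script configures a registry mirror in Docker's daemon settings; on a systemd host the result is roughly this /etc/docker/daemon.json (the URL is the example mirror passed to the script above):
cat /etc/docker/daemon.json
{"registry-mirrors": ["http://45203393.m.daocloud.io"]}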
Pull the component images, matching the versions reported by kubeadm init (all hosts):
vim k8s.sh
docker pull mirrorgooglecontainers/kube-apiserver:v1.13.3
docker pull mirrorgooglecontainers/kube-controller-manager:v1.13.3
docker pull mirrorgooglecontainers/kube-scheduler:v1.13.3
docker pull mirrorgooglecontainers/kube-proxy:v1.13.3
docker pull mirrorgooglecontainers/pause:3.1
docker pull mirrorgooglecontainers/etcd:3.2.24
docker pull coredns/coredns:1.2.6
docker tag mirrorgooglecontainers/kube-proxy:v1.13.3 k8s.gcr.io/kube-proxy:v1.13.3
docker tag mirrorgooglecontainers/kube-scheduler:v1.13.3 k8s.gcr.io/kube-scheduler:v1.13.3
docker tag mirrorgooglecontainers/kube-apiserver:v1.13.3 k8s.gcr.io/kube-apiserver:v1.13.3
docker tag mirrorgooglecontainers/kube-controller-manager:v1.13.3 k8s.gcr.io/kube-controller-manager:v1.13.3
docker tag mirrorgooglecontainers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
docker tag coredns/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6
docker tag mirrorgooglecontainers/pause:3.1 k8s.gcr.io/pause:3.1
docker rmi mirrorgooglecontainers/kube-apiserver:v1.13.3
docker rmi mirrorgooglecontainers/kube-controller-manager:v1.13.3
docker rmi mirrorgooglecontainers/kube-scheduler:v1.13.3
docker rmi mirrorgooglecontainers/kube-proxy:v1.13.3
docker rmi mirrorgooglecontainers/pause:3.1
docker rmi mirrorgooglecontainers/etcd:3.2.24
docker rmi coredns/coredns:1.2.6
(Cleanup of old -amd64 tags; listed for reference only, do not run)
docker rmi k8s.gcr.io/kube-proxy-amd64:v1.13.3
docker rmi k8s.gcr.io/kube-scheduler-amd64:v1.13.3
docker rmi k8s.gcr.io/kube-apiserver-amd64:v1.13.3
docker rmi k8s.gcr.io/kube-controller-manager-amd64:v1.13.3
docker rmi k8s.gcr.io/etcd-amd64:3.2.24
docker rmi k8s.gcr.io/pause:3.1
docker rmi k8s.gcr.io/coredns:1.2.6
chmod 755 k8s.sh
./k8s.sh
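Once the script finishes, confirm that all seven retagged images are present before re-running the init (a quick check):
docker images | grep k8s.gcr.io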
9. Re-run the initialization; save the kubeadm join command printed on the last line (on the master)
kubeadm init --kubernetes-version=v1.13.3 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
kubeadm join 192.168.73.152:6443 --token l9jvft.bspa25s561skhcuq --discovery-token-ca-cert-hash sha256:b0a0ff52a16e55488876e2144801e4acc80bcdc948f3aab09924e9c18f6c8360
Initialize kubectl (on the master):
# mkdir ~/.kube
# cp /etc/kubernetes/admin.conf ~/.kube/
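Note: kubectl looks for $HOME/.kube/config by default, so copying admin.conf without renaming it leaves kubectl pointing at localhost:8080, which is exactly the error shown in the test below (worked around there by exporting KUBECONFIG). The standard steps, as printed by kubeadm init itself, are:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config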
Test (on the master):
[root@kubernetes ~]# kubectl get cs
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@kubernetes ~]# sudo cp /etc/kubernetes/admin.conf $HOME/
[root@kubernetes ~]# sudo chown $(id -u):$(id -g) $HOME/admin.conf
[root@kubernetes ~]# export KUBECONFIG=$HOME/admin.conf
[root@kubernetes ~]#
[root@kubernetes ~]#
[root@kubernetes ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-0 Healthy {"health": "true"}
[root@kubernetes ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubernetes NotReady master 141m v1.13.3
[root@kubernetes ~]# kubectl create -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.extensions/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.extensions/kube-flannel-ds-amd64 created
daemonset.extensions/kube-flannel-ds-arm64 created
daemonset.extensions/kube-flannel-ds-arm created
daemonset.extensions/kube-flannel-ds-ppc64le created
daemonset.extensions/kube-flannel-ds-s390x created
[root@kubernetes ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-controller-manager v1.13.3 0482f6400933 3 weeks ago 146MB
k8s.gcr.io/kube-proxy v1.13.3 98db19758ad4 3 weeks ago 80.3MB
k8s.gcr.io/kube-apiserver v1.13.3 fe242e556a99 3 weeks ago 181MB
k8s.gcr.io/kube-scheduler v1.13.3 3a6f709e97a0 3 weeks ago 79.6MB
quay.io/coreos/flannel v0.11.0-amd64 ff281650a721 4 weeks ago 52.6MB
k8s.gcr.io/coredns 1.2.6 f59dcacceff4 3 months ago 40MB
k8s.gcr.io/etcd 3.2.24 3cab8e1b9802 5 months ago 220MB
k8s.gcr.io/pause 3.1 da86e6ba6ca1 14 months ago 742kB
[root@kubernetes ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubernetes Ready master 156m v1.13.3
[root@kubernetes ~]# kubectl get pods
No resources found.
[root@kubernetes ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-86c58d9df4-6fhf4 1/1 Running 0 157m
coredns-86c58d9df4-jvtt2 1/1 Running 0 157m
etcd-kubernetes 1/1 Running 1 156m
kube-apiserver-kubernetes 1/1 Running 1 156m
kube-controller-manager-kubernetes 1/1 Running 1 156m
kube-flannel-ds-amd64-wq669 1/1 Running 0 3m31s
kube-proxy-ncwfk 1/1 Running 1 157m
kube-scheduler-kubernetes
List the namespaces (on the master):
[root@kubernetes ~]# kubectl get ns
NAME STATUS AGE
default Active 158m
kube-public Active 158m
kube-system Active 158m
Copy the Docker unit file and the kubelet sysconfig to the nodes (on the master):
[root@kubernetes ~]# scp /usr/lib/systemd/system/docker.service 192.168.73.101:/usr/lib/systemd/system/docker.service
root@192.168.73.101's password:
docker.service 100% 1140 1.2MB/s 00:00
[root@kubernetes ~]# scp /usr/lib/systemd/system/docker.service 192.168.73.102:/usr/lib/systemd/system/docker.service
root@192.168.73.102's password:
docker.service 100% 1140 1.6MB/s 00:00
[root@kubernetes ~]# scp /etc/sysconfig/kubelet 192.168.73.101:/etc/sysconfig/kubelet
root@192.168.73.101's password:
kubelet 100% 40 69.2KB/s 00:00
[root@kubernetes ~]# scp /etc/sysconfig/kubelet 192.168.73.102:/etc/sysconfig/kubelet
root@192.168.73.102's password:
kubelet 100% 40 28.6KB/s 00:00
Join the nodes to the cluster (on each node):
[root@kubernetes-node1 ~]# kubeadm join 192.168.73.152:6443 --token l9jvft.bspa25s561skhcuq --discovery-token-ca-cert-hash sha256:b0a0ff52a16e55488876e2144801e4acc80bcdc948f3aab09924e9c18f6c8360 --ignore-preflight-errors=Swap
[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "192.168.73.152:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.73.152:6443"
[discovery] Requesting info from "https://192.168.73.152:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.73.152:6443"
[discovery] Successfully established connection with API Server "192.168.73.152:6443"
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubernetes-node1" as an annotation
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
[root@kubernetes-node2 ~]# kubeadm join 192.168.73.152:6443 --token l9jvft.bspa25s561skhcuq --discovery-token-ca-cert-hash sha256:b0a0ff52a16e55488876e2144801e4acc80bcdc948f3aab09924e9c18f6c8360 --ignore-preflight-errors=Swap
[preflight] Running pre-flight checks
[discovery] Trying to connect to API Server "192.168.73.152:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.73.152:6443"
[discovery] Requesting info from "https://192.168.73.152:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.73.152:6443"
[discovery] Successfully established connection with API Server "192.168.73.152:6443"
[join] Reading configuration from the cluster...
[join] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.13" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubernetes-node2" as an annotation
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
Check (on the master):
[root@kubernetes ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubernetes Ready master 167m v1.13.3
kubernetes-node1 NotReady <none> 108s v1.13.3
kubernetes-node2 NotReady <none> 32s v1.13.3
Watch the flannel pods come up on the nodes (on the master):
[root@kubernetes ~]# kubectl get pods -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-86c58d9df4-6fhf4 1/1 Running 0 170m 10.244.0.2 kubernetes <none> <none>
coredns-86c58d9df4-jvtt2 1/1 Running 0 170m 10.244.0.3 kubernetes <none> <none>
etcd-kubernetes 1/1 Running 1 169m 192.168.73.100 kubernetes <none> <none>
kube-apiserver-kubernetes 1/1 Running 1 169m 192.168.73.100 kubernetes <none> <none>
kube-controller-manager-kubernetes 1/1 Running 1 169m 192.168.73.100 kubernetes <none> <none>
kube-flannel-ds-amd64-5t29c 1/1 Running 0 4m7s 192.168.73.102 kubernetes-node2 <none> <none>
kube-flannel-ds-amd64-lg6qg 1/1 Running 0 5m23s 192.168.73.101 kubernetes-node1 <none> <none>
kube-flannel-ds-amd64-wq669 1/1 Running 0 16m 192.168.73.100 kubernetes <none> <none>
kube-proxy-9wh5t 1/1 Running 0 5m23s 192.168.73.101 kubernetes-node1 <none> <none>
kube-proxy-ncwfk 1/1 Running 1 170m 192.168.73.100 kubernetes <none> <none>
kube-proxy-pb6gv 1/1 Running 0 4m7s 192.168.73.102 kubernetes-node2 <none> <none>
kube-scheduler-kubernetes 1/1 Running 1 169m 192.168.73.100 kubernetes <none> <none>
[root@kubernetes ~]#
Check the node status:
[root@kubernetes ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubernetes Ready master 172m v1.13.3
kubernetes-node1 Ready <none> 7m8s v1.13.3
kubernetes-node2 Ready <none> 5m52s v1.13.3
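With every node Ready, an optional smoke test confirms that scheduling and the pod network work end to end (a hypothetical example; any small image will do):
kubectl run nginx-test --image=nginx --replicas=2
kubectl get pods -o wide   # pods should land on the worker nodes with 10.244.x.x addresses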
Sample kubeadm init output
[root@kubernetes ~]# kubeadm init --kubernetes-version=v1.13.3 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 --ignore-preflight-errors=Swap
[init] Using Kubernetes version: v1.13.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [kubernetes localhost] and IPs [192.168.73.152 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [kubernetes localhost] and IPs [192.168.73.152 127.0.0.1 ::1]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.73.152]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 18.506297 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.13" in namespace kube-system with the configuration for the kubelets in the cluster
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "kubernetes" as an annotation
[mark-control-plane] Marking the node kubernetes as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node kubernetes as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: l9jvft.bspa25s561skhcuq
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join 192.168.73.152:6443 --token l9jvft.bspa25s561skhcuq --discovery-token-ca-cert-hash sha256:b0a0ff52a16e55488876e2144801e4acc80bcdc948f3aab09924e9c18f6c8360
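Keep this join line somewhere safe. Bootstrap tokens expire after 24 hours by default; if a node joins later than that, generate a fresh join command on the master:
kubeadm token create --print-join-command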
After rebooting the machine, kubectl reports an error; exporting the KUBECONFIG variable resolves it:
[root@kubernetes ~]# kubectl get cs
The connection to the server localhost:8080 was refused - did you specify the right host or port?
[root@kubernetes ~]# sudo cp /etc/kubernetes/admin.conf $HOME/
[root@kubernetes ~]# sudo chown $(id -u):$(id -g) $HOME/admin.conf
[root@kubernetes ~]# export KUBECONFIG=$HOME/admin.conf
[root@kubernetes ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health": "true"}
[root@kubernetes ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kubernetes Ready master 29d v1.13.3
kubernetes-node1 Ready <none> 29d v1.13.3
kubernetes-node2 Ready <none> 29d v1.13.3
[root@kubernetes ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.3", GitCommit:"721bfa751924da8d1680787490c54b9179b1fed0", GitTreeState:"clean", BuildDate:"2019-02-01T20:08:12Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.3", GitCommit:"721bfa751924da8d1680787490c54b9179b1fed0", GitTreeState:"clean", BuildDate:"2019-02-01T20:00:57Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
[root@kubernetes ~]#
[root@kubernetes ~]#
[root@kubernetes ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.73.152:6443
KubeDNS is running at https://192.168.73.152:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
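To avoid re-exporting KUBECONFIG after every login, the variable can be made permanent (a sketch; adjust the shell init file to taste):
echo 'export KUBECONFIG=$HOME/admin.conf' >> ~/.bash_profile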
The kube-flannel.yml file. Two things to watch: the Network value in net-conf.json must match the --pod-network-cidr passed to kubeadm init (10.244.0.0/16 here), and this copy of the manifest references flannel v0.12.0 images, whereas the transcript above shows v0.11.0-amd64 pulled; use whichever version matches the manifest you actually applied.
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-amd64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - amd64
      hostNetwork: true
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: quay.io/coreos/flannel:v0.12.0-amd64
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.12.0-amd64
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm64
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm64
      hostNetwork: true
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: quay.io/coreos/flannel:v0.12.0-arm64
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.12.0-arm64
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-arm
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - arm
      hostNetwork: true
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: quay.io/coreos/flannel:v0.12.0-arm
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.12.0-arm
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-ppc64le
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - ppc64le
      hostNetwork: true
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: quay.io/coreos/flannel:v0.12.0-ppc64le
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.12.0-ppc64le
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds-s390x
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
                  - key: kubernetes.io/arch
                    operator: In
                    values:
                      - s390x
      hostNetwork: true
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        - name: install-cni
          image: quay.io/coreos/flannel:v0.12.0-s390x
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: quay.io/coreos/flannel:v0.12.0-s390x
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg