6、docker+k8s+kubesphere:master安装网络calico

wget https://docs.projectcalico.org/v3.8/manifests/calico.yaml
# 注：上述链接已停止维护，若失效可改用归档地址：
# wget https://docs.tigera.io/archive/v3.8/manifests/calico.yaml
vim calico.yaml
            - name: FELIX_IPINIPMTU
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: veth_mtu
            # The default IPv4 pool to create on startup if none exists. Pod IPs will be
            # chosen from this range. Changing this value after installation will have
            # no effect. This should fall within `--cluster-cidr`.
            - name: CALICO_IPV4POOL_CIDR
              value: "10.20.0.0/16"   # 原值为 "192.168.0.0/16"，必须改成 kubeadm init 时 --pod-network-cidr 指定的网段（网络地址，末位为 0，不是 10.20.0.1），参见第4节：docker+k8s+kubesphere:master安装配置
            # Disable file logging so `kubectl logs` works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"

安装calico.yaml

[root@node151 ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created

查看服务的状态

watch kubectl get pod -n kube-system -o wide
kubectl get node

结果如下

[root@node151 ~]# watch kubectl get pod -n kube-system -o wide 

Every 2.0s: kubectl get pod -n kube-system -o wide                                                                                                                                       Fri Jul 24 16:34:35 2020

NAME                                       READY   STATUS    RESTARTS   AGE     IP              NODE      NOMINATED NODE   READINESS GATES
calico-kube-controllers-589b5f594b-ckfgz   1/1     Running   0          2m11s   10.20.235.1     node153   <none>           <none>
calico-node-msd6f                          1/1     Running   0          2m11s   192.168.5.152   node152   <none>           <none>
calico-node-s9xf6                          1/1     Running   0          2m11s   192.168.5.151   node151   <none>           <none>
calico-node-wcztl                          1/1     Running   0          2m11s   192.168.5.153   node153   <none>           <none>
coredns-7f9c544f75-gmclr                   1/1     Running   0          117m    10.20.223.65    node151   <none>           <none>
coredns-7f9c544f75-t7jh6                   1/1     Running   0          117m    10.20.235.2     node153   <none>           <none>
etcd-node151                               1/1     Running   1          117m    192.168.5.151   node151   <none>           <none>
kube-apiserver-node151                     1/1     Running   1          117m    192.168.5.151   node151   <none>           <none>
kube-controller-manager-node151            1/1     Running   1          117m    192.168.5.151   node151   <none>           <none>
kube-proxy-5t7jg                           1/1     Running   1          117m    192.168.5.151   node151   <none>           <none>
kube-proxy-fqjh2                           1/1     Running   1          91m     192.168.5.152   node152   <none>           <none>
kube-proxy-mbxtx                           1/1     Running   1          90m     192.168.5.153   node153   <none>           <none>
kube-scheduler-node151                     1/1     Running   1          117m    192.168.5.151   node151   <none>           <none>


[root@node151 ~]# kubectl get node
NAME      STATUS   ROLES    AGE    VERSION
node151   Ready    master   117m   v1.17.5
node152   Ready    <none>   90m    v1.17.5
node153   Ready    <none>   89m    v1.17.5

至此，所有节点均处于 Ready 状态，整个 k8s 集群搭建完成。

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐