Linux CentOS 7.6 server environment:

The three servers' IP addresses and hostnames are
192.168.48.130 k8s-master
192.168.48.131 k8s-node1
192.168.48.132 k8s-node2

I. Initialization (run on the master and on every node)

1. Disable SELinux, iptables, firewalld, and NetworkManager

systemctl stop NetworkManager
systemctl disable NetworkManager

systemctl stop firewalld
systemctl disable firewalld

sed -i '/^SELINUX/s/enforcing/disabled/' /etc/selinux/config
setenforce 0

iptables -F
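
A quick optional check that the settings above actually took effect (standard CentOS 7 commands):

getenforce                          # should print Permissive or Disabled
systemctl is-active firewalld       # should print inactive
systemctl is-active NetworkManager  # should print inactive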

2. Configure time synchronization

yum -y install chrony     # the package is chrony; the service is chronyd
systemctl start chronyd
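
It is also worth enabling chronyd at boot and confirming that time is actually being synchronized, for example:

systemctl enable chronyd
chronyc sources      # lists the NTP sources in use
timedatectl          # "NTP synchronized: yes" indicates success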

3. Disable swap

swapoff -a                                        # turn swap off immediately
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab     # comment out the swap entry so it stays off after a reboot
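
To confirm swap is fully off, check the memory summary (the Swap row should be all zeros):

free -m
swapon -s            # prints nothing when no swap is active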

4. Configure hosts resolution

cat >> /etc/hosts << END
192.168.48.130 k8s-master
192.168.48.131 k8s-node1
192.168.48.132 k8s-node2
END

5. Enable kernel forwarding for bridged traffic

# Load the br_netfilter kernel module (passes bridged IPv4 traffic to the iptables chains), then adjust kernel parameters
modprobe br_netfilter
lsmod | grep br_netfilter   # verify the module is loaded

# Adjust kernel parameters
cat >> /etc/sysctl.conf << END
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness = 0
net.ipv4.ip_forward = 1
net.ipv4.conf.default.rp_filter=1
net.ipv4.conf.all.rp_filter=1
END

# Reload the kernel parameters
sysctl -p
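
Note that modprobe does not persist across a reboot. On CentOS 7 you can optionally have systemd reload br_netfilter at boot via modules-load.d (the file name below is arbitrary), and then verify the parameters:

# Load br_netfilter automatically on boot
cat > /etc/modules-load.d/br_netfilter.conf << END
br_netfilter
END

# Verify the parameters took effect
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward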

6. Install Docker

# Download the Aliyun (domestic mirror) repo file
cd /etc/yum.repos.d/
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast

# Install and start Docker
yum list docker-ce --showduplicates | sort -r  # list all available Docker versions
yum -y install docker-ce-18.06.1.ce-3.el7   # install an older Docker release; newer Docker versions have compatibility problems with this Kubernetes version
systemctl start docker
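
Optionally enable Docker at boot and confirm the requested version was installed:

systemctl enable docker
docker version       # client and server should both report 18.06.1-ce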

7. Configure a registry mirror (image accelerator)

mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "registry-mirrors": ["https://0s2uk8va.mirror.aliyuncs.com"]
}
EOF

systemctl daemon-reload
systemctl restart docker
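
To confirm the settings from daemon.json were picked up, docker info should now report the systemd cgroup driver and the registry mirror:

docker info | grep -i "cgroup driver"          # expect: Cgroup Driver: systemd
docker info | grep -A 1 -i "registry mirrors"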

8. Install kubelet, kubeadm, and kubectl

# Add the Kubernetes repo file
cat > /etc/yum.repos.d/kubernetes.repo  << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install matching (older) versions of the tools
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0

# Add the kubelet configuration file
mkdir -p /var/lib/kubelet
cat > /var/lib/kubelet/config.yaml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
EOF
Note: without this file, starting kubelet fails immediately with an error that /var/lib/kubelet/config.yaml cannot be found.

# Initialize certificates and kubeconfig files
mkdir -p /etc/kubernetes/pki/
cd /etc/kubernetes/pki/
kubeadm init phase certs all        # generate all certificates

cd /etc/kubernetes/
kubeadm init phase kubeconfig all   # generate the kubeconfig files


# Start kubelet and enable it at boot
systemctl start kubelet
ps -ef | grep kubelet
systemctl enable kubelet

9. Enable IPVS
A Kubernetes Service can be proxied in two modes: iptables and ipvs.
With forwarding enabled, the default is the iptables mode; iptables matches proxy rules one by one from top to bottom, so performance degrades badly once there are many rules.

# Install the IPVS tools
yum install ipset ipvsadm -y

# Load the IPVS scheduling-algorithm modules
cat > /etc/sysconfig/modules/ipvs.modules << END
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4     # on the CentOS 7 (3.10) kernel this is the conntrack module name
END

chmod +x /etc/sysconfig/modules/ipvs.modules
sh /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
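
The /etc/sysconfig/modules convention is not guaranteed to run at boot on CentOS 7; as an alternative sketch, systemd's modules-load.d can reload the IPVS modules automatically:

# Alternative: let systemd load the IPVS modules at boot
cat > /etc/modules-load.d/ipvs.conf << END
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
END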

# Configure kubelet extra options
cat > /etc/sysconfig/kubelet << END
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
END

# Restart kubelet
systemctl stop kubelet
systemctl start kubelet

II. Install the Kubernetes cluster

1. On the master, pull the images kubeadm needs to initialize the cluster

# List the required images, using the Aliyun (domestic) image registry
kubeadm config images list --image-repository=registry.aliyuncs.com/google_containers

# Pull the images listed above with docker pull (optional; they can also be pulled automatically during init)
docker pull registry.aliyuncs.com/google_containers/kube-apiserver:v1.18.0
docker pull registry.aliyuncs.com/google_containers/kube-controller-manager:v1.18.0
docker pull registry.aliyuncs.com/google_containers/kube-scheduler:v1.18.0
docker pull registry.aliyuncs.com/google_containers/kube-proxy:v1.18.0
docker pull registry.aliyuncs.com/google_containers/pause:3.2
docker pull registry.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.aliyuncs.com/google_containers/coredns:1.6.7
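
Instead of pulling each image by hand, a small loop over the list printed by kubeadm does the same thing (a convenience sketch using the same Aliyun registry):

for img in $(kubeadm config images list --image-repository=registry.aliyuncs.com/google_containers); do
    docker pull "$img"
done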

2. Initialize the cluster on the master

# On a physical machine with plenty of memory you can omit the --ignore-preflight-errors parameter
# This variant uses the default Kubernetes image registry, so the images above must be pulled first
kubeadm init --apiserver-advertise-address="192.168.48.130" \
--kubernetes-version="v1.18.0" \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--ignore-preflight-errors=Swap
Be sure to check the version number: the version passed to kubeadm init must not be lower than the installed Kubernetes (kubelet/kubeadm) version.

# Alternative: use the Aliyun (domestic) image registry instead of pulling manually
kubeadm init --apiserver-advertise-address="192.168.48.130" \
--kubernetes-version="v1.18.0" \
--image-repository="registry.aliyuncs.com/google_containers" \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12 \
--ignore-preflight-errors=all

# After a successful init, run the following (these commands are also printed in the kubeadm init output)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
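
At this point kubectl can talk to the API server; the master will typically show NotReady until the pod network is deployed in the next step:

kubectl get nodes        # the master is listed, usually NotReady until flannel is installed
kubectl cluster-info     # shows the API server and DNS endpoints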

3. Deploy the flannel network on the master

# Default pod network CIDRs
flannel: 10.244.0.0/16
calico:  192.168.0.0/16

# Deploy the flannel network
# GitHub: https://github.com/coreos/flannel ; the command below comes from that repository
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
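
Flannel needs a short while to start on every node; you can watch the pods come up and wait for the nodes to turn Ready:

kubectl get pods -A -o wide      # wait for the flannel and coredns pods to reach Running
kubectl get nodes                # nodes switch from NotReady to Ready once the network is up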

4. Run the join command on each node

# Join the cluster (this command, including the token, is printed by kubeadm init on the master)
kubeadm join 192.168.48.130:6443 --token lt38jo.07d7hntzhckepslx --discovery-token-ca-cert-hash sha256:318a4284bf1d944de5c86081e6b692e6bdfff5202456c98a6e7cf0c91e0d2b69 --ignore-preflight-errors=Swap
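
The token above comes from the kubeadm init output and expires after 24 hours by default; if it has expired, generate a fresh join command on the master:

kubeadm token create --print-join-command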

Note: the images a node needs during the join are pause, kube-proxy, and flannel.
Problem:
The following error can appear while configuring a node:

k8s The HTTP call equal to 'curl -sSL http://localhost:10255/healthz' failed with error

# This happens because kubelet is not configured to ignore swap; add the following line to /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--fail-swap-on=false"

5. Test on the master

kubectl create deployment nginx --image=nginx:1.14
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
kubectl get pods -n kube-system
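
To confirm the nginx Service is actually reachable, one way is to look up its NodePort and curl it from a node IP (the jsonpath expression below is just one way to fetch the port; any node IP from the host list works):

NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl -I http://192.168.48.131:$NODE_PORT     # expect HTTP/1.1 200 OK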

III. Install the Dashboard

Official docs: https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/
GitHub: https://github.com/kubernetes/dashboard

1. Download the dashboard deployment manifest (recommended.yaml)

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml

2. Edit the recommended.yaml file

vim recommended.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort    # add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30000        # optional: pins the dashboard NodePort; if omitted, Kubernetes assigns a port automatically
  selector:
    k8s-app: kubernetes-dashboard

---
# Many browsers reject the auto-generated certificate, so we create our own below; comment out the kubernetes-dashboard-certs Secret declaration
#apiVersion: v1
#kind: Secret
#metadata:
#  labels:
#    k8s-app: kubernetes-dashboard
#  name: kubernetes-dashboard-certs
#  namespace: kubernetes-dashboard
#type: Opaque

3. Create the certificate

mkdir dashboard-certs
cd dashboard-certs

# Create the namespace
kubectl create namespace kubernetes-dashboard

# Generate the private key
openssl genrsa -out dashboard.key 2048

# Create the certificate signing request
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'

# Self-sign the certificate (without -days the certificate would expire after the 30-day default)
openssl x509 -req -days 36000 -in dashboard.csr -signkey dashboard.key -out dashboard.crt

# Create the kubernetes-dashboard-certs Secret object
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n  kubernetes-dashboard
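
Before installing the dashboard, the certificate and the Secret can be double-checked (purely optional):

openssl x509 -in dashboard.crt -noout -subject -dates       # inspect the self-signed certificate
kubectl get secret kubernetes-dashboard-certs -n kubernetes-dashboard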

4. Install the dashboard

kubectl apply -f recommended.yaml

5. Check the result

[root@k8s-master ~]# kubectl get pods -A  -o wide
NAMESPACE              NAME                                         READY   STATUS    RESTARTS   AGE     IP             NODE         NOMINATED NODE   READINESS GATES
kube-flannel           kube-flannel-ds-8clh6                        1/1     Running   1          3h38m   192.168.3.24   k8s-node1    <none>           <none>
kube-flannel           kube-flannel-ds-fn84k                        1/1     Running   2          3h38m   192.168.3.25   k8s-node2    <none>           <none>
kube-flannel           kube-flannel-ds-x8bfr                        1/1     Running   1          3h38m   192.168.3.21   k8s-master   <none>           <none>
kube-system            coredns-7ff77c879f-86s9s                     1/1     Running   1          4h2m    10.244.0.3     k8s-master   <none>           <none>
kube-system            coredns-7ff77c879f-q5znz                     1/1     Running   1          4h2m    10.244.2.3     k8s-node1    <none>           <none>
kube-system            etcd-k8s-master                              1/1     Running   1          4h2m    192.168.3.21   k8s-master   <none>           <none>
kube-system            kube-apiserver-k8s-master                    1/1     Running   1          4h2m    192.168.3.21   k8s-master   <none>           <none>
kube-system            kube-controller-manager-k8s-master           1/1     Running   1          4h2m    192.168.3.21   k8s-master   <none>           <none>
kube-system            kube-proxy-fp29n                             1/1     Running   1          3h49m   192.168.3.24   k8s-node1    <none>           <none>
kube-system            kube-proxy-hbw9h                             1/1     Running   1          3h51m   192.168.3.25   k8s-node2    <none>           <none>
kube-system            kube-proxy-xc657                             1/1     Running   1          4h2m    192.168.3.21   k8s-master   <none>           <none>
kube-system            kube-scheduler-k8s-master                    1/1     Running   1          4h2m    192.168.3.21   k8s-master   <none>           <none>
kubernetes-dashboard   dashboard-metrics-scraper-6b4884c9d5-58vmr   1/1     Running   0          21m     10.244.2.7     k8s-node1    <none>           <none>
kubernetes-dashboard   kubernetes-dashboard-7b544877d5-7gc4w        1/1     Running   0          21m     10.244.2.6     k8s-node1    <none>           <none>
[root@k8s-master ~]# kubectl get svc  -n  kubernetes-dashboard  -o wide
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE   SELECTOR
dashboard-metrics-scraper   ClusterIP   10.100.156.177   <none>        8000/TCP        29m   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard        NodePort    10.101.77.182    <none>        443:30000/TCP   29m   k8s-app=kubernetes-dashboard

6. Create a dashboard admin user

vim dashboard-admin.yaml

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: dashboard-admin
  namespace: kubernetes-dashboard

# Create the admin ServiceAccount
kubectl apply -f dashboard-admin.yaml

dashboard-admin is the login user name you define yourself.

7. Grant the admin user permissions

vim dashboard-admin-bind-cluster-role.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-admin-bind-cluster-role
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: dashboard-admin
    namespace: kubernetes-dashboard

# Bind the cluster-admin role to the admin ServiceAccount
kubectl apply -f dashboard-admin-bind-cluster-role.yaml

8. View and copy the user token

kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')
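
If you only need the raw token (for example to paste into the login page), it can also be extracted directly; this is equivalent to copying it out of the describe output above:

kubectl -n kubernetes-dashboard get secret \
  $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}') \
  -o jsonpath='{.data.token}' | base64 -d; echo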

9. Access the dashboard

Method 1: via kubectl proxy

kubectl proxy --address='192.168.48.131' --disable-filter=true
Access:
http://192.168.48.131:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/


Method 2: via a cluster node IP and the NodePort

https://<nodeIP>:30000/     (30000 is the NodePort shown in the Service output above)
