Kubernetes (k8s) Installation
- Hardware requirements:
- Memory: 2 GB or more
- CPU: 2 cores or more
- Example servers:
CentOS-7-x86_64-2009
Master: k8s-master 192.168.100.127
Node1: k8s-node1 192.168.100.128
Node2: k8s-node2 192.168.100.129
- Environment preparation:
#1> Rename the hosts (run each command on its respective machine) and update /etc/hosts
hostnamectl set-hostname k8s-master
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
cat >> /etc/hosts << EOF
192.168.100.127 k8s-master
192.168.100.128 k8s-node1
192.168.100.129 k8s-node2
EOF
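# A quick sanity check that host name resolution works (a minimal sketch):
for h in k8s-master k8s-node1 k8s-node2; do ping -c 1 "$h"; done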
#2> Configure the Aliyun Kubernetes yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
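# Optionally refresh the cache and confirm the new repo is visible:
yum makecache fast
yum repolist | grep -i kubernetes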
#3> Disable the firewall, SELinux, and swap
systemctl stop firewalld
systemctl disable firewalld
setenforce 0 # disable temporarily
# disable permanently
sed -i '/^SELINUX/ s/enforcing/disabled/' /etc/selinux/config
# verify (Permissive after setenforce 0; Disabled after a reboot)
getenforce
Permissive
swapoff -a ; sed -i '/swap/d' /etc/fstab
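# Confirm swap is fully off (swapon -s should print nothing, free should show 0):
swapon -s
free -m | grep -i swap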
- Install Docker
#1> Docker requires a kernel of at least 3.10; CentOS 7 already satisfies this, so no upgrade is needed
#2> Update packages
yum update -y
#3> Install prerequisite packages:
yum install -y yum-utils device-mapper-persistent-data lvm2
#4> Set up the Docker yum repository
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
#5> Install the latest stable version
yum install -y docker-ce
#6> Start Docker and enable it at boot
systemctl start docker && systemctl enable docker
#7> Verify the Docker version
docker version
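# A quick smoke test, assuming the host can reach Docker Hub:
docker run --rm hello-world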
- Configure iptables bridge settings
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# Load the bridge module and apply the settings; without this the file has no effect until reboot:
modprobe br_netfilter
sysctl --system
- Install kubelet, kubeadm, and kubectl
yum install -y kubelet kubeadm kubectl
systemctl restart kubelet
systemctl enable kubelet
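# Alternatively, pin the package versions to match the v1.23.1 example below
# (a sketch; available versions can be listed with: yum --showduplicates list kubeadm):
yum install -y kubelet-1.23.1 kubeadm-1.23.1 kubectl-1.23.1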
- Initialize and configure the master (control plane)
#*** This is where most problems show up ***
#1> Normal init
kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v<version> --pod-network-cidr=10.244.0.0/16
#2> If the command above fails, list the images kubeadm needs, then pull them manually and re-tag them
# (the versions listed depend on your installed kubeadm; the example below targets v1.23.1)
$ kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.21.8
k8s.gcr.io/kube-controller-manager:v1.21.8
k8s.gcr.io/kube-scheduler:v1.21.8
k8s.gcr.io/kube-proxy:v1.21.8
k8s.gcr.io/pause:3.4.1
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns/coredns:v1.8.0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1 k8s.gcr.io/kube-apiserver:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1 k8s.gcr.io/kube-controller-manager:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1 k8s.gcr.io/kube-scheduler:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1 k8s.gcr.io/kube-proxy:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 k8s.gcr.io/pause:3.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0 k8s.gcr.io/etcd:3.5.1-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
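# The pull/tag pairs above can be scripted in one loop; a minimal sketch over the
# same image list (coredns is handled separately because its target path differs):
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
for img in kube-apiserver:v1.23.1 kube-controller-manager:v1.23.1 kube-scheduler:v1.23.1 kube-proxy:v1.23.1 pause:3.6 etcd:3.5.1-0; do
  docker pull $MIRROR/$img && docker tag $MIRROR/$img k8s.gcr.io/$img
done
docker pull $MIRROR/coredns:v1.8.6 && docker tag $MIRROR/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6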
# Initialize
kubeadm init --kubernetes-version=v1.23.1 --apiserver-advertise-address=192.168.100.127 --pod-network-cidr=10.244.0.0/16
# After init succeeds, copy and run the three commands from its output in turn:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
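# Verify kubectl can reach the new control plane; the master will stay NotReady
# until the CNI plugin (calico, below) is installed:
kubectl get nodes
kubectl cluster-info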
- Check the master init results; troubleshooting
#1> List pods in all namespaces
kubectl get pods --all-namespaces
#2> Describe any pod whose status is not Running, e.g.:
kubectl describe pod coredns-64897985d-h4w96 --namespace=kube-system
# Watch the system log
tail -f /var/log/messages
# Install the CNI network plugin (calico)
curl https://docs.projectcalico.org/manifests/calico-etcd.yaml -o calico.yaml
# Check which images calico.yaml needs
grep image calico.yaml
image: docker.io/calico/cni:v3.19.1
image: docker.io/calico/cni:v3.19.1
image: docker.io/calico/pod2daemon-flexvol:v3.19.1
image: docker.io/calico/node:v3.19.1
image: docker.io/calico/kube-controllers:v3.19.1
# docker pull the required images, save them into a tar archive, scp it to the two worker nodes, and docker load it back on each machine (images found missing via kubectl describe pod can be shipped to the nodes the same way):
docker save calico/cni calico/kube-controllers calico/node calico/pod2daemon-flexvol > calico-3.19-img.tar
scp calico-3.19-img.tar k8s-node1:~
scp calico-3.19-img.tar k8s-node2:~
docker load -i calico-3.19-img.tar
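# The save/copy/load steps can also be combined; a sketch assuming passwordless ssh to the nodes:
for n in k8s-node1 k8s-node2; do
  scp calico-3.19-img.tar $n:~ && ssh $n "docker load -i calico-3.19-img.tar"
done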
# Edit calico.yaml so the pool matches the --pod-network-cidr passed to kubeadm init
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"
# Apply calico on the master
kubectl apply -f calico.yaml
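# Watch the calico pods come up; the nodes should turn Ready once calico-node is
# Running on each of them (the k8s-app=calico-node label comes from the manifest):
kubectl -n kube-system get pods -l k8s-app=calico-node -w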
- Join the nodes to the cluster
# Run on the master:
kubeadm token create --print-join-command
# Copy the printed join command and run it on each node
# To check the nodes from the master:
kubectl get nodes
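# New nodes appear as NotReady first and become Ready once calico is running on them:
kubectl get nodes -w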
- Troubleshooting:
# ****** etcd settings for calico-etcd.yaml ******
# etcd endpoint
ETCD_ENDPOINTS="https://192.168.100.127:2379"
sed -i "s#.*etcd_endpoints:.*# etcd_endpoints: \"${ETCD_ENDPOINTS}\"#g" calico.yaml
sed -i "s#__ETCD_ENDPOINTS__#${ETCD_ENDPOINTS}#g" calico.yaml
# etcd certificates (base64-encoded)
ETCD_CA=`cat /etc/kubernetes/pki/etcd/ca.crt | base64 | tr -d '\n'`
ETCD_CERT=`cat /etc/kubernetes/pki/etcd/server.crt | base64 | tr -d '\n'`
ETCD_KEY=`cat /etc/kubernetes/pki/etcd/server.key | base64 | tr -d '\n'`
# Substitute the values into calico.yaml
sed -i "s#.*etcd-ca:.*# etcd-ca: ${ETCD_CA}#g" calico.yaml
sed -i "s#.*etcd-cert:.*# etcd-cert: ${ETCD_CERT}#g" calico.yaml
sed -i "s#.*etcd-key:.*# etcd-key: ${ETCD_KEY}#g" calico.yaml
sed -i 's#.*etcd_ca:.*# etcd_ca: "/calico-secrets/etcd-ca"#g' calico.yaml
sed -i 's#.*etcd_cert:.*# etcd_cert: "/calico-secrets/etcd-cert"#g' calico.yaml
sed -i 's#.*etcd_key:.*# etcd_key: "/calico-secrets/etcd-key"#g' calico.yaml
sed -i "s#__ETCD_CA_CERT_FILE__#/etc/kubernetes/pki/etcd/ca.crt#g" calico.yaml
sed -i "s#__ETCD_CERT_FILE__#/etc/kubernetes/pki/etcd/server.crt#g" calico.yaml
sed -i "s#__ETCD_KEY_FILE__#/etc/kubernetes/pki/etcd/server.key#g" calico.yaml
sed -i "s#__KUBECONFIG_FILEPATH__#/etc/cni/net.d/calico-kubeconfig#g" calico.yaml
# Docker/kubelet cgroup driver mismatch: daemon.json takes a JSON key, not the
# dockerd flag form (--exec-opt native.cgroupdriver=cgroupfs)
vim /etc/docker/daemon.json
{ "exec-opts": ["native.cgroupdriver=cgroupfs"] }
systemctl restart docker
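# To compare the two sides (kubelet's value lives in its kubeadm-generated config):
docker info --format '{{.CgroupDriver}}'
grep cgroupDriver /var/lib/kubelet/config.yaml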
# If /etc/cni/net.d is missing on a node, copy it from the master
scp /etc/cni/net.d/* k8s-node1:/etc/cni/net.d/
scp /etc/cni/net.d/* k8s-node2:/etc/cni/net.d/
# If you need to re-initialize from scratch
rm -rf $HOME/.kube/
systemctl restart docker
systemctl restart kubelet
kubeadm reset
rm -rf /etc/cni/
# To remove a node from the cluster
kubectl delete node k8s-node2
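# A more graceful removal drains the node first, then deletes it (flag names
# vary by kubectl version; older releases use --delete-local-data):
kubectl drain k8s-node2 --ignore-daemonsets --delete-emptydir-data
# and on the removed node itself, reset its kubeadm state:
kubeadm reset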
- Install the dashboard
Official docs: https://kubernetes.io/zh/docs/tasks/access-application-cluster/web-ui-dashboard/
GitHub docs: https://github.com/kubernetes/dashboard/tree/master/docs
#1> Download the yaml
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.2.0/aio/deploy/recommended.yaml
# If the download fails, fetch the latest from GitHub:
https://github.com/kubernetes/dashboard/tree/master/aio/deploy/recommended.yaml
#2> Edit recommended.yaml (vim recommended.yaml)
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort     ## add this line
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30870     ## add this line
  selector:
    k8s-app: kubernetes-dashboard
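# With type NodePort, the dashboard should be reachable at https://<any-node-ip>:30870
# after step 4 below (a self-signed-certificate warning in the browser is expected).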
#3> Create dashboard-adminuser.yaml and dashboard-ClusterRoleBinding.yaml
vim dashboard-adminuser.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
vim dashboard-ClusterRoleBinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard
#4> Create the resources
kubectl apply -f recommended.yaml
kubectl apply -f dashboard-adminuser.yaml
kubectl apply -f dashboard-ClusterRoleBinding.yaml
#5> Get the login token
kubectl -n kubernetes-dashboard get secret $(kubectl -n kubernetes-dashboard get sa/admin-user -o jsonpath="{.secrets[0].name}") -o go-template="{{.data.token | base64decode}}"
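# On clusters running v1.24 or newer, ServiceAccount token Secrets are no longer
# created automatically; request a token directly instead:
kubectl -n kubernetes-dashboard create token admin-user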
#6> *********** Troubleshooting ************
As with the pod problems after k8s init above, you may need to pull images manually and copy them over to the nodes,
and you may need to adjust the calico network configuration.
- Error-handling write-ups to be added later as time permits