Kubernetes v1.19.1
[root@node-1 ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.19.1
k8s.gcr.io/kube-controller-manager:v1.19.1
k8s.gcr.io/kube-scheduler:v1.19.1
k8s.gcr.io/kube-proxy:v1.19.1
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
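k8s.gcr.io is not reachable from mainland China, so pull the same images from the Aliyun mirror and retag them to the names kubeadm expects: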
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.19.1
docker tag registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.1 k8s.gcr.io/kube-apiserver:v1.19.1
docker tag registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.1 k8s.gcr.io/kube-controller-manager:v1.19.1
docker tag registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.1 k8s.gcr.io/kube-scheduler:v1.19.1
docker tag registry.aliyuncs.com/google_containers/kube-proxy:v1.19.1 k8s.gcr.io/kube-proxy:v1.19.1
docker tag registry.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag registry.aliyuncs.com/google_containers/etcd:3.4.13-0 k8s.gcr.io/etcd:3.4.13-0
docker tag registry.aliyuncs.com/google_containers/coredns:1.7.0 k8s.gcr.io/coredns:1.7.0
docker rmi registry.aliyuncs.com/google_containers/kube-proxy:v1.19.1
docker rmi registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.1
docker rmi registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.1
docker rmi registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.1
docker rmi registry.aliyuncs.com/google_containers/etcd:3.4.13-0
docker rmi registry.aliyuncs.com/google_containers/coredns:1.7.0
docker rmi registry.aliyuncs.com/google_containers/pause:3.2
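The per-image tag/rmi commands above can be collapsed into one loop; a minimal sketch, using the image list printed by kubeadm config images list:

#!/usr/bin/env bash
# Pull each image from the Aliyun mirror, retag it to the k8s.gcr.io
# name kubeadm expects, then drop the mirror tag.
MIRROR=registry.aliyuncs.com/google_containers
IMAGES="kube-apiserver:v1.19.1 kube-controller-manager:v1.19.1 kube-scheduler:v1.19.1 kube-proxy:v1.19.1 pause:3.2 etcd:3.4.13-0 coredns:1.7.0"
for img in $IMAGES; do
    docker pull "$MIRROR/$img"
    docker tag "$MIRROR/$img" "k8s.gcr.io/$img"
    docker rmi "$MIRROR/$img"
done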
flannel
https://gitee.com/zhangjinyu/flannel/tree/master/Documentation/kube-flannel.yml
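Apply the downloaded manifest and check that the DaemonSet pods come up (the kube-system namespace and app=flannel label match the 2020-era manifest; adjust if yours differs):

kubectl apply -f kube-flannel.yml
kubectl get pods -n kube-system -l app=flannel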
dashboard
https://codeload.github.com/kubernetes/dashboard/tar.gz/v2.0.4/dashboard-2.0.4.tar.gz
https://gitee.com/yeaheo/kubernetes-dashboard.git
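A sketch of deploying from the release tarball (aio/deploy/recommended.yaml is the manifest path in the v2.0.x dashboard repo):

tar -zxf dashboard-2.0.4.tar.gz
kubectl apply -f dashboard-2.0.4/aio/deploy/recommended.yaml
kubectl get pods -n kubernetes-dashboard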
metrics-server
https://github.com/kubernetes-sigs/metrics-server
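metrics-server ships a components.yaml with each release; a sketch (v0.4.1 is an assumed version, and on kubeadm clusters the --kubelet-insecure-tls arg often has to be added to the deployment because kubelet serving certs are self-signed):

kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.4.1/components.yaml
kubectl top nodes   # verifies the metrics API is serving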
Install and deploy Prometheus
git clone https://github.com/coreos/kube-prometheus.git
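The kube-prometheus manifests reference the images below; on a cluster without registry access they have to be preloaded onto every node: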
quay.io/prometheus/alertmanager:v0.21.0
grafana/grafana:7.1.0
quay.io/coreos/kube-state-metrics:v1.9.7
quay.io/brancz/kube-rbac-proxy:v0.6.0
quay.io/prometheus/node-exporter:v0.18.1
directxman12/k8s-prometheus-adapter:v0.7.0
quay.io/prometheus/prometheus:v2.20.0
jimmidyson/configmap-reload:v0.4.0
quay.io/prometheus-operator/prometheus-operator:v0.42.0
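On a machine that can reach the registries, pull them all in one loop before saving:

for img in \
    quay.io/prometheus/alertmanager:v0.21.0 \
    grafana/grafana:7.1.0 \
    quay.io/coreos/kube-state-metrics:v1.9.7 \
    quay.io/brancz/kube-rbac-proxy:v0.6.0 \
    quay.io/prometheus/node-exporter:v0.18.1 \
    directxman12/k8s-prometheus-adapter:v0.7.0 \
    quay.io/prometheus/prometheus:v2.20.0 \
    jimmidyson/configmap-reload:v0.4.0 \
    quay.io/prometheus-operator/prometheus-operator:v0.42.0; do
    docker pull "$img"
done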
docker save -o alertmanager.tar quay.io/prometheus/alertmanager:v0.21.0
docker save -o grafana.tar grafana/grafana:7.1.0
docker save -o kube-state-metrics.tar quay.io/coreos/kube-state-metrics:v1.9.7
docker save -o kube-rbac-proxy.tar quay.io/brancz/kube-rbac-proxy:v0.6.0
docker save -o node-exporter.tar quay.io/prometheus/node-exporter:v0.18.1
docker save -o k8s-prometheus-adapter.tar directxman12/k8s-prometheus-adapter:v0.7.0
docker save -o prometheus.tar quay.io/prometheus/prometheus:v2.20.0
docker save -o configmap-reload.tar jimmidyson/configmap-reload:v0.4.0
docker save -o prometheus-operator.tar quay.io/prometheus-operator/prometheus-operator:v0.42.0
docker load < ./alertmanager.tar
docker load < ./configmap-reload.tar
docker load < ./grafana.tar
docker load < ./k8s-prometheus-adapter.tar
docker load < ./kube-rbac-proxy.tar
docker load < ./kube-state-metrics.tar
docker load < ./node-exporter.tar
docker load < ./prometheus-operator.tar
docker load < ./prometheus.tar
Install
[root@node-1 kube-prometheus]# kubectl create -f manifests/setup/
[root@node-1 kube-prometheus]# kubectl create -f manifests/
kubectl get pod -n monitoring
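Once everything is Running, the UIs can be reached by port-forwarding (these service names are what the kube-prometheus manifests create):

kubectl --namespace monitoring port-forward svc/grafana 3000
kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
kubectl --namespace monitoring port-forward svc/alertmanager-main 9093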
To delete everything:
kubectl delete --ignore-not-found=true -f manifests/ -f manifests/setup
Install rook-ceph
1. Deploy the Rook Operator
The example manifests live under /root/rook/cluster/examples/kubernetes/ceph:
[root@node-1 ceph]# kubectl create -f common.yaml
[root@node-1 ceph]# kubectl create -f operator.yaml
kubectl label nodes {node-1,node-2,node-3} ceph-mon=enabled
kubectl label nodes {node-1,node-2,node-3} ceph-osd=enabled
kubectl label nodes node-1 ceph-mgr=enabled
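These labels only take effect if cluster.yaml pins the mon/osd/mgr placement to them via nodeAffinity (as in the Rook placement examples); confirm they landed:

kubectl get nodes -l ceph-mon=enabled
kubectl get nodes -l ceph-osd=enabled
kubectl get nodes -l ceph-mgr=enabled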
[root@node-1 ~]# kubectl get pods -n rook-ceph
NAME                                READY   STATUS    RESTARTS   AGE
rook-ceph-operator-8d9bf87c-k9z9k   1/1     Running   0          34m
rook-discover-fc8lx                 1/1     Running   0          28m
rook-discover-zjphb                 1/1     Running   0          28m
[root@node-1 ~]# cd /root/rook/cluster/examples/kubernetes/ceph
[root@node-1 ceph]# kubectl create -f cluster.yaml
Deleting the Ceph cluster
kubectl delete -f operator.yaml
kubectl delete namespace rook-ceph
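Note: the Rook teardown docs remove the CephCluster resource first, while the operator is still running, so it can clean up the cluster; a sketch of that order (rook-ceph is the CR name used by the example cluster.yaml):

kubectl -n rook-ceph delete cephcluster rook-ceph
kubectl delete -f operator.yaml
kubectl delete -f common.yaml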
Delete the data on each server. Install gdisk (it provides sgdisk) and run the script below on every node:
yum install -y gdisk-0.8.10-3.el7.x86_64
#!/usr/bin/env bash
# Disk that backed the Ceph OSD on this host
DISK="/dev/sdb"
# Wipe all GPT/MBR partition metadata from the disk
sgdisk --zap-all "$DISK"
# Remove any ceph-volume device-mapper mappings left behind by the OSDs
ls /dev/mapper/ceph-* 2>/dev/null | xargs -I% -- dmsetup remove %
# Clean up leftover /dev entries for the ceph volume groups
rm -rf /dev/ceph-*
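The Rook cleanup docs additionally suggest zeroing the start of the disk so ceph-volume does not find stale metadata, and removing the operator's host state directory (/var/lib/rook is the default dataDirHostPath); these lines extend the script above:

dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
rm -rf /var/lib/rook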