k8s容器部署elasticsearch+kibana+cerebro
k8s容器部署elasticsearch+kibana+cerebro
·
k8s容器部署elasticsearch+kibana+cerebro
文章目录
1、operator安装
1.1 版本和yaml文件确认
# 官网
https://www.elastic.co/guide/en/cloud-on-k8s/1.9/k8s-deploy-eck.html
#查看k8s版本
kubectl version
wget https://download.elastic.co/downloads/eck/1.9.1/crds.yaml
wget https://download.elastic.co/downloads/eck/1.9.1/operator.yaml
#yaml文件太长我就不贴出来了,可以自己从官网下载
#修改yaml文件
operator.yaml中的命名空间修改为自己的命名空间,我的namespace是es
1.2 创建资源和operator
[root@Paas-cce-test-primary-master01 es]# pwd
/root/zhanghsn/es
[root@Paas-cce-test-primary-master01 es]# kubectl apply -f ./crds.yaml
[root@Paas-cce-test-primary-master01 es]# kubectl apply -f ./operator.yaml
说明:我这个图是最后都创建完的截图,所以你也可以看到其他的软件pod
2、elasticsearch安装
2.1 yaml文件准备
部署elasticsearch使用的nfs存储,我这里使用k8s集群提前就有nfs存储,所以直接使用,如果没有或者其他的存储也一样,大家可以搜索下nfs存储创建
elasticsearch的yaml文件,这里是3个master和3个node自己可以定,我使用的是只有3个master,见下面yaml文件
# Elasticsearch cluster managed by the ECK operator.
# Topology: 3 dedicated master nodes plus a data-node tier (scaled to 0 here,
# so the masters also hold data by default in this example).
# NOTE(review): indentation was lost in the original paste; structure below
# follows the standard ECK Elasticsearch spec — verify against your file.
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: es-cluster
  namespace: es
spec:
  version: 7.10.1
  nodeSets:
  - name: master-nodes
    count: 3
    config:
      # Dedicated master role (node.master/node.data style used by ES 7.x).
      node.master: true
      node.data: false
    podTemplate:
      spec:
        initContainers:
        # Elasticsearch requires vm.max_map_count >= 262144 on the host;
        # needs a privileged init container to set it.
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          image: paas-cce.dev.dcesg.cn/zhsn/elasticsearch:7.10.1
          env:
          - name: ES_JAVA_OPTS
            # Heap (6g) stays well under the 10Gi container limit.
            value: "-Xms6g -Xmx6g"
          resources:
            limits:
              memory: 10Gi
              cpu: 4
            requests:
              memory: 10Gi
              cpu: 4
        imagePullSecrets:
        - name: default-secret
    volumeClaimTemplates:
    - metadata:
        # Must be named elasticsearch-data so ECK mounts it as the data path.
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 2Gi
        storageClassName: copaddon-nfs-public
  - name: data-nodes
    count: 0
    config:
      node.master: false
      node.data: true
    podTemplate:
      spec:
        initContainers:
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          image: paas-cce.dev.dcesg.cn/zhsn/elasticsearch:7.10.1
          env:
          - name: ES_JAVA_OPTS
            value: "-Xms8g -Xmx8g"
          resources:
            limits:
              memory: 10Gi
              cpu: 4
            requests:
              memory: 10Gi
              cpu: 4
        imagePullSecrets:
        - name: default-secret
    volumeClaimTemplates:
    - metadata:
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 2Gi
        storageClassName: copaddon-nfs-public
  http:
    tls:
      selfSignedCertificate:
        # Serve plain HTTP; lets the nginx ingress proxy without TLS setup.
        disabled: true
本次使用的yaml文件
# Elasticsearch manifest actually deployed in this walkthrough:
# a single nodeSet of 3 nodes (default roles: master + data).
# NOTE(review): indentation was lost in the original paste; structure below
# follows the standard ECK Elasticsearch spec — verify against your file.
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: es-cluster
  namespace: es
spec:
  version: 7.10.1
  nodeSets:
  - name: master
    count: 3
    podTemplate:
      spec:
        initContainers:
        # Elasticsearch requires vm.max_map_count >= 262144 on the host.
        - name: sysctl
          securityContext:
            privileged: true
            runAsUser: 0
          command: ['sh', '-c', 'sysctl -w vm.max_map_count=262144']
        containers:
        - name: elasticsearch
          image: paas-cce.dev.dcesg.cn/zhsn/elasticsearch:7.10.1
          env:
          - name: ES_JAVA_OPTS
            # Heap (6g) stays well under the 10Gi container limit.
            value: "-Xms6g -Xmx6g"
          resources:
            limits:
              memory: 10Gi
              cpu: 4
            requests:
              memory: 10Gi
              cpu: 4
        imagePullSecrets:
        - name: default-secret
    volumeClaimTemplates:
    - metadata:
        # Must be named elasticsearch-data so ECK mounts it as the data path.
        name: elasticsearch-data
      spec:
        accessModes:
        - ReadWriteOnce
        resources:
          requests:
            storage: 2Gi
        storageClassName: copaddon-nfs-public
  http:
    tls:
      selfSignedCertificate:
        # Serve plain HTTP; lets the nginx ingress proxy without TLS setup.
        disabled: true
2.2 开始部署
[root@Paas-cce-test-primary-master01 es]# kubectl apply -f ./es-test.yaml
2.3 创建ingress
# Ingress exposing the Elasticsearch HTTP service (es-cluster-es-http:9200).
# NOTE(review): extensions/v1beta1 Ingress was removed in Kubernetes 1.22+;
# migrate to networking.k8s.io/v1 on newer clusters.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
    # Fixed: original value was ' HTTP' with a leading space, which the
    # nginx ingress controller does not recognize as a valid protocol.
    nginx.ingress.kubernetes.io/backend-protocol: 'HTTP'
  name: ingress-es
  namespace: es
spec:
  rules:
  - host: es-test.zhsn.com
    http:
      paths:
      - backend:
          serviceName: es-cluster-es-http
          servicePort: 9200
        path: /
[root@Paas-cce-test-primary-master01 es]# kubectl apply -f ./es-ingress.yaml
[root@Paas-cce-test-primary-master01 es]# kubectl get ing -n es
2.4 查看的用户和密码
[root@Paas-cce-test-primary-master01 es]# kubectl get secret -n es
[root@Paas-cce-test-primary-master01 es]# kubectl get secret -n es es-cluster-es-elastic-user -o yaml
[root@Paas-cce-test-primary-master01 es]# echo "UEBzc3cwcmQ=" | base64 -d
P@ssw0rd
#换密码方式
[root@Paas-cce-test-primary-master01 es]# echo -n "elastic@admin" | base64
ZWxhc3RpY0BhZG1pbg==
[root@Paas-cce-test-primary-master01 es]# kubectl edit secret -n es es-cluster-es-elastic-user
# Secret holding the built-in "elastic" user's password, as shown by
# `kubectl edit secret -n es es-cluster-es-elastic-user`.
# The creationTimestamp/resourceVersion/selfLink/uid fields are server-set
# read-only metadata from that live object.
apiVersion: v1
data:
  # Replace this value with your own base64-encoded password to change it.
  elastic: UEBzc3cwcmQ=
kind: Secret
metadata:
  creationTimestamp: "2022-06-30T02:42:48Z"
  labels:
    common.k8s.elastic.co/type: elasticsearch
    eck.k8s.elastic.co/credentials: "true"
    eck.k8s.elastic.co/owner-kind: Elasticsearch
    eck.k8s.elastic.co/owner-name: es-cluster
    eck.k8s.elastic.co/owner-namespace: es
    elasticsearch.k8s.elastic.co/cluster-name: es-cluster
  name: es-cluster-es-elastic-user
  namespace: es
  resourceVersion: "123533843"
  selfLink: /api/v1/namespaces/es/secrets/es-cluster-es-elastic-user
  uid: a0525dc6-d40c-48d8-a666-d6f7374d1742
type: Opaque
#可以重启下es的pod让马上生效
2.5 登录验证
[root@Paas-cce-test-primary-master01 es]# kubectl -n es get elasticsearch
在浏览器登录,需要你在电脑的hosts进行域名解析配置
http://es-test.zhsn.com
登录用户和密码见secret
elastic/P@ssw0rd
3、kibana安装
3.1 yaml文件准备
# Kibana instance managed by ECK, bound to the es-cluster Elasticsearch.
# NOTE(review): indentation was lost in the original paste; structure below
# follows the standard ECK Kibana spec — verify against your file.
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana
  namespace: es
spec:
  # Keep the Kibana version aligned with the Elasticsearch version.
  version: 7.10.1
  http:
    tls:
      selfSignedCertificate:
        # Serve plain HTTP; lets the nginx ingress proxy without TLS setup.
        disabled: true
  count: 1
  elasticsearchRef:
    # Cluster name as reported by: kubectl -n es get elasticsearch
    name: es-cluster
  podTemplate:
    spec:
      containers:
      - name: kibana
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
[root@Paas-cce-test-primary-master01 es]# kubectl apply -f ./kibana.yaml
3.2 创建ingress
# Ingress exposing the Kibana HTTP service (kibana-kb-http:5601).
# NOTE(review): extensions/v1beta1 Ingress was removed in Kubernetes 1.22+;
# migrate to networking.k8s.io/v1 on newer clusters.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
    # Fixed: original value was ' HTTP' with a leading space, which the
    # nginx ingress controller does not recognize as a valid protocol.
    nginx.ingress.kubernetes.io/backend-protocol: 'HTTP'
  name: ingress-kibana
  namespace: es
spec:
  rules:
  # Fixed obvious typo: original host was 'kibana.zhsn.om' while every
  # other host in this article uses the zhsn.com domain.
  - host: kibana.zhsn.com
    http:
      paths:
      - backend:
          serviceName: kibana-kb-http
          servicePort: 5601
        path: /
3.3 登录查看
密码为es的账号密码
elastic/P@ssw0rd
4、cerebro安装
4.1 yaml文件准备
# cerebro (Elasticsearch admin UI): Deployment + ClusterIP Service + Ingress.
# Configuration is mounted from the 'cerebro-application' ConfigMap into
# /etc/cerebro.
# NOTE(review): indentation was lost in the original paste; structure below
# is reconstructed — verify against your file.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cerebro
  name: cerebro
  namespace: es
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cerebro
  template:
    metadata:
      labels:
        app: cerebro
      name: cerebro
    spec:
      containers:
      - image: paas-cce.dev.dcesg.cn/zhsn/cerebro:0.8.3
        imagePullPolicy: IfNotPresent
        name: cerebro
        resources:
          limits:
            cpu: 1
            memory: 1Gi
          requests:
            cpu: 1
            memory: 1Gi
        volumeMounts:
        - name: cerebro-conf
          mountPath: /etc/cerebro
      volumes:
      - name: cerebro-conf
        configMap:
          name: cerebro-application
      imagePullSecrets:
      - name: default-secret
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cerebro
  name: cerebro
  namespace: es
spec:
  ports:
  - port: 9000
    protocol: TCP
    targetPort: 9000
  selector:
    app: cerebro
  type: ClusterIP
---
# NOTE(review): extensions/v1beta1 Ingress was removed in Kubernetes 1.22+;
# migrate to networking.k8s.io/v1 on newer clusters.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
    # Fixed: original value was ' HTTP' with a leading space, which the
    # nginx ingress controller does not recognize as a valid protocol.
    nginx.ingress.kubernetes.io/backend-protocol: 'HTTP'
  name: cerebro-ingress
  namespace: es
spec:
  rules:
  - host: cerebro.zhsn.com
    http:
      paths:
      - backend:
          serviceName: cerebro
          servicePort: 9000
        path: /
4.2 部署cerebro
[root@Paas-cce-test-primary-master01 es]# kubectl apply -f ./cerebro.yaml
4.3 登录查看
更多推荐
已为社区贡献5条内容
所有评论(0)