ELK(Elasticsearch + Kibana + Logstash)在 K8s 上的配置文件与部署清单

Kibana 配置(kibana.yml 与 node.options)

# Generate the Kibana config file (run from kibana/config on the host volume
# that the pod mounts at /usr/share/kibana/config).
# The heredoc delimiter is quoted ('EOF') so the shell performs no parameter
# expansion or command substitution inside the generated file.
cat > kibana.yml <<'EOF'
#
# ** THIS IS AN AUTO-GENERATED FILE **
#

# Default Kibana configuration for docker target
server.host: "0.0.0.0"
server.shutdownTimeout: "5s"
elasticsearch.hosts: [ "http://localhost:9200" ]
monitoring.ui.container.elasticsearch.enabled: true
i18n.locale: "zh-CN"
EOF

# Generate node.options for Kibana's bundled Node.js runtime.
# The heredoc delimiter MUST be quoted: with an unquoted EOF the shell
# command-substitutes the backticks in the "See `node --help`" comment line,
# corrupting (or emptying parts of) the generated file.
# Also removed a stray leading space before --unhandled-rejections=warn —
# the file format requires exactly one option per line.
cat > node.options <<'EOF'
## Node command line options
## See `node --help` and `node --v8-options` for available options
## Please note you should specify one option per line

## max size of old space in megabytes
#--max-old-space-size=4096

## do not terminate process on unhandled promise rejection
--unhandled-rejections=warn

## restore < Node 16 default DNS lookup behavior
--dns-result-order=ipv4first

## enable OpenSSL 3 legacy provider
#--openssl-legacy-provider
EOF

Logstash 管道配置(logstash.conf)

# Generate the Logstash pipeline config (run from logstash8/myconfig on the
# host; the pod mounts this directory at /myconf and starts Logstash with
# -f /myconf/logstash.conf).
# Quoted 'EOF' keeps the content literal — no shell expansion can ever touch
# the pipeline (important if $vars or backticks are added later).
cat > logstash.conf <<'EOF'
input {
    file {
        path => ["/spring-boot-logs/*/user.*.log"]
    }

}

filter {
  json {
    source => "message"
  }
}

output{
    elasticsearch{
    hosts => ["ssx-elk-dmsv.ssx:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
    }
}
EOF

K8s 部署 YAML 文件(Deployment 与 Service)

# Single-pod ELK stack: Elasticsearch, Kibana and Logstash run as three
# containers sharing the pod network (hence Kibana can reach ES at
# localhost:9200), backed by hostPath volumes and pinned to node101.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ssx-elk-dmsv
  namespace: ssx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ssx-elk-dmsv
  template:
    metadata:
      labels:
        app: ssx-elk-dmsv
    spec:
      # Extra /etc/hosts entries written into the pod
      hostAliases:
        - ip: "192.168.0.101"
          hostnames:
            - "node101"
        - ip: "192.168.0.102"
          hostnames:
            - "node102"
        - ip: "192.168.0.103"
          hostnames:
            - "node103"
        - ip: "127.0.0.1"
          hostnames:
            - "elasticsearch"
      containers:
        - name: ssx-elasticsearch8-c
          image: docker.elastic.co/elasticsearch/elasticsearch:8.10.2
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9200  # Elasticsearch HTTP API
          env:  # settings consumed by the Elasticsearch image entrypoint
            - name: discovery.type  # standalone cluster — skip node discovery
              value: "single-node"
            - name: xpack.security.enabled  # disable auth/TLS — dev/test only
              value: "false"  # quoted so YAML keeps it a string, not a boolean
            - name: ES_JAVA_OPTS  # cap the JVM heap at 512 MB
              value: -Xms512m -Xmx512m
          volumeMounts:
            - mountPath: /usr/share/elasticsearch/data  # ES data directory
              name: c-v-path-elasticsearch8-data
            - mountPath: /usr/share/elasticsearch/logs  # ES log directory
              name: c-v-path-elasticsearch8-logs
            - mountPath: /usr/share/elasticsearch/.cache  # ES runtime cache
              name: c-v-path-elasticsearch8-cache
            - mountPath: /etc/localtime  # sync container timezone with the host
              name: c-v-path-lt
        - name: ssx-kibana-c
          image: docker.elastic.co/kibana/kibana:8.10.2
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5601  # Kibana web UI
          volumeMounts:
            - mountPath: /usr/share/kibana/data  # NOTE(review): possibly unneeded — confirm which mounts Kibana actually requires
              name: c-v-path-kibana8-data
            - mountPath: /usr/share/kibana/config  # holds the generated kibana.yml / node.options
              name: c-v-path-kibana8-config
            - mountPath: /etc/localtime  # sync container timezone with the host
              name: c-v-path-lt
        - name: ssx-logstash-c
          image: docker.elastic.co/logstash/logstash:8.10.2
          imagePullPolicy: IfNotPresent
          env:  # settings consumed by the Logstash image entrypoint
            - name: xpack.security.enabled  # disable security features
              value: "false"  # quoted so YAML keeps it a string, not a boolean
            - name: LOG_LEVEL
              value: "info"
            - name: MONITORING_ENABLED
              value: "false"
          args: ["-f","/myconf/logstash.conf"]  # pipeline from the hostPath mount below
          volumeMounts:
            - mountPath: /myconf  # pipeline config (logstash.conf)
              name: c-v-path-logstash8-conf
            - mountPath: /usr/share/logstash/data  # persistent Logstash state (sincedb etc.)
              name: c-v-path-logstash8-data
            - mountPath: /spring-boot-logs  # app log files tailed by the file input
              name: c-v-path-filebeat8-spring-logs
            - mountPath: /etc/localtime  # sync container timezone with the host
              name: c-v-path-lt
#        - name: ssx-filebeat-c  # filebeat disabled: small project — Logstash tails the logs directly instead of filebeat+kafka
#          image: docker.elastic.co/beats/filebeat:8.10.2
#          imagePullPolicy: IfNotPresent
#          volumeMounts:
#            - mountPath: /usr/share/filebeat/filebeat.yml
#              name: c-v-path-filebeat8-conf
#            - mountPath: /usr/share/filebeat/data
#              name: c-v-path-filebeat8-data
#            - mountPath: /spring-boot-logs
#              name: c-v-path-filebeat8-spring-logs
#            - mountPath: /etc/localtime
#              name: c-v-path-lt
      # hostPath volumes: every directory must be pre-created on the node and
      # writable by the container users (the original setup used chmod 777,
      # otherwise the pods cannot access the paths)
      volumes:
        - name: c-v-path-elasticsearch8-data
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/elasticsearch8/data
        - name: c-v-path-elasticsearch8-logs
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/elasticsearch8/logs
        - name: c-v-path-elasticsearch8-cache
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/elasticsearch8/.cache
        - name: c-v-path-kibana8-data
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/kibana8/data
        - name: c-v-path-kibana8-config
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/kibana8/config
        - name: c-v-path-logstash8-conf
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/logstash8/myconf
        - name: c-v-path-logstash8-data
          hostPath:
            path: /home/app/apps/k8s/for_docker_volume/elk/logstash8/data
        - name: c-v-path-lt
          hostPath:
            path: /etc/localtime  # host timezone file
#        - name: c-v-path-filebeat8-conf  # filebeat disabled, see the commented-out container
#          hostPath:
#            path: /home/app/apps/k8s/for_docker_volume/elk/filebeat8/myconf/filebeat.yml
#        - name: c-v-path-filebeat8-data
#          hostPath:
#            path: /home/app/apps/k8s/for_docker_volume/elk/filebeat8/data
        - name: c-v-path-filebeat8-spring-logs
          hostPath:
            path: /home/ssx/appdata/ssx-log/docker-log  # where the Spring Boot apps write logs on the host
      nodeSelector:  # pin the pod to node101 so the hostPath data stays with it
        kubernetes.io/hostname: node101
---
# ClusterIP Service exposing Elasticsearch (9200) and Kibana (5601) inside
# the cluster under the DNS name ssx-elk-dmsv.ssx (used by logstash.conf).
apiVersion: v1
kind: Service
metadata:
  name: ssx-elk-dmsv
  namespace: ssx
spec:
  ports:
    - port: 9200
      name: ssx-elk8-9200
      protocol: TCP
      targetPort: 9200  # Elasticsearch containerPort in the Deployment
    - port: 5601  # this entry is what makes Kibana reachable through the Service
      name: ssx-kibana8
      protocol: TCP
      targetPort: 5601  # Kibana containerPort in the Deployment
  selector:
    app: ssx-elk-dmsv
  type: ClusterIP

Logo

K8S/Kubernetes社区为您提供最前沿的新闻资讯和知识内容

更多推荐