Prerequisites:

  • 1. The data comes from Redis (Filebeat pushes logs into Redis)
  • 2. Create SSL certificates on the node and enable TLS for Elasticsearch
  • 3. Enable TLS for Kibana on the node
  • 4. CentOS 7, Docker version 18.03.1-ce

Directory structure:

stack_elk/
├── certs
├── create-certs.yml
├── data01
├── elastic-docker-tls.yml
├── images
│   └── kibana
│   │   ├── build.sh
│   │   ├── Dockerfile
│   │   └── logtrail-7.9.1-0.1.31.zip
├── instances.yml
├── kibana.yml
├── logtrail.json
├── logstash
│   ├── config
│   │   ├── logstash.conf
│   │   └── logstash.yml
│   └── docker-compose.yml
└── password

mkdir stack_elk && cd stack_elk
☆ Grant permissions on the ES data directory: chmod 777 data01
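The chmod above assumes the subdirectories from the tree already exist; a minimal preparation sketch, using the names from the listing:

mkdir -p certs data01 images/kibana logstash/config
chmod 777 data01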

I. Create the following Compose and configuration files (ES + Kibana) and configure Transport Layer Security (TLS) encryption

1. (Base file preparation, start) Create instances.yml to identify the instances you need to create certificates for.
instances:
  - name: es01
    dns:
      - es01
      - localhost
    ip:
      - 127.0.0.1

  - name: 'kib01'
    dns:
      - kib01
      - localhost
      
  - name: 'logstash'
    dns:
      - logstash
      - localhost
2. Create .env to set environment variables specifying the Elasticsearch version and where the Elasticsearch certificates will be created.
COMPOSE_PROJECT_NAME=es
CERTS_DIR=/usr/share/elasticsearch/config/certificates
VERSION=7.9.1
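docker-compose reads .env from the project directory automatically. As a quick optional check (not part of the original steps), you can render the compose file and confirm the variables are substituted:

docker-compose -f create-certs.yml config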
3. Create create-certs.yml, used to generate the certificates for Elasticsearch, Kibana, and Logstash.
version: '2.2'

services:
  create_certs:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.1
    #image: elk_es:v1
    container_name: create_certs
    command: >
      bash -c '
        yum install -y -q -e 0 unzip;
        if [[ ! -f /certs/bundle.zip ]]; then
          bin/elasticsearch-certutil cert --silent --pem --in config/certificates/instances.yml -out /certs/bundle.zip;
          unzip /certs/bundle.zip -d /certs;
        fi;
        chown -R 1000:0 /certs
      '
    working_dir: /usr/share/elasticsearch
    volumes:
      #- certs:/certs
      - ./certs:/certs
      - .:$CERTS_DIR
    networks:
      - elastic

#volumes:
#  certs:
#    driver: local

networks:
  elastic:
    driver: bridge
4. Create the single-node ES configuration, elastic-docker-tls.yml

It contains a single-node Elasticsearch and a Kibana instance with TLS enabled; Logstash is configured separately with its own compose file in section II.

version: '2.2'

services:
  es01:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.9.1
    #image: elk_es:v1
    container_name: es01
    restart: always
    environment:
      #- node.name=es01
      #- cluster.name=es-docker-cluster
      #- discovery.seed_hosts=es02,es03
      #- cluster.initial_master_nodes=es01,es02,es03
      # A single-node ES must use single-node discovery
      - "discovery.type=single-node"
      #- bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # Generate and apply a trial license that supports Transport Layer Security.
      - xpack.license.self_generated.type=trial
      - xpack.security.enabled=true
      # Enable TLS to encrypt HTTP (client) communications.
      - xpack.security.http.ssl.enabled=true
      - xpack.security.http.ssl.key=$CERTS_DIR/es01/es01.key
      - xpack.security.http.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.security.http.ssl.certificate=$CERTS_DIR/es01/es01.crt
      # Enable TLS to encrypt inter-node communications.
      - xpack.security.transport.ssl.enabled=true
      # Allow self-signed certificates by not requiring hostname verification.
      - xpack.security.transport.ssl.verification_mode=certificate
      - xpack.security.transport.ssl.certificate_authorities=$CERTS_DIR/ca/ca.crt
      - xpack.security.transport.ssl.certificate=$CERTS_DIR/es01/es01.crt
      - xpack.security.transport.ssl.key=$CERTS_DIR/es01/es01.key
    ulimits:
      memlock:
        # soft is a warning threshold; hard is the real limit, and exceeding it raises an error
        soft: -1
        hard: -1
    volumes:
      - ./data01:/usr/share/elasticsearch/data
      #- ./elk_data:/usr/share/elasticsearch/data
      - ./certs:$CERTS_DIR
    ports:
      - 9200:9200
    networks:
      - elastic
    # Compose file format v2.1+: define a container health check, similar to the Dockerfile HEALTHCHECK instruction
    healthcheck:
      test: curl --cacert $CERTS_DIR/ca/ca.crt -s https://localhost:9200 >/dev/null; if [[ $$? == 52 ]]; then echo 0; else echo 1; fi
      # Interval between checks
      interval: 30s
      # Timeout for the check command
      timeout: 10s
      # Number of retries
      retries: 5


  kib01:
    #image: docker.elastic.co/kibana/kibana:7.9.1
    image: elk_kibana:v1
    container_name: kib01
    build: ./images/kibana
    restart: always
    depends_on: {"es01": {"condition": "service_healthy"}}
    ports:
      - 5601:5601
    environment:
      SERVERNAME: localhost
      ELASTICSEARCH_URL: https://es01:9200
      ELASTICSEARCH_HOSTS: https://es01:9200
      ELASTICSEARCH_USERNAME: kibana_system
      ELASTICSEARCH_PASSWORD: rwASaFJtuxn4fYzcKcCT
      ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt
      SERVER_SSL_ENABLED: "true"
      SERVER_SSL_KEY: $CERTS_DIR/kib01/kib01.key
      SERVER_SSL_CERTIFICATE: $CERTS_DIR/kib01/kib01.crt
    volumes:
      - ./certs:$CERTS_DIR
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
      - ./logtrail.json:/usr/share/kibana/plugins/logtrail/logtrail.json
    networks:
      - elastic


#volumes:
#  data01:
#    driver: local
#  certs:
#    driver: local

networks:
  elastic:
    driver: bridge

① Configure the logtrail plugin for Kibana (skip ①②③ if you don't need this plugin)

cd images/kibana/
Download the matching logtrail plugin package:

wget https://github.com/sivasamyk/logtrail/releases/download/v0.1.31/logtrail-7.9.1-0.1.31.zip
② Create the Kibana Dockerfile
FROM docker.elastic.co/kibana/kibana:7.9.1
# Add the sivasamyk/logtrail plugin to Kibana
ADD ./logtrail-7.9.1-0.1.31.zip /opt/kibana/plugins/logtrail-7.9.1-0.1.31.zip
#WORKDIR ${HOME}
RUN ./bin/kibana-plugin install file:///opt/kibana/plugins/logtrail-7.9.1-0.1.31.zip
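The directory listing shows images/kibana/build.sh but its contents are not included; a plausible minimal version, assuming it simply builds the image tag referenced by elastic-docker-tls.yml:

#!/bin/bash
# Build the custom Kibana 7.9.1 + logtrail image under the tag used in the compose file
docker build -t elk_kibana:v1 .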
③ (Base file preparation, end) Configure the logtrail display rules (logtrail.json)

cat logtrail.json

{
    "version": 2,
    "index_patterns": [{
        "es": {
            "default_index": "st-*",
            "allow_url_parameter": false,
            "timezone": "UTC"
        },
        "tail_interval_in_seconds": 10,
        "es_index_time_offset_in_seconds": 0,
        "display_timezone": "local",
        "display_timestamp_format": "YYYY年MM月DD日 HH:mm:ss",
        "max_buckets": 500,
        "nested_objects": false,
        "default_time_range_in_days": 5,
        "max_hosts": 100,
        "max_events_to_keep_in_viewer": 5000,
        "default_search": "",
        "fields": {
            "mapping": {
                "timestamp": "@timestamp",
                "program": "tags",
                "hostname": "attrs.service",
                "message": "log"
            },
            "message_format": "{{{log}}} | {{{marker}}}",
            "keyword_suffix": "keyword"
        }
    },{
        "es": {
            "default_index": "uat-*",
            "allow_url_parameter": false,
            "timezone": "UTC"
        },
        "tail_interval_in_seconds": 10,
        "es_index_time_offset_in_seconds": 0,
        "display_timezone": "local",
        "display_timestamp_format": "YYYY年MM月DD日 HH:mm:ss",
        "max_buckets": 500,
        "nested_objects": false,
        "default_time_range_in_days": 5,
        "max_hosts": 100,
        "max_events_to_keep_in_viewer": 5000,
        "default_search": "",
        "fields": {
            "mapping": {
                "timestamp": "@timestamp",
                "program": "tags",
                "hostname": "kubernetes.labels.name",
                "message": "message"
            },
            "hostname_format": "{{{kubernetes.namespace}}} | {{{hostname}}}",
            "message_format": "{{{kubernetes.namespace}}} | {{{message}}}",
            "keyword_suffix": "keyword"
        }
    },{
        "es": {
            "default_index": "prod-*",
            "allow_url_parameter": false,
            "timezone": "UTC"
        },
        "tail_interval_in_seconds": 10,
        "es_index_time_offset_in_seconds": 0,
        "display_timezone": "local",
        "display_timestamp_format": "YYYY年MM月DD日 HH:mm:ss",
        "max_buckets": 500,
        "nested_objects": false,
        "default_time_range_in_days": 5,
        "max_hosts": 100,
        "max_events_to_keep_in_viewer": 5000,
        "default_search": "",
        "fields": {
            "mapping": {
                "timestamp": "@timestamp",
                "program": "tags",
                "hostname": "kubernetes.labels.name",
                "message": "message"
            },
            "hostname_format": "{{{kubernetes.namespace}}} | {{{hostname}}}",
            "message_format": "{{{kubernetes.namespace}}} | {{{message}}}",
            "keyword_suffix": "keyword"
        }
    }]
}

Whichever entry comes first in index_patterns has its index loaded first.
default_time_range_in_days: the default time range (in days) to search when no time is specified via the Search button. A value of 0 means logtrail searches all available logs by default.

Note: the unit of default_time_range_in_days is days. If you want it interpreted as hours or minutes instead, edit the plugin's JS inside the container at /opt/kibana/plugins/logtrail/server/routes/server.js, changing the unit in the line containing selectedConfig.default_time_range_in_days,'days').startOf('days').valueOf(); to selectedConfig.default_time_range_in_days,'minutes').startOf('minutes').valueOf(); or selectedConfig.default_time_range_in_days,'hour').startOf('hour').valueOf();
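If you would rather patch the file inside the running container than edit it by hand, a hedged sketch (the exact text in server.js is an assumption based on the note above; check the file first and adjust the sed expression to what it actually contains):

docker exec kib01 sed -i "s/'days').startOf('days')/'hour').startOf('hour')/g" /opt/kibana/plugins/logtrail/server/routes/server.js
docker restart kib01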

For reference, the logtrail.json shipped with the image looks like this:

{
  "version" : 2,
  "index_patterns" : [
    {
      "es": {
        "default_index": "kibana_sample_data_logs"
      },
      "tail_interval_in_seconds": 10,
      "es_index_time_offset_in_seconds": 0,
      "display_timezone": "local",
      "display_timestamp_format": "MMM DD HH:mm:ss",
      "max_buckets": 500,
      "default_time_range_in_days" : 0,
      "max_hosts": 100,
      "max_events_to_keep_in_viewer": 5000,
      "default_search": "",
      "fields" : {
        "mapping" : {
            "timestamp" : "@timestamp",
            "hostname" : "host",
            "program": "machine.os",
            "message": "message"
        },
        "message_format": "{{{message}}}",
        "keyword_suffix" : "keyword"
      },
      "color_mapping" : {
      }
    },
    {
      "es": {
        "default_index": "kibana_sample_data_ecommerce"
      },
      "tail_interval_in_seconds": 10,
      "es_index_time_offset_in_seconds": 0,
      "display_timezone": "local",
      "display_timestamp_format": "MMM DD HH:mm:ss",
      "max_buckets": 500,
      "default_time_range_in_days" : 0,
      "max_hosts": 100,
      "max_events_to_keep_in_viewer": 5000,
      "default_search": "",
      "fields" : {
        "mapping" : {
            "timestamp" : "@timestamp",
            "hostname" : "currency",
            "program": "category",
            "message": "email"
        },
        "message_format": "{{{email}}}",
        "keyword_suffix" : "keyword"
      },
      "color_mapping" : {
      }
    }
  ]
}
1.1. (Start) Generate certificates for Elasticsearch by bringing up the create-certs container:
docker-compose -f create-certs.yml run --rm create_certs
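Optionally, inspect the generated CA and node certificates to confirm the subjects and SANs match instances.yml:

openssl x509 -in certs/ca/ca.crt -noout -subject -dates
openssl x509 -in certs/es01/es01.crt -noout -text | grep -A1 "Subject Alternative Name"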
1.2. Bring up the single-node Elasticsearch cluster:
docker-compose -f elastic-docker-tls.yml up -d
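Optionally, confirm that es01 reports healthy and that the HTTPS endpoint answers against the generated CA. An authentication error here is expected (passwords are only generated in step 1.3); the point is that the TLS handshake itself succeeds:

docker inspect --format '{{.State.Health.Status}}' es01
curl --cacert certs/ca/ca.crt https://localhost:9200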
1.3. Run the elasticsearch-setup-passwords tool to generate passwords for all built-in users (including the kibana_system user).

* Do not change the password of the elastic superuser

docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \
auto --batch --url https://es01:9200"
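The directory listing includes a password file; one way to keep a copy of the generated credentials (an assumption about how that file was used) is to tee the tool's output into it when you run it (the tool is meant to be run only once):

docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \
auto --batch --url https://es01:9200" | tee password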

Make a note of the generated passwords. You must set the kibana_system user's password in the compose file so that Kibana can connect to Elasticsearch, and you need the elastic superuser's password to log in to Kibana and submit requests to Elasticsearch.

1.4. In the elastic-docker-tls.yml compose file, set ELASTICSEARCH_PASSWORD to the password generated for the kibana_system user.
  kib01:
    image: docker.elastic.co/kibana/kibana:${VERSION}
    container_name: kib01
    depends_on: {"es01": {"condition": "service_healthy"}}
    ports:
      - 5601:5601
    environment:
      SERVERNAME: localhost
      ELASTICSEARCH_URL: https://es01:9200
      ELASTICSEARCH_HOSTS: https://es01:9200
      ELASTICSEARCH_USERNAME: kibana_system
      # Change the password here
      ELASTICSEARCH_PASSWORD: CHANGEME
      ELASTICSEARCH_SSL_CERTIFICATEAUTHORITIES: $CERTS_DIR/ca/ca.crt
      SERVER_SSL_ENABLED: "true"
      SERVER_SSL_KEY: $CERTS_DIR/kib01/kib01.key
      SERVER_SSL_CERTIFICATE: $CERTS_DIR/kib01/kib01.crt
    volumes:
      - certs:$CERTS_DIR
    networks:
      - elastic
1.5. Restart ES and Kibana with docker-compose:
docker-compose -f elastic-docker-tls.yml up -d
1.6. Open Kibana at https://localhost:5601

Log in to Kibana as the elastic user.

Teardown: when you are done experimenting, you can remove the containers, network, and volumes by running:
docker-compose -f elastic-docker-tls.yml down -v

References:
Logstash TLS setup reference
ES + Kibana (TLS) encryption
Creating the TLS certificates
logtrail reference

II. Configure Logstash to connect to ES with TLS

cd logstash/
The usernames and passwords referenced in the configuration files below must be the ones generated in step 1.3.

2.1. Configure the docker-compose.yml for Logstash
version: '2'
services:
  logstash:
    container_name: logstash
    #image: logstash_elk:v1
    image: logstash:7.9.1
    ports:
    - 5044:5044
    volumes:
    - ./config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    - ./config/logstash.yml:/usr/share/logstash/config/logstash.yml
    - ../certs/ca/:/etc/logstash/config/certs/

2.2. Create the pipeline configuration file logstash.conf

cat config/logstash.conf

input {
  redis {
      host => "192.168.181.18"
      port => "6379"
      password => "sinoeyes"
      key => "sinoeyes-leo"
      data_type => "list"
      db => "3"
  }
}

filter {
    grok {
        match => { "message" => "%{TIMESTAMP_ISO8601:log_date}\s*(?<TraceId>([\S+]*))\s*(?<thread>([\S+]*))\s*%{LOGLEVEL:log_level}" }
    }

}

output {
    elasticsearch {
        hosts => "192.168.180.47:9200"
        index => "%{tags}-%{+YYYY.MM.dd}"
        user => "elastic"
        password => "cTO87Gal8KYwfRvkuTi0"
        ssl => true
        cacert => '/etc/logstash/config/certs/ca.crt'
    }
}

The grok filter here is explained in Part III; if you don't need it, you can delete the filter {} block.
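Optionally, syntax-check the pipeline before starting the container; this sketch uses the same image tag as docker-compose.yml and the official image's default pipeline path:

docker run --rm -v "$PWD/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf" logstash:7.9.1 --config.test_and_exit -f /usr/share/logstash/pipeline/logstash.conf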

2.3. Create the Logstash settings file

cat config/logstash.yml

# Settings file in YAML
#
# Settings can be specified either in hierarchical form, e.g.:
#
#   pipeline:
#     batch:
#       size: 125
#       delay: 5
#
# Or as flat keys:
#
#   pipeline.batch.size: 125
#   pipeline.batch.delay: 5
#
# ------------  Node identity ------------
#
# Use a descriptive name for the node:
#
# node.name: test
#
# If omitted the node name will default to the machine's host name
#
# ------------ Data path ------------------
#
# Which directory should be used by logstash and its plugins
# for any persistent needs. Defaults to LOGSTASH_HOME/data
#
# path.data:
#
# ------------ Pipeline Settings --------------
#
# The ID of the pipeline.
#
# pipeline.id: main
#
# Set the number of workers that will, in parallel, execute the filters+outputs
# stage of the pipeline.
#
# This defaults to the number of the host's CPU cores.
#
# pipeline.workers: 2
#
# How many events to retrieve from inputs before sending to filters+workers
#
# pipeline.batch.size: 125
#
# How long to wait in milliseconds while polling for the next event
# before dispatching an undersized batch to filters+outputs
#
# pipeline.batch.delay: 50
#
# Force Logstash to exit during shutdown even if there are still inflight
# events in memory. By default, logstash will refuse to quit until all
# received events have been pushed to the outputs.
#
# WARNING: enabling this can lead to data loss during shutdown
#
# pipeline.unsafe_shutdown: false
#
# Set the pipeline event ordering. Options are "auto" (the default), "true" or "false".
# "auto" will  automatically enable ordering if the 'pipeline.workers' setting
# is also set to '1'.
# "true" will enforce ordering on the pipeline and prevent logstash from starting
# if there are multiple workers.
# "false" will disable any extra processing necessary for preserving ordering.
#
pipeline.ordered: auto
#
# ------------ Pipeline Configuration Settings --------------
#
# Where to fetch the pipeline configuration for the main pipeline
#
# path.config:
#
# Pipeline configuration string for the main pipeline
#
# config.string:
#
# At startup, test if the configuration is valid and exit (dry run)
#
# config.test_and_exit: false
#
# Periodically check if the configuration has changed and reload the pipeline
# This can also be triggered manually through the SIGHUP signal
#
# config.reload.automatic: false
#
# How often to check if the pipeline configuration has changed (in seconds)
# Note that the unit value (s) is required. Values without a qualifier (e.g. 60)
# are treated as nanoseconds.
# Setting the interval this way is not recommended and might change in later versions.
#
# config.reload.interval: 3s
#
# Show fully compiled configuration as debug log message
# NOTE: --log.level must be 'debug'
#
# config.debug: false
#
# When enabled, process escaped characters such as \n and \" in strings in the
# pipeline configuration files.
#
# config.support_escapes: false
#
# ------------ HTTP API Settings -------------
# Define settings related to the HTTP API here.
#
# The HTTP API is enabled by default. It can be disabled, but features that rely
# on it will not work as intended.
# http.enabled: true
#
# By default, the HTTP API is bound to only the host's local loopback interface,
# ensuring that it is not accessible to the rest of the network. Because the API
# includes neither authentication nor authorization and has not been hardened or
# tested for use as a publicly-reachable API, binding to publicly accessible IPs
# should be avoided where possible.
#
# http.host: 127.0.0.1
#
# The HTTP API web server will listen on an available port from the given range.
# Values can be specified as a single port (e.g., `9600`), or an inclusive range
# of ports (e.g., `9600-9700`).
#
# http.port: 9600-9700
#
# ------------ Module Settings ---------------
# Define modules here.  Modules definitions must be defined as an array.
# The simple way to see this is to prepend each `name` with a `-`, and keep
# all associated variables under the `name` they are associated with, and
# above the next, like this:
#
# modules:
#   - name: MODULE_NAME
#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
#
# Module variable names must be in the format of
#
# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
#
# modules:
#
# ------------ Cloud Settings ---------------
# Define Elastic Cloud settings here.
# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
# and it may have an label prefix e.g. staging:dXMtZ...
# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
# cloud.id: <identifier>
#
# Format of cloud.auth is: <user>:<pass>
# This is optional
# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
# cloud.auth: elastic:<password>
#
# ------------ Queuing Settings --------------
#
# Internal queuing model, "memory" for legacy in-memory based queuing and
# "persisted" for disk-based acked queueing. Defaults is memory
#
# queue.type: memory
#
# If using queue.type: persisted, the directory path where the data files will be stored.
# Default is path.data/queue
#
# path.queue:
#
# If using queue.type: persisted, the page data files size. The queue data consists of
# append-only data files separated into pages. Default is 64mb
#
# queue.page_capacity: 64mb
#
# If using queue.type: persisted, the maximum number of unread events in the queue.
# Default is 0 (unlimited)
#
# queue.max_events: 0
#
# If using queue.type: persisted, the total capacity of the queue in number of bytes.
# If you would like more unacked events to be buffered in Logstash, you can increase the
# capacity using this setting. Please make sure your disk drive has capacity greater than
# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
# whichever criteria is reached first
# Default is 1024mb or 1gb
#
# queue.max_bytes: 1024mb
#
# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.acks: 1024
#
# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.writes: 1024
#
# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
# Default is 1000, 0 for no periodic checkpoint.
#
# queue.checkpoint.interval: 1000
#
# ------------ Dead-Letter Queue Settings --------------
# Flag to turn on dead-letter queue.
#
# dead_letter_queue.enable: false

# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
# will be dropped if they would increase the size of the dead letter queue beyond this setting.
# Default is 1024mb
# dead_letter_queue.max_bytes: 1024mb

# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
# Default is path.data/dead_letter_queue
#
# path.dead_letter_queue:
#
# ------------ Metrics Settings --------------
#
# Bind address for the metrics REST endpoint
#
# http.host: "127.0.0.1"
#
# Bind port for the metrics REST endpoint, this option also accept a range
# (9600-9700) and logstash will pick up the first available ports.
#
# http.port: 9600-9700
#
# ------------ Debugging Settings --------------
#
# Options for log.level:
#   * fatal
#   * error
#   * warn
#   * info (default)
#   * debug
#   * trace
#
# log.level: info
# path.logs:
#
# ------------ Other Settings --------------
#
# Where to find custom plugins
# path.plugins: []
#
# Flag to output log lines of each pipeline in its separate log file. Each log filename contains the pipeline.name
# Default is false
# pipeline.separate_logs: false
#
# ------------ X-Pack Settings (not applicable for OSS build)--------------
#
# X-Pack Monitoring
# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.username: logstash_system
xpack.monitoring.elasticsearch.password: "MLqDK5TDQdkWYilSsH40"
#xpack.monitoring.elasticsearch.proxy: ["http://proxy:port"]
xpack.monitoring.elasticsearch.hosts: ["https://192.168.180.47:9200"]
# an alternative to hosts + username/password settings is to use cloud_id/cloud_auth
#xpack.monitoring.elasticsearch.cloud_id: monitoring_cluster_id:xxxxxxxxxx
#xpack.monitoring.elasticsearch.cloud_auth: logstash_system:password
# another authentication alternative is to use an Elasticsearch API key
#xpack.monitoring.elasticsearch.api_key: "id:api_key"
#xpack.monitoring.elasticsearch.ssl.certificate_authority: [ "/var/lib/docker/volumes/es_certs/_data/ca/ca.crt" ]
xpack.monitoring.elasticsearch.ssl.certificate_authority: /etc/logstash/config/certs/ca.crt
#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
#xpack.monitoring.elasticsearch.ssl.truststore.password: password
#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.monitoring.elasticsearch.ssl.keystore.password: password
#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
#xpack.monitoring.elasticsearch.sniffing: false
#xpack.monitoring.collection.interval: 10s
#xpack.monitoring.collection.pipeline.details.enabled: true
#
# X-Pack Management
# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
#xpack.management.enabled: false
#xpack.management.pipeline.id: ["main", "apache_logs"]
#xpack.management.elasticsearch.username: logstash_admin_user
#xpack.management.elasticsearch.password: password
#xpack.management.elasticsearch.proxy: ["http://proxy:port"]
#xpack.management.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
# an alternative to hosts + username/password settings is to use cloud_id/cloud_auth
#xpack.management.elasticsearch.cloud_id: management_cluster_id:xxxxxxxxxx
#xpack.management.elasticsearch.cloud_auth: logstash_admin_user:password
# another authentication alternative is to use an Elasticsearch API key
#xpack.management.elasticsearch.api_key: "id:api_key"
#xpack.management.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
#xpack.management.elasticsearch.ssl.truststore.password: password
#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.management.elasticsearch.ssl.keystore.password: password
#xpack.management.elasticsearch.ssl.verification_mode: certificate
#xpack.management.elasticsearch.sniffing: false
#xpack.management.logstash.poll_interval: 5s
2.4. Start it up
docker-compose up -d
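Then watch the Logstash logs to confirm the pipeline starts and that there are no TLS or authentication errors when it connects to Elasticsearch:

docker logs -f logstash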

Logstash TLS setup reference

III. grok usage reference

The filter below is meant to extract the log level.
Example log line:

20-09-29 14:00:43 TraceId:[] [pool-8-thread-1] DEBUG c.s.c.datasource.DynamicDataSource - 使用【pfizer】数据源

The grok expression, defining custom patterns where needed:

%{TIMESTAMP_ISO8601:log_date}\s*(?<TraceId>([\S*]*))\s*(?<thread>([\S*]*))\s*%{LOGLEVEL:log_level}

Result:

{
  "log_date": [
    [
      "2020-09-30 15:10:18.031"
    ]
  ],
  "YEAR": [
    [
      "2020"
    ]
  ],
  "MONTHNUM": [
    [
      "09"
    ]
  ],
  "MONTHDAY": [
    [
      "30"
    ]
  ],
  "HOUR": [
    [
      "15",
      null
    ]
  ],
  "MINUTE": [
    [
      "10",
      null
    ]
  ],
  "SECOND": [
    [
      "18.031"
    ]
  ],
  "ISO8601_TIMEZONE": [
    [
      null
    ]
  ],
  "TraceId": [
    [
      ""
    ]
  ],
  "thread": [
    [
      ""
    ]
  ],
  "log_level": [
    [
      "INFO"
    ]
  ]
}
%{TIMESTAMP_ISO8601:time} - matches a timestamp
%{LOGLEVEL} - matches a log level
%{DATA} - matches arbitrary data
(?<name>([\S+]*)) - a custom regex with a named capture
\s* or \s+ - matches runs of whitespace
\S+ or \S* - matches runs of non-whitespace characters
<name>: - names the captured value (acts as an alias for the field)

Scheme [a-z]+
Method [A-Z]+
Path [0-9a-zA-Z/\.]+
Agent [\s\S]+
Proxy [0-9,\.]+
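The lines above are custom grok pattern definitions (a NAME followed by its regex). A minimal sketch of how they could be used, assuming a patterns file at logstash/config/patterns/custom mounted into the container (the file location and the uppercase names are assumptions, not from the original):

SCHEME [a-z]+
METHOD [A-Z]+
PATHPART [0-9a-zA-Z/\.]+

Reference the directory from logstash.conf via the grok filter's patterns_dir option:

filter {
  grok {
    patterns_dir => ["/usr/share/logstash/config/patterns"]
    match => { "message" => "%{SCHEME:scheme} %{METHOD:method} %{PATHPART:path}" }
  }
}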

IV. Log alerting

1. Create a connector
(screenshot)
2. Create an alert
Check the logs every ten minutes; if more than one ERROR log entry is detected, send an email.
(screenshot)

Alert definition reference

V. Page adjustments

5.1. Logs on the Discover page are not shown in full

Solution:
On the Advanced Settings page, the truncate:maxHeight property specifies the maximum height a table cell occupies when displayed; setting it to 0 removes the limit.

Advanced Settings reference

VI. Scheduled log deletion

#!/bin/bash
#es-index-clear
# Keep only the log indices from the last few days: -5 days || 5 days ago
ST_LAST_DATA=`date -d "-7 days" "+%Y.%m.%d"`
UAT_LAST_DATA=`date -d "-7 days" "+%Y.%m.%d"`
PROD_LAST_DATA=`date -d "-30 days" "+%Y.%m.%d"`

# Delete the expired index for each environment
curl --user elastic:password -XDELETE "https://172.188.180.52:9200/st-${ST_LAST_DATA}" -k
#curl -XGET "https://172.188.180.52:9200/st-${ST_LAST_DATA}"
curl --user elastic:password -XDELETE "https://172.188.180.52:9200/uat-paas-${UAT_LAST_DATA}" -k
#curl -XGET "https://172.188.180.52:9200/uat-dev15-${UAT_LAST_DATA}"
curl --user elastic:password -XDELETE "https://172.188.180.52:9200/prod-admin-paas-${PROD_LAST_DATA}" -k
#curl -XGET "https://172.188.180.52:9200/prod-admin-paas-${PROD_LAST_DATA}"

# Add a scheduled task with crontab -e:
#0 1 * * * /home/stack_elk/clear_index/es-index-clear.sh

-k allows curl to connect to an SSL site without verifying the certificate
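Before scheduling the script, you can list the matching indices to confirm what will be removed (same host and credentials as above):

curl --user elastic:password -k -XGET "https://172.188.180.52:9200/_cat/indices/st-*,uat-*,prod-*?v&s=index"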
