1. Before installing the components, make sure the following ports are not in use: 5601 (Kibana), 9200 (Elasticsearch), and 5044 (Logstash). Also make sure the kernel parameter vm.max_map_count is set to at least 262144:

 sysctl -w vm.max_map_count=262144
 git clone https://github.com/deviantony/docker-elk.git
 cd docker-elk
 docker-compose up        (start in the foreground)
 docker-compose up -d     (start in the background)
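
The sysctl setting above only lasts until the next reboot. A minimal sketch for making it persistent (assuming root access and a standard /etc/sysctl.conf):

 echo "vm.max_map_count=262144" >> /etc/sysctl.conf
 sysctl -p    # reload kernel parameters from /etc/sysctl.conf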

The docker-elk repository builds the latest ELK images, so pulling the following images manually is optional:

docker pull docker.elastic.co/kibana/kibana-oss:6.2.3
docker pull docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.3
docker pull docker.elastic.co/logstash/logstash-oss:6.2.3

2. After starting, check that everything came up:

curl localhost:9200
curl 'localhost:9200/_cat/indices?v'
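
It also helps to confirm that all three containers are actually running:

docker-compose ps    # should list the elasticsearch, logstash and kibana services as Up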

Stop and restart commands:

docker-compose down -v     (stop and remove the containers, networks and volumes)
docker-compose restart
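
To follow a container's log output while troubleshooting, the standard docker-compose log command applies, e.g.:

docker-compose logs -f elasticsearch    # follow Elasticsearch logs
docker-compose logs -f logstash         # follow Logstash logs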

Common ELK commands (with the X-Pack username/password elastic / mocean123):

curl 'elastic:mocean123@localhost:9200/_cat/indices?v'|grep aaa-
curl 'elastic:mocean123@localhost:9200/_cat/indices?v'
curl -XDELETE "elastic:mocean123@localhost:9200/aaalogin-2019.03.1*"
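
The overall cluster state can be checked the same way via the _cluster/health API:

curl 'elastic:mocean123@localhost:9200/_cluster/health?pretty'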

Logstash stops working and the following error keeps appearing:

[logstash.outputs.elasticsearch] retrying failed action with response code: 403 ({"type"=>"cluster_block_exception", "reason"=>"blocked by: [FORBIDDEN/12/index read-only / allow delete (api)

curl -XPUT -H 'Content-Type: application/json' http://127.0.0.1:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}'
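
This read-only block is usually applied by Elasticsearch itself once the disk flood-stage watermark is exceeded, so also check free disk space on the data path; the _cat/allocation API shows per-node disk usage:

curl 'localhost:9200/_cat/allocation?v'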

3. Modify the configuration files

docker-compose.yml

Mounting - /data/elasticsearch/data:/usr/share/elasticsearch/data will fail with a permission error:

org.elasticsearch.bootstrap.StartupException: java.lang.IllegalStateException: Failed to create node environment
	at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:125) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Elasticsearch.execute(Elasticsearch.java:112) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.cli.EnvironmentAwareCommand.execute(EnvironmentAwareCommand.java:86) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.cli.Command.mainWithoutErrorHandling(Command.java:124) ~[elasticsearch-cli-6.2.4.jar:6.2.4]
	at org.elasticsearch.cli.Command.main(Command.java:90) ~[elasticsearch-cli-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:92) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Elasticsearch.main(Elasticsearch.java:85) ~[elasticsearch-6.2.4.jar:6.2.4]
Caused by: java.lang.IllegalStateException: Failed to create node environment
	at org.elasticsearch.node.Node.<init>(Node.java:267) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.node.Node.<init>(Node.java:246) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:213) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:213) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:323) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:121) ~[elasticsearch-6.2.4.jar:6.2.4]
	... 6 more
Caused by: java.nio.file.AccessDeniedException: /usr/share/elasticsearch/data/nodes
	at sun.nio.fs.UnixException.translateToIOException(UnixException.java:84) ~[?:?]
	at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:102) ~[?:?]
	at sun.nio.fs.UnixException.rethrowAsIOException(UnixException.java:107) ~[?:?]
	at sun.nio.fs.UnixFileSystemProvider.createDirectory(UnixFileSystemProvider.java:384) ~[?:?]
	at java.nio.file.Files.createDirectory(Files.java:674) ~[?:1.8.0_161]
	at java.nio.file.Files.createAndCheckIsDirectory(Files.java:781) ~[?:1.8.0_161]
	at java.nio.file.Files.createDirectories(Files.java:767) ~[?:1.8.0_161]
	at org.elasticsearch.env.NodeEnvironment.<init>(NodeEnvironment.java:204) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.node.Node.<init>(Node.java:264) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.node.Node.<init>(Node.java:246) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Bootstrap$5.<init>(Bootstrap.java:213) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Bootstrap.setup(Bootstrap.java:213) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Bootstrap.init(Bootstrap.java:323) ~[elasticsearch-6.2.4.jar:6.2.4]
	at org.elasticsearch.bootstrap.Elasticsearch.init(Elasticsearch.java:121) ~[elasticsearch-6.2.4.jar:6.2.4]
	... 6 more

Fix it by adjusting the permissions and ownership of the host directory (UID/GID 1000 is the elasticsearch user inside the official image):

chmod 777 /data/elasticsearch/data
chown -R 1000:1000 /data/elasticsearch/data

The full docker-compose.yml:

version: '2'

services:

  elasticsearch:
    build:
      context: elasticsearch/
#      args:
#        ELK_VERSION: 6.2.3
    volumes:
      - ./elasticsearch/config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
      - /data/elasticsearch/data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    environment:
      ES_JAVA_OPTS: "-Xmx256m -Xms256m"
      ELASTIC_PASSWORD: changeme
    networks:
      - elk

  logstash:
    build:
      context: logstash/
 #     args:
 #       ELK_VERSION: 6.2.3
    volumes:
      - ./logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml:ro
      - ./logstash/pipeline:/usr/share/logstash/pipeline:ro
    ports:
      - "5000:5000"
      - "5044:5044"
    environment:
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    networks:
      - elk
    depends_on:
      - elasticsearch

  kibana:
    build:
      context: kibana/
 #     args:
 #       ELK_VERSION: 6.2.3
    volumes:
      - ./kibana/config/kibana.yml:/usr/share/kibana/config/kibana.yml:ro
    ports:
      - "5601:5601"
    networks:
      - elk
    depends_on:
      - elasticsearch

networks:
  elk:
    driver: bridge

The main things to adjust are the three ports sections.

        ES_JAVA_OPTS sets the JVM heap options for each service.

        ELK_VERSION can be left at its default.
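
If the 256 MB heap turns out to be too small, raise ES_JAVA_OPTS in docker-compose.yml; a sketch (pick a value that fits the host):

    environment:
      ES_JAVA_OPTS: "-Xmx1g -Xms1g"   # 1 GB heap instead of the default 256 MB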

elasticsearch

       config/elasticsearch.yml

---
## Default Elasticsearch configuration from elasticsearch-docker.
## from https://github.com/elastic/elasticsearch-docker/blob/master/.tedi/template/elasticsearch.yml
#
cluster.name: "docker-cluster"
network.host: 0.0.0.0

## Use single node discovery in order to disable production mode and avoid bootstrap checks
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/bootstrap-checks.html
#
discovery.zen.minimum_master_nodes: 1
#
discovery.type: single-node
http.port: 9200
## X-Pack settings
## see https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-xpack.html
#
#xpack.license.self_generated.type: trial
xpack.security.enabled: false
#xpack.monitoring.collection.enabled: false

Just keep X-Pack disabled; the Dockerfile and image can stay at their defaults.

logstash

        config/logstash.yml

---
## Default Logstash configuration from logstash-docker.
## from https://github.com/elastic/logstash-docker/blob/master/build/logstash/config/logstash-full.yml
#
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
#xpack.monitoring.elasticsearch.hosts: [ "http://elasticsearch:9200" ]

## X-Pack security credentials
#
xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch.username: elastic
#xpack.monitoring.elasticsearch.password: changeme

X-Pack is disabled here as well.

         pipeline/logstash.conf 

input {
  beats {
    port => 5044
  }
}
## Add your filters / logstash plugins configuration here
##receive request:[pid = 1][pageid = 1][ancillaryPageId = 1][language_code = rus][angel = 1][freq = 1][symbolrate = 1][sid = 1], connect from 112.91.80.108:56863====
filter {
	if "/dvb_subtitle.log"==[source]{
	    grok{
            match =>{"message" => "(?<remsg>.*)"}}
		if [remsg] =~ ".*receive request:.*"{
		    mutate { replace => { "source" => "dvb" } }
		    grok{match =>{"message" => ".*"}} 	  
	    }	
		grok{match =>{"message" => "(?<pid>(?<=\[pid = ).*?(?=\]\[pageid))"}}
		grok{match =>{"message" => "(?<pageid>(?<=\[pageid = ).*?(?=\]\[ancillaryPageId))"}}
		grok{match =>{"message" => "(?<ancillaryPageId>(?<=\[ancillaryPageId = ).*?(?=\]\[language_code))"}}
		grok{match =>{"message" => "(?<language_code>(?<=\[language_code = ).*?(?=\]\[angel))"}}
		grok{match =>{"message" => "(?<angel>(?<=\[angel = ).*?(?=\]\[freq))"}}
		grok{match =>{"message" => "(?<freq>(?<=\[freq = ).*?(?=\]\[symbolrate))"}}
		grok{match =>{"message" => "(?<symbolrate>(?<=\[symbolrate = ).*?(?=\]\[sid))"}}
		grok{match =>{"message" => "(?<sid>(?<=\[sid = ).*?(?=], connect))"}}
		grok{match =>{"message" => "(?<ip>(?<=connect from ).*?(?====))"}}      
        mutate { replace => {"host" =>"%{[beat][name]}"} }		
		mutate{remove_field=>"fields"}
		mutate{remove_field=>"count"}
		mutate{remove_field=>"offset"}
		mutate{remove_field=>"tags"}
		mutate{remove_field=>"remsg"}
		mutate{remove_field=>"beat"}
		mutate{remove_field=>"input_type"}
		mutate{remove_field=>"fields"}
		mutate{remove_field=>"sort"}
		mutate{remove_field=>"@version"}	
	}
}
output {
   if "dvb"==[source]{
	elasticsearch {
        hosts => "elasticsearch:9200"
        index => "dvb-%{+YYYY.MM.dd}" 
	    }
    }
}
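
The pipeline syntax can be checked inside the running Logstash container before relying on it; something along these lines should work (a sketch using the standard Logstash CLI flags):

docker-compose exec logstash /usr/share/logstash/bin/logstash --path.config /usr/share/logstash/pipeline --config.test_and_exit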

kibana

       config/kibana.yml

---
## Default Kibana configuration from kibana-docker.
## https://github.com/elastic/kibana-docker/blob/master/.tedi/template/kibana.yml.j2
#
server.name: kibana
server.host: "0"
elasticsearch.url: http://elasticsearch:9200
#elasticsearch.hosts: [ "http://elasticsearch:9200" ]
#xpack.monitoring.ui.container.elasticsearch.enabled: false
xpack.security.enabled: false
## X-Pack security credentials
#
#elasticsearch.username: elastic
#elasticsearch.password: changeme

Disable X-Pack and set elasticsearch.url.

Once the configuration is done, go back to the docker-elk directory (the one containing docker-compose.yml) and run the start command.

Kibana takes a while to start.
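
To see when Kibana is ready, its status endpoint can be polled from the host:

curl localhost:5601/api/status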

Logstash startup

       When the Logstash log shows the main pipeline starting, Logstash is up.

 

Open ip:5601 in a browser to view the Kibana UI.

 

Installing Filebeat

1. Installation commands

echo "deb https://packages.elastic.co/beats/apt stable main" |  sudo tee -a /etc/apt/sources.list.d/beats.list
wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
apt-get update
apt-get install filebeat
vim /etc/filebeat/filebeat.yml

  Configure filebeat.yml:

################### Filebeat Configuration Example #########################

############################# Filebeat ######################################
filebeat:
  # List of prospectors to fetch data.
  prospectors:
    # Each - is a prospector. Below are the prospector specific configurations
    -
      # Paths that should be crawled and fetched. Glob based paths.
      # To fetch all ".log" files from a specific level of subdirectories
      # /var/log/*/*.log can be used.
      # For each file found under this path, a harvester is started.
      # Make sure no file is defined twice as this can lead to unexpected behaviour.
      paths:
#        - /opt/starview/epg/bin/nohup.out
        - /dvb_subtitle.log
       # - /var/log/*.log
        #- c:\programdata\elasticsearch\logs\*

      # Configure the file encoding for reading files with international characters
      # following the W3C recommendation for HTML5 (http://www.w3.org/TR/encoding).
      # Some sample encodings:
      #   plain, utf-8, utf-16be-bom, utf-16be, utf-16le, big5, gb18030, gbk,
      #    hz-gb-2312, euc-kr, euc-jp, iso-2022-jp, shift-jis, ...
      #encoding: plain

      # Type of the files. Based on this the way the file is read is decided.
      # The different types cannot be mixed in one prospector
      #
      # Possible options are:
      # * log: Reads every line of the log file (default)
      # * stdin: Reads the standard in
      input_type: log

      # Exclude lines. A list of regular expressions to match. It drops the lines that are
      # matching any regular expression from the list. The include_lines is called before
      # exclude_lines. By default, no lines are dropped.
      # exclude_lines: ["^DBG"]

      # Include lines. A list of regular expressions to match. It exports the lines that are
      # matching any regular expression from the list. The include_lines is called before
      # exclude_lines. By default, all the lines are exported.
      # include_lines: ["^ERR", "^WARN"]

      # Exclude files. A list of regular expressions to match. Filebeat drops the files that
      # are matching any regular expression from the list. By default, no files are dropped.
      # exclude_files: [".gz$"]

      # Optional additional fields. These field can be freely picked
      # to add additional information to the crawled log files for filtering
      #fields:
      #  level: debug
      #  review: 1

      # Set to true to store the additional fields as top level fields instead
      # of under the "fields" sub-dictionary. In case of name conflicts with the
      # fields added by Filebeat itself, the custom fields overwrite the default
      # fields.
      #fields_under_root: false

      # Ignore files which were modified more then the defined timespan in the past.
      # In case all files on your system must be read you can set this value very large.
      # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
      #ignore_older: 0

      # Close older closes the file handler for which were not modified
      # for longer then close_older
      # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
      #close_older: 1h

      # Type to be published in the 'type' field. For Elasticsearch output,
      # the type defines the document type these entries should be stored
      # in. Default: log
      document_type: syslog

      # Scan frequency in seconds.
      # How often these files should be checked for changes. In case it is set
      # to 0s, it is done as often as possible. Default: 10s
      #scan_frequency: 10s

      # Defines the buffer size every harvester uses when fetching the file
      #harvester_buffer_size: 16384

      # Maximum number of bytes a single log event can have
      # All bytes after max_bytes are discarded and not sent. The default is 10MB.
      # This is especially useful for multiline log messages which can get large.
      #max_bytes: 10485760

      # Multiline can be used for log messages spanning multiple lines. This is common
      # for Java Stack Traces or C-Line Continuation
      #multiline:

        # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
        #pattern: ^\[

        # Defines if the pattern set under pattern should be negated or not. Default is false.
        #negate: false

        # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
        # that was (not) matched before or after or as long as a pattern is not matched based on negate.
        # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
        #match: after

        # The maximum number of lines that are combined to one event.
        # In case there are more the max_lines the additional lines are discarded.
        # Default is 500
        #max_lines: 500

        # After the defined timeout, an multiline event is sent even if no new pattern was found to start a new event
        # Default is 5s.
        #timeout: 5s

      # Setting tail_files to true means filebeat starts reading new files at the end
      # instead of the beginning. If this is used in combination with log rotation
      # this can mean that the first entries of a new file are skipped.
      #tail_files: false

      # Backoff values define how aggressively filebeat crawls new files for updates
      # The default values can be used in most cases. Backoff defines how long it is waited
      # to check a file again after EOF is reached. Default is 1s which means the file
      # is checked every second if new lines were added. This leads to a near real time crawling.
      # Every time a new line appears, backoff is reset to the initial value.
      #backoff: 1s

      # Max backoff defines what the maximum backoff time is. After having backed off multiple times
      # from checking the files, the waiting time will never exceed max_backoff independent of the
      # backoff factor. Having it set to 10s means in the worst case a new line can be added to a log
      # file after having backed off multiple times, it takes a maximum of 10s to read the new line
      #max_backoff: 10s

      # The backoff factor defines how fast the algorithm backs off. The bigger the backoff factor,
      # the faster the max_backoff value is reached. If this value is set to 1, no backoff will happen.
      # The backoff value will be multiplied each time with the backoff_factor until max_backoff is reached
      #backoff_factor: 2

      # This option closes a file, as soon as the file name changes.
      # This config option is recommended on windows only. Filebeat keeps the files it's reading open. This can cause
      # issues when the file is removed, as the file will not be fully removed until also Filebeat closes
      # the reading. Filebeat closes the file handler after ignore_older. During this time no new file with the
      # same name can be created. Turning this feature on the other hand can lead to loss of data
      # on rotate files. It can happen that after file rotation the beginning of the new
      # file is skipped, as the reading starts at the end. We recommend to leave this option on false
      # but lower the ignore_older value to release files faster.
      #force_close_files: false

    # Additional prospector
    #-
      # Configuration to use stdin input
      #input_type: stdin

  # General filebeat configuration options
  #
  # Event count spool threshold - forces network flush if exceeded
  #spool_size: 2048

  # Enable async publisher pipeline in filebeat (Experimental!)
  #publish_async: false

  # Defines how often the spooler is flushed. After idle_timeout the spooler is
  # Flush even though spool_size is not reached.
  #idle_timeout: 5s

  # Name of the registry file. Per default it is put in the current working
  # directory. In case the working directory is changed after when running
  # filebeat again, indexing starts from the beginning again.
  registry_file: /var/lib/filebeat/registry

  # Full Path to directory with additional prospector configuration files. Each file must end with .yml
  # These config files must have the full filebeat config part inside, but only
  # the prospector part is processed. All global options like spool_size are ignored.
  # The config_dir MUST point to a different directory then where the main filebeat config file is in.
  #config_dir:

###############################################################################
############################# Libbeat Config ##################################
# Base config file used by all other beats for using libbeat features

############################# Output ##########################################

# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
output:
  ### Logstash as output
  logstash:
    # The Logstash hosts
    hosts: ["ip地址:5044"]



############################# Shipper #########################################

shipper:
  # The name of the shipper that publishes the network data. It can be used to group
  # all the transactions sent by a single shipper in the web interface.
  # If this options is not defined, the hostname is used.
  #name:

  # The tags of the shipper are included in their own field with each
  # transaction published. Tags make it easy to group servers by different
  # logical properties.
  #tags: ["service-X", "web-tier"]

  # Uncomment the following if you want to ignore transactions created
  # by the server on which the shipper is installed. This option is useful
  # to remove duplicates if shippers are installed on multiple servers.
  #ignore_outgoing: true

  # How often (in seconds) shippers are publishing their IPs to the topology map.
  # The default is 10 seconds.
  #refresh_topology_freq: 10

  # Expiration time (in seconds) of the IPs published by a shipper to the topology map.
  # All the IPs will be deleted afterwards. Note, that the value must be higher than
  # refresh_topology_freq. The default is 15 seconds.
  #topology_expire: 15

  # Internal queue size for single events in processing pipeline
  #queue_size: 1000

  # Configure local GeoIP database support.
  # If no paths are not configured geoip is disabled.
  #geoip:
    #paths:
    #  - "/usr/share/GeoIP/GeoLiteCity.dat"
    #  - "/usr/local/var/GeoIP/GeoLiteCity.dat"


############################# Logging #########################################

# There are three options for the log output: syslog, file, stderr.
# Under Windows systems, the log files are per default sent to the file output,
# under all other systems per default to syslog.
logging:

  # Send all logging output to syslog. On Windows default is false, otherwise
  # default is true.
  #to_syslog: true

  # Write all logging output to files. Beats automatically rotate files if rotateeverybytes
  # limit is reached.
  #to_files: false

  # To enable logging to files, to_files option has to be set to true
  files:
    # The directory where the log files will written to.
    #path: /var/log/mybeat

    # The name of the files where the logs are written to.
    #name: mybeat

    # Configure log file size limit. If limit is reached, log file will be
    # automatically rotated
    rotateeverybytes: 10485760 # = 10MB

    # Number of rotated log files to keep. Oldest files will be deleted first.
    #keepfiles: 7

  # Enable debug output for selected components. To enable all selectors use ["*"]
  # Other available selectors are beat, publish, service
  # Multiple selectors can be chained.
  #selectors: [ ]

  # Sets log level. The default log level is error.
  # Available log levels are: critical, error, warning, info, debug
  #level: error


Configuration summary

The key settings are: paths, document_type, and the Logstash output hosts.
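
Stripped of the commented-out defaults, the effective configuration boils down to roughly this (a condensed sketch of the file above):

filebeat:
  prospectors:
    -
      paths:
        - /dvb_subtitle.log
      input_type: log
      document_type: syslog
  registry_file: /var/lib/filebeat/registry

output:
  logstash:
    hosts: ["<logstash-host-ip>:5044"]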

Common commands

systemctl status filebeat     # check status
systemctl enable filebeat     # start on boot
systemctl restart filebeat    # restart

After installation, use telnet to verify that port 5044 on the Logstash host is reachable.
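
For example (substitute the Logstash host's address):

telnet <logstash-host-ip> 5044
nc -zv <logstash-host-ip> 5044    # alternative if netcat is installed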
