I. Elasticsearch Setup

1. Download

wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.16.2-linux-x86_64.tar.gz
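Elastic publishes a .sha512 file alongside each artifact, so the download can optionally be verified before extracting:

wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.16.2-linux-x86_64.tar.gz.sha512
shasum -a 512 -c elasticsearch-7.16.2-linux-x86_64.tar.gz.sha512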

2. Extract and install

cd /data/elk/

tar -zxvf elasticsearch-7.16.2-linux-x86_64.tar.gz

3. Edit elasticsearch.yml

# Must be identical on every node that joins the cluster
cluster.name: es-cluster
discovery.seed_hosts: ["10.16.1.152:9300","10.16.1.141:9300","10.16.1.136:9300"]
cluster.initial_master_nodes: ["node-1"]
## Note: the discovery.zen.* settings below are legacy 6.x names; 7.x still
## accepts them but logs deprecation warnings, and quorum is handled automatically
## How often to ping each node (master-election / inter-node timeouts;
## tune these to your environment)
discovery.zen.fd.ping_interval: 30s
## Timeout for each ping
discovery.zen.fd.ping_timeout: 120s
## How many failed pings before a node is considered down
discovery.zen.fd.ping_retries: 6
## Recommended directory layout
path.logs: /data/elk/log/elasticsearch
path.data: /data/elk/data/elasticsearch
## Split-brain protection: quorum of master-eligible nodes, usually (n/2 + 1)
## (ignored in 7.x, kept here for reference)
discovery.zen.minimum_master_nodes: 2
## Start state recovery once this many nodes have joined
gateway.recover_after_nodes: 2
## Expected cluster size; shard allocation starts as soon as this many nodes are up
gateway.expected_nodes: 3
## Otherwise wait at most this long before recovering anyway
gateway.recover_after_time: 1m
node.name: node-1
node.master: true
node.data: true
node.ingest: true
http.cors.enabled: true
## allow-origin expects a string pattern, not a boolean
http.cors.allow-origin: "*"
network.host: 10.16.1.152

On the other nodes, only node.name and network.host need to change; see the sketch below.
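For instance, node-2's elasticsearch.yml would carry the same settings with only these two lines changed (values taken from the cluster layout above):

node.name: node-2
network.host: 10.16.1.141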

4. Start

To run it in the background: ./bin/elasticsearch -d

If startup fails with "max file descriptors [4096] for elasticsearch process is too low", the user's open-file limit is too low. Raise it:

vim /etc/security/limits.conf

* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096

If the new limits do not take effect, the cause is usually SSH connection reuse: kill all existing SSH sessions and log in again so the new limits are picked up.
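A related startup error on many hosts is "max virtual memory areas vm.max_map_count [65530] is too low"; Elasticsearch needs a larger mmap count:

sysctl -w vm.max_map_count=262144
# persist across reboots
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf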

5. Check the cluster

[root@ecs-f595-0304908 elasticsearch-7.16.2]$ curl 'http://10.16.1.152:9200/_cat/nodes'
10.16.1.152 30 40 1 0.15 0.07 0.05 cdfhilmrstw * node-1
10.16.1.136 25 45 1 0.00 0.01 0.05 cdfhilmrstw - node-3
10.16.1.141 29 29 0 0.00 0.01 0.05 cdfhilmrstw - node-2
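Cluster health should report green once all shards are allocated (yellow while replicas are still pending):

curl 'http://10.16.1.152:9200/_cluster/health?pretty'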

6. Tuning

vim config/jvm.options

-Xms1g
-Xmx1g

Set -Xms and -Xmx to the same value so the heap never resizes at runtime, and note there is no space between the flag and the size.
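The effective heap can be confirmed per node through the _cat API:

curl 'http://10.16.1.152:9200/_cat/nodes?v&h=name,heap.max'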

II. Kibana Setup

1. Download

wget https://artifacts.elastic.co/downloads/kibana/kibana-7.16.2-linux-x86_64.tar.gz

2. Extract and install

cd /data/elk/

tar -zxvf kibana-7.16.2-linux-x86_64.tar.gz

3. Edit kibana.yml

elasticsearch.hosts: ["http://10.16.1.152:9200","http://10.16.1.141:9200"]
elasticsearch.requestTimeout: 99999
server.host: "0.0.0.0"
server.name: "es_kibana"
server.port: 5601
i18n.locale: "zh-CN"
logging.dest: /data/elk/log/kibana/kibana.log
kibana.index: ".kibana"
server.basePath: "/kibana"
server.rewriteBasePath: true
server.publicBaseUrl: "http://10.16.1.152:5601/kibana"

4. Start

nohup ./bin/kibana &
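Because server.basePath is /kibana with rewriteBasePath enabled, requests must include the prefix; a quick liveness check:

curl 'http://10.16.1.152:5601/kibana/api/status'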

5. Configure Nginx access

location /kibana {
    auth_basic "kibana";
    auth_basic_user_file /data/app/openresty/nginx/db/kibana;
    proxy_set_header Host xxx.xxx.com;
    proxy_pass http://10.16.1.152:5601;
    proxy_redirect ~^http://xxx.xxx.com(.*)   https://xxx.xxx.com$1;
}
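Validate the config and reload Nginx after adding the block:

nginx -t && nginx -s reload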

6. Set an access password

htpasswd -bc kibana kibana kibana
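htpasswd ships with the httpd-tools package; -c creates the file and -b takes the password on the command line (here user kibana, password kibana). A quick check through the proxy, using the placeholder domain from the Nginx config above:

curl -u kibana:kibana 'http://xxx.xxx.com/kibana/api/status'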

III. Logstash Setup

1. Download

wget https://artifacts.elastic.co/downloads/logstash/logstash-7.16.2-linux-x86_64.tar.gz

2. Extract and install

cd /data/elk/

tar -zxvf logstash-7.16.2-linux-x86_64.tar.gz

3. Edit logstash.conf

input {
    beats {
        port => 5044
    }
}
filter {
    grok {
        # Combined-format Nginx access line; the leading IPORHOST is the client
        # address (captured here as http_host), which the geoip filter resolves
        match => { "message" => "%{IPORHOST:http_host} - - \[%{HTTPDATE:timestamp}\] \"%{WORD:request_method} %{NOTSPACE:http_request} HTTP/%{NUMBER:httpversion}\" %{NUMBER:status:int} %{NUMBER:body_sent:int} \"(?:%{URI:referrer}|-)\" \"(%{GREEDYDATA:user_agent}|-)\""}
    }
    date {
        # HTTPDATE as produced by the grok above, e.g. 10/Oct/2023:13:55:36 +0800
        match => ["timestamp", "dd/MMM/yyyy:HH:mm:ss Z"]
        target => "timestamp"
    }
    geoip {
        source => "http_host"
    }
}
output {
    stdout { codec => rubydebug }
    elasticsearch {
        hosts => ["10.16.1.152:9200","10.16.1.141:9200","10.16.1.136:9200"]   # any node in the cluster works here
        index => "nginx-%{+YYYY.MM.dd}"   # no leading dot: dot-prefixed names are reserved for hidden/system indices
    }
}
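The pipeline syntax can be validated without actually starting Logstash:

bin/logstash -f config/logstash.conf --config.test_and_exit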

4. Start

nohup bin/logstash -f config/logstash.conf &

IV. Filebeat Setup

1. Download

wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.16.2-linux-x86_64.tar.gz

2. Extract and install

cd /data/elk/

tar -zxvf filebeat-7.16.2-linux-x86_64.tar.gz

3. Edit filebeat.yml

filebeat.inputs:
- type: log
  paths:
     - /data/app/openresty/nginx/logs/access.log

output.logstash:
    hosts: ["10.16.1.136:5044"]
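Filebeat can check both the config file and connectivity to the Logstash endpoint before running:

./filebeat test config -c filebeat.yml
./filebeat test output -c filebeat.yml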

4. Start

nohup ./filebeat -e -c filebeat.yml &

5. Verify


Create the index, setting the number of shards at creation time (shard count cannot be changed on an existing index):
PUT indexName
{
    "settings": {
        "number_of_shards": 5
    }
}
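Once logs flow through Filebeat → Logstash → Elasticsearch, the daily index and its document count should be visible:

curl 'http://10.16.1.152:9200/_cat/indices/nginx-*?v'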