Collecting Envoy Logs with Fluentd
1. Deploying Fluentd on K8s
Fluentd deployment YAML:
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: kube-system
  labels:
    k8s-app: fluentd-es
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "namespaces"
  - "pods"
  verbs:
  - "get"
  - "watch"
  - "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: fluentd-es
  namespace: kube-system
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-es-v3.0.2
  namespace: kube-system
  labels:
    k8s-app: fluentd-es
    version: v3.0.2
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
      version: v3.0.2
  template:
    metadata:
      labels:
        k8s-app: fluentd-es
        version: v3.0.2
    spec:
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      priorityClassName: system-node-critical
      serviceAccountName: fluentd-es
      containers:
      - name: fluentd-es
        image: quay.io/fluentd_elasticsearch/fluentd:v3.0.2
        env:
        - name: FLUENTD_ARGS
          value: --no-supervisor -q
        resources:
          limits:
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: config-volume
          mountPath: /etc/fluent/config.d
        ports:
        - containerPort: 24231
          name: prometheus
          protocol: TCP
        livenessProbe:
          tcpSocket:
            port: prometheus
          initialDelaySeconds: 5
          timeoutSeconds: 10
        readinessProbe:
          tcpSocket:
            port: prometheus
          initialDelaySeconds: 5
          timeoutSeconds: 10
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: config-volume
        configMap:
          name: fluentd-es-config-v0.2.0
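Note that the DaemonSet mounts the ConfigMap fluentd-es-config-v0.2.0 defined in the next section, so both must be applied together. A minimal rollout check, assuming all of the manifests are saved in a single fluentd-es.yaml (the file name is illustrative):

kubectl apply -f fluentd-es.yaml
# One fluentd-es Pod should become Ready on every node
kubectl -n kube-system get daemonset fluentd-es-v3.0.2
kubectl -n kube-system get pods -l k8s-app=fluentd-es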
Fluentd ConfigMap:
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-es-config-v0.2.0
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  fluentd.conf: |-
    <source>
      @id fluentd-envoy1.log
      @type tail
      path /var/log/containers/envoy1*gateway*.log
      pos_file /var/log/envoy1.log.pos
      tag raw.container.*
      read_from_head true
      <parse>
        @type multi_format
        <pattern>
          format json
          time_key time
          time_format %Y-%m-%dT%H:%M:%S.%NZ
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>
    <source>
      @id fluentd-envoy2.log
      @type tail
      path /var/log/containers/envoy2*gateway*.log
      pos_file /var/log/envoy2.log.pos
      tag raw.container.*
      read_from_head true
      <parse>
        @type multi_format
        <pattern>
          format json
          time_key time
          time_format %Y-%m-%dT%H:%M:%S.%NZ
        </pattern>
        <pattern>
          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
          time_format %Y-%m-%dT%H:%M:%S.%N%:z
        </pattern>
      </parse>
    </source>
    # Detect exceptions in the log output and forward them as one log entry.
    <match raw.container.**>
      @id raw.container
      @type detect_exceptions
      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>
    # Concatenate multi-line logs
    <filter container.**>
      @id filter_concat
      @type concat
      key message
      multiline_end_regexp /\n$/
      separator ""
    </filter>
    # Enriches records with container metadata
    <filter container.**>
      @id filter_kubernetes_metadata
      @type kubernetes_metadata
    </filter>
    # Parses the Envoy access log and splits each field out for Elasticsearch
    <filter container.**>
      @id filter_parser
      @type parser
      key_name log
      reserve_data true
      remove_key_name_field true
      <parse>
        @type multi_format
        <pattern>
          format regexp
          expression /^\[(?<start_time>[^\]]*)\] "(?<method>\S+)(?: +(?<uri>(?:[^\"]|\\.)*?)(?: +\S*)?) (?<protocol>\S+)?" (?<response_code>\S+) (?<response_flags>\S+) (?<bytes_received>\S+) (?<bytes_sent>\S+) (?<duration>\S+) (?<upstream_service_time>\S+) "(?<x_forwarded_for>[^\"]*)" "(?<user_agent>[^\"]*)" "(?<x_request_id>[^\"]*)" "(?<authority>[^\"]*)" "(?<upstream_host>[^\"]*)"?$/
          types start_time:string,method:string,uri:string,protocol:string,response_code:string,response_flags:string,bytes_received:integer,bytes_sent:integer,duration:integer,upstream_service_time:string,x_forwarded_for:string,user_agent:string,x_request_id:string,authority:string,upstream_host:string
        </pattern>
      </parse>
    </filter>
    # Stamps every record with the collecting user and cluster
    <filter **>
      @type record_transformer
      <record>
        user test
        cluster cluster
      </record>
    </filter>
    <match container**>
      @id elasticsearch
      @type elasticsearch
      @log_level info
      type_name _doc
      include_tag_key true
      hosts elasticsearch:9200
      user admin
      password 123456
      logstash_format true
      logstash_prefix envoy-logs
      <buffer>
        @type file
        path /var/log/fluentd-buffers/envoy.buffer
        flush_mode interval
        retry_type exponential_backoff
        flush_thread_count 2
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        chunk_limit_size 2M
        total_limit_size 500M
        overflow_action block
      </buffer>
    </match>
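For reference, the filter_parser regular expression above targets Envoy's default access-log format. A hypothetical example of such a line (all values are illustrative):

[2023-05-01T08:00:00.000Z] "GET /api/v1/users HTTP/1.1" 200 - 0 153 12 11 "10.244.1.1" "curl/7.68.0" "3f2a6d1c-6a0f-4e2b-9c3d-0a1b2c3d4e5f" "example.com" "10.244.2.15:8080"

From this line the filter extracts start_time, method (GET), uri (/api/v1/users), protocol (HTTP/1.1), response_code (200), response_flags (-), bytes_received (0), bytes_sent (153), duration (12), upstream_service_time (11), x_forwarded_for, user_agent, x_request_id, authority, and upstream_host as individual record fields.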
Points to note:
(1) Multiple <source> blocks are used so that only the logs of the intended Pods (matched by the path globs) are collected, rather than the logs of every service on the node.
(2) The record_transformer filter stamps each record with the cluster it was collected from and the collecting user.
(3) The filter_parser filter splits every field of the Envoy access log into its own document field before the record is written to Elasticsearch, which makes aggregations and statistics straightforward (see the query sketch below).
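Once records flow, the split fields can be queried directly. A verification sketch, run from inside the cluster and assuming the elasticsearch Service, user, and password from the <match> block above (logstash_format with logstash_prefix envoy-logs yields daily envoy-logs-YYYY.MM.DD indices):

# Fetch one indexed document to confirm the Envoy fields were split out
curl -s -u admin:123456 "http://elasticsearch:9200/envoy-logs-*/_search?size=1&pretty"

# Example aggregation: request count per response_code (the .keyword sub-field
# assumes Elasticsearch's default dynamic mapping for strings)
curl -s -u admin:123456 -H 'Content-Type: application/json' \
  "http://elasticsearch:9200/envoy-logs-*/_search?size=0&pretty" \
  -d '{"aggs": {"by_code": {"terms": {"field": "response_code.keyword"}}}}'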