Deploying an ELK Log Analysis Stack on the Latest CentOS 7.6
Download Elasticsearch
Create the elk user and grant it ownership of the directories:
useradd elk
chown -R elk:elk /home/elk/elasticsearch
chown -R elk:elk /home/elk/elasticsearch1
chown -R elk:elk /home/elk/elasticsearch2
mkdir -p /home/eladata
mkdir -p /var/log/elk
chown -R elk:elk /home/eladata
chown -R elk:elk /var/log/elk
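Before starting Elasticsearch, the usual CentOS 7 prerequisites should also be in place. This step is not in the original walkthrough, but Elasticsearch normally refuses to start when vm.max_map_count or the elk user's open-file limit is too low; a minimal sketch:
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf
sysctl -p
echo 'elk soft nofile 65536' >> /etc/security/limits.conf
echo 'elk hard nofile 65536' >> /etc/security/limits.conf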
Master node
Extract Elasticsearch and edit the configuration file. All three nodes on this host share the same data path, which is why node.max_local_storage_nodes is set to 3 below.
/home/elk/elasticsearch/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node0
node.master: true
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9200
transport.tcp.port: 9301
discovery.zen.minimum_master_nodes: 1
cluster.initial_master_nodes: ["node0"]
Manual start command:
su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'
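Once the node is up, a quick sanity check is to query the HTTP port configured above (standard Elasticsearch API):
curl http://192.168.1.70:9200/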
Startup unit file: elasticsearch.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch
Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
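With the unit file in place, the master can be managed through systemd as usual:
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch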
Node 1
/home/elk/elasticsearch1/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node1
node.master: false
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
transport.tcp.port: 9303
http.port: 9302
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
[root@localhost config]#
Manual start command:
su elk -l -c '/home/elk/elasticsearch1/bin/elasticsearch -d'
Startup unit file: elasticsearch1.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch1.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch1
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch1
Environment=ES_PATH_CONF=/home/elk/elasticsearch1/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch1
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch1/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Node 2
/home/elk/elasticsearch2/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node2
node.attr.rack: r1
node.master: false
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9203
transport.tcp.port: 9304
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
discovery.zen.minimum_master_nodes: 1
[root@localhost config]#
Manual start command:
su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'
Startup unit file: elasticsearch2.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch2.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch2
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch2
Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch2
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
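After all three services are started, verify that the nodes have formed one cluster (standard _cat and cluster-health APIs; node0 should be marked as master):
curl http://192.168.1.70:9200/_cat/nodes?v
curl http://192.168.1.70:9200/_cluster/health?pretty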
Download Logstash
The directory is as follows; the default configuration is fine:
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]#
Manual start commands:
./logstash -f ../dev.conf
nohup ./logstash -f ../dev.conf &
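Before running the pipeline for real, the configuration can be syntax-checked with Logstash's built-in flag:
./logstash -f ../dev.conf --config.test_and_exit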
Download Kibana
The configuration file is as follows:
[root@localhost config]# pwd
/home/elk/kibana/config
[root@localhost config]# grep -v "^#" kibana.yml
server.host: "192.168.1.70"
elasticsearch.hosts: ["http://192.168.1.70:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"
Manual start commands:
./kibana
nohup ./kibana &
Kibana startup unit file
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kibana.service
[Unit]
Description=Kibana Server Manager
[Service]
ExecStart=/home/elk/kibana/bin/kibana
[Install]
WantedBy=multi-user.target
[root@localhost system]#
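As with Elasticsearch, reload systemd and enable the unit so Kibana starts on boot:
systemctl daemon-reload
systemctl enable kibana
systemctl start kibana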
Kibana listens on port 5601; open 192.168.1.70:5601 in a browser.
Install elasticsearch-head
yum install git npm
git clone https://github.com/mobz/elasticsearch-head.git
[root@localhost elasticsearch-head]# pwd
/home/elk/elasticsearch-head
[root@localhost elasticsearch-head]#
Install dependencies and start it:
npm install
npm run start
nohup npm run start &
Create a test index against Elasticsearch (the http.cors.* settings added earlier are what allow head to talk to the cluster):
curl -XPUT 'http://192.168.1.70:9200/book'
Then browse to 192.168.1.70:9100 to use the head UI.
Download Kafka
Edit the configuration file as follows:
[root@localhost config]# pwd
/home/elk/kafka/config
[root@localhost config]# grep -v "^#" server.properties
broker.id=0
listeners=PLAINTEXT://192.168.1.70:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
[root@localhost config]#
Start the ZooKeeper bundled with Kafka
Manual start:
[root@localhost bin]# pwd
/home/elk/kafka/bin
[root@localhost bin]#
./zookeeper-server-start.sh ../config/zookeeper.properties
Starting ZooKeeper with systemctl
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat zookeeper.service
[Unit]
Description=Zookeeper
After=network.target
[Service]
Type=forking
SyslogIdentifier=zookeeper
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Start the Kafka service
Manual start:
./kafka-server-start.sh ../config/server.properties
Starting Kafka with systemctl
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kafka.service
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/kafka-server-start.sh /home/elk/kafka/config/server.properties
ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#
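Reload systemd and enable both units so they start on boot:
systemctl daemon-reload
systemctl enable zookeeper kafka
systemctl start zookeeper
systemctl start kafka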
Test Kafka
Create a topic named test:
./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test
List the topics in Kafka:
./kafka-topics.sh --list --zookeeper 192.168.1.70:2181
Produce messages to the test topic:
./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test
Consume messages from the test topic:
./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning
If messages typed into the producer show up on the consumer side, Kafka is working.
Install Filebeat on the machines to be monitored
Version 6.5 is used here.
[root@localhost filebeat]# pwd
/usr/local/filebeat
[root@localhost filebeat]# cat filebeat.yml
filebeat.prospectors:
- type: log
  paths:
    - /opt/logs/workphone-tcp/catalina.out
  fields:
    tag: 54_tcp_catalina_out

- type: log
  paths:
    - /opt/logs/workphone-webservice/catalina.out
  fields:
    tag: 54_web_catalina_out

name: 192.168.1.54

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 3

output.kafka:
  hosts: ["192.168.1.70:9092"]
  topic: "filebeat-log"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
[root@localhost filebeat]#
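The original post does not show a start command for Filebeat; a typical way to run the 6.5 tarball in the background is:
cd /usr/local/filebeat
nohup ./filebeat -c filebeat.yml &
To confirm events are reaching Kafka, consume the filebeat-log topic on the Kafka host:
/home/elk/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic filebeat-log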
Once Filebeat is installed, go back to Logstash and edit its pipeline configuration.
Logstash configuration
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]# cat dev.conf
input {
  kafka {
    bootstrap_servers => "192.168.1.70:9092"
    topics => ["filebeat-log"]
    codec => "json"
  }
}
filter {
  if [fields][tag] == "jpwebmap" {
    json {
      source => "message"
      remove_field => "message"
    }
    geoip {
      source => "client"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float"]
    }
  }
  if [fields][tag] == "54_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "54_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "55_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "55_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr" , "/"]
      # save the last element of the array as the src field
      add_field => ["src", "%{[spstr][-1]}" ]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent" , "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/home/elk/logstash/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float"]
    }
  }
}
output {
  if [fields][tag] == "wori" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "zabbix"
    }
  }
  if [fields][tag] == "54_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_tcp_catalina_out"
    }
  }
  if [fields][tag] == "54_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_web_catalina_out"
    }
  }
  if [fields][tag] == "55_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_tcp_catalina_out"
    }
  }
  if [fields][tag] == "55_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_web_catalina_out"
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    stdout {}
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "51_nginx80_access_log"
    }
  }
}
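With dev.conf running, each tag is written to its own index; the indices can be listed with the standard _cat API and then added as index patterns in Kibana:
curl http://192.168.1.70:9200/_cat/indices?v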
Other configuration files
index.conf
filter {
  mutate {
    add_field => { "spstr" => "%{[log][file][path]}" }
  }
  mutate {
    split => ["spstr" , "/"]
    # save the last element of the array as the src field
    add_field => ["src", "%{[spstr][-1]}" ]
  }
  mutate {
    remove_field => [ "friends", "ecs", "agent" , "spstr" ]
  }
}
java.conf
filter {
  if [fields][tag] == "java" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  } # end if
}
kafkainput.conf
input {
  kafka {
    bootstrap_servers => "172.16.11.68:9092"
    #topics => ["ql-prod-tomcat" ]
    topics => ["ql-prod-dubbo","ql-prod-nginx","ql-prod-tomcat" ]
    codec => "json"
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "logstash"
    #client_id => ""
    ############################# HELK Optimizing Latency #############################
    fetch_min_bytes => "1"
    request_timeout_ms => "305000"
    ############################# HELK Optimizing Availability #############################
    session_timeout_ms => "10000"
    max_poll_records => "550"
    max_poll_interval_ms => "300000"
  }
}
#input {
# kafka{
# bootstrap_servers => "172.16.11.68:9092"
# topics => ["ql-prod-java-dubbo","ql-prod","ql-prod-java" ]
# codec => "json"
# consumer_threads => 15
# decorate_events => true
# auto_offset_reset => "latest"
# group_id => "logstash-1"
# ############################# HELK Optimizing Latency #############################
# fetch_min_bytes => "1"
# request_timeout_ms => "305000"
# ############################# HELK Optimizing Availability #############################
# session_timeout_ms => "10000"
# max_poll_records => "550"
# max_poll_interval_ms => "300000"
# }
#}
nginx.conf
filter {
  if [fields][tag] == "nginx-access" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr" , "/"]
      # save the last element of the array as the src field
      add_field => ["src", "%{[spstr][-1]}" ]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent" , "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float"]
    }
  } # end if
}
output.conf
output {
  if [fields][tag] == "nginx-access" {
    stdout {}
    elasticsearch {
      user => "elastic"
      password => "WR141bp2sveJuGFaD4oR"
      hosts => ["172.16.11.67:9200"]
      index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
    }
  }
  #stdout{}
  if [fields][tag] == "java" {
    elasticsearch {
      user => "elastic"
      password => "WR141bp2sveJuGFaD4oR"
      hosts => ["172.16.11.66:9200","172.16.11.68:9200"]
      index => "%{[host][name]}-%{[src]}"
    }
  }
}