Deploying an ELK Log Analysis System on CentOS 7.6
Download elasticsearch
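The download step itself isn't shown; here is a sketch assuming the 7.1.1 tarball (the yml below uses 7.x settings such as cluster.initial_master_nodes; substitute whatever version you actually deploy). The three-node setup on this host runs three separate copies of the unpacked directory:
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.1.1-linux-x86_64.tar.gz
tar -zxvf elasticsearch-7.1.1-linux-x86_64.tar.gz
mv elasticsearch-7.1.1 /home/elk/elasticsearch
# two more copies for the second and third instances
cp -r /home/elk/elasticsearch /home/elk/elasticsearch1
cp -r /home/elk/elasticsearch /home/elk/elasticsearch2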
Create the elk user and grant it ownership of the directories
useradd elk
chown -R elk:elk /home/elk/elasticsearch
chown -R elk:elk /home/elk/elasticsearch1
chown -R elk:elk /home/elk/elasticsearch2
mkdir -p /home/eladata
mkdir -p /var/log/elk
chown -R elk:elk /home/eladata
chown -R elk:elk /var/log/elk
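One step elasticsearch's bootstrap checks will enforce: the default CentOS 7 vm.max_map_count is too low. Raising it up front avoids the classic startup failure "max virtual memory areas vm.max_map_count [65530] is too low":
sysctl -w vm.max_map_count=262144
echo 'vm.max_map_count=262144' >> /etc/sysctl.conf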
Master node
Unpack elasticsearch and edit the configuration file:
/home/elk/elasticsearch/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node0
node.master: true
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9200
transport.tcp.port: 9301
discovery.zen.minimum_master_nodes: 1
cluster.initial_master_nodes: ["node0"]
Start it manually:
su elk -l -c '/home/elk/elasticsearch/bin/elasticsearch -d'
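Once the node is up, a quick sanity check against the standard REST API (the master's HTTP port is 9200) confirms it is listening:
curl http://192.168.1.70:9200
curl http://192.168.1.70:9200/_cluster/health?pretty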
systemd unit file: elasticsearch.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch
Environment=ES_PATH_CONF=/home/elk/elasticsearch/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
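Then reload systemd and enable the unit; the same three commands apply to every unit file created in this guide (elasticsearch1, elasticsearch2, kibana, zookeeper, kafka):
systemctl daemon-reload
systemctl enable elasticsearch
systemctl start elasticsearch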
Node 1
/home/elk/elasticsearch1/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node1
node.master: false
node.attr.rack: r1
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
transport.tcp.port: 9303
http.port: 9302
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
[root@localhost config]#
Start it manually (the binary inside elasticsearch1 is still named elasticsearch):
su elk -l -c '/home/elk/elasticsearch1/bin/elasticsearch -d'
systemd unit file: elasticsearch1.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch1.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch1
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch1
Environment=ES_PATH_CONF=/home/elk/elasticsearch1/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch1
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch1/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Node 2
/home/elk/elasticsearch2/config
[root@localhost config]# grep -v "^#" elasticsearch.yml
cluster.name: my-application
node.name: node2
node.attr.rack: r1
node.master: false
node.max_local_storage_nodes: 3
path.data: /home/eladata
path.logs: /var/log/elk
http.cors.enabled: true
http.cors.allow-origin: "*"
network.host: 192.168.1.70
http.port: 9203
transport.tcp.port: 9304
discovery.zen.ping.unicast.hosts: ["192.168.1.70:9301"]
discovery.zen.minimum_master_nodes: 1
[root@localhost config]#
Start it manually:
su elk -l -c '/home/elk/elasticsearch2/bin/elasticsearch -d'
systemd unit file: elasticsearch2.service
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat elasticsearch2.service
[Unit]
Description=Elasticsearch
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
RuntimeDirectory=elasticsearch2
PrivateTmp=true
Environment=ES_HOME=/home/elk/elasticsearch2
Environment=ES_PATH_CONF=/home/elk/elasticsearch2/config
Environment=PID_DIR=/var/run/elasticsearch
EnvironmentFile=-/etc/sysconfig/elasticsearch
WorkingDirectory=/home/elk/elasticsearch2
User=elk
Group=elk
ExecStart=/home/elk/elasticsearch2/bin/elasticsearch -p ${PID_DIR}/elasticsearch.pid --quiet
StandardOutput=journal
StandardError=inherit
LimitNOFILE=65536
LimitNPROC=4096
LimitAS=infinity
LimitFSIZE=infinity
TimeoutStopSec=0
KillSignal=SIGTERM
KillMode=process
SendSIGKILL=no
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
[root@localhost system]#
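With all three instances running, confirm they joined one cluster; the elected master is marked with * in the master column:
curl http://192.168.1.70:9200/_cat/nodes?v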
Download logstash
The directory layout is shown below; the default configuration works as-is.
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]#
Start it manually (in the foreground, or in the background with nohup):
./logstash -f ../dev.conf
nohup ./logstash -f ../dev.conf &
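A config file can be validated before launching; --config.test_and_exit (short form -t) parses the pipeline and exits:
./logstash -f ../dev.conf --config.test_and_exit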
Download kibana
The configuration file is as follows:
[root@localhost config]# pwd
/home/elk/kibana/config
[root@localhost config]# grep -v "^#" kibana.yml
server.host: "192.168.1.70"
elasticsearch.hosts: ["http://192.168.1.70:9200"]
kibana.index: ".kibana"
i18n.locale: "zh-CN"
Start it manually:
./kibana
nohup ./kibana &
kibana systemd unit file
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kibana.service
[Unit]
Description=Kibana Server Manager
[Service]
ExecStart=/home/elk/kibana/bin/kibana
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Kibana listens on port 5601; browse to 192.168.1.70:5601.
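Kibana also exposes a status endpoint, handy for checking the service from the shell before opening a browser:
curl http://192.168.1.70:5601/api/status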
Install elasticsearch-head
yum install git npm
git clone https://github.com/mobz/elasticsearch-head.git
[root@localhost elasticsearch-head]# pwd
/home/elk/elasticsearch-head
[root@localhost elasticsearch-head]#
Start it:
npm install
npm run start
nohup npm run start &
Create a test index against elasticsearch, then open 192.168.1.70:9100 in a browser to view the cluster through the head UI:
curl -XPUT '192.168.1.70:9200/book'
Download kafka
Edit the configuration file as follows:
[root@localhost config]# pwd
/home/elk/kafka/config
[root@localhost config]# grep -v "^#" server.properties
broker.id=0
listeners=PLAINTEXT://192.168.1.70:9092
num.network.threads=3
num.io.threads=8
socket.send.buffer.bytes=102400
socket.receive.buffer.bytes=102400
socket.request.max.bytes=104857600
log.dirs=/var/log/kafka-logs
num.partitions=1
num.recovery.threads.per.data.dir=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
log.retention.hours=168
log.segment.bytes=1073741824
log.retention.check.interval.ms=300000
zookeeper.connect=localhost:2181
zookeeper.connection.timeout.ms=6000
group.initial.rebalance.delay.ms=0
delete.topic.enable=true
[root@localhost config]#
Start the zookeeper bundled with kafka
Manual start:
[root@localhost bin]# pwd
/home/elk/kafka/bin
[root@localhost bin]#
./zookeeper-server-start.sh ../config/zookeeper.properties
Starting zookeeper via systemctl
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat zookeeper.service
[Unit]
Description=Zookeeper
After=network.target
[Service]
Type=forking
SyslogIdentifier=zookeeper
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/zookeeper-server-start.sh -daemon /home/elk/kafka/config/zookeeper.properties
ExecStop=/home/elk/kafka/bin/zookeeper-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Start the kafka service
Manual start:
./kafka-server-start.sh ../config/server.properties
Starting kafka via systemctl
[root@localhost system]# pwd
/lib/systemd/system
[root@localhost system]# cat kafka.service
[Unit]
Description=Apache kafka
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=0s
ExecStart=/home/elk/kafka/bin/kafka-server-start.sh /home/elk/kafka/config/server.properties
ExecStop=/home/elk/kafka/bin/kafka-server-stop.sh
[Install]
WantedBy=multi-user.target
[root@localhost system]#
Test kafka
Create a topic named test:
./kafka-topics.sh --create --zookeeper 192.168.1.70:2181 --replication-factor 1 --partitions 1 --topic test
List the topics in kafka:
./kafka-topics.sh --list --zookeeper 192.168.1.70:2181
Produce messages to the test topic:
./kafka-console-producer.sh --broker-list 192.168.1.70:9092 --topic test
Consume messages from the test topic:
./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic test --from-beginning
If the consumer receives the messages you produce, kafka is working.
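To double-check how the topic was created, describe it; the output lists the partition count, leader, and replicas:
./kafka-topics.sh --describe --zookeeper 192.168.1.70:2181 --topic test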
Install filebeat on the target machines
This deployment uses version 6.5.
[root@localhost filebeat]# pwd
/usr/local/filebeat
[root@localhost filebeat]# cat filebeat.yml
filebeat.prospectors:
- type: log
  paths:
    - /opt/logs/workphone-tcp/catalina.out
  fields:
    tag: 54_tcp_catalina_out
- type: log
  paths:
    - /opt/logs/workphone-webservice/catalina.out
  fields:
    tag: 54_web_catalina_out

name: 192.168.1.54

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 3

output.kafka:
  hosts: ["192.168.1.70:9092"]
  topic: "filebeat-log"
  partition.hash:
    reachable_only: true
  compression: gzip
  max_message_bytes: 1000000
  required_acks: 1
[root@localhost filebeat]#
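filebeat can verify both the config syntax and connectivity to the kafka output before running for real, which saves debugging time:
./filebeat test config -c filebeat.yml
./filebeat test output -c filebeat.yml
nohup ./filebeat -e -c filebeat.yml &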
After installing filebeat, edit the logstash configuration.
Logstash configuration
[root@localhost logstash]# pwd
/home/elk/logstash
[root@localhost logstash]# cat dev.conf
input {
  kafka {
    bootstrap_servers => "192.168.1.70:9092"
    topics => ["filebeat-log"]
    codec => "json"
  }
}

filter {
  if [fields][tag] == "jpwebmap" {
    json {
      source => "message"
      remove_field => "message"
    }
    geoip {
      source => "client"
      target => "geoip"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }

  if [fields][tag] == "54_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "54_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "55_tcp_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "55_web_catalina_out" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  }

  if [fields][tag] == "51_nginx80_access_log" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr", "/"]
      # save the last element of the array as the src field
      add_field => ["src", "%{[spstr][-1]}"]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent", "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/home/elk/logstash/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  }
}

output {
  if [fields][tag] == "wori" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "zabbix"
    }
  }
  if [fields][tag] == "54_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_tcp_catalina_out"
    }
  }
  if [fields][tag] == "54_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "54_web_catalina_out"
    }
  }
  if [fields][tag] == "55_tcp_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_tcp_catalina_out"
    }
  }
  if [fields][tag] == "55_web_catalina_out" {
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "55_web_catalina_out"
    }
  }
  if [fields][tag] == "51_nginx80_access_log" {
    stdout {}
    elasticsearch {
      hosts => ["192.168.1.70:9200"]
      index => "51_nginx80_access_log"
    }
  }
}
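If indices never appear in elasticsearch, check the pipeline stage by stage; the quickest probe is to consume the filebeat-log topic directly and confirm events are reaching kafka at all:
./kafka-console-consumer.sh --bootstrap-server 192.168.1.70:9092 --topic filebeat-log --from-beginning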
Other configuration files
index.conf
filter {
  mutate {
    add_field => { "spstr" => "%{[log][file][path]}" }
  }
  mutate {
    split => ["spstr", "/"]
    # save the last element of the array as the src field
    add_field => ["src", "%{[spstr][-1]}"]
  }
  mutate {
    remove_field => [ "friends", "ecs", "agent", "spstr" ]
  }
}
java.conf
filter {
  if [fields][tag] == "java" {
    grok {
      match => ["message", "%{TIMESTAMP_ISO8601:logdate}"]
    }
    date {
      match => ["logdate", "ISO8601"]
    }
    mutate {
      remove_field => [ "logdate" ]
    }
  } # end if
}
kafkainput.conf
input {
  kafka {
    bootstrap_servers => "172.16.11.68:9092"
    #topics => ["ql-prod-tomcat"]
    topics => ["ql-prod-dubbo", "ql-prod-nginx", "ql-prod-tomcat"]
    codec => "json"
    consumer_threads => 5
    decorate_events => true
    #auto_offset_reset => "latest"
    group_id => "logstash"
    #client_id => ""
    ############################# HELK Optimizing Latency #############################
    fetch_min_bytes => "1"
    request_timeout_ms => "305000"
    ############################# HELK Optimizing Availability #############################
    session_timeout_ms => "10000"
    max_poll_records => "550"
    max_poll_interval_ms => "300000"
  }
}

#input {
#  kafka {
#    bootstrap_servers => "172.16.11.68:9092"
#    topics => ["ql-prod-java-dubbo", "ql-prod", "ql-prod-java"]
#    codec => "json"
#    consumer_threads => 15
#    decorate_events => true
#    auto_offset_reset => "latest"
#    group_id => "logstash-1"
#    ############################# HELK Optimizing Latency #############################
#    fetch_min_bytes => "1"
#    request_timeout_ms => "305000"
#    ############################# HELK Optimizing Availability #############################
#    session_timeout_ms => "10000"
#    max_poll_records => "550"
#    max_poll_interval_ms => "300000"
#  }
#}
nginx.conf
filter {
  if [fields][tag] == "nginx-access" {
    mutate {
      add_field => { "spstr" => "%{[log][file][path]}" }
    }
    mutate {
      split => ["spstr", "/"]
      # save the last element of the array as the src field
      add_field => ["src", "%{[spstr][-1]}"]
    }
    mutate {
      remove_field => [ "friends", "ecs", "agent", "spstr" ]
    }
    grok {
      match => { "message" => "%{IPORHOST:remote_addr} - %{DATA:remote_user} \[%{HTTPDATE:time}\] \"%{WORD:method} %{DATA:url} HTTP/%{NUMBER:http_version}\" %{NUMBER:response_code} %{NUMBER:body_sent:bytes} \"%{DATA:referrer}\" \"%{DATA:agent}\" \"%{DATA:x_forwarded_for}\" \"%{NUMBER:request_time}\" \"%{DATA:upstream_addr}\" \"%{DATA:upstream_status}\"" }
      remove_field => "message"
    }
    date {
      match => ["time", "dd/MMM/yyyy:HH:mm:ss Z"]
      target => "@timestamp"
    }
    geoip {
      source => "x_forwarded_for"
      target => "geoip"
      database => "/opt/logstash-6.2.4/GeoLite2-City.mmdb"
      add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
      add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
    }
    mutate {
      convert => [ "[geoip][coordinates]", "float" ]
    }
  } # end if
}
output.conf
output {
  if [fields][tag] == "nginx-access" {
    stdout {}
    elasticsearch {
      user => "elastic"
      password => "WR141bp2sveJuGFaD4oR"
      hosts => ["172.16.11.67:9200"]
      index => "logstash-%{[fields][proname]}-%{+YYYY.MM.dd}"
    }
  }
  #stdout{}
  if [fields][tag] == "java" {
    elasticsearch {
      user => "elastic"
      password => "WR141bp2sveJuGFaD4oR"
      hosts => ["172.16.11.66:9200", "172.16.11.68:9200"]
      index => "%{[host][name]}-%{[src]}"
    }
  }
}
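These split files are meant to be loaded together: logstash accepts a directory for -f and concatenates every file in it. A launch sketch, assuming the files are collected under a conf.d directory (hypothetical path); --config.reload.automatic picks up edits without a restart:
./bin/logstash -f /home/elk/logstash/conf.d/ --config.reload.automatic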