首先安装,这里采用rpm安装:

# rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch

创建repo文件:

[root@node1 logstash]# cat /etc/yum.repos.d/logstash.repo
[logstash-6.x]
name=Elastic repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md

在yum install logstash之前确保已经安装了jdk,也就是确保有java环境:

[root@node1 logstash]# java -version
java version "1.8.0_191"
Java(TM) SE Runtime Environment (build 1.8.0_191-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.191-b12, mixed mode)

确认 java 环境无误后,使用 yum 安装 logstash:

# yum install logstash

查看logstash的配置文件:

[root@node1 logstash]# pwd
/etc/logstash
[root@node1 logstash]# ll
总用量 36
drwxrwxr-x. 2 root root 6 12月 18 06:06 conf.d
-rw-r--r--. 1 root root 1846 12月 18 06:06 jvm.options
-rw-r--r--. 1 root root 4568 12月 18 06:06 log4j2.properties
-rw-r--r--. 1 root root 342 12月 18 06:06 logstash-sample.conf
-rw-r--r--. 1 root root 8194 12月 23 20:32 logstash.yml
-rw-r--r--. 1 root root 285 12月 18 06:06 pipelines.yml
-rw-------. 1 root root 1696 12月 18 06:06 startup.options

首先来一个简单的输入到输出:

# /usr/share/logstash/bin/logstash -e 'input { stdin { } } output { stdout {} }'

但提示有错误:

Could not find log4j2 configuration at path /usr/share/logstash/config/log4j2.properties. Using default config which logs errors to the console

解决办法:

mkdir -p /usr/share/logstash/config/
ln -s /etc/logstash/* /usr/share/logstash/config
chown -R logstash:logstash /usr/share/logstash/config/ 
[root@node1 conf.d]# /usr/share/logstash/bin/logstash -e 'input { stdin { } } output { stdout {} }'
Sending Logstash logs to /var/log/logstash which is now configured via log4j2.properties
[2018-12-24T20:28:50,213][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2018-12-24T20:28:50,240][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.5.4"}
[2018-12-24T20:28:53,997][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[2018-12-24T20:29:04,221][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"main", :thread=>"#<Thread:0x45200d9d run>"}
The stdin plugin is now waiting for input:
[2018-12-24T20:29:04,293][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2018-12-24T20:29:04,570][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
hello world
{
"message" => "hello world",
"host" => "node1",
"@version" => "1",
"@timestamp" => 2018-12-24T12:29:50.015Z
}

退出该 logstash 可使用 Ctrl+D(结束 stdin 输入后进程自动退出)

现在将es.log日志的内容输入到redis中:

[root@node1 conf.d]# cat redis_output.conf
input {
file {
path => ["/var/log/elasticsearch/es.log"]
start_position => "beginning"
}
} output {
redis {
db => "0"                   # 选择的库
data_type => "list"         # 选择的数据类型
host => ["172.16.23.129"]   # redis 服务器地址
key => "es_log"             # key 的名称
}
}

使用docker构建redis服务器:

# docker run --name redis -p 6379:6379 -d redis
# yum install redis    # 该包提供 redis-cli 命令

然后执行:

# /usr/share/logstash/bin/logstash -f redis_output.conf

这边执行的时候,将elasticsearch的服务进行关闭,产生一部分日志:

[root@node1 ~]# systemctl stop elasticsearch

可以看见上面的输出:

[root@node1 conf.d]# /usr/share/logstash/bin/logstash -f redis_output.conf
Sending Logstash logs to /var/log/logstash which is now configured via log4j2.properties
[2018-12-25T20:55:22,977][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2018-12-25T20:55:23,004][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.5.4"}
[2018-12-25T20:55:28,021][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[2018-12-25T20:55:38,691][INFO ][logstash.inputs.file ] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_573723e58bddd528c972283d168c6f3f", :path=>["/var/log/elasticsearch/es.log"]}
[2018-12-25T20:55:38,901][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"main", :thread=>"#<Thread:0x3582c34c run>"}
[2018-12-25T20:55:39,132][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2018-12-25T20:55:39,226][INFO ][filewatch.observingtail ] START, creating Discoverer, Watch with file and sincedb collections
[2018-12-25T20:55:40,236][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

然后打开另外一个终端查看redis的数据:

[root@node1 ~]# redis-cli -h 172.16.23.129
172.16.23.129:6379> KEYS *
1) "es_log"
172.16.23.129:6379> llen es_log
(integer) 7
172.16.23.129:6379> lrange es_log 0 7
1) "{\"message\":\"[2018-12-25T20:59:02,371][INFO ][o.e.n.Node ] [node1] stopping ...\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.484Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"
2) "{\"message\":\"[2018-12-25T20:59:02,981][INFO ][o.e.n.Node ] [node1] stopped\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.525Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"
3) "{\"message\":\"[2018-12-25T20:59:02,877][INFO ][o.e.x.m.j.p.NativeController] [node1] Native controller process has stopped - no new native processes can be started\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.524Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"
4) "{\"message\":\"[2018-12-25T20:59:02,399][INFO ][o.e.x.w.WatcherService ] [node1] stopping watch service, reason [shutdown initiated]\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.523Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"
5) "{\"message\":\"[2018-12-25T20:59:02,981][INFO ][o.e.n.Node ] [node1] closing ...\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.525Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"
6) "{\"message\":\"[2018-12-25T20:59:02,866][INFO ][o.e.x.m.j.p.l.CppLogMessageHandler] [node1] [controller/1513] [Main.cc@148] Ml controller exiting\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.524Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"
7) "{\"message\":\"[2018-12-25T20:59:02,998][INFO ][o.e.n.Node ] [node1] closed\",\"host\":\"node1\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T12:59:03.526Z\",\"path\":\"/var/log/elasticsearch/es.log\"}"

于是日志的数据就顺利地以 key 的形式输出到了 redis 中

现在将nginx的日志输出到redis中:

[root@node1 ~]# cat /etc/logstash/conf.d/nginx_output_redis.conf
input {
file {
path => ["/var/log/nginx/access.log"]
start_position => "beginning"
}
} output {
redis {
db => "0"
data_type => "list"
host => ["172.16.23.129"]
key => "nginx_log"
}
}

配置nginx的日志格式为json输出:

log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent" "$http_x_forwarded_for"';

log_format json '{"@timstamp":"$time_iso8601","@version":"1","client":"$remote_addr","url":"$uri","status":"$status","domain":"$host","host":"$server_addr","size":"$body_bytes_sent","responsetime":"$request_time","referer":"$http_referer","ua":"$http_user_agent"}';

(注意:上面 json 格式中的 "@timstamp" 为原文拼写,规范写法应为 "@timestamp";下文 redis 中抓取到的输出也因此带有同样的拼写。)

然后将main注释:

#access_log  /var/log/nginx/access.log  main;
access_log /var/log/nginx/access.log json;

现在执行:

[root@node1 conf.d]# /usr/share/logstash/bin/logstash -f nginx_output_redis.conf
Sending Logstash logs to /var/log/logstash which is now configured via log4j2.properties
[2018-12-25T21:22:52,300][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2018-12-25T21:22:52,320][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.5.4"}
[2018-12-25T21:22:56,773][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[2018-12-25T21:23:07,349][INFO ][logstash.inputs.file ] No sincedb_path set, generating one based on the "path" setting {:sincedb_path=>"/var/lib/logstash/plugins/inputs/file/.sincedb_d883144359d3b4f516b37dba51fab2a2", :path=>["/var/log/nginx/access.log"]}
[2018-12-25T21:23:07,459][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"main", :thread=>"#<Thread:0x4e31d96 run>"}
[2018-12-25T21:23:07,633][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2018-12-25T21:23:07,688][INFO ][filewatch.observingtail ] START, creating Discoverer, Watch with file and sincedb collections
[2018-12-25T21:23:08,510][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

然后进行手动访问nginx页面:

[root@node1 ~]# for i in `seq 1 10`;do echo $i;curl http://172.16.23.129 &> /dev/null ;done

现在到redis中查看相应的key和值:

172.16.23.129:6379> keys *
1) "es_log"
2) "nginx_log"
172.16.23.129:6379> llen nginx_log
(integer) 14
172.16.23.129:6379> lrange nginx_log 0 4
1) "{\"path\":\"/var/log/nginx/access.log\",\"message\":\"{\\\"@timstamp\\\":\\\"2018-12-25T21:19:54+08:00\\\",\\\"@version\\\":\\\"1\\\",\\\"client\\\":\\\"172.16.23.129\\\",\\\"url\\\":\\\"/index.html\\\",\\\"status\\\":\\\"200\\\",\\\"domain\\\":\\\"172.16.23.129\\\",\\\"host\\\":\\\"172.16.23.129\\\",\\\"size\\\":\\\"14\\\",\\\"responsetime\\\":\\\"0.000\\\",\\\"referer\\\":\\\"-\\\",\\\"ua\\\":\\\"curl/7.29.0\\\"}\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T13:23:09.318Z\",\"host\":\"node1\"}"
2) "{\"path\":\"/var/log/nginx/access.log\",\"message\":\"{\\\"@timstamp\\\":\\\"2018-12-25T21:24:06+08:00\\\",\\\"@version\\\":\\\"1\\\",\\\"client\\\":\\\"172.16.23.129\\\",\\\"url\\\":\\\"/index.html\\\",\\\"status\\\":\\\"200\\\",\\\"domain\\\":\\\"172.16.23.129\\\",\\\"host\\\":\\\"172.16.23.129\\\",\\\"size\\\":\\\"14\\\",\\\"responsetime\\\":\\\"0.000\\\",\\\"referer\\\":\\\"-\\\",\\\"ua\\\":\\\"curl/7.29.0\\\"}\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T13:24:06.952Z\",\"host\":\"node1\"}"
3) "{\"path\":\"/var/log/nginx/access.log\",\"message\":\"{\\\"@timstamp\\\":\\\"2018-12-25T21:24:27+08:00\\\",\\\"@version\\\":\\\"1\\\",\\\"client\\\":\\\"172.16.23.129\\\",\\\"url\\\":\\\"/index.html\\\",\\\"status\\\":\\\"200\\\",\\\"domain\\\":\\\"172.16.23.129\\\",\\\"host\\\":\\\"172.16.23.129\\\",\\\"size\\\":\\\"14\\\",\\\"responsetime\\\":\\\"0.000\\\",\\\"referer\\\":\\\"-\\\",\\\"ua\\\":\\\"curl/7.29.0\\\"}\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T13:24:28.040Z\",\"host\":\"node1\"}"
4) "{\"path\":\"/var/log/nginx/access.log\",\"message\":\"{\\\"@timstamp\\\":\\\"2018-12-25T21:24:27+08:00\\\",\\\"@version\\\":\\\"1\\\",\\\"client\\\":\\\"172.16.23.129\\\",\\\"url\\\":\\\"/index.html\\\",\\\"status\\\":\\\"200\\\",\\\"domain\\\":\\\"172.16.23.129\\\",\\\"host\\\":\\\"172.16.23.129\\\",\\\"size\\\":\\\"14\\\",\\\"responsetime\\\":\\\"0.000\\\",\\\"referer\\\":\\\"-\\\",\\\"ua\\\":\\\"curl/7.29.0\\\"}\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T13:24:28.041Z\",\"host\":\"node1\"}"
5) "{\"path\":\"/var/log/nginx/access.log\",\"message\":\"{\\\"@timstamp\\\":\\\"2018-12-25T21:31:59+08:00\\\",\\\"@version\\\":\\\"1\\\",\\\"client\\\":\\\"172.16.23.129\\\",\\\"url\\\":\\\"/index.html\\\",\\\"status\\\":\\\"200\\\",\\\"domain\\\":\\\"172.16.23.129\\\",\\\"host\\\":\\\"172.16.23.129\\\",\\\"size\\\":\\\"14\\\",\\\"responsetime\\\":\\\"0.000\\\",\\\"referer\\\":\\\"-\\\",\\\"ua\\\":\\\"curl/7.29.0\\\"}\",\"@version\":\"1\",\"@timestamp\":\"2018-12-25T13:32:00.394Z\",\"host\":\"node1\"}"

现在将redis的nginx_log这个key输出到elasticsearch的index中:

[root@node1 ~]# cat /etc/logstash/conf.d/redis_output_es.conf
input {
redis {
db => "0"
data_type => "list"
host => ["172.16.23.129"]
key => "nginx_log"
}
} output {
elasticsearch {
hosts => ["172.16.23.129"]
index => "nginx-log-%{+YYYY.MM.dd}"
}
}

然后执行:

[root@node1 conf.d]# /usr/share/logstash/bin/logstash -f redis_output_es.conf
Sending Logstash logs to /var/log/logstash which is now configured via log4j2.properties
[2018-12-25T21:44:26,608][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2018-12-25T21:44:26,631][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.5.4"}
[2018-12-25T21:44:31,074][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>4, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
[2018-12-25T21:44:32,062][INFO ][logstash.outputs.elasticsearch] Elasticsearch pool URLs updated {:changes=>{:removed=>[], :added=>[http://172.16.23.129:9200/]}}
[2018-12-25T21:44:32,690][WARN ][logstash.outputs.elasticsearch] Restored connection to ES instance {:url=>"http://172.16.23.129:9200/"}
[2018-12-25T21:44:32,927][INFO ][logstash.outputs.elasticsearch] ES Output version determined {:es_version=>6}
[2018-12-25T21:44:32,935][WARN ][logstash.outputs.elasticsearch] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>6}
[2018-12-25T21:44:32,987][INFO ][logstash.outputs.elasticsearch] New Elasticsearch output {:class=>"LogStash::Outputs::ElasticSearch", :hosts=>["//172.16.23.129"]}
[2018-12-25T21:44:33,026][INFO ][logstash.outputs.elasticsearch] Using mapping template from {:path=>nil}
[2018-12-25T21:44:33,092][INFO ][logstash.outputs.elasticsearch] Attempting to install template {:manage_template=>{"template"=>"logstash-*", "version"=>60001, "settings"=>{"index.refresh_interval"=>"5s"}, "mappings"=>{"_default_"=>{"dynamic_templates"=>[{"message_field"=>{"path_match"=>"message", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false}}}, {"string_fields"=>{"match"=>"*", "match_mapping_type"=>"string", "mapping"=>{"type"=>"text", "norms"=>false, "fields"=>{"keyword"=>{"type"=>"keyword", "ignore_above"=>256}}}}}], "properties"=>{"@timestamp"=>{"type"=>"date"}, "@version"=>{"type"=>"keyword"}, "geoip"=>{"dynamic"=>true, "properties"=>{"ip"=>{"type"=>"ip"}, "location"=>{"type"=>"geo_point"}, "latitude"=>{"type"=>"half_float"}, "longitude"=>{"type"=>"half_float"}}}}}}}}
[2018-12-25T21:44:33,177][INFO ][logstash.inputs.redis ] Registering Redis {:identity=>"redis://@172.16.23.129:6379/0 list:nginx_log"}
[2018-12-25T21:44:33,251][INFO ][logstash.pipeline ] Pipeline started successfully {:pipeline_id=>"main", :thread=>"#<Thread:0x1361ed6f run>"}
[2018-12-25T21:44:33,371][INFO ][logstash.outputs.elasticsearch] Installing elasticsearch template to _template/logstash
[2018-12-25T21:44:33,540][INFO ][logstash.agent ] Pipelines running {:count=>1, :running_pipelines=>[:main], :non_running_pipelines=>[]}
[2018-12-25T21:44:34,552][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}

最后在es上进行查看:

[root@node1 ~]# curl -X GET "localhost:9200/_cat/indices?v"
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open test1 ZAjj9y_sSPmGz8ZscIXUsA 5 1 0 0 1.2kb 1.2kb
yellow open nginx-log-2018.12.25 Zr4q_U5bTk2dY9PfEpZz_Q 5 1 14 0 31.8kb 31.8kb

test1 是之前手动创建的,忽略即可;nginx-log-2018.12.25 这个 index 即是刚刚创建的

ELK之logstash6.5的更多相关文章

  1. ELK+Kafka学习笔记之搭建ELK+Kafka日志收集系统集群

    0x00 概述 关于如何搭建ELK部分,请参考这篇文章,https://www.cnblogs.com/JetpropelledSnake/p/9893566.html. 该篇用户为非root,使用用 ...

  2. 从零开始搭建系统2.2——ELK安装及配置

    ELK 最新版本对JDK的最低要求是1.8,安装java_1.8版本 一.Elasticsearch 1.创建目录 2.下载安装包 wget https://artifacts.elastic.co/ ...

  3. elk6.22

    启动错误: 参考网站:https://blog.csdn.net/feinifi/article/details/73633235?utm_source=itdadao&utm_medium= ...

  4. Centos7.5搭建ELK-6.5.0日志分析平台

    Centos7.5搭建ELK-6.5.0日志分析平台 1. 简介 工作工程中,不论是开发还是运维,都会遇到各种各样的日志,主要包括系统日志.应用程序日志和安全日志,对于开发人员来说,查看日志,可以实时 ...

  5. centos7 部署 ELK 日志系统

    =============================================== 2017/12/24_第3次修改                       ccb_warlock 更 ...

  6. 使用elk转存储日志

    ELK指的是由Elastic公司提供的三个开源组件Elasticsearch.Logstash和Kibana. Logstash:开源的服务器端数据处理管道,能够同时 从多个来源采集数据.转换数据,然 ...

  7. elk安装配置

    ELK介绍   官网https://www.elastic.co/cn/ 中文指南https://www.gitbook.com/book/chenryn/elk-stack-guide-cn/det ...

  8. ELK日志收集平台部署

    需求背景 由于公司的后台服务有三台,每当后台服务运行异常,需要看日志排查错误的时候,都必须开启3个ssh窗口进行查看,研发们觉得很不方便,于是便有了统一日志收集与查看的需求. 这里,我用ELK集群,通 ...

  9. ELK+filebeat、kafka、zookeeper搭建文档

    系统:centos 6.5 JDK:1.8 Elasticsearch-6.0.0Logstash-6.0.0kibana-6.0.0zookeeper-3.5.3kafka_2.12-1.0.0fi ...

随机推荐

  1. Solr学习笔记之4、Solr配置文件简介

    Solr学习笔记之4.Solr配置文件简介 摘自<Solr in Action>. 1. solr.xml – Defines one or more cores per Solr ser ...

  2. CSS水平导航栏

    <!DOCTYPE html> <html lang="en"> <head> <meta charset="UTF-8&quo ...

  3. TACOTRON:端到端的语音合成

    tacotron主要是将文本转化为语音,采用的结构为基于encoder-decoder的Seq2Seq的结构.其中还引入了注意机制(attention mechanism).在对模型的结构进行介绍之前 ...

  4. Oracle字符串连接的方法

    Oracle数据库中,使用“||”进行字符串连接,下面就让我们一起了解一下Oracle数据库中字符串连接的方法,希望对您能有所帮助. 和其他数据库系统类似,Oracle字符串连接使用“||”进行字符串 ...

  5. Node.js(daemon),tweak(debug ES)/nodejs forever,supervisor--express

    http://www.cnblogs.com/Darren_code/p/node_express.html express -e nodejs-product sudo npm install fo ...

  6. QtCreator 可以通过 Clang-Tidy 和 CLazy 对你的代码进行静态检查

    QtCreator 可以通过 Clang-Tidy 和 CLazy 对你的代码进行静态检查 打开你的工程,点击Analyze -> Clang-Tidy and CLazy 选择你想分析的 cp ...

  7. idong常用js总结

    1.判断屏幕高度 $(document).ready(function() {    $("#left").height($(window).height());    $(&qu ...

  8. /etc/rc.d/rc.local 自定义开机启动程序

    /etc/rc.d/rc.local 用于用户自定义开机启动程序,可以往里写开机要执行的命令或脚本,线上的配置如下: [root@localhost ~]$ cat /etc/rc.d/rc.loca ...

  9. Android开发之改动屏幕方向

    有的场景下.我们须要把手机屏幕方向改变,以下是我写的一个样例. xml页面文件: <RelativeLayout xmlns:android="http://schemas.andro ...

  10. 怎样知道 CPU 是否支持虚拟化技术(VT) | Linux 中国

    版权声明:本文为博主原创文章,未经博主同意不得转载. https://blog.csdn.net/F8qG7f9YD02Pe/article/details/79832475 wx_fmt=png&a ...