[root@log-node1 ~]# cobbler repo add --name=logstash-2.3 --mirror=http://packages.elastic.co/logstash/2.3/centos --arch=x86_64 --breed=yum

[root@log-node1 ~]# cobbler repo add --name=elasticsearch2 --mirror=http://packages.elastic.co/ela ... entos --arch=x86_64 --breed=yum

[root@log-node1 ~]# cobbler repo add --name=kibana4.5 --mirror=http://packages.elastic.co/kibana/4.5/centos --arch=x86_64 --breed=yum

[root@log-node1 ~]# cobbler reposync

[root@node1 /etc/elasticsearch]# grep '^[a-Z]' elasticsearch.yml
cluster.name: myes
node.name: node1
path.data: /data/es-data
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: 192.168.3.3
http.port: 9200

[root@node1 /etc/elasticsearch]# curl -i -XGET 'http://192.168.3.3:9200/_count?';echo
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 59

{"count":0,"_shards":{"total":0,"successful":0,"failed":0}}

[root@node1 /etc/elasticsearch]# /usr/share/elasticsearch/bin/plugin install mobz/elasticsearch-head

The head plugin can then be accessed at http://192.168.3.3:9200/_plugin/head/
Other plugins can be found by searching GitHub and installed directly by user/repo, for example:

/usr/share/elasticsearch/bin/plugin install lukas-vlcek/bigdesk

Next, switch the discovery mode to unicast. Only node2 needs the change; node1 is left as is, since it is enough for one side to know about the other.
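A rough sketch of what node2's elasticsearch.yml might look like (the node name and bind address are assumptions based on the host list used here, not copied from the real node2):

cluster.name: myes
node.name: node2
path.data: /data/es-data
path.logs: /var/log/elasticsearch
bootstrap.memory_lock: true
network.host: 192.168.3.4
http.port: 9200

On top of that, node2 gets the unicast host list: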
discovery.zen.ping.unicast.hosts: ["192.168.3.3", "192.168.3.4"]

https://www.elastic.co/learn

[root@node1 /data]# curl http://192.168.3.3:9200/_cluster/health?pretty=true
{
"cluster_name" : "myes",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 2,
"number_of_data_nodes" : 2,
"active_primary_shards" : 7,
"active_shards" : 14,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}

[root@node2 elasticsearch]# /opt/logstash/bin/logstash -e 'input{ stdin{} } output{ stdout{} }'

[root@node2 elasticsearch]# /opt/logstash/bin/logstash -e 'input{ stdin{} } output{ stdout{ codec => rubydebug } }'
Settings: Default pipeline workers: 4
Pipeline main started
hello world
{
"message" => "hello world",
"@version" => "1",
"@timestamp" => "2017-01-28T11:06:23.310Z",
"host" => "node2.com"
}

/opt/logstash/bin/logstash -e 'input{ stdin{} } output{ elasticsearch { hosts => ["192.168.3.3:9200"] index => "logstash-%{+YYYY.MM.dd}" } }'

[root@node1 ~]# cat /etc/logstash/conf.d/demo.conf
input {
  stdin{}
}
filter{ }
output{
  elasticsearch {
    hosts => ["192.168.3.3:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
  stdout {
    codec => rubydebug
  }
}

Collecting system logs, several paths into Elasticsearch:

rsyslog -> es
file -> es
tcp -> es

(A sketch of the rsyslog and tcp inputs follows after the notes below.)

Logstash notes:

1. One line of input is one event.
2. A config needs at least an input and an output.
3. An event flows: input -> codec -> filter -> codec -> output.
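As a sketch of the rsyslog -> es and tcp -> es paths listed above (the ports and index name are assumptions for illustration; only the Elasticsearch address is taken from this setup), a Logstash config could look like:

input {
  syslog {
    type => "rsyslog"
    port => 514            # assumed port; rsyslog would be configured to forward here
  }
  tcp {
    type => "tcp-log"
    mode => "server"
    port => 6666           # assumed port for raw TCP lines
  }
}
output {
  elasticsearch {
    hosts => ["192.168.3.3:9200"]
    index => "net-log-%{+YYYY.MM}"   # assumed index name
  }
}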
https://es.xiaoleilu.com/

/opt/logstash/bin/logstash -f /etc/logstash/conf.d/demo.conf

[root@node1 /opt/kibana/config]# egrep -v "#|^$" kibana.yml
server.port: 5601
server.host: "0.0.0.0"
elasticsearch.url: "http://192.168.3.3:9200"
kibana.index: ".kibana"

[root@node1 /opt/kibana/config]# /etc/init.d/kibana start

Next, also collect Elasticsearch's own log, /var/log/elasticsearch/myes.log:

input {
  file {
    path => ["/var/log/messages","/var/log/secure"]
    type => "system-log"
    start_position => "beginning"
  }
  file {
    path => "/var/log/elasticsearch/myes.log"
    type => "es-log"
    start_position => "beginning"
  }
  file {
    path => "/var/log/elasticsearch/myes.log.2017-01-27"
    type => "es1-log"
    start_position => "beginning"
  }
}
filter{ }
output{
  if [type] == "system-log" {
    elasticsearch {
      hosts => ["192.168.3.3:9200"]
      index => "system-log-%{+YYYY.MM}"
    }
  }
  if [type] == "es-log" {
    elasticsearch {
      hosts => ["192.168.3.3:9200"]
      index => "es-log-%{+YYYY.MM}"
    }
  }
  if [type] == "es1-log" {
    elasticsearch {
      hosts => ["192.168.3.3:9200"]
      index => "es1-log-%{+YYYY.MM}"
    }
  }
}

Logstash's file input tracks how far it has read each file in .sincedb files, keyed by inode (the columns are inode, major device number, minor device number, and byte offset):

[root@node1 ~]# for i in `ls .since*`; do echo $i;cat $i; done
.sincedb_1fb922e15ccea4ac0d028d33639ba3ea
86446130 0 64768 54548
86446131 0 64768 924
.sincedb_2a52db197011b7a611fb7594c513ff67
0 0 0
.sincedb_a9b9fed7edff6fd888ffe131a05b5397
210651098 0 64768 4520
210651086 0 64768 4973
.sincedb_b5712b028c2d902c97f521ccf91d1ea8
210651087 0 64768 10086
.sincedb_ec411afaed82c6e15509db4e6d8d51e3

[root@node1 ~]# ls -li /var/log/messages
86446130 -rw------- 1 root root 58431 Feb 3 06:57 /var/log/messages
[root@node1 ~]# ls -li /var/log/elasticsearch/myes.log.2017-01-27
210651087 -rw-r--r-- 1 elasticsearch elasticsearch 10086 Feb 3 06:39 /var/log/elasticsearch/myes.log.2017-01-27
[root@node1 ~]# rm -f .sincedb_*
[root@node1 ~]# pwd
/root

The Elasticsearch log itself contains multi-line entries such as Java stack traces, for example:

[2017-01-27 23:53:54,741][INFO ][plugins ] [node1] modules [reindex, lang-expression, lang-groovy], plugins [], sites []
[2017-01-27 23:53:54,762][ERROR][bootstrap ] Exception
java.lang.IllegalStateException: Failed to created node environment
at org.elasticsearch.node.Node.<init>(Node.java:167)
at org.elasticsearch.node.Node.<init>(Node.java:140)
... 5 more
[2017-01-27 23:56:29,132][INFO ][node ] [node1] version[2.3.5], pid[6215], build[90f439f/2016-07-27T10:36:52Z]
[2017-01-27 23:56:29,133][INFO ][node ] [node1] initializing ...
[2017-01-27 23:56:30,066][INFO ][plugins ] [node1] modules [reindex, lang-expression, lang-groovy], plugins [head], sites [

Multi-line matching: with the multiline codec below, any line that does not start with "[" is merged into the previous event, so a whole stack trace ends up as a single event:
file {
  path => "/var/log/elasticsearch/myes.log.2017-01-27"
  type => "es1-log"
  start_position => "beginning"
  codec => multiline {
    pattern => "^\["
    negate => true
    what => "previous"
  }
}

Next, nginx access logs. The current log_format:

log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                '$status $body_bytes_sent "$http_referer" '
                '"$http_user_agent" "$http_x_forwarded_for"';

Two ways to get them into Elasticsearch:

1. Change the nginx log format to JSON (a log_format sketch follows below the list).
2. Collect the file directly. Another route: push the log into Redis and have a Python script read it from Redis, turn it into JSON, and write it into Elasticsearch (a sketch of such a script also follows below).
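For option 1, a sketch of a JSON log_format (the format name json_access and the field names are assumptions; http_code is chosen to match the field queried in Kibana below). Note that plain nginx does not escape variable values, so quotes inside a field can break the JSON; newer nginx versions support escape=json for this:

log_format json_access '{"@timestamp":"$time_iso8601",'
                       '"remote_addr":"$remote_addr",'
                       '"request":"$request",'
                       '"http_code":"$status",'
                       '"body_bytes_sent":"$body_bytes_sent",'
                       '"http_referer":"$http_referer",'
                       '"http_user_agent":"$http_user_agent",'
                       '"http_x_forwarded_for":"$http_x_forwarded_for"}';
access_log /var/log/nginx/access.log json_access;

Logstash can then read the file with a file input using codec => json.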
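For the Redis route, a rough sketch of the script idea (the Redis list name, index naming, and connection details are assumptions; it uses the redis and elasticsearch Python packages):

import json
import time

import redis
from elasticsearch import Elasticsearch

# Connection details are assumptions for illustration.
r = redis.StrictRedis(host="192.168.3.3", port=6379, db=0)
es = Elasticsearch(["192.168.3.3:9200"])

while True:
    # Block until an entry is pushed onto the assumed list "nginx-access-log"
    _, raw = r.blpop("nginx-access-log")
    doc = json.loads(raw)  # each entry is one JSON-formatted nginx log line
    # Daily index, mirroring the [nginx-access-log-]YYYY.MM.DD pattern used in Kibana below
    es.index(index="nginx-access-log-" + time.strftime("%Y.%m.%d"),
             doc_type="nginx-access",
             body=doc)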
[root@node2 logstash]# cat /var/lib/logstash/.sincedb_0ba90fec979d14f3e8e5ab1191218736
68231552 0 64768 202989

In Kibana, 404s can then be filtered with the query http_code:404, e.g.:

http://192.168.3.3:5601/app/kibana#/discover?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-30d,mode:quick,to:now))&_a=(columns:!(_index,host,http_code),index:%5Bnginx-access-log-%5DYYYY.MM.DD,interval:auto,query:(query_string:(analyze_wildcard:!t,query:'http_code:404')),sort:!('@timestamp',desc),uiState:())
