# ================================================================== ELK environment preparation

# Raise per-user limits. '*' applies to every Linux user; log out and back
# in for the new limits to take effect.
# (Original notes said "vi /etc/security/limits.conf" with the lines fused
# onto the command; appended here as a scripted heredoc instead.)
cat >> /etc/security/limits.conf <<'EOF'
* soft nofile 65536
* hard nofile 65536
* soft nproc 2048
* hard nproc 4096
* soft memlock unlimited
* hard memlock unlimited
EOF

# Raise the default max process count (on CentOS 7 this file overrides
# the nproc values in limits.conf).
cat >> /etc/security/limits.d/20-nproc.conf <<'EOF'
* soft nproc 4096
root soft nproc unlimited
EOF

# Raise virtual-memory map count (required by Elasticsearch) and the
# system-wide max open files.
cat >> /etc/sysctl.conf <<'EOF'
vm.max_map_count=655360
fs.file-max=655360
EOF

# Apply the sysctl changes immediately.
sysctl -p

# Power off so a VM snapshot can be taken ("before ELK").
shutdown -h now
# ================================================================== install elasticsearch
tar -zxvf ~/elasticsearch-6.2.4.tar.gz -C /usr/local
# NOTE: the original used "rm –r" with an en-dash (U+2013), which rm rejects.
rm -f ~/elasticsearch-6.2.4.tar.gz

# ================================================================== install logstash
tar -zxvf ~/logstash-6.2.4.tar.gz -C /usr/local
rm -f ~/logstash-6.2.4.tar.gz

# ================================================================== install kibana
tar -zxvf ~/kibana-6.2.4-linux-x86_64.tar.gz -C /usr/local
# Drop the platform suffix from the directory name for a uniform layout.
mv /usr/local/kibana-6.2.4-linux-x86_64 /usr/local/kibana-6.2.4
rm -f ~/kibana-6.2.4-linux-x86_64.tar.gz

# Environment variables
# ================================================================== node1 node2 node3

# Append the export block below to /etc/profile, after the existing
# "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL" line.
# (Manual step — run the editor yourself; an interactive vi would hang a script.)
# vi /etc/profile

export JAVA_HOME=/usr/java/jdk1.8.0_111
export JRE_HOME=/usr/java/jdk1.8.0_111/jre
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1
export SCALA_HOME=/usr/local/scala-2.12.4
export KAFKA_HOME=/usr/local/kafka_2.12-0.10.2.1
export FLUME_HOME=/usr/local/flume-1.8.0
export SPARK_HOME=/usr/local/spark-2.3.0
export STORM_HOME=/usr/local/storm-1.1.0
export REDIS_HOME=/usr/local/redis-4.0.2
export ERLANG_HOME=/usr/local/erlang
export RABBITMQ_HOME=/usr/local/rabbitmq_server-3.7.5
export MONGODB_HOME=/usr/local/mongodb-3.4.5
export NGINX_HOME=/usr/local/nginx
export CATALINA_BASE=/usr/local/tomcat
export CATALINA_HOME=/usr/local/tomcat
export TOMCAT_HOME=/usr/local/tomcat
export KEEPALIVED_HOME=/usr/local/keepalived
export ELASTICSEARCH_HOME=/usr/local/elasticsearch-6.2.4
export LOGSTASH_HOME=/usr/local/logstash-6.2.4
export KIBANA_HOME=/usr/local/kibana-6.2.4
# (The PATH export below was fused onto the KIBANA_HOME line in the original.)
export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin:$SCALA_HOME/bin:$KAFKA_HOME/bin:$FLUME_HOME/bin:$SPARK_HOME/bin:$STORM_HOME/bin:$REDIS_HOME/bin:$ERLANG_HOME/bin:$RABBITMQ_HOME/ebin:$RABBITMQ_HOME/sbin:$MONGODB_HOME/bin:$NGINX_HOME/sbin:$CATALINA_HOME/bin:$KEEPALIVED_HOME/sbin:$ELASTICSEARCH_HOME/bin:$LOGSTASH_HOME/bin:$KIBANA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ================================================================== node1

# Reload the environment and verify the new variables.
source /etc/profile
echo $ELASTICSEARCH_HOME
echo $LOGSTASH_HOME
echo $KIBANA_HOME

# Elasticsearch, Logstash and Kibana refuse to run as root, and Linux
# limits non-root users' open files / threads (raised earlier in limits.conf).
# Create a dedicated account: useradd <user> -g <group> -p <password>.
groupadd elk
# NOTE: the original fused useradd and chown on one line, so chown never ran.
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/

# Switch to the elk account (interactive; run the rest of this section as elk).
su - elk

# Create the Elasticsearch data and log directories.
# (The original left the logs dir commented out, yet path.logs and the
# es.log redirect both point at it — create it here.)
mkdir $ELASTICSEARCH_HOME/data
mkdir $ELASTICSEARCH_HOME/logs

# Append the cluster configuration to elasticsearch.yml
# (originally typed by hand in vi; scripted here as a heredoc).
cat >> $ELASTICSEARCH_HOME/config/elasticsearch.yml <<'EOF'
# cluster name
cluster.name: es_cluster
# node name
node.name: node1
# node host/ip to bind
network.host: node1
http.port: 9200
# TCP transport port
transport.tcp.port: 9300
# data directory
path.data: /usr/local/elasticsearch-6.2.4/data
# log directory
path.logs: /usr/local/elasticsearch-6.2.4/logs
# eligible to act as master
node.master: true
# holds data
node.data: true
# multicast discovery (disabled/removed in recent ES versions)
# discovery.zen.ping.multicast.enabled: true
# initial list of master-eligible nodes probed at startup
discovery.zen.ping.unicast.hosts: ["node1","node2","node3"]
# quorum of master-eligible nodes (3 nodes -> 2, avoids split brain)
discovery.zen.minimum_master_nodes: 2
# allow cross-origin requests (needed by UIs such as cerebro/head)
http.cors.enabled: true
http.cors.allow-origin: "*"
EOF

# Back to root, then copy the configured install to the other nodes.
su root
scp -r $ELASTICSEARCH_HOME node2:/usr/local/
scp -r $ELASTICSEARCH_HOME node3:/usr/local/

# ================================================================== node2 node3

# Reload the environment and verify the copied install is visible.
source /etc/profile
echo $ELASTICSEARCH_HOME

# ================================================================== node2

groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/

# Switch to the elk account, then fix this node's identity in
# elasticsearch.yml (the copy from node1 still says node1).
# Scripted with sed instead of the original manual vi edit.
su - elk
sed -i 's/^node\.name: node1$/node.name: node2/' $ELASTICSEARCH_HOME/config/elasticsearch.yml
sed -i 's/^network\.host: node1$/network.host: node2/' $ELASTICSEARCH_HOME/config/elasticsearch.yml

# ================================================================== node3

groupadd elk
useradd elk -g elk -p 123456
chown -R elk:elk $ELASTICSEARCH_HOME/

# Switch to the elk account and set node3's identity.
su - elk
sed -i 's/^node\.name: node1$/node.name: node3/' $ELASTICSEARCH_HOME/config/elasticsearch.yml
sed -i 's/^network\.host: node1$/network.host: node3/' $ELASTICSEARCH_HOME/config/elasticsearch.yml

su - root

# Power off for a snapshot ("before Elasticsearch start").
shutdown -h now

# Start & health-check Elasticsearch

# ==================================================================node1 node2 node3

su - elk

# Launch ES in the background; stdout/stderr go to es.log.
# NOTE(review): the logs dir creation was commented out earlier — confirm
# $ELASTICSEARCH_HOME/logs exists before launching, or this redirect fails.
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &
# $ELASTICSEARCH_HOME/bin/elasticsearch   # foreground alternative
curl -XGET 'http://node1:9200/_cat/nodes?pretty' # list cluster nodes
curl http://node1:9200/_cluster/health # cluster health summary (JSON)
curl http://node1:9200/_cat # list the available _cat endpoints
curl http://node1:9200/_cat/health # cluster health in _cat (tabular) form

# Install the cerebro Elasticsearch admin UI
# https://github.com/lmenezes/cerebro/releases
# ================================================================== cerebro install

su - root

# Extract and normalize the directory name.
# (Original used "tar -xivf": -z was missing and -i means "ignore zeroed
# blocks", which is not wanted here.)
tar -zxvf ~/cerebro-0.8.1.tgz -C /usr/local/
mv /usr/local/cerebro-0.8.1 /usr/local/cerebro

# Create the log dir, THEN start cerebro in the background.
# (The original fused both onto one mkdir line, so cerebro never started.)
mkdir /usr/local/cerebro/logs
nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &
# /usr/local/cerebro/bin/cerebro &   # foreground alternative; default port 9000
# Browse to http://node1:9000 and enter http://node1:9200 in the address box.

# Configure kibana
# ================================================================== node1

chown -R elk:elk $KIBANA_HOME/

# Switch to the elk account and create the log directory.
su - elk
mkdir $KIBANA_HOME/logs

# Append server settings to kibana.yml.
# NOTE: Kibana 6.2 accepts a single 'elasticsearch.url'; the original listed
# node1/node2/node3 as three duplicate YAML keys, so only the last (node3)
# took effect. Point at one node (or a load balancer) instead.
cat >> $KIBANA_HOME/config/kibana.yml <<'EOF'
server.port: 5601
server.host: "node1"
server.name: "kibana-master"
elasticsearch.url: "http://node1:9200"
EOF

# Start kibana in the background, then browse to http://node1:5601.
nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &
# $KIBANA_HOME/bin/kibana   # foreground alternative

# Configure logstash
# ================================================================== node1

su - root

chown -R elk:elk $LOGSTASH_HOME/

# ll $LOGSTASH_HOME

# Switch to the elk account and create the log directory.
su - elk
mkdir $LOGSTASH_HOME/logs

# Point logstash at its data & log directories
# (originally typed by hand in vi; scripted here as a heredoc).
cat >> $LOGSTASH_HOME/config/logstash.yml <<'EOF'
path.data: /usr/local/logstash-6.2.4/data
path.logs: /usr/local/logstash-6.2.4/logs
EOF

# Back to root, then copy the configured install to the other nodes.
su - root
scp -r $LOGSTASH_HOME node2:/usr/local/
scp -r $LOGSTASH_HOME node3:/usr/local/

# ================================================================== node2 node3

su - root

# Reload the environment and verify the copied install is visible.
source /etc/profile
echo $LOGSTASH_HOME
# Hand ownership to the elk user.
# (The original fused this onto the echo line, so chown never ran —
# it was merely printed.)
chown -R elk:elk $LOGSTASH_HOME/

# Start everything
# ==================================================================node1 node2 node3

# Switch to the elk account
su - elk # then start Elasticsearch if it is not already running
nohup $ELASTICSEARCH_HOME/bin/elasticsearch > $ELASTICSEARCH_HOME/logs/es.log 2>&1 &

# ==================================================================node1

nohup /usr/local/cerebro/bin/cerebro > /usr/local/cerebro/logs/cerebro.log 2>&1 &

nohup $KIBANA_HOME/bin/kibana > $KIBANA_HOME/logs/kibana.log 2>&1 &

# ================================================================== node1

# Smoke-test logstash: echo stdin back to stdout.
$LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {} }'
# Same, with the rubydebug codec for structured event output:
# $LOGSTASH_HOME/bin/logstash -e 'input { stdin { } } output { stdout {codec => rubydebug} }'

# --- Pipeline 1: ship /var/log/messages into Elasticsearch ---
su - root
chown -R elk:elk /var/log/messages
su - elk
# Write the pipeline config (originally typed by hand in vi).
cat > $LOGSTASH_HOME/config/system.conf <<'EOF'
input {
  file {
    path => "/var/log/messages"
    type => "system"
    start_position => "beginning"
  }
}
output {
  elasticsearch {
    hosts => ["node1:9200","node2:9200","node3:9200"]
    index => "system-%{+YYYY.MM.dd}"
  }
}
EOF
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system.conf

# --- Pipeline 2: add /var/log/secure as a second input, routed by type ---
su - root
chown -R elk:elk /var/log/secure
su - elk
cat > $LOGSTASH_HOME/config/system_secure.conf <<'EOF'
input {
  file {
    path => "/var/log/messages"
    type => "system"
    start_position => "beginning"
  }
  file {
    path => "/var/log/secure"
    type => "secure"
    start_position => "beginning"
  }
}
output {
  if [type] == "system" {
    elasticsearch {
      hosts => ["node1:9200","node2:9200","node3:9200"]
      index => "logs-system-%{+YYYY.MM.dd}"
    }
  }
  if [type] == "secure" {
    elasticsearch {
      hosts => ["node1:9200","node2:9200","node3:9200"]
      index => "logs-secure-%{+YYYY.MM.dd}"
    }
  }
}
EOF
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/system_secure.conf

# --- Pipeline 3: parse nginx access logs with grok + geoip ---
cat > $LOGSTASH_HOME/config/nginx_log.conf <<'EOF'
input {
  file {
    path => ["/usr/local/nginx/logs/access.log"]
    type => "nginx_log"
    start_position => "beginning"
  }
}
filter {
  grok {
    match => { "message" => "%{IPORHOST:http_host} %{IPORHOST:clientip} - %{USERNAME:remote_user} \[%{HTTPDATE:timestamp}\] \"(?:%{WORD:http_verb} %{NOTSPACE:http_request}(?: HTTP/%{NUMBER:http_version})?|%{DATA:raw_http_request})\" %{NUMBER:response} (?:%{NUMBER:bytes_read}|-) %{QS:referrer} %{QS:agent} %{QS:xforwardedfor} %{NUMBER:request_time:float}"}
  }
  geoip {
    source => "clientip"
  }
}
output {
  if [type] == "nginx_log" {
    elasticsearch {
      hosts => ["node1:9200","node2:9200","node3:9200"]
      index => "nginx_log-%{+YYYY.MM.dd}"
    }
  }
}
EOF
$LOGSTASH_HOME/bin/logstash -f $LOGSTASH_HOME/config/nginx_log.conf

hadoop生态搭建(3节点)-16.elk配置的更多相关文章

  1. hadoop生态搭建(3节点)

    软件:CentOS-7    VMware12    SSHSecureShellClient shell工具:Xshell 规划 vm网络配置 01.基础配置 02.ssh配置 03.zookeep ...

  2. hadoop生态搭建(3节点)-08.kafka配置

    如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...

  3. hadoop生态搭建(3节点)-13.mongodb配置

    # 13.mongodb配置_副本集_认证授权# ==================================================================安装 mongod ...

  4. hadoop生态搭建(3节点)-04.hadoop配置

    如果之前没有安装jdk和zookeeper,安装了的请直接跳过 # https://www.oracle.com/technetwork/java/javase/downloads/java-arch ...

  5. hadoop生态搭建(3节点)-10.spark配置

    # https://www.scala-lang.org/download/2.12.4.html# ================================================= ...

  6. hadoop生态搭建(3节点)-15.Nginx_Keepalived_Tomcat配置

    # Nginx+Tomcat搭建高可用服务器名称 预装软件 IP地址Nginx服务器 Nginx1 192.168.6.131Nginx服务器 Nginx2 192.168.6.132 # ===== ...

  7. hadoop生态搭建(3节点)-03.zookeeper配置

    # https://www.oracle.com/technetwork/java/javase/downloads/java-archive-javase8-2177648.html # ===== ...

  8. hadoop生态搭建(3节点)-09.flume配置

    # http://archive.apache.org/dist/flume/1.8.0/# ===================================================== ...

  9. hadoop生态搭建(3节点)-12.rabbitmq配置

    # 安装 需要相关包# ==================================================================node1 node2 node3 yum ...

随机推荐

  1. Data Flow ->> Source ->> Error Output ->> Error & Truncation: Ignore Failure, Redirect Now, Fail Component

    Ignore Failure: 当该字段遇到错误时,字段值被设为NULL Redirect Now: 把该行输出到SSIS的Source组件的红色输出线,这时红色输出线应该连接一个可以接受结果集的组件 ...

  2. abstract(抽象)修饰符

    abstract(抽象)修饰符,可以修饰类和方法 1,abstract修饰类,会使这个类成为一个抽象类,这个类将不能生成对象实例,但可以做为对象变量声明的类型,也就是编译时类型,抽象类就像当于一类的半 ...

  3. Day04——Python模块

    一.模块简介 模块是实现了某个功能的代码集合,比如几个.py文件可以组成代码集合即模块.其中常见的模块有os模块(系统相关),file模块(文件操作相关) 模块主要分三类: 自定义模块 :所谓自定义模 ...

  4. gulp 前端构建工具入门

    gulp 前端构建工具入门 标签(空格分隔): gulp 1. 安装gulp npm i -g gulp 2. 创建gulp项目 2.1 Hello world 使用npm init初始化项目文件夹. ...

  5. 【深入理解JAVA虚拟机】第二部分.内存自动管理机制.1.内存区域

    1.内存区域 根据<Java虚拟机规范(Java SE 7版)> 的规定,Java虚拟机所管理的内存将会包括以下几个运行时数据区域,如图所示.  程序计数器 当前线程所执行的字节码的行号指 ...

  6. July 19th 2017 Week 29th Wednesday

    Rather than envy others, it is better to speed up their own pace. 与其羡慕他人,不如加快自己的脚步. The envy of othe ...

  7. ZT sem_init sem_wait sem_post sem_destroy

    sem_init() 2009-06-26 16:43:11|  分类: linux |字号 订阅       信号量的数据类型为结构sem_t,它本质上是一个长整型的数.函数sem_init()用来 ...

  8. 使用BAPISDORDER_GETDETAILEDLIST创建S/4HANA的Outbound Delivery

    要在S/4HANA里创建Outbound Delivery,首先要具有一个销售订单,ID为376,通过事务码VA03查看. 只用61行代码就能实现基于这个Sales Order去创建对应的outbou ...

  9. tftp传输可执行程序问题

    昨天搭建了板子从nfs系统启动,这样只要在开发机上编写程序编译,就可以在板子上测试运行了,编写了hello world 程序,用arm编译器编译,在主板上运行,提示出错:什么exception ((什 ...

  10. bootstrap Switch 的一个坑点

    在bootstrap的modal点开的时候改变bootstrapSwitch的状态的时候,会出现第一次打开modal,switch没有变化,第二次以后打开modal才会改变,这个问题找了好久没有找到答 ...