##########################################################################################################

Flume installation: after unpacking the tarball, edit the conf/flume-env.sh configuration file and set JAVA_HOME; nothing else is required.
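
A minimal sketch of that step (the tarball name and JDK path are assumptions; adjust to your environment):

tar -zxf apache-flume-1.5.0-cdh5.3.6-bin.tar.gz -C /opt/cdh-5.3.6/
cd /opt/cdh-5.3.6/flume-1.5.0
cp conf/flume-env.sh.template conf/flume-env.sh
# in conf/flume-env.sh, point JAVA_HOME at your JDK:
export JAVA_HOME=/opt/modules/jdk1.7.0_67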

Copy the HDFS jar dependencies into Flume's lib directory (otherwise Flume cannot write data to HDFS).
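
The exact jar set depends on the Hadoop version; a sketch for a Hadoop 2.x layout (jar names and paths are assumptions, copy whatever versions your Hadoop ships):

HADOOP_HOME=/opt/cdh-5.3.6/hadoop-2.5.0
FLUME_HOME=/opt/cdh-5.3.6/flume-1.5.0
cp $HADOOP_HOME/share/hadoop/common/hadoop-common-*.jar \
   $HADOOP_HOME/share/hadoop/hdfs/hadoop-hdfs-*.jar \
   $HADOOP_HOME/share/hadoop/common/lib/hadoop-auth-*.jar \
   $HADOOP_HOME/share/hadoop/common/lib/commons-configuration-*.jar \
   $FLUME_HOME/lib/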

Common flume-ng command options:

[hadoop@db01 flume-1.5.0]$ bin/flume-ng

commands:
  agent                     run a Flume agent

global options:
  --conf,-c <conf>          use configs in <conf> directory
  -Dproperty=value          sets a Java system property value

agent options:
  --name,-n <name>          the name of this agent (required)
  --conf-file,-f <file>     specify a config file (required if -z missing)

Examples (the long and short option forms are equivalent):

bin/flume-ng agent --conf /opt/cdh-5.3.6/flume-1.5.0/conf --name agent-test --conf-file test.conf
bin/flume-ng agent -c /opt/cdh-5.3.6/flume-1.5.0/conf -n agent-test -f test.conf

********************************************************************************************************

Flume example 1: netcat source + memory channel + logger sink

Define the configuration file /opt/cdh-5.3.6/flume-1.5.0/conf/a1.conf:

# The configuration file needs to define the sources,
# the channels and the sinks.

###################################
a1.sources = r1
a1.channels = c1
a1.sinks = k1

############define source#######################################
a1.sources.r1.type = netcat
a1.sources.r1.bind = db01
a1.sources.r1.port = 55555

#############define channel###################################
a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000
a1.channels.c1.transactionCapacity = 100

##########define sinks#########################
a1.sinks.k1.type = logger
a1.sinks.k1.maxBytesToLog = 1024

#######bind###############################
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1

Install telnet (used as the test client):

[root@db01 softwares]# rpm -ivh telnet-*
Preparing...                ########################################### [100%]
   1:telnet-server          ########################################### [ 50%]
   2:telnet                 ########################################### [100%]
[root@db01 softwares]#
[root@db01 softwares]#
[root@db01 softwares]# rpm -ivh xinetd-2.3.14-39.el6_4.x86_64.rpm
Preparing...                ########################################### [100%]
    package xinetd-2:2.3.14-39.el6_4.x86_64 is already installed
[root@db01 softwares]#
[root@db01 softwares]#
[root@db01 softwares]#
[root@db01 softwares]# /etc/rc.d/init.d/xinetd restart
Stopping xinetd:                                           [  OK  ]
Starting xinetd:                                           [  OK  ]

Start the Flume agent:

bin/flume-ng agent \
--conf /opt/cdh-5.3.6/flume-1.5.0/conf \
--name a1 \
--conf-file /opt/cdh-5.3.6/flume-1.5.0/conf/a1.conf \
-Dflume.root.logger=DEBUG,console
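
Optionally confirm the netcat source is listening on the port from a1.conf before testing (a quick sanity check):

netstat -tlnp | grep 55555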

Test with telnet:

[root@db01 ~]# telnet db01 55555
Trying 192.168.100.231...
Connected to db01.
Escape character is '^]'.
hello flume
OK
chavin king   
OK

------------ Log output ------------

2017-03-23 16:48:31,285 (netcat-handler-0) [DEBUG - org.apache.flume.source.NetcatSource$NetcatSocketHandler.run(NetcatSource.java:318)] Chars read = 13
2017-03-23 16:48:31,290 (netcat-handler-0) [DEBUG - org.apache.flume.source.NetcatSource$NetcatSocketHandler.run(NetcatSource.java:322)] Events processed = 1
2017-03-23 16:48:33,234 (SinkRunner-PollingRunner-DefaultSinkProcessor) [INFO - org.apache.flume.sink.LoggerSink.process(LoggerSink.java:70)] Event: { headers:{} body: 68 65 6C 6C 6F 20 66 6C 75 6D 65 0D             hello flume. }
2017-03-23 16:48:39,224 (conf-file-poller-0) [DEBUG - org.apache.flume.node.PollingPropertiesFileConfigurationProvider$FileWatcherRunnable.run(PollingPropertiesFileConfigurationProvider.java:126)] Checking file:/opt/cdh-5.3.6/flume-1.5.0/conf/a1.conf for changes
2017-03-23 16:48:47,031 (netcat-handler-0) [DEBUG - org.apache.flume.source.NetcatSource$NetcatSocketHandler.run(NetcatSource.java:318)] Chars read = 13
2017-03-23 16:48:47,032 (netcat-handler-0) [DEBUG - org.apache.flume.source.NetcatSource$NetcatSocketHandler.run(NetcatSource.java:322)] Events processed = 1
2017-03-23 16:48:48,235 (SinkRunner-PollingRunner-DefaultSinkProcessor) [INFO - org.apache.flume.sink.LoggerSink.process(LoggerSink.java:70)] Event: { headers:{} body: 63 68 61 76 69 6E 20 6B 69 6E 67 0D             chavin king. }
2017-03-23 16:49:09,225 (conf-file-poller-0) [DEBUG - org.apache.flume.node.PollingPropertiesFileConfigurationProvider$FileWatcherRunnable.run(PollingPropertiesFileConfigurationProvider.java:126)] Checking file:/opt/cdh-5.3.6/flume-1.5.0/conf/a1.conf for changes

***************************************************************************

Flume example 2: collecting the Hive log (exec source + memory channel + HDFS sink)

First create the target HDFS directory, /user/hadoop/flume/hive-logs/:

[hadoop@db01 hadoop-2.5.0]$ bin/hdfs dfs -mkdir -p /user/hadoop/flume/hive-logs/

The a2.conf file:

# The configuration file needs to define the sources,
# the channels and the sinks.

###################################
a2.sources = r2
a2.channels = c2
a2.sinks = k2

############define source#######################################
a2.sources.r2.type = exec
a2.sources.r2.command = tail -f /opt/cdh-5.3.6/hive-0.13.1/data/logs/hive.log
a2.sources.r2.shell = /bin/bash -c

#############define channel###################################
a2.channels.c2.type = memory
a2.channels.c2.capacity = 1000
a2.channels.c2.transactionCapacity = 100

##########define sinks#########################
a2.sinks.k2.type = hdfs

#a2.sinks.k2.hdfs.path = hdfs://db02:8020/user/hadoop/flume/hive-logs/
# For Hadoop HA, copy Hadoop's core-site.xml and hdfs-site.xml into Flume's conf directory:
#cp /opt/cdh-5.3.6/hadoop-2.5.0/etc/hadoop/core-site.xml /opt/cdh-5.3.6/hadoop-2.5.0/etc/hadoop/hdfs-site.xml /opt/cdh-5.3.6/flume-1.5.0/conf/
a2.sinks.k2.hdfs.path = hdfs://ns1/user/hadoop/flume/hive-logs/

a2.sinks.k2.hdfs.fileType = DataStream
a2.sinks.k2.hdfs.writeFormat = Text
a2.sinks.k2.hdfs.batchSize = 10

#######bind###############################
a2.sources.r2.channels = c2
a2.sinks.k2.channel = c2
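
Two caveats with this sketch: tail -f stops following hive.log once the file is rotated (tail -F re-opens it), and the HDFS sink's default roll settings (rollInterval=30s, rollSize=1024 bytes, rollCount=10 events) produce many small files. Possible adjustments, with illustrative values:

a2.sources.r2.command = tail -F /opt/cdh-5.3.6/hive-0.13.1/data/logs/hive.log
a2.sinks.k2.hdfs.rollInterval = 600
a2.sinks.k2.hdfs.rollSize = 134217728
a2.sinks.k2.hdfs.rollCount = 0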

Test:
bin/flume-ng agent \
--conf /opt/cdh-5.3.6/flume-1.5.0/conf \
--name a2 \
--conf-file /opt/cdh-5.3.6/flume-1.5.0/conf/a2.conf \
-Dflume.root.logger=DEBUG,console
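
After generating some Hive activity (e.g. running any Hive query), verify the sink output; file names default to the FlumeData prefix:

bin/hdfs dfs -ls /user/hadoop/flume/hive-logs/
bin/hdfs dfs -cat /user/hadoop/flume/hive-logs/FlumeData.*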

******************************************************************************
Flume example 3: spooling directory source + file channel + HDFS sink

Edit the a3.conf file:

# The configuration file needs to define the sources,
# the channels and the sinks.

######define agent#############################
a3.sources = r3
a3.channels = c3
a3.sinks = k3

############define source#######################################
a3.sources.r3.type = spooldir
a3.sources.r3.spoolDir = /opt/cdh-5.3.6/flume-1.5.0/spoolinglogs
a3.sources.r3.ignorePattern = ^(.)*\\.log$
a3.sources.r3.fileSuffix = .delete

#############define channel###################################
a3.channels.c3.type = file
a3.channels.c3.checkpointDir = /opt/cdh-5.3.6/flume-1.5.0/filechannel/checkpoint
a3.channels.c3.dataDirs = /opt/cdh-5.3.6/flume-1.5.0/filechannel/data

##########define sinks#########################
a3.sinks.k3.type = hdfs

#a3.sinks.k3.hdfs.path = hdfs://db02:8020/user/hadoop/flume/hive-logs/
a3.sinks.k3.hdfs.path = hdfs://ns1/user/hadoop/flume/splogs/%Y%m%d

a3.sinks.k3.hdfs.fileType = DataStream
a3.sinks.k3.hdfs.writeFormat = Text
a3.sinks.k3.hdfs.batchSize = 10
a3.sinks.k3.hdfs.useLocalTimeStamp = true
#######bind###############################
a3.sources.r3.channels = c3
a3.sinks.k3.channel = c3

Test:
bin/flume-ng agent \
--conf /opt/cdh-5.3.6/flume-1.5.0/conf \
--name a3 \
--conf-file /opt/cdh-5.3.6/flume-1.5.0/conf/a3.conf \
-Dflume.root.logger=DEBUG,console
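
To exercise this agent, create the spool directory before starting it, then drop in a file: processed files are renamed with the .delete suffix from a3.conf (files matching the *.log ignore pattern are skipped), and events land under a per-day HDFS directory:

mkdir -p /opt/cdh-5.3.6/flume-1.5.0/spoolinglogs
echo "hello spooldir" > /opt/cdh-5.3.6/flume-1.5.0/spoolinglogs/test.txt
ls /opt/cdh-5.3.6/flume-1.5.0/spoolinglogs      # test.txt becomes test.txt.delete
bin/hdfs dfs -ls /user/hadoop/flume/splogs/     # a %Y%m%d subdirectory appears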
