Rewriting a Flume Sink to Merge Multiple Lines
Flume 1.6 + Elasticsearch 6.3.2
pom.xml
<dependencies>
    <!-- NOTE: the trailing version digits were lost in the original post;
         the patch versions below are assumed. -->
    <dependency>
        <groupId>junit</groupId>
        <artifactId>junit</artifactId>
        <version>3.8.1</version>
        <scope>test</scope>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.elasticsearch/elasticsearch -->
    <dependency>
        <groupId>org.elasticsearch</groupId>
        <artifactId>elasticsearch</artifactId>
        <version>6.4.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.elasticsearch.client/transport -->
    <dependency>
        <groupId>org.elasticsearch.client</groupId>
        <artifactId>transport</artifactId>
        <version>6.4.0</version>
    </dependency>
    <!-- <dependency> <groupId>io.netty</groupId> <artifactId>netty-all</artifactId>
        <version>4.1.x.Final</version> </dependency> -->
    <!-- https://mvnrepository.com/artifact/org.apache.flume.flume-ng-sinks/flume-ng-elasticsearch-sink -->
    <dependency>
        <groupId>org.apache.flume.flume-ng-sinks</groupId>
        <artifactId>flume-ng-elasticsearch-sink</artifactId>
        <version>1.6.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/com.google.code.gson/gson -->
    <dependency>
        <groupId>com.google.code.gson</groupId>
        <artifactId>gson</artifactId>
        <version>2.8.5</version>
    </dependency>
</dependencies>
ElasticSearchForLogSink.java
package com.jachs.sink.elasticsearch;

import static org.apache.flume.sink.elasticsearch.ElasticSearchSinkConstants.CLUSTER_NAME;
import static org.apache.flume.sink.elasticsearch.ElasticSearchSinkConstants.HOSTNAMES;
import static org.apache.flume.sink.elasticsearch.ElasticSearchSinkConstants.INDEX_NAME;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.conf.Configurable;
import org.apache.flume.sink.AbstractSink;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class ElasticSearchForLogSink extends AbstractSink implements Configurable {
    private String hostNames;
    private String indexName;
    private String clusterName;

    static TransportClient client;
    // Buffers partial log lines, keyed by the token that groups them.
    static Map<String, String> dataMap = new HashMap<String, String>();

    public void configure(Context context) {
        hostNames = context.getString(HOSTNAMES);
        indexName = context.getString(INDEX_NAME);
        clusterName = context.getString(CLUSTER_NAME);
    }

    @Override
    public void start() {
        Settings settings = Settings.builder().put("cluster.name", clusterName).build();
        try {
            // hostNames is expected in "host:port" form, e.g. 127.0.0.1:9300
            client = new PreBuiltTransportClient(settings).addTransportAddress(new TransportAddress(
                    InetAddress.getByName(hostNames.split(":")[0]), Integer.parseInt(hostNames.split(":")[1])));
        } catch (UnknownHostException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void stop() {
        super.stop();
    }

    public Status process() throws EventDeliveryException {
        Status status = Status.BACKOFF;
        Channel ch = getChannel();
        Transaction txn = ch.getTransaction();
        txn.begin();
        try {
            Event event = ch.take();
            if (event == null) {
                txn.rollback();
                return status;
            }
            String data = new String(event.getBody(), "UTF-8");
            if (data.indexOf("token") != -1) {
                // The token length was lost in the original post; one trailing
                // character is assumed here -- adjust to your token format.
                String token = data.substring(data.length() - 1);
                System.out.println("got token " + token);
                String sb = dataMap.get(token);
                if (sb != null) {
                    // Append this line to the lines already buffered for the token.
                    dataMap.put(token, sb + data);
                } else {
                    dataMap.put(token, data);
                }
            }
            System.out.println("buffered " + dataMap.size());
            if (dataMap.size() >= 10) { // flush every ten entries; tune as needed
                BulkRequestBuilder bulkRequest = client.prepareBulk();
                bulkRequest.add(client.prepareIndex(indexName, "text").setSource(dataMap));
                bulkRequest.execute().actionGet();
                dataMap.clear();
                System.out.println("reset to " + dataMap.size());
            }
            txn.commit();
            status = Status.READY;
        } catch (Throwable t) {
            txn.rollback();
            status = Status.BACKOFF;
            t.printStackTrace();
            if (t instanceof Error) {
                throw (Error) t;
            }
        } finally {
            txn.close();
        }
        return status;
    }
}
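Since the sink only flushes in batches of ten, it is worth checking what actually landed in the index after a run. A minimal standalone sketch using the same transport client API; the cluster name "my-cluster", index name "my-index", and the 127.0.0.1:9300 endpoint are placeholder assumptions to be replaced with your own values:

package com.jachs.sink.elasticsearch;

import java.net.InetAddress;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class SearchCheck {
    public static void main(String[] args) throws Exception {
        Settings settings = Settings.builder().put("cluster.name", "my-cluster").build();
        TransportClient client = new PreBuiltTransportClient(settings)
                .addTransportAddress(new TransportAddress(InetAddress.getByName("127.0.0.1"), 9300));
        // Fetch a handful of documents from the index the sink writes to.
        SearchResponse response = client.prepareSearch("my-index").setSize(10).get();
        System.out.println("hits: " + response.getHits().getTotalHits());
        client.close();
    }
}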
Kafka producer code that simulates log writes
package com.test.Kafka;

import java.util.Properties;

import org.apache.commons.lang.RandomStringUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class App {
    public static void main(String[] args) {
        Properties properties = new Properties();
        // properties.put("bootstrap.servers",
        // "192.168.2.200:9092,192.168.2.157:9092,192.168.2.233:9092,192.168.2.194:9092,192.168.2.122:9092");
        // properties.put("bootstrap.servers",
        // "192.168.2.200:9092,192.168.2.233:9092,192.168.2.122:9092");
        properties.put("bootstrap.servers", "127.0.0.1:9092");
        properties.put("acks", "all");
        // The numeric values were lost in the original post; these are the
        // stock values from the Kafka producer documentation.
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = null;
        try {
            producer = new KafkaProducer<String, String>(properties);
            // Message count and suffix length were also lost; 100 messages with
            // a 6-character random suffix are assumed.
            for (int i = 0; i < 100; i++) { // the topic name is arbitrary
                producer.send(new ProducerRecord<String, String>("test1",
                        "tokenk" + RandomStringUtils.randomAlphanumeric(6)));
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            if (producer != null) {
                producer.close();
            }
        }
    }
}
Update the Flume configuration
a1.sinks.elasticsearch.type=com.jachs.sink.elasticsearch.ElasticSearchForLogSink
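The type line above only registers the custom class; the sink still needs a channel plus the hostNames, indexName, and clusterName properties that configure() reads. A fuller sketch of an agent wired Kafka source → memory channel → custom sink, where the agent name, channel sizing, index name, and cluster name are assumptions (the topic matches the test producer above, and 9300 is the Elasticsearch transport port, not the 9200 HTTP port):

a1.sources = kafkaSrc
a1.channels = c1
a1.sinks = elasticsearch

# Kafka source reading the topic the test producer writes to (Flume 1.6 style)
a1.sources.kafkaSrc.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.kafkaSrc.zookeeperConnect = 127.0.0.1:2181
a1.sources.kafkaSrc.topic = test1
a1.sources.kafkaSrc.channels = c1

a1.channels.c1.type = memory
a1.channels.c1.capacity = 1000

a1.sinks.elasticsearch.type = com.jachs.sink.elasticsearch.ElasticSearchForLogSink
a1.sinks.elasticsearch.hostNames = 127.0.0.1:9300
a1.sinks.elasticsearch.indexName = flume-log
a1.sinks.elasticsearch.clusterName = my-cluster
a1.sinks.elasticsearch.channel = c1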