This example uses the ShuffleGrouping strategy, with the bolt parallelism set to 3.

The run results show the tuples being distributed fairly evenly across the three bolt threads.

Reference code: StormTopologyShufferGrouping.java


package yehua.storm;

import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

/**
 * ShuffleGrouping
 * Unless there is a special requirement, use this grouping: it balances the load
 * across bolt tasks and is the most commonly used grouping in practice.
 * @author yehua
 */
public class StormTopologyShufferGrouping {

    public static class MySpout extends BaseRichSpout {
        private Map conf;
        private TopologyContext context;
        private SpoutOutputCollector collector;

        // @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.conf = conf;
            this.collector = collector;
            this.context = context;
        }

        int num = 0;

        // @Override
        public void nextTuple() {
            num++;
            System.out.println("spout:" + num);
            this.collector.emit(new Values(num));
            Utils.sleep(1000);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("num"));
        }
    }

    public static class MyBolt extends BaseRichBolt {
        private Map stormConf;
        private TopologyContext context;
        private OutputCollector collector;

        // @Override
        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.stormConf = stormConf;
            this.context = context;
            this.collector = collector;
        }

        // @Override
        public void execute(Tuple input) {
            Integer num = input.getIntegerByField("num");
            // Print the id of the executor thread that handles this tuple.
            System.err.println("thread:" + Thread.currentThread().getId() + ",num=" + num);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }

    public static void main(String[] args) {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        String spout_id = MySpout.class.getSimpleName();
        String bolt_id = MyBolt.class.getSimpleName();

        topologyBuilder.setSpout(spout_id, new MySpout());
        // Bolt parallelism of 3; tuples are shuffled randomly across the three tasks.
        topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).shuffleGrouping(spout_id);

        Config config = new Config();
        String topology_name = StormTopologyShufferGrouping.class.getSimpleName();
        if (args.length == 0) {
            // Run in local mode
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(topology_name, config, topologyBuilder.createTopology());
        } else {
            // Run on the cluster
            try {
                StormSubmitter.submitTopology(topology_name, config, topologyBuilder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            } catch (AuthorizationException e) {
                e.printStackTrace();
            }
        }
    }
}

 

Using the fieldsGrouping method

Group by odd/even (that is, grouping by field)

The run results show that one thread processes the odd numbers and another thread processes the even numbers.

Reference code: StormTopologyFieldsGrouping.java

package yehua.storm;

import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

/**
 * FieldsGrouping
 * Grouping by field
 * @author yehua
 */
public class StormTopologyFieldsGrouping {

    public static class MySpout extends BaseRichSpout {
        private Map conf;
        private TopologyContext context;
        private SpoutOutputCollector collector;

        // @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.conf = conf;
            this.collector = collector;
            this.context = context;
        }

        int num = 0;

        // @Override
        public void nextTuple() {
            num++;
            System.out.println("spout:" + num);
            // Emit the number plus a flag (num % 2) marking it as odd or even.
            this.collector.emit(new Values(num, num % 2));
            Utils.sleep(1000);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("num", "flag"));
        }
    }

    public static class MyBolt extends BaseRichBolt {
        private Map stormConf;
        private TopologyContext context;
        private OutputCollector collector;

        // @Override
        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.stormConf = stormConf;
            this.context = context;
            this.collector = collector;
        }

        // @Override
        public void execute(Tuple input) {
            Integer num = input.getIntegerByField("num");
            System.err.println("thread:" + Thread.currentThread().getId() + ",num=" + num);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }

    public static void main(String[] args) {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        String spout_id = MySpout.class.getSimpleName();
        String bolt_id = MyBolt.class.getSimpleName();

        topologyBuilder.setSpout(spout_id, new MySpout());
        // Note: fields grouping guarantees that tuples with the same grouping value
        // are always processed by the same bolt task.
        topologyBuilder.setBolt(bolt_id, new MyBolt(), 2).fieldsGrouping(spout_id, new Fields("flag"));

        Config config = new Config();
        String topology_name = StormTopologyFieldsGrouping.class.getSimpleName();
        if (args.length == 0) {
            // Run in local mode
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(topology_name, config, topologyBuilder.createTopology());
        } else {
            // Run on the cluster
            try {
                StormSubmitter.submitTopology(topology_name, config, topologyBuilder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            } catch (AuthorizationException e) {
                e.printStackTrace();
            }
        }
    }
}

A follow-up note: suppose there are still two groups of data (odd and even) but three bolt threads.

Looking at the run results in that case, only two of the three threads do any work.

There is also the opposite case: still two groups of data, but only one bolt thread.

The run results show that all of the work ends up being done by that single thread.
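To reproduce those two cases, only the parallelism hint in the setBolt call changes; a minimal sketch based on the FieldsGrouping example above (everything else stays the same):

// Two groups (odd/even) but three bolt tasks: the "flag" field only takes two
// distinct values, so at most two of the three tasks ever receive tuples.
topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).fieldsGrouping(spout_id, new Fields("flag"));

// Two groups but a single bolt task: both groups hash to the same task,
// so one thread does all of the work.
topologyBuilder.setBolt(bolt_id, new MyBolt(), 1).fieldsGrouping(spout_id, new Fields("flag"));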

The allGrouping method

Run results: every tuple the spout emits is received by all three bolt threads (this grouping has very few practical use cases).

Reference code: StormTopologyAllGrouping.java

package yehua.storm;

import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

/**
 * AllGrouping
 * Broadcast grouping
 * @author yehua
 */
public class StormTopologyAllGrouping {

    public static class MySpout extends BaseRichSpout {
        private Map conf;
        private TopologyContext context;
        private SpoutOutputCollector collector;

        // @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.conf = conf;
            this.collector = collector;
            this.context = context;
        }

        int num = 0;

        // @Override
        public void nextTuple() {
            num++;
            System.out.println("spout:" + num);
            this.collector.emit(new Values(num));
            Utils.sleep(1000);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("num"));
        }
    }

    public static class MyBolt extends BaseRichBolt {
        private Map stormConf;
        private TopologyContext context;
        private OutputCollector collector;

        // @Override
        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.stormConf = stormConf;
            this.context = context;
            this.collector = collector;
        }

        // @Override
        public void execute(Tuple input) {
            Integer num = input.getIntegerByField("num");
            System.err.println("thread:" + Thread.currentThread().getId() + ",num=" + num);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }

    public static void main(String[] args) {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        String spout_id = MySpout.class.getSimpleName();
        String bolt_id = MyBolt.class.getSimpleName();

        topologyBuilder.setSpout(spout_id, new MySpout());
        // Every tuple is broadcast to all three bolt tasks.
        topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).allGrouping(spout_id);

        Config config = new Config();
        String topology_name = StormTopologyAllGrouping.class.getSimpleName();
        if (args.length == 0) {
            // Run in local mode
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(topology_name, config, topologyBuilder.createTopology());
        } else {
            // Run on the cluster
            try {
                StormSubmitter.submitTopology(topology_name, config, topologyBuilder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            } catch (AuthorizationException e) {
                e.printStackTrace();
            }
        }
    }
}

The localOrShuffleGrouping method

The spout only sends tuples to bolt tasks running in the same worker process, i.e. the data stays inside one process. The advantage is that in-process delivery is efficient, since tuples do not need to be transferred across processes or hosts.

However, once the data volume becomes too large for that one thread to handle, this turns into a problem, so it is not recommended in practice.

This example uses 3 bolt threads and 2 worker processes.

From the run results we can see that only one thread receives data.

There is one more case: if the local worker has no bolt task at all, it behaves exactly like ShuffleGrouping.

Reference code: StormTopologyLocalOrShufferGrouping.java

package yehua.storm;

import java.util.Map;

import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.generated.AlreadyAliveException;
import org.apache.storm.generated.AuthorizationException;
import org.apache.storm.generated.InvalidTopologyException;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.apache.storm.utils.Utils;

/**
 * LocalOrShuffleGrouping
 * @author yehua
 */
public class StormTopologyLocalOrShufferGrouping {

    public static class MySpout extends BaseRichSpout {
        private Map conf;
        private TopologyContext context;
        private SpoutOutputCollector collector;

        // @Override
        public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
            this.conf = conf;
            this.collector = collector;
            this.context = context;
        }

        int num = 0;

        // @Override
        public void nextTuple() {
            num++;
            System.out.println("spout:" + num);
            this.collector.emit(new Values(num));
            Utils.sleep(1000);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
            declarer.declare(new Fields("num"));
        }
    }

    public static class MyBolt extends BaseRichBolt {
        private Map stormConf;
        private TopologyContext context;
        private OutputCollector collector;

        // @Override
        public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {
            this.stormConf = stormConf;
            this.context = context;
            this.collector = collector;
        }

        // @Override
        public void execute(Tuple input) {
            Integer num = input.getIntegerByField("num");
            System.err.println("thread:" + Thread.currentThread().getId() + ",num=" + num);
        }

        // @Override
        public void declareOutputFields(OutputFieldsDeclarer declarer) {
        }
    }

    public static void main(String[] args) {
        TopologyBuilder topologyBuilder = new TopologyBuilder();
        String spout_id = MySpout.class.getSimpleName();
        String bolt_id = MyBolt.class.getSimpleName();

        topologyBuilder.setSpout(spout_id, new MySpout());
        // Prefer bolt tasks in the spout's own worker process; fall back to shuffle otherwise.
        topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).localOrShuffleGrouping(spout_id);

        Config config = new Config();
        // Two worker processes for this topology.
        config.setNumWorkers(2);
        String topology_name = StormTopologyLocalOrShufferGrouping.class.getSimpleName();
        if (args.length == 0) {
            // Run in local mode
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology(topology_name, config, topologyBuilder.createTopology());
        } else {
            // Run on the cluster
            try {
                StormSubmitter.submitTopology(topology_name, config, topologyBuilder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            } catch (AuthorizationException e) {
                e.printStackTrace();
            }
        }
    }
}
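For a quick side-by-side comparison, these are the four grouping declarations used in the examples above (identifiers as defined there):

// Random, load-balanced distribution across the bolt tasks
topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).shuffleGrouping(spout_id);

// Tuples with the same "flag" value always go to the same bolt task
topologyBuilder.setBolt(bolt_id, new MyBolt(), 2).fieldsGrouping(spout_id, new Fields("flag"));

// Every tuple is broadcast to every bolt task
topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).allGrouping(spout_id);

// Prefer bolt tasks in the same worker process; otherwise fall back to shuffle
topologyBuilder.setBolt(bolt_id, new MyBolt(), 3).localOrShuffleGrouping(spout_id);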
