HAManager

public HAManager(VertxInternal vertx, DeploymentManager deploymentManager,
                 ClusterManager clusterManager, int quorumSize, String group, boolean enabled) {
  this.vertx = vertx;
  this.deploymentManager = deploymentManager;
  this.clusterManager = clusterManager;
  // Quorum size (defaults to 1 in VertxOptions); forced to 0 when HA is disabled
  this.quorumSize = enabled ? quorumSize : 0;
  // Logical HA group name (defaults to "__DEFAULT__"); "__DISABLED__" when HA is disabled
  this.group = enabled ? group : "__DISABLED__";
  this.enabled = enabled;
  this.haInfo = new JsonObject();
  haInfo.put("verticles", new JsonArray());
  haInfo.put("group", this.group);
  // Get the cluster-wide sync map "__vertx.haInfo" and register this node's info in it
  this.clusterMap = clusterManager.getSyncMap(CLUSTER_MAP_NAME);
  this.nodeID = clusterManager.getNodeID();
  synchronized (haInfo) {
    clusterMap.put(nodeID, haInfo.encode());
  }
  // Register a node listener to be notified when nodes join or leave the cluster
  clusterManager.nodeListener(new NodeListener() {
    @Override
    public void nodeAdded(String nodeID) {
      HAManager.this.nodeAdded(nodeID);
    }
    @Override
    public void nodeLeft(String leftNodeID) {
      HAManager.this.nodeLeft(leftNodeID);
    }
  });
  // Periodic timer: check the HA deployments every QUORUM_CHECK_PERIOD (1s)
  quorumTimerID = vertx.setPeriodic(QUORUM_CHECK_PERIOD, tid -> checkHADeployments());
  // Call checkQuorum to compute whether there is an initial quorum
  synchronized (this) {
    checkQuorum();
  }
}

private void checkQuorum() {
  if (quorumSize == 0) { // no quorum required
    this.attainedQuorum = true;
  } else {
    // Count the nodes that belong to this HA group
    List<String> nodes = clusterManager.getNodes();
    int count = 0;
    for (String node : nodes) {
      String json = clusterMap.get(node);
      if (json != null) {
        JsonObject clusterInfo = new JsonObject(json);
        String group = clusterInfo.getString("group");
        if (group.equals(this.group)) {
          count++;
        }
      }
    }
    // Determine whether the quorum size has been reached
    boolean attained = count >= quorumSize;
    if (!attainedQuorum && attained) {
      log.info("A quorum has been obtained. Any deploymentIDs waiting on a quorum will now be deployed");
      this.attainedQuorum = true;
    } else if (attainedQuorum && !attained) {
      log.info("There is no longer a quorum. Any HA deploymentIDs will be undeployed until a quorum is re-attained");
      this.attainedQuorum = false;
    }
  }
}
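
For context, the quorumSize, group and enabled arguments originate in VertxOptions when a clustered instance is created. A minimal usage sketch, assuming Vert.x 3.x with a cluster manager (e.g. Hazelcast) on the classpath; the group name "my-group" is illustrative:

import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;

public class HAClusterExample {
  public static void main(String[] args) {
    VertxOptions options = new VertxOptions()
        .setHAEnabled(true)      // becomes the "enabled" flag in HAManager
        .setQuorumSize(2)        // becomes quorumSize: at least 2 nodes required in the group
        .setHAGroup("my-group"); // becomes group, stored under "group" in __vertx.haInfo

    Vertx.clusteredVertx(options, res -> {
      if (res.succeeded()) {
        System.out.println("Clustered Vert.x started with HA enabled");
      } else {
        res.cause().printStackTrace();
      }
    });
  }
}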

Deploy an HA verticle

public void deployVerticle(final String verticleName, DeploymentOptions deploymentOptions,
                           final Handler<AsyncResult<String>> doneHandler) {
  if (attainedQuorum) {
    // Quorum attained: deploy straight away, otherwise queue the deployment
    doDeployVerticle(verticleName, deploymentOptions, doneHandler);
  } else {
    log.info("Quorum not attained. Deployment of verticle will be delayed until there's a quorum.");
    addToHADeployList(verticleName, deploymentOptions, doneHandler);
  }
}

/**
 * Deploy the verticle.
 */
private void doDeployVerticle(final String verticleName, DeploymentOptions deploymentOptions,
                              final Handler<AsyncResult<String>> doneHandler) {
  // Wrap the handler that runs once the verticle has been deployed
  final Handler<AsyncResult<String>> wrappedHandler = asyncResult -> {
    if (asyncResult.succeeded()) {
      // Record this node's HA info so that the other nodes know about the deployment
      addToHA(asyncResult.result(), verticleName, deploymentOptions);
    }
    // Invoke the caller's handler
    if (doneHandler != null) {
      doneHandler.handle(asyncResult);
    } else if (asyncResult.failed()) {
      log.error("Failed to deploy verticle", asyncResult.cause());
    }
  };
  // Deploy the verticle
  deploymentManager.deployVerticle(verticleName, deploymentOptions, wrappedHandler);
}

/**
 * Add the deployment task to the delay queue (toDeployOnQuorum).
 */
private void addToHADeployList(final String verticleName, final DeploymentOptions deploymentOptions,
                               final Handler<AsyncResult<String>> doneHandler) {
  toDeployOnQuorum.add(() -> {
    ContextImpl ctx = vertx.getContext();
    try {
      ContextImpl.setContext(null);
      // Deploy the verticle
      deployVerticle(verticleName, deploymentOptions, doneHandler);
    } finally {
      ContextImpl.setContext(ctx);
    }
  });
}
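
From the user's side this path is taken when a verticle is deployed with the HA flag on an HA-enabled clustered instance. A hedged sketch (com.example.MyVerticle is a placeholder verticle name, and a cluster manager is assumed to be on the classpath):

import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;

public class HADeployExample {
  public static void main(String[] args) {
    Vertx.clusteredVertx(new VertxOptions().setHAEnabled(true), res -> {
      if (res.succeeded()) {
        Vertx vertx = res.result();
        // setHa(true) marks the deployment as HA, so it is routed through
        // HAManager.deployVerticle and recorded in the __vertx.haInfo map.
        vertx.deployVerticle("com.example.MyVerticle",
            new DeploymentOptions().setHa(true),
            ar -> {
              if (ar.succeeded()) {
                System.out.println("Deployed with id " + ar.result());
              } else {
                ar.cause().printStackTrace();
              }
            });
      }
    });
  }
}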

Periodic check every second

private void checkHADeployments() {
  try {
    if (attainedQuorum) { // quorum attained: flush the delay queue
      deployHADeployments();
    } else {              // quorum lost: undeploy HA verticles
      undeployHADeployments();
    }
  } catch (Throwable t) {
    log.error("Failed when checking HA deploymentIDs", t);
  }
}

private void deployHADeployments() {
  // Number of tasks waiting in the delay queue
  int size = toDeployOnQuorum.size();
  if (size != 0) {
    log.info("There are " + size + " HA deploymentIDs waiting on a quorum. These will now be deployed");
    Runnable task;
    // Run all the delayed deployment tasks
    while ((task = toDeployOnQuorum.poll()) != null) {
      try {
        task.run();
      } catch (Throwable t) {
        log.error("Failed to run redeployment task", t);
      }
    }
  }
}

private void undeployHADeployments() {
  // Iterate over all current deployments
  for (String deploymentID : deploymentManager.deployments()) {
    Deployment dep = deploymentManager.getDeployment(deploymentID);
    if (dep != null) {
      if (dep.deploymentOptions().isHa()) {
        ContextImpl ctx = vertx.getContext();
        try {
          ContextImpl.setContext(null);
          // Undeploy the HA verticle
          deploymentManager.undeployVerticle(deploymentID, result -> {
            if (result.succeeded()) {
              log.info("Successfully undeployed HA deployment " + deploymentID + "-" + dep.verticleIdentifier() + " as there is no quorum");
              // Put the HA verticle back on the delay queue so it is redeployed once a quorum is re-attained
              addToHADeployList(dep.verticleIdentifier(), dep.deploymentOptions(), result1 -> {
                if (result1.succeeded()) {
                  log.info("Successfully redeployed verticle " + dep.verticleIdentifier() + " after quorum was re-attained");
                } else {
                  log.error("Failed to redeploy verticle " + dep.verticleIdentifier() + " after quorum was re-attained", result1.cause());
                }
              });
            } else {
              log.error("Failed to undeploy deployment on lost quorum", result.cause());
            }
          });
        } finally {
          ContextImpl.setContext(ctx);
        }
      }
    }
  }
}
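
To see this loop in action, one can start a node with a quorum of 2, deploy an HA verticle (it stays in the delay queue and logs "Quorum not attained"), then start a second node; on the next periodic check the queued deployment runs. A rough sketch under the same assumptions as above (placeholder verticle name, cluster manager on the classpath):

import io.vertx.core.DeploymentOptions;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;

public class QuorumDemo {
  public static void main(String[] args) {
    VertxOptions options = new VertxOptions().setHAEnabled(true).setQuorumSize(2);

    // First node: the quorum of 2 is not met yet, so the HA deployment is
    // parked in toDeployOnQuorum by addToHADeployList.
    Vertx.clusteredVertx(options, res1 -> {
      if (res1.succeeded()) {
        res1.result().deployVerticle("com.example.MyVerticle",
            new DeploymentOptions().setHa(true),
            ar -> System.out.println("Deployed once quorum was reached: " + ar.succeeded()));

        // Second node joins the same (default) HA group; the next
        // checkHADeployments() run sees the quorum and drains the queue.
        Vertx.clusteredVertx(options, res2 -> {
          if (res2.succeeded()) {
            System.out.println("Second node up, quorum of 2 reached");
          }
        });
      }
    });
  }
}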

Note: I don't recommend relying on the HA module; use a Health Check to monitor the verticle service and restart it automatically on failure instead. The docs claim: "Verticles can be deployed with High Availability (HA) enabled. In that context, when a verticle is deployed on a vert.x instance that dies abruptly, the verticle is redeployed on another vert.x instance of the cluster."
Analysis: there is no verticle persistence — the verticle is neither uploaded to a cluster repository nor serialized and distributed to the other nodes. If the other cluster nodes do not have the verticle (compiled classes) locally, how could it be redeployed? After all, the cluster is not shared memory.
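
A minimal sketch of the health-check alternative, assuming a recent Vert.x (4.x) with the vertx-web and vertx-health-check modules on the classpath; the procedure name, route path and port are illustrative, and an external supervisor (systemd, Kubernetes, etc.) is expected to poll the endpoint and restart the process on failure:

import io.vertx.core.AbstractVerticle;
import io.vertx.ext.healthchecks.HealthCheckHandler;
import io.vertx.ext.healthchecks.Status;
import io.vertx.ext.web.Router;

public class HealthCheckVerticle extends AbstractVerticle {
  @Override
  public void start() {
    HealthCheckHandler healthChecks = HealthCheckHandler.create(vertx);
    // Report OK while the verticle is alive; real checks would test DB connections, queues, etc.
    healthChecks.register("service-alive", promise -> promise.complete(Status.OK()));

    Router router = Router.router(vertx);
    router.get("/health").handler(healthChecks);
    vertx.createHttpServer().requestHandler(router).listen(8080);
  }
}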
