[Repost] Source Code Analysis of Ehcache Clustering over RMI

1 Service Provider

1.1 Automatic Discovery Configuration
<cacheManagerPeerProviderFactory
    class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
    properties="peerDiscovery=automatic, multicastGroupAddress=230.0.0.1,
                multicastGroupPort=4446, timeToLive=32"/>
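Both discovery modes are driven entirely by ehcache.xml; no RMI-specific application code is needed. As a minimal sketch (the file name ehcache-cluster.xml is an illustrative assumption, not taken from the original post), bootstrapping a clustered CacheManager looks like this:

import net.sf.ehcache.CacheManager;

public class ClusterBootstrap {
    public static void main(String[] args) {
        // Hypothetical config file containing the peer provider / listener
        // factories shown in this article; adjust the path to your own setup.
        CacheManager cacheManager = CacheManager.create("ehcache-cluster.xml");
        try {
            // The RMI peer provider, peer listener and replicators are all
            // started as part of CacheManager initialisation.
            System.out.println(java.util.Arrays.toString(cacheManager.getCacheNames()));
        } finally {
            cacheManager.shutdown();
        }
    }
}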
1.2 Manual Discovery Configuration

With manual discovery each node lists only the rmiUrls of its peers' caches: the first snippet below belongs on server1 (it points at server2), and the second belongs on server2 (it points at server1).
<cacheManagerPeerProviderFactory
    class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
    properties="peerDiscovery=manual,rmiUrls=//server2:40001/sampleCache11|//server2:40001/sampleCache12"/>

<cacheManagerPeerProviderFactory
    class="net.sf.ehcache.distribution.RMICacheManagerPeerProviderFactory"
    properties="peerDiscovery=manual,rmiUrls=//server1:40001/sampleCache11|//server1:40001/sampleCache12"/>
1.3 Source Code Analysis: RMICacheManagerPeerProviderFactory

createCachePeerProvider inspects the peerDiscovery property: automatic (or unset) yields a multicast-based provider, while manual yields a ManualRMICacheManagerPeerProvider with each configured rmiUrl registered individually.
public CacheManagerPeerProvider createCachePeerProvider(CacheManager cacheManager, Properties properties)
        throws CacheException {
    String peerDiscovery = PropertyUtil.extractAndLogProperty(PEER_DISCOVERY, properties);
    if (peerDiscovery == null || peerDiscovery.equalsIgnoreCase(AUTOMATIC_PEER_DISCOVERY)) {
        try {
            return createAutomaticallyConfiguredCachePeerProvider(cacheManager, properties);
        } catch (IOException e) {
            throw new CacheException("Could not create CacheManagerPeerProvider. Initial cause was " + e.getMessage(), e);
        }
    } else if (peerDiscovery.equalsIgnoreCase(MANUALLY_CONFIGURED_PEER_DISCOVERY)) {
        return createManuallyConfiguredCachePeerProvider(properties);
    } else {
        return null;
    }
}

protected CacheManagerPeerProvider createManuallyConfiguredCachePeerProvider(Properties properties) {
    String rmiUrls = PropertyUtil.extractAndLogProperty(RMI_URLS, properties);
    if (rmiUrls == null || rmiUrls.length() == 0) {
        LOG.info("Starting manual peer provider with empty list of peers. " +
                "No replication will occur unless peers are added.");
        rmiUrls = new String();
    }
    rmiUrls = rmiUrls.trim();
    StringTokenizer stringTokenizer = new StringTokenizer(rmiUrls, PayloadUtil.URL_DELIMITER);
    RMICacheManagerPeerProvider rmiPeerProvider = new ManualRMICacheManagerPeerProvider();
    while (stringTokenizer.hasMoreTokens()) {
        String rmiUrl = stringTokenizer.nextToken();
        rmiUrl = rmiUrl.trim();
        rmiPeerProvider.registerPeer(rmiUrl);
        LOG.debug("Registering peer {}", rmiUrl);
    }
    return rmiPeerProvider;
}
public final synchronized void registerPeer(String rmiUrl) {
    peerUrls.put(rmiUrl, new Date());
}

public final synchronized List listRemoteCachePeers(Ehcache cache) throws CacheException {
    List remoteCachePeers = new ArrayList();
    List staleList = new ArrayList();
    for (Iterator iterator = peerUrls.keySet().iterator(); iterator.hasNext();) {
        String rmiUrl = (String) iterator.next();
        String rmiUrlCacheName = extractCacheName(rmiUrl);
        if (!rmiUrlCacheName.equals(cache.getName())) {
            continue;
        }
        Date date = (Date) peerUrls.get(rmiUrl);
        if (!stale(date)) {
            CachePeer cachePeer = null;
            try {
                cachePeer = lookupRemoteCachePeer(rmiUrl);
                remoteCachePeers.add(cachePeer);
            } catch (Exception e) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Looking up rmiUrl " + rmiUrl + " through exception " + e.getMessage()
                            + ". This may be normal if a node has gone offline. Or it may indicate network connectivity"
                            + " difficulties", e);
                }
            }
        } else {
            LOG.debug("rmiUrl {} should never be stale for a manually configured cluster.", rmiUrl);
            staleList.add(rmiUrl);
        }
    }
    //Remove any stale remote peers. Must be done here to avoid concurrent modification exception.
    for (int i = 0; i < staleList.size(); i++) {
        String rmiUrl = (String) staleList.get(i);
        peerUrls.remove(rmiUrl);
    }
    return remoteCachePeers;
}

public CachePeer lookupRemoteCachePeer(String url) throws MalformedURLException, NotBoundException, RemoteException {
    LOG.debug("Lookup URL {}", url);
    CachePeer cachePeer = (CachePeer) Naming.lookup(url);
    return cachePeer;
}
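listRemoteCachePeers filters the registered rmiUrls by cache name and resolves every surviving URL with Naming.lookup on each call, silently skipping peers that cannot be reached. A hedged sketch for inspecting which peers one cache currently resolves (the cache name and the method framing are assumptions; CachePeer.getUrl() is a remote call and may throw RemoteException):

// Imports assumed: net.sf.ehcache.CacheManager, java.util.List, java.rmi.RemoteException,
// net.sf.ehcache.distribution.CacheManagerPeerProvider, net.sf.ehcache.distribution.CachePeer
static void dumpPeers(CacheManager cacheManager) throws RemoteException {
    CacheManagerPeerProvider provider = cacheManager.getCacheManagerPeerProvider("RMI");
    List peers = provider.listRemoteCachePeers(cacheManager.getEhcache("sampleCache11"));
    for (Object o : peers) {
        CachePeer peer = (CachePeer) o;
        System.out.println("remote peer: " + peer.getUrl());
    }
}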
public MulticastRMICacheManagerPeerProvider(CacheManager cacheManager, InetAddress groupMulticastAddress,
                                            Integer groupMulticastPort, Integer timeToLive, InetAddress hostAddress) {
    super(cacheManager);
    heartBeatReceiver = new MulticastKeepaliveHeartbeatReceiver(this, groupMulticastAddress,
            groupMulticastPort, hostAddress);
    heartBeatSender = new MulticastKeepaliveHeartbeatSender(cacheManager, groupMulticastAddress,
            groupMulticastPort, timeToLive, hostAddress);
}
2 Service Listener

2.1 Configuration
<cacheManagerPeerListenerFactory
    class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
    properties="hostName=localhost, port=40001,
                socketTimeoutMillis=2000"/>
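hostName=localhost is fine for a single-machine test; for a cross-machine cluster the value should normally be an address the other nodes can reach, otherwise the exported RMI stubs point back at 127.0.0.1. When several nodes share one host, each also needs its own registry port. A hedged variant for a second node on the same machine (port 40002 is an arbitrary illustrative choice):

<cacheManagerPeerListenerFactory
    class="net.sf.ehcache.distribution.RMICacheManagerPeerListenerFactory"
    properties="hostName=localhost, port=40002,
                socketTimeoutMillis=2000"/>

The RMICacheManagerPeerListenerFactory and RMICacheManagerPeerListener code below shows how these properties are parsed and how one RMICachePeer per replicated cache ends up bound in the RMI registry.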
public final CacheManagerPeerListener createCachePeerListener(CacheManager cacheManager, Properties properties)
        throws CacheException {
    String hostName = PropertyUtil.extractAndLogProperty(HOSTNAME, properties);
    String portString = PropertyUtil.extractAndLogProperty(PORT, properties);
    Integer port = null;
    if (portString != null && portString.length() != 0) {
        port = Integer.valueOf(portString);
    } else {
        port = Integer.valueOf(0);
    }
    //0 means any port in UnicastRemoteObject, so it is ok if not specified to make it 0
    String remoteObjectPortString = PropertyUtil.extractAndLogProperty(REMOTE_OBJECT_PORT, properties);
    Integer remoteObjectPort = null;
    if (remoteObjectPortString != null && remoteObjectPortString.length() != 0) {
        remoteObjectPort = Integer.valueOf(remoteObjectPortString);
    } else {
        remoteObjectPort = Integer.valueOf(0);
    }
    String socketTimeoutMillisString = PropertyUtil.extractAndLogProperty(SOCKET_TIMEOUT_MILLIS, properties);
    Integer socketTimeoutMillis;
    if (socketTimeoutMillisString == null || socketTimeoutMillisString.length() == 0) {
        socketTimeoutMillis = DEFAULT_SOCKET_TIMEOUT_MILLIS;
    } else {
        socketTimeoutMillis = Integer.valueOf(socketTimeoutMillisString);
    }
    return doCreateCachePeerListener(hostName, port, remoteObjectPort, cacheManager, socketTimeoutMillis);
}

protected CacheManagerPeerListener doCreateCachePeerListener(String hostName,
                                                              Integer port,
                                                              Integer remoteObjectPort,
                                                              CacheManager cacheManager,
                                                              Integer socketTimeoutMillis) {
    try {
        return new RMICacheManagerPeerListener(hostName, port, remoteObjectPort, cacheManager, socketTimeoutMillis);
    } catch (UnknownHostException e) {
        throw new CacheException("Unable to create CacheManagerPeerListener. Initial cause was " + e.getMessage(), e);
    }
}
public void init() throws CacheException {
    if (!status.equals(Status.STATUS_UNINITIALISED)) {
        return;
    }
    RMICachePeer rmiCachePeer = null;
    try {
        startRegistry();
        int counter = 0;
        populateListOfRemoteCachePeers();
        synchronized (cachePeers) {
            for (Iterator iterator = cachePeers.values().iterator(); iterator.hasNext();) {
                rmiCachePeer = (RMICachePeer) iterator.next();
                bind(rmiCachePeer.getUrl(), rmiCachePeer);
                counter++;
            }
        }
        LOG.debug(counter + " RMICachePeers bound in registry for RMI listener");
        status = Status.STATUS_ALIVE;
    } catch (Exception e) {
        String url = null;
        if (rmiCachePeer != null) {
            url = rmiCachePeer.getUrl();
        }
        throw new CacheException("Problem starting listener for RMICachePeer "
                + url + ". Initial cause was " + e.getMessage(), e);
    }
}

protected void startRegistry() throws RemoteException {
    try {
        registry = LocateRegistry.getRegistry(port.intValue());
        try {
            registry.list();
        } catch (RemoteException e) {
            //may not be created. Let's create it.
            registry = LocateRegistry.createRegistry(port.intValue());
            registryCreated = true;
        }
    } catch (ExportException exception) {
        LOG.error("Exception starting RMI registry. Error was " + exception.getMessage(), exception);
    }
}

protected void populateListOfRemoteCachePeers() throws RemoteException {
    String[] names = cacheManager.getCacheNames();
    for (int i = 0; i < names.length; i++) {
        String name = names[i];
        Ehcache cache = cacheManager.getEhcache(name);
        synchronized (cachePeers) {
            if (cachePeers.get(name) == null) {
                if (isDistributed(cache)) {
                    RMICachePeer peer = new RMICachePeer(cache, hostName, port, remoteObjectPort, socketTimeoutMillis);
                    cachePeers.put(name, peer);
                }
            }
        }
    }
}
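init() starts (or reuses) an RMI registry on the configured port and binds one RMICachePeer per distributed cache. A hedged way to check what a node has actually bound, using only the standard java.rmi API (host and port follow the listener configuration above):

import java.rmi.registry.LocateRegistry;
import java.rmi.registry.Registry;

public class RegistryCheck {
    public static void main(String[] args) throws Exception {
        // Connect to the registry started by RMICacheManagerPeerListener.
        Registry registry = LocateRegistry.getRegistry("localhost", 40001);
        for (String name : registry.list()) {
            // One entry per replicated cache is expected.
            System.out.println("bound: " + name);
        }
    }
}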
3 Event Listener

3.1 Configuration
<!-- Sample cache named sampleCache2. -->
<cache name="sampleCache2"
       maxEntriesLocalHeap="10"
       eternal="false"
       timeToIdleSeconds="100"
       timeToLiveSeconds="100"
       overflowToDisk="false">
    <cacheEventListenerFactory
        class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
        properties="replicateAsynchronously=true, replicatePuts=true, replicateUpdates=true,
                    replicateUpdatesViaCopy=false, replicateRemovals=true"/>
</cache>
<!-- Sample cache named sampleCache4. All missing RMICacheReplicatorFactory properties
     default to true -->
<cache name="sampleCache4"
       maxEntriesLocalHeap="10"
       eternal="true"
       overflowToDisk="false"
       memoryStoreEvictionPolicy="LFU">
    <cacheEventListenerFactory
        class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"/>
</cache>
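Once the replicator is registered as a cache event listener, ordinary cache operations are all that is needed to trigger replication. A hedged fragment against sampleCache2, assuming a cacheManager loaded from this configuration (imports: net.sf.ehcache.Cache, net.sf.ehcache.Element):

Cache cache = cacheManager.getCache("sampleCache2");
cache.put(new Element("key-1", "value-1"));   // new entry: copied to peers (replicatePuts=true)
cache.put(new Element("key-1", "value-2"));   // update: peers receive a remove, since replicateUpdatesViaCopy=false
cache.remove("key-1");                        // removal: propagated (replicateRemovals=true)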
3.2 Source Code Analysis

RMICacheReplicatorFactory turns the properties above into either an RMISynchronousCacheReplicator or, when replicateAsynchronously=true, an RMIAsynchronousCacheReplicator.
public final CacheEventListener createCacheEventListener(Properties properties) {
    boolean replicatePuts = extractReplicatePuts(properties);
    boolean replicatePutsViaCopy = extractReplicatePutsViaCopy(properties);
    boolean replicateUpdates = extractReplicateUpdates(properties);
    boolean replicateUpdatesViaCopy = extractReplicateUpdatesViaCopy(properties);
    boolean replicateRemovals = extractReplicateRemovals(properties);
    boolean replicateAsynchronously = extractReplicateAsynchronously(properties);
    int asynchronousReplicationIntervalMillis = extractReplicationIntervalMilis(properties);
    if (replicateAsynchronously) {
        return new RMIAsynchronousCacheReplicator(
                replicatePuts,
                replicatePutsViaCopy,
                replicateUpdates,
                replicateUpdatesViaCopy,
                replicateRemovals,
                asynchronousReplicationIntervalMillis);
    } else {
        return new RMISynchronousCacheReplicator(
                replicatePuts,
                replicatePutsViaCopy,
                replicateUpdates,
                replicateUpdatesViaCopy,
                replicateRemovals);
    }
}
/**
 * Whether a put should replicated by copy or by invalidation, (a remove).
 * <p/>
 * By copy is best when the entry is expensive to produce. By invalidation is best when
 * we are really trying to force other caches to sync back to a canonical source like a database.
 * An example of a latter usage would be a read/write cache being used in Hibernate.
 * <p/>
 * This setting only has effect if <code>#replicateUpdates</code> is true.
 */
protected boolean replicatePutsViaCopy;

public void notifyElementPut(final Ehcache cache, final Element element) throws CacheException {
    if (notAlive()) {
        return;
    }
    if (!replicatePuts) {
        return;
    }
    if (!element.isSerializable()) {
        if (LOG.isWarnEnabled()) {
            LOG.warn("Object with key " + element.getObjectKey() + " is not Serializable and cannot be replicated");
        }
        return;
    }
    if (replicatePutsViaCopy) {
        replicatePutNotification(cache, element);
    } else {
        replicateRemovalNotification(cache, (Serializable) element.getObjectKey());
    }
}

protected static void replicatePutNotification(Ehcache cache, Element element) throws RemoteCacheException {
    List cachePeers = listRemoteCachePeers(cache);
    for (Object cachePeer1 : cachePeers) {
        CachePeer cachePeer = (CachePeer) cachePeer1;
        try {
            cachePeer.put(element);
        } catch (Throwable t) {
            LOG.error("Exception on replication of putNotification. " + t.getMessage() + ". Continuing...", t);
        }
    }
}

static List listRemoteCachePeers(Ehcache cache) {
    CacheManagerPeerProvider provider = cache.getCacheManager().getCacheManagerPeerProvider("RMI");
    return provider.listRemoteCachePeers(cache);
}
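As the javadoc above explains, the ...ViaCopy flags choose between shipping the element itself and sending an invalidation (a remove) so that peers re-read from the canonical source. A hedged configuration sketch for a pure-invalidation setup, using RMICacheReplicatorFactory properties that appear in this article (the concrete combination is illustrative):

<cacheEventListenerFactory
    class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
    properties="replicateAsynchronously=false, replicatePuts=true, replicatePutsViaCopy=false,
                replicateUpdates=true, replicateUpdatesViaCopy=false, replicateRemovals=true"/>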
public final void notifyElementPut(final Ehcache cache, final Element element) throws CacheException {
    if (notAlive()) {
        return;
    }
    if (!replicatePuts) {
        return;
    }
    if (replicatePutsViaCopy) {
        if (!element.isSerializable()) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Object with key " + element.getObjectKey() + " is not Serializable and cannot be replicated.");
            }
            return;
        }
        addToReplicationQueue(new CacheEventMessage(EventMessage.PUT, cache, element, null));
    } else {
        if (!element.isKeySerializable()) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Object with key " + element.getObjectKey()
                        + " does not have a Serializable key and cannot be replicated via invalidate.");
            }
            return;
        }
        addToReplicationQueue(new CacheEventMessage(EventMessage.REMOVE, cache, null, element.getKey()));
    }
}

protected void addToReplicationQueue(CacheEventMessage cacheEventMessage) {
    if (!replicationThread.isAlive()) {
        LOG.error("CacheEventMessages cannot be added to the replication queue because the replication thread has died.");
    } else {
        synchronized (replicationQueue) {
            replicationQueue.add(cacheEventMessage);
        }
    }
}

private final class ReplicationThread extends Thread {
    public ReplicationThread() {
        super("Replication Thread");
        setDaemon(true);
        setPriority(Thread.NORM_PRIORITY);
    }

    public final void run() {
        replicationThreadMain();
    }
}

private void replicationThreadMain() {
    while (true) {
        // Wait for elements in the replicationQueue
        while (alive() && replicationQueue != null && replicationQueue.size() == 0) {
            try {
                Thread.sleep(asynchronousReplicationInterval);
            } catch (InterruptedException e) {
                LOG.debug("Spool Thread interrupted.");
                return;
            }
        }
        if (notAlive()) {
            return;
        }
        try {
            if (replicationQueue.size() != 0) {
                flushReplicationQueue();
            }
        } catch (Throwable e) {
            LOG.error("Exception on flushing of replication queue: " + e.getMessage() + ". Continuing...", e);
        }
    }
}

private void flushReplicationQueue() {
    List replicationQueueCopy;
    synchronized (replicationQueue) {
        if (replicationQueue.size() == 0) {
            return;
        }
        replicationQueueCopy = new ArrayList(replicationQueue);
        replicationQueue.clear();
    }
    Ehcache cache = ((CacheEventMessage) replicationQueueCopy.get(0)).cache;
    List cachePeers = listRemoteCachePeers(cache);
    List resolvedEventMessages = extractAndResolveEventMessages(replicationQueueCopy);
    for (int j = 0; j < cachePeers.size(); j++) {
        CachePeer cachePeer = (CachePeer) cachePeers.get(j);
        try {
            cachePeer.send(resolvedEventMessages);
        } catch (UnmarshalException e) {
            String message = e.getMessage();
            if (message.indexOf("Read time out") != 0) {
                LOG.warn("Unable to send message to remote peer due to socket read timeout. Consider increasing" +
                        " the socketTimeoutMillis setting in the cacheManagerPeerListenerFactory. " +
                        "Message was: " + e.getMessage());
            } else {
                LOG.debug("Unable to send message to remote peer. Message was: " + e.getMessage());
            }
        } catch (Throwable t) {
            LOG.warn("Unable to send message to remote peer. Message was: " + t.getMessage(), t);
        }
    }
    if (LOG.isWarnEnabled()) {
        int eventMessagesNotResolved = replicationQueueCopy.size() - resolvedEventMessages.size();
        if (eventMessagesNotResolved > 0) {
            LOG.warn(eventMessagesNotResolved + " messages were discarded on replicate due to reclamation of " +
                    "SoftReferences by the VM. Consider increasing the maximum heap size and/or setting the " +
                    "starting heap size to a higher value.");
        }
    }
}
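The asynchronous replicator queues CacheEventMessages, and the daemon ReplicationThread wakes every asynchronousReplicationInterval milliseconds, copies and clears the queue, and ships the batch to each peer with a single send call. The interval can be tuned through the factory properties; a hedged example (200 ms is an arbitrary illustrative value, and the property name asynchronousReplicationIntervalMillis is the one documented for RMICacheReplicatorFactory, with a default of 1000 ms):

<cacheEventListenerFactory
    class="net.sf.ehcache.distribution.RMICacheReplicatorFactory"
    properties="replicateAsynchronously=true, asynchronousReplicationIntervalMillis=200"/>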