Exponential Backoff
f^x * (f^y + f^(-m)) + f^(-n)
= f^(x+y) + f^(x-m) + f^(-n)
<?php
// Count how many times $input can be divided by $base before the quotient
// drops below $base; for $input >= $base this is floor(log_base($input)).
$exponent = 0;
w(80, 3);
echo $exponent; // prints 3 for w(80, 3)

function w($input, $base)
{
    global $exponent;
    $children_input = $input / $base;
    if ($children_input > 0) {
        $exponent++;
        if ($children_input >= $base) {
            w($children_input, $base);
        }
    }
}
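For inputs at least as large as the base, the recursion above just counts divisions by $base, which is floor(log_base($input)). A closed-form sketch using PHP's built-in log(); note that floating-point rounding in log() can occasionally put the result one off for exact powers:

<?php
// Closed-form equivalent of w() above for $input >= $base.
$input = 80;
$base = 3;
echo (int)floor(log($input, $base)); // 3, same as w(80, 3)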
// The ListOrderItems and ListOrderItemsByNextToken operations together share a
// maximum request quota of 30 and a restore rate of one request every two seconds.
function ExponentialBackoffSleep($restoreRatePerSecond = 0.5, $base = 2, $exponent = 3, $maximumRequestQuota = 30)
{
    $unixTimestamp = time();
    // Seconds needed to restore the full request quota at the given restore rate.
    $intervalSeconds = $maximumRequestQuota / $restoreRatePerSecond;
    // Seconds needed to restore a single request.
    $restoreOneNeedSeconds = ceil(1 / $restoreRatePerSecond);
    // Sleep only when the current timestamp lands on a restore-interval boundary,
    // so the pause happens periodically rather than on every call.
    if ($unixTimestamp % $intervalSeconds == 0) {
        $max = pow($base, $exponent);
        $max = $max > $restoreOneNeedSeconds ? $max : $restoreOneNeedSeconds;
        // Jittered sleep between the single-request restore time and base^exponent.
        $ExponentialBackoff = rand($restoreOneNeedSeconds, $max);
        echo "ExponentialBackoffSleep " . $ExponentialBackoff . "\r\n";
        sleep($ExponentialBackoff);
    }
}
// Return the smallest power of $base, starting from $base^$exponent, that is
// at least $input; returns 1 for inputs of 1 or less.
function findCloestNumToPow($input, $base = 2, $exponent = 1)
{
    if (ceil($input) <= 1) {
        return 1;
    }
    while (pow($base, $exponent) < $input) {
        $exponent++;
    }
    return pow($base, $exponent);
}
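The quota wording in the MWS documentation (a maximum request quota drained by calls and refilled at a fixed restore rate) is a token bucket, and modelling it directly avoids depending on time() happening to land on an interval boundary. A minimal sketch for a single-process script; the RequestThrottle class and its names are mine, not part of the MWS SDK:

<?php
// Token-bucket throttle for a shared request quota (sketch, not MWS SDK code).
class RequestThrottle
{
    private $capacity;
    private $restorePerSecond;
    private $tokens;
    private $lastRefill;

    public function __construct($capacity = 30, $restorePerSecond = 0.5)
    {
        $this->capacity = $capacity;
        $this->restorePerSecond = $restorePerSecond;
        $this->tokens = $capacity;          // start with a full quota
        $this->lastRefill = microtime(true);
    }

    // Block until one request token is available, then consume it.
    public function acquire()
    {
        $this->refill();
        while ($this->tokens < 1) {
            // Sleep long enough for one token to be restored (2s at 0.5 req/s).
            usleep((int)ceil(1e6 / $this->restorePerSecond));
            $this->refill();
        }
        $this->tokens -= 1;
    }

    private function refill()
    {
        $now = microtime(true);
        $elapsed = $now - $this->lastRefill;
        $this->tokens = min($this->capacity, $this->tokens + $elapsed * $this->restorePerSecond);
        $this->lastRefill = $now;
    }
}

$throttle = new RequestThrottle(30, 0.5);
// $throttle->acquire();  // call before each ListOrderItems request

Calling $throttle->acquire() before each ListOrderItems request keeps the script within the shared quota of 30 at a restore rate of 0.5 requests per second.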
// Simpler variant: roughly once every $intervalSeconds seconds, sleep for a
// random 1..$intervalSeconds seconds.
function ExponentialBackoffSleep($intervalSeconds = 8)
{
    $unixTimestamp = time();
    if ($unixTimestamp % $intervalSeconds == 0) {
        $ExponentialBackoff = rand(1, $intervalSeconds);
        sleep($ExponentialBackoff);
    }
}
<?php
echo "\r\n" . date('Y-m-d H:i:s') . " TODO StartScript \r\n";
set_time_limit(0);
/*
 * win-cli require_once
 */
// 2017-04-25 19:59:27
$win_cli_dir = 'D:\cmd\amzapi\amzapitest_com\MarketplaceWebServiceOrders\\';
require_once($win_cli_dir . 'Samples\.config.inc.php');
require_once($win_cli_dir . 'Samples\.config.db.php');
//wStartThisScript();
require_once($win_cli_dir . 'Client.php');
require_once($win_cli_dir . 'Model\ListOrderItemsRequest.php');
require_once($win_cli_dir . 'Model\ListOrderItemsResponse.php');
require_once($win_cli_dir . 'Model\ListOrderItemsByNextTokenRequest.php');
require_once($win_cli_dir . 'Model\ListOrderItemsByNextTokenResponse.php');

$serviceUrl = "https://mws.amazonservices.com/Orders/2013-09-01";

$config = array(
'ServiceURL' => $serviceUrl,
'ProxyHost' => null,
'ProxyPort' => -1,
'ProxyUsername' => null,
'ProxyPassword' => null,
'MaxErrorRetry' => 3,
);

$service = new MarketplaceWebServiceOrders_Client(
AWS_ACCESS_KEY_ID,
AWS_SECRET_ACCESS_KEY,
APPLICATION_NAME,
APPLICATION_VERSION,
$config);
//TODO StartScript
// BUG: result paging is not handled; ListOrderItemsByNextToken is never called, so only the first page of order items is fetched.
$sql = 'SELECT DISTINCT AmazonOrderId FROM listorders';
$w = 0;
foreach ($dbh->query($sql) as $row) {
echo "\r\n" . date('Y-m-d H:i:s') . "\r\n";
    $InputAmazonOrderId = $row['AmazonOrderId'];

    // Skip orders whose items have already been stored.
    $sql_inserted = 'SELECT id FROM ListOrderItems WHERE AmazonOrderId="' . $InputAmazonOrderId . '" LIMIT 1';
    $inserted = null; // reset so a hit left over from the previous iteration is not reused
    foreach ($dbh->query($sql_inserted) as $inserted) {
    }
    if (!empty($inserted)) {
        echo "\r\n" . $InputAmazonOrderId . " inserted\r\n";
        continue;
    }

    // Earlier throttling approach, replaced by ExponentialBackoffSleep():
    // $w++;
    // if ($w % 30 == 0) sleep(8);
ExponentialBackoffSleep();
$request = new MarketplaceWebServiceOrders_Model_ListOrderItemsRequest();
$request->setSellerId(MERCHANT_ID);
$request->setAmazonOrderId($InputAmazonOrderId);
$request->setMWSAuthToken(MWSAUTH_TOKEN);
invokeListOrderItems($service, $request);
}

// Roughly once every $intervalSeconds seconds, sleep a random 1..$intervalSeconds
// seconds so requests stay under the shared ListOrderItems request quota.
function ExponentialBackoffSleep($intervalSeconds = 8)
{
    $unixTimestamp = time();
    $ExponentialBackoff = rand(1, $intervalSeconds);
    if ($unixTimestamp % $intervalSeconds == 0) {
        sleep($ExponentialBackoff);
    }
}

// TODO: record caught exceptions in the database and move this helper out of this script.
function apiCaughtException($ex)
{
echo("Caught Exception: " . $ex->getMessage() . "\n");
echo("Response Status Code: " . $ex->getStatusCode() . "\n");
echo("Error Code: " . $ex->getErrorCode() . "\n");
echo("Error Type: " . $ex->getErrorType() . "\n");
echo("Request ID: " . $ex->getRequestId() . "\n");
echo("XML: " . $ex->getXML() . "\n");
echo("ResponseHeaderMetadata: " . $ex->getResponseHeaderMetadata() . "\n");
}

function invokeListOrderItems(MarketplaceWebServiceOrders_Interface $service, $request)
{
global $link;
global $InputAmazonOrderId;
try {
$response = $service->ListOrderItems($request);
//TODO: wrap this XML handling in a small helper class
$dom = new DOMDocument();
// Set parsing/formatting options before loading so they take effect.
$dom->preserveWhiteSpace = false;
$dom->formatOutput = true;
$dom->loadXML($response->toXML());
$savexml = $dom->saveXML();
$readxml = simplexml_load_string($savexml);
$obj = $readxml->ListOrderItemsResult->OrderItems;
foreach ($obj->children() as $one) {
TMPdbInsertListOrders($one, $link, $InputAmazonOrderId);
}
echo("ResponseHeaderMetadata: " . $response->getResponseHeaderMetadata() . "\n");
} catch (MarketplaceWebServiceOrders_Exception $ex) {
apiCaughtException($ex);
}
} //TODO EndScript
echo "\r\n" . date('Y-m-d H:i:s') . " TODO EndScript \r\n";
// Earlier throttling approach, kept for reference: count requests and
// sleep(8) after every 30th one.
// $w = 0;
// inside the order loop:
//     $w++;
//     if ($w % 30 == 0) sleep(8);
Exponential Backoff And Jitter
https://www.awsarchitectureblog.com/2015/03/backoff.html
Introducing OCC
Optimistic concurrency control (OCC) is a time-honored way for multiple writers to safely modify a single object without losing writes. OCC has three nice properties: it will always make progress as long as the underlying store is available, it’s easy to understand, and it’s easy to implement. DynamoDB’s conditional writes make OCC a natural fit for DynamoDB users, and it’s natively supported by the DynamoDBMapper client.
While OCC is guaranteed to make progress, it can still perform quite poorly under high contention. The simplest of these contention cases is when a whole lot of clients start at the same time, and try to update the same database row. With one client guaranteed to succeed every round, the time to complete all the updates grows linearly with contention.
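As an aside, the whole OCC round trip is small enough to sketch in a few lines of PHP against a relational table with a version column; the table and column names here are made up for illustration and are not from the MWS script above:

<?php
// One optimistic-concurrency-control update: read the current version,
// then write conditionally on that version still being current.
function occUpdateBalance(PDO $dbh, $id, $delta)
{
    while (true) {
        $stmt = $dbh->prepare('SELECT balance, version FROM accounts WHERE id = ?');
        $stmt->execute(array($id));
        $row = $stmt->fetch(PDO::FETCH_ASSOC);

        $update = $dbh->prepare(
            'UPDATE accounts SET balance = ?, version = version + 1
             WHERE id = ? AND version = ?');
        $update->execute(array($row['balance'] + $delta, $id, $row['version']));

        if ($update->rowCount() == 1) {
            return; // our conditional write won this round
        }
        // Another writer bumped the version first; retry, ideally after a
        // jittered backoff as discussed below.
    }
}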
For the graphs in this post, I used a small simulator to model the behavior of OCC on a network with delay (and variance in delay), against a remote database. In this simulation, the network introduces delay with a mean of 10ms and variance of 4ms. The first simulation shows how completion time grows linearly with contention. This linear growth is because one client succeeds every round, so it takes N rounds for all N clients to succeed.

Unfortunately, that’s not the whole picture. With N clients contending, the total amount of work done by the system increases with N².

Adding Backoff
The problem here is that N clients compete in the first round, N-1 in the second round, and so on. Having every client compete in every round is wasteful. Slowing clients down may help, and the classic way to slow clients down is capped exponential backoff. Capped exponential backoff means that clients multiply their backoff by a constant after each attempt, up to some maximum value. In our case, after each unsuccessful attempt, clients sleep for:
sleep = min(cap, base * 2 ** attempt)
Running the simulation again shows that backoff helps a small amount, but doesn’t solve the problem. Client work has only been reduced slightly.

The best way to see the problem is to look at the times these exponentially backed-off calls happen.

It’s obvious that the exponential backoff is working, in that the calls are happening less and less frequently. The problem also stands out: there are still clusters of calls. Instead of reducing the number of clients competing in every round, we’ve just introduced times when no client is competing. Contention hasn’t been reduced much, although the natural variance in network delay has introduced some spreading.
Adding Jitter
The solution isn’t to remove backoff. It’s to add jitter. Initially, jitter may appear to be a counter-intuitive idea: trying to improve the performance of a system by adding randomness. The time series above makes a great case for jitter – we want to spread out the spikes to an approximately constant rate. Adding jitter is a small change to the sleep function:
sleep = random_between(0, min(cap, base * 2 ** attempt))

That time series looks a whole lot better. The gaps are gone, and beyond the initial spike, there’s an approximately constant rate of calls. It’s also had a great effect on the total number of calls.

In the case with 100 contending clients, we’ve reduced our call count by more than half. We’ve also significantly improved the time to completion, when compared to un-jittered exponential backoff.

There are a few ways to implement these timed backoff loops. Let’s call the algorithm above “Full Jitter”, and consider two alternatives. The first alternative is “Equal Jitter”, where we always keep some of the backoff and jitter by a smaller amount:
temp = min(cap, base * 2 ** attempt)
sleep = temp / 2 + random_between(0, temp / 2)
The intuition behind this one is that it prevents very short sleeps, always keeping some of the slow down from the backoff. A second alternative is “Decorrelated Jitter”, which is similar to “Full Jitter”, but we also increase the maximum jitter based on the last random value.
sleep = min(cap, random_between(base, sleep * 3))
Which approach do you think is best?
Looking at the amount of client work, the number of calls is approximately the same for “Full” and “Equal” jitter, and higher for “Decorrelated”. Both cut down work substantially relative to both the no-jitter approaches.

The no-jitter exponential backoff approach is the clear loser. It not only takes more work, but also takes more time than the jittered approaches. In fact, it takes so much more time we have to leave it off the graph to get a good comparison of the other methods.

Of the jittered approaches, “Equal Jitter” is the loser. It does slightly more work than “Full Jitter”, and takes much longer. The decision between “Decorrelated Jitter” and “Full Jitter” is less clear. The “Full Jitter” approach uses less work, but slightly more time. Both approaches, though, present a substantial decrease in client work and server load.
It’s worth noting that none of these approaches fundamentally change the N² nature of the work to be done, but do substantially reduce work at reasonable levels of contention. The return on implementation complexity of using jittered backoff is huge, and it should be considered a standard approach for remote clients.
All of the graphs and numbers from this post were generated using a simple simulation of OCC behavior. You can get our simulator code on GitHub, in the aws-arch-backoff-simulator project.
- Marc Brooker
https://github.com/awslabs/aws-arch-backoff-simulator/blob/master/src/backoff_simulator.py
# Simulator for the effects of backoff and jitter on a remote OCC system.
# This code was used for the post on backoff on the AWS architecture blog
# at http://www.awsarchitectureblog.com/
import heapq
import random

# Net models the natural delay and variance of the network
class Net:
    def __init__(self, mean, sd):
        self.mean = mean
        self.sd = sd

    def delay(self):
        # We use a normal distribution model. Networks are more likely to be a Weibull model
        # in reality, but this is close enough for the model comparison.
        return abs(random.normalvariate(self.mean, self.sd))

# Base class for all the backoff implementations
class Backoff:
    def __init__(self, base, cap):
        self.base = base
        self.cap = cap

    def expo(self, n):
        return min(self.cap, pow(2, n) * self.base)

class NoBackoff(Backoff):
    def backoff(self, n):
        return 0

class ExpoBackoff(Backoff):
    def backoff(self, n):
        return self.expo(n)

class ExpoBackoffEqualJitter(Backoff):
    def backoff(self, n):
        v = self.expo(n)
        return v/2 + random.uniform(0, v/2)

class ExpoBackoffFullJitter(Backoff):
    def backoff(self, n):
        v = self.expo(n)
        return random.uniform(0, v)

class ExpoBackoffDecorr(Backoff):
    def __init__(self, base, cap):
        Backoff.__init__(self, base, cap)
        self.sleep = self.base

    def backoff(self, n):
        self.sleep = min(self.cap, random.uniform(self.base, self.sleep * 3))
        return self.sleep

# Small class to track two counters
class Stats:
    def __init__(self):
        self.failures = 0
        self.calls = 0

# Build a message to be added to the simulated network
def msg(tm, send_to, reply_to, payload):
    assert tm >= 0
    assert send_to is not None
    return (tm, send_to, reply_to, payload)

# The OCC server. It models a single "row" with a single version.
class OccServer:
    def __init__(self, net, stats, ts_f):
        self.version = 0
        self.net = net
        self.stats = stats
        self.ts_f = ts_f

    # Try to write the row. If you provide the right version number (obtained from a read),
    # the write will succeed.
    def write(self, tm, request):
        self.ts_f.write("%d\n" % (tm))
        success = False
        self.stats.calls += 1
        if request[3] == self.version:
            self.version += 1
            success = True
        else:
            self.stats.failures += 1
        return msg(tm + self.net.delay(), request[2], None, success)

    # Read the current version number of the row.
    def read(self, tm, request):
        return msg(tm + self.net.delay(), request[2], None, self.version)

# The OCC client. It models a client that tries to update the row exactly once,
# then stops.
class OccClient:
    def __init__(self, server, net, backoff):
        self.server = server
        self.net = net
        self.attempt = 0
        self.backoff = backoff

    def start(self, tm):
        return msg(tm + self.net.delay(), self.server.read, self.read_rsp, None)

    def read_rsp(self, tm, request):
        return msg(tm + self.net.delay(), self.server.write, self.write_rsp, request[3])

    def write_rsp(self, tm, request):
        if not request[3]:
            self.attempt += 1
            return msg(tm + self.net.delay() + self.backoff.backoff(self.attempt), self.server.read, self.read_rsp, None)
        else:
            return None

# The main loop of the simulation.
def run_sim(queue):
    tm = 0
    while len(queue) > 0:
        # Pull an event off the priority queue
        msg = heapq.heappop(queue)
        assert msg[0] >= tm  # Time must move forward
        tm = msg[0]
        next_msg = msg[1](tm, msg)
        if next_msg is not None:
            # If it caused another event to be generated, enqueue it
            heapq.heappush(queue, next_msg)
    return tm

# Set up the simulation, creating the clients and server
def setup_sim(clients, backoff_cls, ts_f, stats):
    net = Net(10, 2)
    queue = []
    server = OccServer(net, stats, ts_f)
    for i in xrange(0, clients):
        client = OccClient(server, net, backoff_cls(5, 2000))
        heapq.heappush(queue, client.start(0))
    return (queue, stats)

# The list of backoff types that we simulate over. The tuples are a class
# name and a friendly name for the output.
backoff_types = ((ExpoBackoff, "Exponential"),
                 (ExpoBackoffDecorr, "Decorr"),
                 (ExpoBackoffEqualJitter, "EqualJitter"),
                 (ExpoBackoffFullJitter, "FullJitter"),
                 (NoBackoff, "None"))

def run():
    with open("backoff_results.csv", "w") as f:
        f.write("clients,time,calls,Algorithm\n")
        for i in xrange(1, 20):
            clients = i * 10
            for backoff in backoff_types:
                with open("ts_" + backoff[1], "w") as ts_f:
                    stats = Stats()
                    tm = 0
                    for t in xrange(0, 100):
                        queue, stats = setup_sim(clients, backoff[0], ts_f, stats)
                        tm += run_sim(queue)
                    f.write("%d,%d,%d,%s\n" % (clients, tm/100, stats.calls/100, backoff[1]))

run()