$f^{x}\left(f^{y} + f^{-m}\right) + f^{-n} = f^{x+y} + f^{x-m} + f^{-n}$
<?php
// Count how many times 80 can be divided by 3 before the quotient drops
// below 3 (for $input >= $base this is floor(log_base($input)); here 3,
// since 3^3 = 27 <= 80 < 81 = 3^4).
$exponent = 0;
w(80, 3);

/**
 * Repeatedly divides $input by $base, incrementing the global $exponent once
 * per division, until the quotient falls below $base. Does nothing when the
 * first quotient is negative. The result accumulates in the global $exponent.
 *
 * @param int|float $input value whose logarithm-like count is taken
 * @param int|float $base  divisor / logarithm base
 */
function w($input, $base)
{
    global $exponent;
    $quotient = $input / $base;
    while ($quotient >= 0) {
        $exponent++;
        if ($quotient < $base) {
            break;
        }
        $quotient /= $base;
    }
}

echo $exponent;

w

//The ListOrderItems and ListOrderItemsByNextToken operations together share a maximum request quota of 30 and a restore rate of one request every two seconds.
/**
 * Jittered throttle for the shared ListOrderItems / ListOrderItemsByNextToken
 * quota (max 30 requests, restore rate of one request every two seconds —
 * hence the 0.5 default).
 *
 * When the current Unix timestamp is an exact multiple of the full
 * quota-restore window (30 / 0.5 = 60s) this sleeps a random number of
 * seconds between the single-request restore time and base^exponent;
 * otherwise it returns immediately.
 *
 * @param float $restoreRatePerSecond requests restored per second
 * @param int   $base                 exponential backoff base
 * @param int   $exponent             exponent (cap = base^exponent seconds)
 * @param int   $maximumRequestQuota  burst quota shared by both operations
 */
function ExponentialBackoffSleep($restoreRatePerSecond = 0.5, $base = 2, $exponent = 3, $maximumRequestQuota = 30)
{
    $unixTimestamp = time();
    // Seconds needed to restore the whole quota (the throttle window).
    $intervalSeconds = $maximumRequestQuota / $restoreRatePerSecond;
    // Seconds needed to restore a single request.
    $restoreOneNeedSeconds = ceil(1 / $restoreRatePerSecond);
    if ($unixTimestamp % $intervalSeconds == 0) {
        // Jitter range is [one-request restore time, base^exponent], never
        // letting the cap fall below the lower bound.
        $max = pow($base, $exponent);
        $max = $max > $restoreOneNeedSeconds ? $max : $restoreOneNeedSeconds;
        $ExponentialBackoff = rand($restoreOneNeedSeconds, $max);
        echo "ExponentialBackoffSleep " . $ExponentialBackoff . "\r\n";
        sleep($ExponentialBackoff);
    }
}

/**
 * Returns the smallest integer power of $base, starting from $base^$exponent,
 * that is >= $input; inputs of 1 or less return 1 (= $base^0).
 *
 * Fix: the original loop body unconditionally returned the constant 10 on its
 * first iteration, so the "closest power" search never ran; it now walks
 * successive exponents until the power reaches $input.
 *
 * @param int|float $input    value to round up to a power of $base
 * @param int       $base     power base (default 2)
 * @param int       $exponent first exponent to try (default 1)
 * @return int|float the power of $base found
 */
function findCloestNumToPow($input, $base = 2, $exponent = 1)
{
    if (ceil($input) <= 1) {
        return 1;
    }
    $power = pow($base, $exponent);
    while ($power < $input) {
        $exponent++;
        $power = pow($base, $exponent);
    }
    return $power;
}
//The ListOrderItems and ListOrderItemsByNextToken operations together share a maximum request quota of 30 and a restore rate of one request every two seconds.
/**
 * Jittered throttle for the shared ListOrderItems /
 * ListOrderItemsByNextToken quota (30 requests, restore rate 1 per 2s).
 *
 * Sleeps a random number of seconds in [single-request restore time, cap]
 * whenever the current Unix timestamp is an exact multiple of the full
 * quota-restore window; otherwise returns immediately.
 *
 * NOTE(review): an identical ExponentialBackoffSleep() is declared earlier in
 * this file — PHP fatals on redeclaration, so these look like pasted
 * iterations of a blog draft rather than one runnable script.
 *
 * @param float $restoreRatePerSecond requests restored per second
 * @param int   $base                 backoff base
 * @param int   $exponent             backoff exponent (cap = base^exponent)
 * @param int   $maximumRequestQuota  burst quota shared by the operations
 */
function ExponentialBackoffSleep($restoreRatePerSecond = 0.5, $base = 2, $exponent = 3, $maximumRequestQuota = 30)
{ $unixTimestamp = time();
// Seconds to restore the entire quota: 30 / 0.5 = 60s window.
$intervalSeconds = $maximumRequestQuota / $restoreRatePerSecond;
// Seconds to restore a single request: ceil(1 / 0.5) = 2s.
$restoreOneNeedSeconds = ceil(1 / $restoreRatePerSecond);
if ($unixTimestamp % $intervalSeconds == 0) {
// Cap is base^exponent, but never below the single-request restore time.
$max = pow($base, $exponent);
$max = $max > $restoreOneNeedSeconds ? $max : $restoreOneNeedSeconds;
$ExponentialBackoff = rand($restoreOneNeedSeconds, $max);
echo "ExponentialBackoffSleep " . $ExponentialBackoff . "\r\n";
sleep($ExponentialBackoff);
}
/**
 * Apparently intended to find the power of $base "closest" to $input,
 * starting from $exponent — but the loop below unconditionally returns the
 * constant 10 on its first iteration, so the search is never performed.
 * NOTE(review): work-in-progress stub from the blog draft; do not rely on it.
 */
} function findCloestNumToPow($input, $base = 2, $exponent = 1)
{ if (ceil($input) <= 1) {
return 1;
}
for ($w = $exponent, $len = pow($base, $exponent); $w <= $len; $w++) {
return 10;
}
}
/**
 * Occasionally pauses the caller: whenever the current Unix timestamp is an
 * exact multiple of $intervalSeconds, sleeps a random 1..$intervalSeconds
 * seconds. Most invocations return immediately without sleeping.
 *
 * @param int $intervalSeconds modulus for the timestamp check and jitter cap
 */
function ExponentialBackoffSleep($intervalSeconds = 8)
{
    $now = time();
    if ($now % $intervalSeconds == 0) {
        $jitter = rand(1, $intervalSeconds);
        sleep($jitter);
    }
}
<?php
echo "\r\n" . date('Y-m-d H:i:s') . " TODO StartScript \r\n";
set_time_limit(0); // no execution time limit: the order loop can run for a long time
/*
 * win-cli-require_once
 *
 * */
//return; NULL
//return '';
// 2017-04-25 19:59:27
// Fix: in the scraped original the two lines below were glued onto the date
// comment, which commented the whole bootstrap out; restored as code here.
$win_cli_dir = 'D:\cmd\amzapi\amzapitest_com\MarketplaceWebServiceOrders\\';
require_once($win_cli_dir . 'Samples\.config.inc.php');
require_once($win_cli_dir . 'Samples\.config.db.php');
//wStartThisScript();
require_once($win_cli_dir . 'Client.php');
require_once($win_cli_dir . 'Model\ListOrderItemsRequest.php');
require_once($win_cli_dir . 'Model\ListOrderItemsResponse.php');
require_once($win_cli_dir . 'Model\ListOrderItemsByNextTokenRequest.php');
require_once($win_cli_dir . 'Model\ListOrderItemsByNextTokenResponse.php');

$serviceUrl = "https://mws.amazonservices.com/Orders/2013-09-01";
$config = array(
    'ServiceURL' => $serviceUrl,
    'ProxyHost' => null,
    'ProxyPort' => -1,
    'ProxyUsername' => null,
    'ProxyPassword' => null,
    'MaxErrorRetry' => 3,
);
$service = new MarketplaceWebServiceOrders_Client(
    AWS_ACCESS_KEY_ID,
    AWS_SECRET_ACCESS_KEY,
    APPLICATION_NAME,
    APPLICATION_VERSION,
    $config);

//TODO StartScript
//bug limit page
// NOTE(review): $dbh is presumably a PDO handle created by .config.db.php —
// confirm; it is not defined in the visible code.
$sql = 'SELECT DISTINCT AmazonOrderId FROM listorders';
$w = 0;
foreach ($dbh->query($sql) as $row) {
    echo "\r\n" . date('Y-m-d H:i:s') . "\r\n";
    $InputAmazonOrderId = $row['AmazonOrderId'];
    // Skip orders whose items were already fetched.
    // NOTE(review): the order id comes from our own DB, but it is still
    // concatenated into SQL — prefer a prepared statement / $dbh->quote().
    $sql_inserted = 'SELECT id FROM ListOrderItems WHERE AmazonOrderId="' . $InputAmazonOrderId . '" LIMIT 1';
    // Fix: reset before the lookup. Without this, a hit from a PREVIOUS
    // iteration leaked into this one and every later order was wrongly
    // skipped as "inserted".
    $inserted = null;
    foreach ($dbh->query($sql_inserted) as $inserted) {
        // LIMIT 1: after the loop $inserted holds the single matching row, if any.
    }
    if (!empty($inserted)) {
        echo "\r\n" . $InputAmazonOrderId . " inserted\r\n";
        continue;
    }
    // $w++;//TODO modify
    //;;
    // if ($w % 30 == 0) sleep(8);
    ExponentialBackoffSleep();
    $request = new MarketplaceWebServiceOrders_Model_ListOrderItemsRequest();
    $request->setSellerId(MERCHANT_ID);
    $request->setAmazonOrderId($InputAmazonOrderId);
    $request->setMWSAuthToken(MWSAUTH_TOKEN);
    invokeListOrderItems($service, $request);
}

/**
 * Jittered throttle: whenever the Unix timestamp is an exact multiple of
 * $intervalSeconds, sleep a random 1..$intervalSeconds seconds; otherwise
 * return immediately.
 *
 * @param int $intervalSeconds modulus for the timestamp check and jitter cap
 */
function ExponentialBackoffSleep($intervalSeconds = 8)
{
    $unixTimestamp = time();
    $ExponentialBackoff = rand(1, $intervalSeconds);
    if ($unixTimestamp % $intervalSeconds == 0) sleep($ExponentialBackoff);
}
//TODO
// Exception to DB
//to move
/**
 * Logs every detail of a caught MWS API exception to stdout.
 *
 * NOTE(review): per the TODOs above ("Exception to DB", "to move") this
 * console logging is intended to be replaced by database persistence.
 *
 * @param MarketplaceWebServiceOrders_Exception $ex the caught API exception
 */
function apiCaughtException($ex)
{
echo("Caught Exception: " . $ex->getMessage() . "\n");
echo("Response Status Code: " . $ex->getStatusCode() . "\n");
echo("Error Code: " . $ex->getErrorCode() . "\n");
echo("Error Type: " . $ex->getErrorType() . "\n");
echo("Request ID: " . $ex->getRequestId() . "\n");
echo("XML: " . $ex->getXML() . "\n");
echo("ResponseHeaderMetadata: " . $ex->getResponseHeaderMetadata() . "\n");
// Calls the MWS ListOrderItems operation for one order, re-parses the XML
// response, and inserts each returned order item via TMPdbInsertListOrders()
// (defined elsewhere; not visible in this file).
// NOTE(review): $link is never defined in the visible code (the main loop
// uses $dbh) — confirm which DB handle TMPdbInsertListOrders() expects.
} function invokeListOrderItems(MarketplaceWebServiceOrders_Interface $service, $request)
{
global $link;
global $InputAmazonOrderId;
try {
$response = $service->ListOrderItems($request);
//TODO class XML(){}
// Round-trip the response XML through DOM to reformat it, then re-parse with
// SimpleXML for convenient traversal.
// NOTE(review): preserveWhiteSpace is set AFTER loadXML(), so it has no
// effect on how the document was parsed.
$dom = new DOMDocument();
$dom->loadXML($response->toXML());
$dom->preserveWhiteSpace = false;
$dom->formatOutput = true;
$savexml = $dom->saveXML();
$readxml = simplexml_load_string($savexml);
$obj = $readxml->ListOrderItemsResult->OrderItems;
// One insert per order item.
foreach ($obj->children() AS $one) {
TMPdbInsertListOrders($one, $link, $InputAmazonOrderId);
}
echo("ResponseHeaderMetadata: " . $response->getResponseHeaderMetadata() . "\n");
} catch (MarketplaceWebServiceOrders_Exception $ex) {
apiCaughtException($ex);
}
} //TODO EndScript
echo "\r\n" . date('Y-m-d H:i:s') . " TODO EndScript \r\n";

/**
 * Jittered throttle: whenever the Unix timestamp is an exact multiple of
 * $intervalSeconds, sleep a random 1..$intervalSeconds seconds; otherwise
 * return immediately.
 *
 * @param int $intervalSeconds modulus for the timestamp check and jitter cap
 */
function ExponentialBackoffSleep($intervalSeconds = 8)
{
    $now = time();
    // Jitter is drawn on every call (matching the original draft), but only
    // applied when the timestamp lands on a multiple of the interval.
    $jitter = rand(1, $intervalSeconds);
    if ($now % $intervalSeconds == 0) sleep($jitter);
}

// Earlier fixed-cadence throttle kept for reference: every 30th iteration
// sleeps a flat 8 seconds.
$w = 0;
{
    $w++;
    //TODO modify
    if ($w % 30 == 0) {
        sleep(8);
    }
}

w

Exponential Backoff And Jitter
https://www.awsarchitectureblog.com/2015/03/backoff.html

Exponential Backoff And Jitter

04 Mar 2015 in PerformanceScalability | Permalink

Introducing OCC

Optimistic concurrency control (OCC) is a time-honored way for multiple writers to safely modify a single object without losing writes. OCC has three nice properties: it will always make progress as long as the underlying store is available, it’s easy to understand, and it’s easy to implement. DynamoDB’s conditional writes make OCC a natural fit for DynamoDB users, and it’s natively supported by the DynamoDBMapper client.

While OCC is guaranteed to make progress, it can still perform quite poorly under high contention. The simplest of these contention cases is when a whole lot of clients start at the same time, and try to update the same database row. With one client guaranteed to succeed every round, the time to complete all the updates grows linearly with contention.

For the graphs in this post, I used a small simulator to model the behavior of OCC on a network with delay (and variance in delay), against a remote database. In this simulation, the network introduces delay with a mean of 10ms and variance of 4ms. The first simulation shows how completion time grows linearly with contention. This linear growth is because one client succeeds every round, so it takes N rounds for all N clients to succeed.

Unfortunately, that’s not the whole picture. With N clients contending, the total amount of work done by the system increases with N².

Adding Backoff

The problem here is that N clients compete in the first round, N-1 in the second round, and so on. Having every client compete in every round is wasteful. Slowing clients down may help, and the classic way to slow clients down is capped exponential backoff. Capped exponential backoff means that clients multiply their backoff by a constant after each attempt, up to some maximum value. In our case, after each unsuccessful attempt, clients sleep for:

sleep = min(cap, base * 2 ** attempt)

Running the simulation again shows that backoff helps a small amount, but doesn’t solve the problem. Client work has only been reduced slightly.

The best way to see the problem is to look at the times these exponentially backed-off calls happen.

It’s obvious that the exponential backoff is working, in that the calls are happening less and less frequently. The problem also stands out: there are still clusters of calls. Instead of reducing the number of clients competing in every round, we’ve just introduced times when no client is competing. Contention hasn’t been reduced much, although the natural variance in network delay has introduced some spreading.

Adding Jitter

The solution isn’t to remove backoff. It’s to add jitter. Initially, jitter may appear to be a counter-intuitive idea: trying to improve the performance of a system by adding randomness. The time series above makes a great case for jitter – we want to spread out the spikes to an approximately constant rate. Adding jitter is a small change to the sleep function:

sleep = random_between(0, min(cap, base * 2 ** attempt))

That time series looks a whole lot better. The gaps are gone, and beyond the initial spike, there’s an approximately constant rate of calls. It’s also had a great effect on the total number of calls.

In the case with 100 contending clients, we’ve reduced our call count by more than half. We’ve also significantly improved the time to completion, when compared to un-jittered exponential backoff.

There are a few ways to implement these timed backoff loops. Let’s call the algorithm above “Full Jitter”, and consider two alternatives. The first alternative is “Equal Jitter”, where we always keep some of the backoff and jitter by a smaller amount:

temp = min(cap, base * 2 ** attempt)
sleep = temp / 2 + random_between(0, temp / 2)

The intuition behind this one is that it prevents very short sleeps, always keeping some of the slow down from the backoff. A second alternative is “Decorrelated Jitter”, which is similar to “Full Jitter”, but we also increase the maximum jitter based on the last random value.

sleep = min(cap, random_between(base, sleep * 3))

Which approach do you think is best?

Looking at the amount of client work, the number of calls is approximately the same for “Full” and “Equal” jitter, and higher for “Decorrelated”. Both cut down work substantially relative to both the no-jitter approaches.

The no-jitter exponential backoff approach is the clear loser. It not only takes more work, but also takes more time than the jittered approaches. In fact, it takes so much more time we have to leave it off the graph to get a good comparison of the other methods.

Of the jittered approaches, “Equal Jitter” is the loser. It does slightly more work than “Full Jitter”, and takes much longer. The decision between “Decorrelated Jitter” and “Full Jitter” is less clear. The “Full Jitter” approach uses less work, but slightly more time. Both approaches, though, present a substantial decrease in client work and server load.

It’s worth noting that none of these approaches fundamentally change the N² nature of the work to be done, but do substantially reduce work at reasonable levels of contention. The return on implementation complexity of using jittered backoff is huge, and it should be considered a standard approach for remote clients.

All of the graphs and numbers from this post were generated using a simple simulation of OCC behavior. You can get our simulator code on GitHub, in the aws-arch-backoff-simulator project.

- Marc Brooker

https://github.com/awslabs/aws-arch-backoff-simulator/blob/master/src/backoff_simulator.py

# Simulator for the effects of backoff and jitter on a remote OCC system.
# This code was used for the post on backoff on the AWS architecture blog
# at http://www.awsarchitectureblog.com/
import heapq
import random # Net models the natural delay and variance of the network
class Net:
    # Models the natural delay and variance of the network.
    def __init__(self, mean, sd):
        self.mean = mean  # mean one-way delay (ms)
        self.sd = sd      # standard deviation of the delay (ms)

    def delay(self):
        # We use a normal distribution model. Networks are more likely to be a
        # Weibull model in reality, but this is close enough for the model
        # comparison. abs() keeps the sampled delay non-negative.
        return abs(random.normalvariate(self.mean, self.sd))


# Base class for all the backoff implementations.
class Backoff:
    def __init__(self, base, cap):
        self.base = base  # multiplier for the exponential term
        self.cap = cap    # upper bound on any computed backoff

    def expo(self, n):
        # Capped exponential: min(cap, base * 2^n).
        return min(self.cap, pow(2, n)*self.base)


class NoBackoff(Backoff):
    # Control case: retry immediately with no delay at all.
    def backoff(self, n):
        return 0


class ExpoBackoff(Backoff):
    # Plain capped exponential backoff, no jitter.
    def backoff(self, n):
        return self.expo(n)


class ExpoBackoffEqualJitter(Backoff):
    # "Equal Jitter": keep half the exponential delay, jitter the other half.
    def backoff(self, n):
        v = self.expo(n)
        return v/2 + random.uniform(0, v/2)


class ExpoBackoffFullJitter(Backoff):
    # "Full Jitter": sleep uniformly between 0 and the full exponential delay.
    def backoff(self, n):
        v = self.expo(n)
        return random.uniform(0, v)


class ExpoBackoffDecorr(Backoff):
    # "Decorrelated Jitter": each sleep is drawn from a range based on the
    # PREVIOUS sleep (sleep = min(cap, uniform(base, sleep * 3))).
    def __init__(self, base, cap):
        Backoff.__init__(self, base, cap)
        self.sleep = self.base

    def backoff(self, n):
        self.sleep = min(self.cap, random.uniform(self.base, self.sleep * 3))
        return self.sleep


# Small class to track two counters
class Stats:
    # Small class to track two counters.
    def __init__(self):
        self.failures = 0  # conflicting (failed) OCC writes
        self.calls = 0     # total write attempts


# Build a message to be added to the simulated network. A message is the
# tuple (delivery_time, callable_to_invoke, reply_callable, payload).
def msg(tm, send_to, reply_to, payload):
    assert tm >= 0
    assert send_to is not None
    return (tm, send_to, reply_to, payload)


# The OCC server. It models a single "row" with a single version.
class OccServer:
    def __init__(self, net, stats, ts_f):
        self.version = 0    # current row version; bumped on each successful write
        self.net = net      # Net: supplies simulated network delays
        self.stats = stats  # Stats: call/failure counters
        self.ts_f = ts_f    # file-like: one write timestamp per line

    # Try to write the row. If you provide the right version number (obtained
    # from a read), the write will succeed.
    def write(self, tm, request):
        self.ts_f.write("%d\n"%(tm))
        success = False
        self.stats.calls += 1
        if request[3] == self.version:
            self.version += 1
            success = True
        else:
            self.stats.failures += 1
        # Reply routes back to the requester (request[2]) after a network delay.
        return msg(tm + self.net.delay(), request[2], None, success)

    # Read the current version number of the row.
    def read(self, tm, request):
        return msg(tm + self.net.delay(), request[2], None, self.version)


# The OCC client. It models a client that tries to update the row exactly once,
# then stops.
class OccClient:
    def __init__(self, server, net, backoff):
        self.server = server
        self.net = net
        self.attempt = 0        # number of failed writes so far
        self.backoff = backoff  # Backoff strategy instance

    # Kick off this client's read-then-write cycle.
    def start(self, tm):
        return msg(tm + self.net.delay(), self.server.read, self.read_rsp, None)

    # Read response: attempt a conditional write with the version just read.
    def read_rsp(self, tm, request):
        return msg(tm + self.net.delay(), self.server.write, self.write_rsp, request[3])

    # Write response: on failure, back off and retry with a fresh read; on
    # success, return None so the simulation drops this client.
    def write_rsp(self, tm, request):
        if not request[3]:
            self.attempt += 1
            return msg(tm + self.net.delay() + self.backoff.backoff(self.attempt), self.server.read, self.read_rsp, None)
        else:
            return None


# The main loop of the simulation.
def run_sim(queue):
    tm = 0
    while len(queue) > 0:
        # Pull the earliest event off the priority queue.
        msg = heapq.heappop(queue)
        assert msg[0] >= tm  # Time must move forward
        tm = msg[0]
        # Deliver the message by invoking its target callable.
        next_msg = msg[1](tm, msg)
        if next_msg is not None:
            # If it caused another event to be generated, enqueue it.
            heapq.heappush(queue, next_msg)
    # Returns the simulated time at which the last event fired.
    return tm


# Setup the simulation, creating the clients and server
def setup_sim(clients, backoff_cls, ts_f, stats):
    net = Net(10, 2)  # mean delay 10ms, sd 2ms (matches the blog's setup)
    queue = []
    server = OccServer(net, stats, ts_f)
    # range instead of xrange: identical behavior in a for loop on Python 2,
    # and this also runs on Python 3.
    for i in range(0, clients):
        client = OccClient(server, net, backoff_cls(5, 2000))  # base 5, cap 2000
        heapq.heappush(queue, client.start(0))
    return (queue, stats)


# The list of backoff types that we simulate over. The tuples are a class
# name and a friendly name for the output.
backoff_types = ((ExpoBackoff, "Exponential"),
                 (ExpoBackoffDecorr, "Decorr"),
                 (ExpoBackoffEqualJitter, "EqualJitter"),
                 (ExpoBackoffFullJitter, "FullJitter"),
                 (NoBackoff, "None"))


# Run the full sweep: for 10..190 clients, simulate each backoff strategy 100
# times, writing averaged results to backoff_results.csv and a per-strategy
# time series of write timestamps to ts_<name>.
def run():
    with open("backoff_results.csv", "w") as f:
        f.write("clients,time,calls,Algorithm\n")
        # range instead of xrange: same iteration on Python 2, also runs on 3.
        for i in range(1, 20):
            clients = i * 10
            for backoff in backoff_types:
                with open("ts_" + backoff[1], "w") as ts_f:
                    stats = Stats()
                    tm = 0
                    for t in range(0, 100):
                        queue, stats = setup_sim(clients, backoff[0], ts_f, stats)
                        tm += run_sim(queue)
                    # "%d" truncates, so /100 yields the same printed value
                    # whether division is int (Py2) or float (Py3).
                    f.write("%d,%d,%d,%s\n"%(clients, tm/100, stats.calls/100, backoff[1]))


run()

Exponential Backoff的更多相关文章

  1. Error Retries and Exponential Backoff in AWS

    Error Retries and Exponential Backoff in AWS https://docs.aws.amazon.com/general/latest/gr/api-retri ...

  2. zookeeper源码分析之一服务端启动过程

    zookeeper简介 zookeeper是为分布式应用提供分布式协作服务的开源软件.它提供了一组简单的原子操作,分布式应用可以基于这些原子操作来实现更高层次的同步服务,配置维护,组管理和命名.zoo ...

  3. Polly

    Polly Polly is a .NET 3.5 / 4.0 / 4.5 / PCL (Profile 259) library that allows developers to express ...

  4. 【亲述】Uber容错设计与多机房容灾方案 - 高可用架构系列

    此文是根据赵磊在[QCON高可用架构群]中的分享内容整理而成.转载请事先联系赵磊及相关编辑. 赵磊,Uber高级工程师,08年上海交通大学毕业,曾就职于微软,后加入Facebook主要负责Messen ...

  5. Apache ActiveMQの版本更迭和Apache ActiveMQの故障转移

    本文描述apache activemq 版本更迭的原因以及Apache ActiveMQのThe Failover Transport new features in 5.2.0  1.对信息的传输/ ...

  6. consul模板配置参数值示例

    参看https://github.com/hashicorp/consul-template#examples // This is the address of the Consul agent. ...

  7. RabbitMQ 连接断开处理-自动恢复

    Rabbitmq 官方给的NET consumer示例代码如下,但使用过程,会遇到connection断开的问题,一旦断开,这个代码就会报错,如果你的消费者端是这样的代码的话,就会导致消费者挂掉. u ...

  8. 实时流式计算框架Storm 0.9.0发布通知(中文版)

    Storm0.9.0发布通知中文翻译版(2013/12/10 by 富士通邵贤军 有错误一定告诉我 shaoxianjun@hotmail.com^_^) 我们很高兴宣布Storm 0.9.0已经成功 ...

  9. SQL Server里的自旋锁介绍

    在上一篇文章里我讨论了SQL Server里的闩锁.在文章的最后我给你简单介绍了下自旋锁(Spinlock).基于那个基础,今天我会继续讨论SQL Server中的自旋锁,还有给你展示下如何对它们进行 ...

随机推荐

  1. eclipse工程重命名后,无法生产class问题

    在很多时候我们对project重新命名后,class文件始终没有生产,尤其是在web项目的时候,如果不注意class文件生成问题,会浪费大量的时间找错误.这里分享下如何解决eclipse重命名后cla ...

  2. Python MySQLdb 使用utf-8 编码插入中文数据

    参考地址:http://blog.csdn.net/dkman803/article/details/1925326/ 本人在使用python,mysqldb操作数据库的时候,发现如下问题,编码如下: ...

  3. ping的原理以及ICMP

    ping 的原理:     ping 程序是用来探测主机到主机之间是否可通信,如果不能ping到某台主机,表明不能和这台主机建立连接.     ping 使用的是ICMP协议,它发送icmp回送请求消 ...

  4. 转:[windows]DOS批处理添加任务计划

    自动创建每周运行一次的计划任务 创建计划任务可用at,schtasks命令,schtasks提供了很多参数 命令schtasks SCHTASKS /Create [/S system [/U use ...

  5. Linux 查看CPU个数和磁盘个数

    top后按数字1,多个cpu的话会显示多个 fdisk -l可以看到多个物理硬盘,做了硬raid只能看到一个硬盘 cat /proc/cpuinfo查看cpu具体的信息

  6. 激活modelsim se 10.4 时运行patch_dll.bat不能生成TXT

    问题描述: 激活modelsim时运行patch_dll.bat总是在DOS界面一闪而过,不能生成LICENSE.TXT 问题解决: 先取消文件 mgls64.dll 的只读属性(这句话在README ...

  7. iOS开发-常用第三方开源框架介绍

    iOS开发-常用第三方开源框架介绍 图像: 1.图片浏览控件MWPhotoBrowser        实现了一个照片浏览器类似 iOS 自带的相册应用,可显示来自手机的图片或者是网络图片,可自动从网 ...

  8. FreeBSD将SHELL从csh换成bash并支持中文显示

    刚准备在FreeBSD下学习Shell,结果发现怎么好多命令都没有,比如declare.后来发现连bash都没有,好像bash用的多一些吧,于是就准备将csh换成bash. 由于没有bash,所以先得 ...

  9. 【Python + ATX基于uiaotumator2】之Android—APP自动化简易例子

    上代码: import uiautomator2 as u2 from time import sleep d = u2.connect_usb('608ad0fe') #打开小卖 # d(text= ...

  10. C++数组类型与函数类型

    之所以将C++的数组类型与函数类型拿到一块说,是因为两者在很多地方都一样. 首先,声明形式上类似: 数组类型:  type [num]                                 ...