path: root/indra/llcorehttp/_httppolicy.cpp
author      Merov Linden <merov@lindenlab.com>    2014-02-24 13:42:44 -0800
committer   Merov Linden <merov@lindenlab.com>    2014-02-24 13:42:44 -0800
commit      3fbaff17bf65a826dd8b9bd50dc460aacaa3cb00 (patch)
tree        05916e8997e5133af6d16ce9754a29b2aa0fc760 /indra/llcorehttp/_httppolicy.cpp
parent      160bb09e775bc3c165cafefa4d80d861d4c39f2c (diff)
parent      de8fea13627cc5978b8a6135802a52864a11c39a (diff)
Pull merge from viewer-release
Diffstat (limited to 'indra/llcorehttp/_httppolicy.cpp')
-rwxr-xr-x  indra/llcorehttp/_httppolicy.cpp   223
1 file changed, 150 insertions, 73 deletions
diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 014bd37e2e..fd5a93e192 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -41,57 +41,70 @@ namespace LLCore
// Per-policy-class data for a running system.
-// Collection of queues, parameters, history, metrics, etc.
+// Collection of queues, options and other data
// for a single policy class.
//
// Threading: accessed only by worker thread
-struct HttpPolicy::State
+struct HttpPolicy::ClassState
{
public:
- State()
- : mConnMax(HTTP_CONNECTION_LIMIT_DEFAULT),
- mConnAt(HTTP_CONNECTION_LIMIT_DEFAULT),
- mConnMin(1),
- mNextSample(0),
- mErrorCount(0),
- mErrorFactor(0)
+ ClassState()
+ : mThrottleEnd(0),
+ mThrottleLeft(0L),
+ mRequestCount(0L)
{}
HttpReadyQueue mReadyQueue;
HttpRetryQueue mRetryQueue;
HttpPolicyClass mOptions;
-
- long mConnMax;
- long mConnAt;
- long mConnMin;
-
- HttpTime mNextSample;
- unsigned long mErrorCount;
- unsigned long mErrorFactor;
+ HttpTime mThrottleEnd;
+ long mThrottleLeft;
+ long mRequestCount;
};
HttpPolicy::HttpPolicy(HttpService * service)
- : mActiveClasses(0),
- mState(NULL),
- mService(service)
-{}
+ : mService(service)
+{
+ // Create default class
+ mClasses.push_back(new ClassState());
+}
HttpPolicy::~HttpPolicy()
{
shutdown();
+
+ for (class_list_t::iterator it(mClasses.begin()); it != mClasses.end(); ++it)
+ {
+ delete (*it);
+ }
+ mClasses.clear();
mService = NULL;
}
+HttpRequest::policy_t HttpPolicy::createPolicyClass()
+{
+ const HttpRequest::policy_t policy_class(mClasses.size());
+ if (policy_class >= HTTP_POLICY_CLASS_LIMIT)
+ {
+ return HttpRequest::INVALID_POLICY_ID;
+ }
+ mClasses.push_back(new ClassState());
+ return policy_class;
+}
+
+
void HttpPolicy::shutdown()
{
- for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+ for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
- HttpRetryQueue & retryq(mState[policy_class].mRetryQueue);
+ ClassState & state(*mClasses[policy_class]);
+
+ HttpRetryQueue & retryq(state.mRetryQueue);
while (! retryq.empty())
{
HttpOpRequest * op(retryq.top());
@@ -101,7 +114,7 @@ void HttpPolicy::shutdown()
op->release();
}
- HttpReadyQueue & readyq(mState[policy_class].mReadyQueue);
+ HttpReadyQueue & readyq(state.mReadyQueue);
while (! readyq.empty())
{
HttpOpRequest * op(readyq.top());
@@ -111,28 +124,11 @@ void HttpPolicy::shutdown()
op->release();
}
}
- delete [] mState;
- mState = NULL;
- mActiveClasses = 0;
}
-void HttpPolicy::start(const HttpPolicyGlobal & global,
- const std::vector<HttpPolicyClass> & classes)
-{
- llassert_always(! mState);
-
- mGlobalOptions = global;
- mActiveClasses = classes.size();
- mState = new State [mActiveClasses];
- for (int i(0); i < mActiveClasses; ++i)
- {
- mState[i].mOptions = classes[i];
- mState[i].mConnMax = classes[i].mConnectionLimit;
- mState[i].mConnAt = mState[i].mConnMax;
- mState[i].mConnMin = 2;
- }
-}
+void HttpPolicy::start()
+{}
void HttpPolicy::addOp(HttpOpRequest * op)
@@ -140,7 +136,8 @@ void HttpPolicy::addOp(HttpOpRequest * op)
const int policy_class(op->mReqPolicy);
op->mPolicyRetries = 0;
- mState[policy_class].mReadyQueue.push(op);
+ op->mPolicy503Retries = 0;
+ mClasses[policy_class]->mReadyQueue.push(op);
}
@@ -155,25 +152,39 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
5000000 // ... to every 5.0 S.
};
static const int delta_max(int(LL_ARRAY_SIZE(retry_deltas)) - 1);
-
+ static const HttpStatus error_503(503);
+
const HttpTime now(totalTime());
const int policy_class(op->mReqPolicy);
-
- const HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
+ HttpTime delta(retry_deltas[llclamp(op->mPolicyRetries, 0, delta_max)]);
+ bool external_delta(false);
+
+ if (op->mReplyRetryAfter > 0 && op->mReplyRetryAfter < 30)
+ {
+ delta = op->mReplyRetryAfter * U64L(1000000);
+ external_delta = true;
+ }
op->mPolicyRetryAt = now + delta;
++op->mPolicyRetries;
- LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
- << " retry " << op->mPolicyRetries
- << " scheduled for +" << (delta / HttpTime(1000))
- << " mS. Status: " << op->mStatus.toHex()
- << LL_ENDL;
- if (op->mTracing > 0)
+ if (error_503 == op->mStatus)
+ {
+ ++op->mPolicy503Retries;
+ }
+ LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
+ << " retry " << op->mPolicyRetries
+ << " scheduled in " << (delta / HttpTime(1000))
+ << " mS (" << (external_delta ? "external" : "internal")
+ << "). Status: " << op->mStatus.toTerseString()
+ << LL_ENDL;
+ if (op->mTracing > HTTP_TRACE_OFF)
{
LL_INFOS("CoreHttp") << "TRACE, ToRetryQueue, Handle: "
<< static_cast<HttpHandle>(op)
+ << ", Delta: " << (delta / HttpTime(1000))
+ << ", Retries: " << op->mPolicyRetries
<< LL_ENDL;
}
- mState[policy_class].mRetryQueue.push(op);
+ mClasses[policy_class]->mRetryQueue.push(op);
}
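
The retryOp() hunk above schedules the next attempt from a fixed backoff table unless the reply carried a short Retry-After value. A minimal standalone sketch of that rule, assuming microsecond timestamps; only the final 5.0 s table entry is visible in this hunk, so the earlier values below are illustrative:

    #include <algorithm>
    #include <cstdint>

    // Microsecond delay before the next retry attempt, given how many retries
    // have already been made and the server's Retry-After header in seconds
    // (pass 0 when the header was absent).
    uint64_t retryDelayMicroseconds(int retries_so_far, int reply_retry_after_sec)
    {
        // Backoff table indexed by retry count (illustrative values except the cap).
        static const uint64_t retry_deltas[] =
        {
            250000,     // 0.25 s
            500000,
            1000000,
            2000000,
            5000000     // ... to every 5.0 s
        };
        static const int delta_max = int(sizeof(retry_deltas) / sizeof(retry_deltas[0])) - 1;

        uint64_t delta = retry_deltas[std::min(std::max(retries_so_far, 0), delta_max)];

        // A server-supplied Retry-After under 30 seconds overrides the table.
        if (reply_retry_after_sec > 0 && reply_retry_after_sec < 30)
        {
            delta = uint64_t(reply_retry_after_sec) * 1000000ULL;
        }
        return delta;
    }
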
@@ -188,21 +199,43 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
// the worker thread may sleep hard otherwise will ask for
// normal polling frequency.
//
+// Implements a client-side request rate throttle as well.
+// This is intended to mimic and predict throttling behavior
+// of grid services but that is difficult to do with different
+// time bases. This also represents a rigid coupling between
+// viewer and server that makes it hard to change parameters
+// and I hope we can make this go away with pipelining.
+//
HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
{
const HttpTime now(totalTime());
HttpService::ELoopSpeed result(HttpService::REQUEST_SLEEP);
HttpLibcurl & transport(mService->getTransport());
- for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+ for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
- State & state(mState[policy_class]);
- int active(transport.getActiveCountInClass(policy_class));
- int needed(state.mConnAt - active); // Expect negatives here
-
+ ClassState & state(*mClasses[policy_class]);
HttpRetryQueue & retryq(state.mRetryQueue);
HttpReadyQueue & readyq(state.mReadyQueue);
+
+ if (retryq.empty() && readyq.empty())
+ {
+ continue;
+ }
+ const bool throttle_enabled(state.mOptions.mThrottleRate > 0L);
+ const bool throttle_current(throttle_enabled && now < state.mThrottleEnd);
+
+ if (throttle_current && state.mThrottleLeft <= 0)
+ {
+ // Throttled condition, don't serve this class but don't sleep hard.
+ result = HttpService::NORMAL;
+ continue;
+ }
+
+ int active(transport.getActiveCountInClass(policy_class));
+ int needed(state.mOptions.mConnectionLimit - active); // Expect negatives here
+
if (needed > 0)
{
// First see if we have any retries...
@@ -216,10 +249,27 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
op->stageFromReady(mService);
op->release();
-
+
+ ++state.mRequestCount;
--needed;
+ if (throttle_enabled)
+ {
+ if (now >= state.mThrottleEnd)
+ {
+ // Throttle expired, move to next window
+ LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
+ state.mThrottleLeft = state.mOptions.mThrottleRate;
+ state.mThrottleEnd = now + HttpTime(1000000);
+ }
+ if (--state.mThrottleLeft <= 0)
+ {
+ goto throttle_on;
+ }
+ }
}
-
+
// Now go on to the new requests...
while (needed > 0 && ! readyq.empty())
{
@@ -229,10 +279,29 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
op->stageFromReady(mService);
op->release();
+ ++state.mRequestCount;
--needed;
+ if (throttle_enabled)
+ {
+ if (now >= state.mThrottleEnd)
+ {
+ // Throttle expired, move to next window
+ LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
+ state.mThrottleLeft = state.mOptions.mThrottleRate;
+ state.mThrottleEnd = now + HttpTime(1000000);
+ }
+ if (--state.mThrottleLeft <= 0)
+ {
+ goto throttle_on;
+ }
+ }
}
}
-
+
+ throttle_on:
+
if (! readyq.empty() || ! retryq.empty())
{
// If anything is ready, continue looping...
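
The two loops above charge each issued request against a one-second window per policy class (mThrottleEnd / mThrottleLeft) and jump to throttle_on once the window's budget is spent. A condensed standalone sketch of that window logic, with illustrative names and microsecond clock values:

    #include <cstdint>

    // Per-class throttle bookkeeping; names follow the patch but this struct
    // is a stand-in, not the viewer's ClassState.
    struct ThrottleState
    {
        long     mThrottleRate = 0;   // requests allowed per window, 0 disables
        uint64_t mThrottleEnd  = 0;   // end of the current window, microseconds
        long     mThrottleLeft = 0;   // budget remaining in the current window
    };

    // Returns true if one more request may be issued at time 'now' (microseconds),
    // charging it against the current one-second window.
    bool tryIssueRequest(ThrottleState & s, uint64_t now)
    {
        if (s.mThrottleRate <= 0)
        {
            return true;                  // throttling disabled for this class
        }
        if (now >= s.mThrottleEnd)
        {
            // Window expired: open a fresh one-second window with a full budget.
            s.mThrottleLeft = s.mThrottleRate;
            s.mThrottleEnd  = now + 1000000;
        }
        if (s.mThrottleLeft <= 0)
        {
            return false;                 // budget spent; caller should retry next pass
        }
        --s.mThrottleLeft;
        return true;
    }
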
@@ -246,9 +315,9 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
bool HttpPolicy::changePriority(HttpHandle handle, HttpRequest::priority_t priority)
{
- for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+ for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
- State & state(mState[policy_class]);
+ ClassState & state(*mClasses[policy_class]);
// We don't scan retry queue because a priority change there
// is meaningless. The request will be issued based on retry
// intervals not priority value, which is now moot.
@@ -276,9 +345,9 @@ bool HttpPolicy::changePriority(HttpHandle handle, HttpRequest::priority_t prior
bool HttpPolicy::cancel(HttpHandle handle)
{
- for (int policy_class(0); policy_class < mActiveClasses; ++policy_class)
+ for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
- State & state(mState[policy_class]);
+ ClassState & state(*mClasses[policy_class]);
// Scan retry queue
HttpRetryQueue::container_type & c1(state.mRetryQueue.get_container());
@@ -337,14 +406,14 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
<< " failed after " << op->mPolicyRetries
<< " retries. Reason: " << op->mStatus.toString()
- << " (" << op->mStatus.toHex() << ")"
+ << " (" << op->mStatus.toTerseString() << ")"
<< LL_ENDL;
}
else if (op->mPolicyRetries)
{
- LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
- << " succeeded on retry " << op->mPolicyRetries << "."
- << LL_ENDL;
+ LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
+ << " succeeded on retry " << op->mPolicyRetries << "."
+ << LL_ENDL;
}
op->stageFromActive(mService);
@@ -352,13 +421,21 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
return false; // not active
}
+
+HttpPolicyClass & HttpPolicy::getClassOptions(HttpRequest::policy_t pclass)
+{
+ llassert_always(pclass >= 0 && pclass < mClasses.size());
+
+ return mClasses[pclass]->mOptions;
+}
+
int HttpPolicy::getReadyCount(HttpRequest::policy_t policy_class) const
{
- if (policy_class < mActiveClasses)
+ if (policy_class < mClasses.size())
{
- return (mState[policy_class].mReadyQueue.size()
- + mState[policy_class].mRetryQueue.size());
+ return (mClasses[policy_class]->mReadyQueue.size()
+ + mClasses[policy_class]->mRetryQueue.size());
}
return 0;
}
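
Taken together, the patch replaces the fixed State array configured in start() with ClassState instances created on demand. A small illustrative sketch of how a caller might configure a new class through the reworked API; the connection limit and throttle rate values are examples only, and 'policy' stands for the HttpPolicy instance owned by HttpService:

    // Hypothetical helper; assumed to run on the worker thread that owns HttpPolicy.
    HttpRequest::policy_t setupAssetClass(HttpPolicy & policy)
    {
        HttpRequest::policy_t pclass = policy.createPolicyClass();
        if (HttpRequest::INVALID_POLICY_ID != pclass)
        {
            HttpPolicyClass & options = policy.getClassOptions(pclass);
            options.mConnectionLimit = 8;   // per-class cap used by processReadyQueue()
            options.mThrottleRate    = 0;   // 0 leaves the new request throttle off
        }
        return pclass;
    }
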