path: root/indra/llcorehttp
author    Monty Brandenberg <monty@lindenlab.com>    2013-07-30 15:21:31 -0400
committer Monty Brandenberg <monty@lindenlab.com>    2013-07-30 15:21:31 -0400
commit    f3927c6ca2aad757fe88fdd59b87986ca8b207a8 (patch)
tree      198943266fcdc6631bfaa0fdbe8eab5d8791e7d8 /indra/llcorehttp
parent    46dd3df73370590f61eb9a2cffcd732463a4319b (diff)
SH-4371 Reduce 22 ms inter-connection latency.
This work extended into client-side request throttling. Moved the throttle from llmeshrepository (which doesn't really want to do connection management) into llcorehttp, where it is now a policy-class option with a configurable rate. This still isn't the right long-term approach, as it couples the viewer to service behavior; once pipelining is in place, this notion becomes invalid.
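For illustration only (not part of this commit), a minimal sketch of how the new per-class throttle option might be exercised through HttpPolicyClass, using the set()/get() signatures visible in the diff below. The function name and the 100-requests-per-second value are hypothetical; in the viewer the option would normally be applied per policy class through HttpRequest's policy-option interface rather than on a bare HttpPolicyClass.

  #include "_httppolicyclass.h"
  #include "httprequest.h"

  using namespace LLCore;

  void throttle_option_sketch()        // hypothetical example function
  {
      HttpPolicyClass options;

      // Target roughly 100 requests per second for this policy class.
      // Zero (HTTP_THROTTLE_RATE_DEFAULT) leaves client-side throttling off.
      options.set(HttpRequest::PO_THROTTLE_RATE, 100L);

      long rate(0L);
      options.get(HttpRequest::PO_THROTTLE_RATE, &rate);
      // rate == 100L; set() clamps the value to the range [0, 1000000].
  }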
Diffstat (limited to 'indra/llcorehttp')
-rwxr-xr-x  indra/llcorehttp/_httpinternal.h        1
-rwxr-xr-x  indra/llcorehttp/_httppolicy.cpp       67
-rwxr-xr-x  indra/llcorehttp/_httppolicyclass.cpp  15
-rwxr-xr-x  indra/llcorehttp/_httppolicyclass.h     1
-rwxr-xr-x  indra/llcorehttp/_httpservice.cpp       3
-rwxr-xr-x  indra/llcorehttp/httprequest.h         23
6 files changed, 103 insertions, 7 deletions
diff --git a/indra/llcorehttp/_httpinternal.h b/indra/llcorehttp/_httpinternal.h
index 80f4f34942..effc6a42c5 100755
--- a/indra/llcorehttp/_httpinternal.h
+++ b/indra/llcorehttp/_httpinternal.h
@@ -143,6 +143,7 @@ const int HTTP_CONNECTION_LIMIT_MAX = 256;
// Miscellaneous defaults
const long HTTP_PIPELINING_DEFAULT = 0L;
const bool HTTP_USE_RETRY_AFTER_DEFAULT = true;
+const long HTTP_THROTTLE_RATE_DEFAULT = 0L;
// Tuning parameters
diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 32a9ba282a..808eebc6cc 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -49,12 +49,18 @@ struct HttpPolicy::ClassState
{
public:
ClassState()
+ : mThrottleEnd(0),
+ mThrottleLeft(0L),
+ mRequestCount(0L)
{}
HttpReadyQueue mReadyQueue;
HttpRetryQueue mRetryQueue;
HttpPolicyClass mOptions;
+ HttpTime mThrottleEnd;
+ long mThrottleLeft;
+ long mRequestCount;
};
@@ -190,6 +196,13 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
// the worker thread may sleep hard otherwise will ask for
// normal polling frequency.
//
+// Also implements a client-side request rate throttle.
+// This is intended to mimic and predict the throttling behavior
+// of grid services, but that is difficult to do with different
+// time bases. It also represents a rigid coupling between
+// viewer and server that makes parameters hard to change;
+// the hope is that pipelining will make this go away.
+//
HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
{
const HttpTime now(totalTime());
@@ -199,12 +212,22 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
ClassState & state(*mClasses[policy_class]);
+ const bool throttle_enabled(state.mOptions.mThrottleRate > 0L);
+ const bool throttle_current(throttle_enabled && now < state.mThrottleEnd);
+
+ if (throttle_current && state.mThrottleLeft <= 0)
+ {
+ // Throttled condition, don't serve this class but don't sleep hard.
+ result = HttpService::NORMAL;
+ continue;
+ }
+
int active(transport.getActiveCountInClass(policy_class));
int needed(state.mOptions.mConnectionLimit - active); // Expect negatives here
HttpRetryQueue & retryq(state.mRetryQueue);
HttpReadyQueue & readyq(state.mReadyQueue);
-
+
if (needed > 0)
{
// First see if we have any retries...
@@ -218,10 +241,27 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
op->stageFromReady(mService);
op->release();
-
+
+ ++state.mRequestCount;
--needed;
+ if (throttle_enabled)
+ {
+ if (now >= state.mThrottleEnd)
+ {
+ // Throttle expired, move to next window
+ LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
+ state.mThrottleLeft = state.mOptions.mThrottleRate;
+ state.mThrottleEnd = now + HttpTime(1000000);
+ }
+ if (--state.mThrottleLeft <= 0)
+ {
+ goto throttle_on;
+ }
+ }
}
-
+
// Now go on to the new requests...
while (needed > 0 && ! readyq.empty())
{
@@ -231,10 +271,29 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
op->stageFromReady(mService);
op->release();
+ ++state.mRequestCount;
--needed;
+ if (throttle_enabled)
+ {
+ if (now >= state.mThrottleEnd)
+ {
+ // Throttle expired, move to next window
+ LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
+ state.mThrottleLeft = state.mOptions.mThrottleRate;
+ state.mThrottleEnd = now + HttpTime(1000000);
+ }
+ if (--state.mThrottleLeft <= 0)
+ {
+ goto throttle_on;
+ }
+ }
}
}
-
+
+ throttle_on:
+
if (! readyq.empty() || ! retryq.empty())
{
// If anything is ready, continue looping...
diff --git a/indra/llcorehttp/_httppolicyclass.cpp b/indra/llcorehttp/_httppolicyclass.cpp
index fe4359081a..f34a8e9f1e 100755
--- a/indra/llcorehttp/_httppolicyclass.cpp
+++ b/indra/llcorehttp/_httppolicyclass.cpp
@@ -36,7 +36,8 @@ namespace LLCore
HttpPolicyClass::HttpPolicyClass()
: mConnectionLimit(HTTP_CONNECTION_LIMIT_DEFAULT),
mPerHostConnectionLimit(HTTP_CONNECTION_LIMIT_DEFAULT),
- mPipelining(HTTP_PIPELINING_DEFAULT)
+ mPipelining(HTTP_PIPELINING_DEFAULT),
+ mThrottleRate(HTTP_THROTTLE_RATE_DEFAULT)
{}
@@ -51,6 +52,7 @@ HttpPolicyClass & HttpPolicyClass::operator=(const HttpPolicyClass & other)
mConnectionLimit = other.mConnectionLimit;
mPerHostConnectionLimit = other.mPerHostConnectionLimit;
mPipelining = other.mPipelining;
+ mThrottleRate = other.mThrottleRate;
}
return *this;
}
@@ -59,7 +61,8 @@ HttpPolicyClass & HttpPolicyClass::operator=(const HttpPolicyClass & other)
HttpPolicyClass::HttpPolicyClass(const HttpPolicyClass & other)
: mConnectionLimit(other.mConnectionLimit),
mPerHostConnectionLimit(other.mPerHostConnectionLimit),
- mPipelining(other.mPipelining)
+ mPipelining(other.mPipelining),
+ mThrottleRate(other.mThrottleRate)
{}
@@ -79,6 +82,10 @@ HttpStatus HttpPolicyClass::set(HttpRequest::EPolicyOption opt, long value)
mPipelining = llclamp(value, 0L, 1L);
break;
+ case HttpRequest::PO_THROTTLE_RATE:
+ mThrottleRate = llclamp(value, 0L, 1000000L);
+ break;
+
default:
return HttpStatus(HttpStatus::LLCORE, HE_INVALID_ARG);
}
@@ -103,6 +110,10 @@ HttpStatus HttpPolicyClass::get(HttpRequest::EPolicyOption opt, long * value) co
*value = mPipelining;
break;
+ case HttpRequest::PO_THROTTLE_RATE:
+ *value = mThrottleRate;
+ break;
+
default:
return HttpStatus(HttpStatus::LLCORE, HE_INVALID_ARG);
}
diff --git a/indra/llcorehttp/_httppolicyclass.h b/indra/llcorehttp/_httppolicyclass.h
index 69fb459d22..38f1194ded 100755
--- a/indra/llcorehttp/_httppolicyclass.h
+++ b/indra/llcorehttp/_httppolicyclass.h
@@ -63,6 +63,7 @@ public:
long mConnectionLimit;
long mPerHostConnectionLimit;
long mPipelining;
+ long mThrottleRate;
}; // end class HttpPolicyClass
} // end namespace LLCore
diff --git a/indra/llcorehttp/_httpservice.cpp b/indra/llcorehttp/_httpservice.cpp
index e21d196a3e..c94249dc2d 100755
--- a/indra/llcorehttp/_httpservice.cpp
+++ b/indra/llcorehttp/_httpservice.cpp
@@ -52,7 +52,8 @@ const HttpService::OptionDescriptor HttpService::sOptionDesc[] =
{ false, true, true, false }, // PO_HTTP_PROXY
{ true, true, true, false }, // PO_LLPROXY
{ true, true, true, false }, // PO_TRACE
- { true, true, false, true } // PO_ENABLE_PIPELINING
+ { true, true, false, true }, // PO_ENABLE_PIPELINING
+ { true, true, false, true } // PO_THROTTLE_RATE
};
HttpService * HttpService::sInstance(NULL);
volatile HttpService::EState HttpService::sState(NOT_INITIALIZED);
diff --git a/indra/llcorehttp/httprequest.h b/indra/llcorehttp/httprequest.h
index 5c54d35a21..651654844a 100755
--- a/indra/llcorehttp/httprequest.h
+++ b/indra/llcorehttp/httprequest.h
@@ -139,23 +139,33 @@ public:
/// Limits the number of connections used for a single
/// literal address/port pair within the class.
+ ///
+ /// Per-class only
PO_PER_HOST_CONNECTION_LIMIT,
/// String containing a system-appropriate directory name
/// where SSL certs are stored.
+ ///
+ /// Global only
PO_CA_PATH,
/// String giving a full path to a file containing SSL certs.
+ ///
+ /// Global only
PO_CA_FILE,
/// String of host/port to use as simple HTTP proxy. This is
/// going to change in the future into something more elaborate
/// that may support richer schemes.
+ ///
+ /// Global only
PO_HTTP_PROXY,
/// Long value that if non-zero enables the use of the
/// traditional LLProxy code for http/socks5 support. If
// enabled, has priority over GP_HTTP_PROXY.
+ ///
+ /// Global only
PO_LLPROXY,
/// Long value setting the logging trace level for the
@@ -169,12 +179,25 @@ public:
/// These values are also used in the trace modes for
/// individual requests in HttpOptions. Also be aware that
/// tracing tends to impact performance of the viewer.
+ ///
+ /// Global only
PO_TRACE,
/// Suitable requests are allowed to pipeline on their
/// connections when they ask for it.
+ ///
+ /// Per-class only
PO_ENABLE_PIPELINING,
+ /// Controls whether client-side throttling should be
+ /// performed on this policy class. Positive values
+ /// enable throttling and specify the request rate
+ /// (requests per second) that should be targeted.
+ /// A value of zero, the default, specifies no throttling.
+ ///
+ /// Per-class only
+ PO_THROTTLE_RATE,
+
PO_LAST // Always at end
};
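For readers of the processReadyQueue() changes above, here is a standalone, simplified sketch (not from the commit) of the one-second window accounting done with mThrottleEnd and mThrottleLeft. The structure and names are illustrative; times are in microseconds, matching the HttpTime(1000000) window in the diff. The real code decrements the budget after staging each request and stops serving the class once the budget is exhausted, rather than gating each request through a helper like this.

  #include <cstdint>

  typedef std::uint64_t HttpTimeMicro;   // stand-in for HttpTime (microseconds)

  struct ThrottleWindow                  // illustrative, mirrors ClassState fields
  {
      HttpTimeMicro mThrottleEnd;        // end of the current one-second window
      long          mThrottleLeft;       // requests still allowed in this window
      long          mThrottleRate;       // PO_THROTTLE_RATE value; 0 disables throttling

      explicit ThrottleWindow(long rate)
          : mThrottleEnd(0), mThrottleLeft(0L), mThrottleRate(rate)
      {}

      // Returns true if a request may be issued at time 'now'.
      bool allowRequest(HttpTimeMicro now)
      {
          if (mThrottleRate <= 0L)
          {
              return true;                       // throttling disabled
          }
          if (now >= mThrottleEnd)
          {
              // Window expired: start a new one-second window with a full budget.
              mThrottleLeft = mThrottleRate;
              mThrottleEnd = now + 1000000;      // 1,000,000 microseconds
          }
          if (mThrottleLeft <= 0L)
          {
              return false;                      // budget exhausted until mThrottleEnd
          }
          --mThrottleLeft;
          return true;
      }
  };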