author      Monty Brandenberg <monty@lindenlab.com>   2013-07-30 15:21:31 -0400
committer   Monty Brandenberg <monty@lindenlab.com>   2013-07-30 15:21:31 -0400
commit      f3927c6ca2aad757fe88fdd59b87986ca8b207a8 (patch)
tree        198943266fcdc6631bfaa0fdbe8eab5d8791e7d8 /indra
parent      46dd3df73370590f61eb9a2cffcd732463a4319b (diff)
SH-4371 Reduce 22mS inter-connection latency.
This work really extended into the client-side request throttling. Moved it from llmeshrepository (which doesn't really want to do connection management) into llcorehttp, where it's now a class option with a configurable rate. This still isn't the right thing to do, as it creates coupling between viewer and services; when we get to pipelining, this notion becomes invalid.
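For reference, here is a minimal sketch of how a viewer subsystem can opt into the new throttle at init time, using the static policy-option setter exercised in llappcorehttp.cpp below. The policy-class handle mesh_policy_class is a placeholder for whatever class handle the caller holds (the real code uses mPolicies[policy]):

    // Sketch only: enable the new PO_THROTTLE_RATE option for one policy
    // class at startup. mesh_policy_class is a hypothetical policy handle.
    LLCore::HttpStatus status;
    status = LLCore::HttpRequest::setStaticPolicyOption(
        LLCore::HttpRequest::PO_THROTTLE_RATE,  // option added in this change
        mesh_policy_class,                      // policy class to throttle
        100L,                                   // target requests per second; 0 disables
        NULL);                                  // no completion handler
    if (! status)
    {
        LL_WARNS("Init") << "Unable to set throttle rate. Reason: "
                         << status.toString() << LL_ENDL;
    }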
Diffstat (limited to 'indra')
-rwxr-xr-x  indra/llcorehttp/_httpinternal.h        1
-rwxr-xr-x  indra/llcorehttp/_httppolicy.cpp       67
-rwxr-xr-x  indra/llcorehttp/_httppolicyclass.cpp  15
-rwxr-xr-x  indra/llcorehttp/_httppolicyclass.h     1
-rwxr-xr-x  indra/llcorehttp/_httpservice.cpp       3
-rwxr-xr-x  indra/llcorehttp/httprequest.h         23
-rwxr-xr-x  indra/newview/llappcorehttp.cpp        31
-rwxr-xr-x  indra/newview/llmeshrepository.cpp     59
-rwxr-xr-x  indra/newview/llmeshrepository.h       11
9 files changed, 152 insertions, 59 deletions
diff --git a/indra/llcorehttp/_httpinternal.h b/indra/llcorehttp/_httpinternal.h
index 80f4f34942..effc6a42c5 100755
--- a/indra/llcorehttp/_httpinternal.h
+++ b/indra/llcorehttp/_httpinternal.h
@@ -143,6 +143,7 @@ const int HTTP_CONNECTION_LIMIT_MAX = 256;
// Miscellaneous defaults
const long HTTP_PIPELINING_DEFAULT = 0L;
const bool HTTP_USE_RETRY_AFTER_DEFAULT = true;
+const long HTTP_THROTTLE_RATE_DEFAULT = 0L;
// Tuning parameters
diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index 32a9ba282a..808eebc6cc 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -49,12 +49,18 @@ struct HttpPolicy::ClassState
{
public:
ClassState()
+ : mThrottleEnd(0),
+ mThrottleLeft(0L),
+ mRequestCount(0L)
{}
HttpReadyQueue mReadyQueue;
HttpRetryQueue mRetryQueue;
HttpPolicyClass mOptions;
+ HttpTime mThrottleEnd;
+ long mThrottleLeft;
+ long mRequestCount;
};
@@ -190,6 +196,13 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
// the worker thread may sleep hard otherwise will ask for
// normal polling frequency.
//
+// Implements a client-side request rate throttle as well.
+// This is intended to mimic and predict throttling behavior
+// of grid services but that is difficult to do with different
+// time bases. This also represents a rigid coupling between
+// viewer and server that makes it hard to change parameters
+// and I hope we can make this go away with pipelining.
+//
HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
{
const HttpTime now(totalTime());
@@ -199,12 +212,22 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
for (int policy_class(0); policy_class < mClasses.size(); ++policy_class)
{
ClassState & state(*mClasses[policy_class]);
+ const bool throttle_enabled(state.mOptions.mThrottleRate > 0L);
+ const bool throttle_current(throttle_enabled && now < state.mThrottleEnd);
+
+ if (throttle_current && state.mThrottleLeft <= 0)
+ {
+ // Throttled condition, don't serve this class but don't sleep hard.
+ result = HttpService::NORMAL;
+ continue;
+ }
+
int active(transport.getActiveCountInClass(policy_class));
int needed(state.mOptions.mConnectionLimit - active); // Expect negatives here
HttpRetryQueue & retryq(state.mRetryQueue);
HttpReadyQueue & readyq(state.mReadyQueue);
-
+
if (needed > 0)
{
// First see if we have any retries...
@@ -218,10 +241,27 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
op->stageFromReady(mService);
op->release();
-
+
+ ++state.mRequestCount;
--needed;
+ if (throttle_enabled)
+ {
+ if (now >= state.mThrottleEnd)
+ {
+ // Throttle expired, move to next window
+ LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
+ state.mThrottleLeft = state.mOptions.mThrottleRate;
+ state.mThrottleEnd = now + HttpTime(1000000);
+ }
+ if (--state.mThrottleLeft <= 0)
+ {
+ goto throttle_on;
+ }
+ }
}
-
+
// Now go on to the new requests...
while (needed > 0 && ! readyq.empty())
{
@@ -231,10 +271,29 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
op->stageFromReady(mService);
op->release();
+ ++state.mRequestCount;
--needed;
+ if (throttle_enabled)
+ {
+ if (now >= state.mThrottleEnd)
+ {
+ // Throttle expired, move to next window
+ LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
+ state.mThrottleLeft = state.mOptions.mThrottleRate;
+ state.mThrottleEnd = now + HttpTime(1000000);
+ }
+ if (--state.mThrottleLeft <= 0)
+ {
+ goto throttle_on;
+ }
+ }
}
}
-
+
+ throttle_on:
+
if (! readyq.empty() || ! retryq.empty())
{
// If anything is ready, continue looping...
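In plain form, the throttle added to processReadyQueue() above is a per-class counter that refills once per second: when the window expires the budget resets to mThrottleRate, each issued request spends one unit, and the class is skipped (without a hard sleep) once the budget is gone. A simplified, self-contained restatement of that logic follows; the types and field names mirror the patch but this is not the actual class, and the assumption that HttpTime counts microseconds is inferred from the 1000000 window constant:

    // Simplified restatement of the one-second throttle window: allow at
    // most mThrottleRate requests per window, carrying state between calls.
    typedef unsigned long long HttpTime;            // stand-in for the library's time type

    struct ThrottleState
    {
        long     mThrottleRate;                     // requests/second; 0 disables throttling
        long     mThrottleLeft;                     // budget remaining in current window
        HttpTime mThrottleEnd;                      // end of current window (microseconds)
    };

    bool throttle_allows_issue(ThrottleState & state, HttpTime now)
    {
        if (state.mThrottleRate <= 0L)
        {
            return true;                            // throttling disabled for this class
        }
        if (now >= state.mThrottleEnd)
        {
            // Window expired: refill the budget and open a new one-second window.
            state.mThrottleLeft = state.mThrottleRate;
            state.mThrottleEnd  = now + HttpTime(1000000);
        }
        if (state.mThrottleLeft <= 0L)
        {
            return false;                           // budget spent, wait for the next window
        }
        --state.mThrottleLeft;
        return true;
    }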
diff --git a/indra/llcorehttp/_httppolicyclass.cpp b/indra/llcorehttp/_httppolicyclass.cpp
index fe4359081a..f34a8e9f1e 100755
--- a/indra/llcorehttp/_httppolicyclass.cpp
+++ b/indra/llcorehttp/_httppolicyclass.cpp
@@ -36,7 +36,8 @@ namespace LLCore
HttpPolicyClass::HttpPolicyClass()
: mConnectionLimit(HTTP_CONNECTION_LIMIT_DEFAULT),
mPerHostConnectionLimit(HTTP_CONNECTION_LIMIT_DEFAULT),
- mPipelining(HTTP_PIPELINING_DEFAULT)
+ mPipelining(HTTP_PIPELINING_DEFAULT),
+ mThrottleRate(HTTP_THROTTLE_RATE_DEFAULT)
{}
@@ -51,6 +52,7 @@ HttpPolicyClass & HttpPolicyClass::operator=(const HttpPolicyClass & other)
mConnectionLimit = other.mConnectionLimit;
mPerHostConnectionLimit = other.mPerHostConnectionLimit;
mPipelining = other.mPipelining;
+ mThrottleRate = other.mThrottleRate;
}
return *this;
}
@@ -59,7 +61,8 @@ HttpPolicyClass & HttpPolicyClass::operator=(const HttpPolicyClass & other)
HttpPolicyClass::HttpPolicyClass(const HttpPolicyClass & other)
: mConnectionLimit(other.mConnectionLimit),
mPerHostConnectionLimit(other.mPerHostConnectionLimit),
- mPipelining(other.mPipelining)
+ mPipelining(other.mPipelining),
+ mThrottleRate(other.mThrottleRate)
{}
@@ -79,6 +82,10 @@ HttpStatus HttpPolicyClass::set(HttpRequest::EPolicyOption opt, long value)
mPipelining = llclamp(value, 0L, 1L);
break;
+ case HttpRequest::PO_THROTTLE_RATE:
+ mThrottleRate = llclamp(value, 0L, 1000000L);
+ break;
+
default:
return HttpStatus(HttpStatus::LLCORE, HE_INVALID_ARG);
}
@@ -103,6 +110,10 @@ HttpStatus HttpPolicyClass::get(HttpRequest::EPolicyOption opt, long * value) co
*value = mPipelining;
break;
+ case HttpRequest::PO_THROTTLE_RATE:
+ *value = mThrottleRate;
+ break;
+
default:
return HttpStatus(HttpStatus::LLCORE, HE_INVALID_ARG);
}
diff --git a/indra/llcorehttp/_httppolicyclass.h b/indra/llcorehttp/_httppolicyclass.h
index 69fb459d22..38f1194ded 100755
--- a/indra/llcorehttp/_httppolicyclass.h
+++ b/indra/llcorehttp/_httppolicyclass.h
@@ -63,6 +63,7 @@ public:
long mConnectionLimit;
long mPerHostConnectionLimit;
long mPipelining;
+ long mThrottleRate;
}; // end class HttpPolicyClass
} // end namespace LLCore
diff --git a/indra/llcorehttp/_httpservice.cpp b/indra/llcorehttp/_httpservice.cpp
index e21d196a3e..c94249dc2d 100755
--- a/indra/llcorehttp/_httpservice.cpp
+++ b/indra/llcorehttp/_httpservice.cpp
@@ -52,7 +52,8 @@ const HttpService::OptionDescriptor HttpService::sOptionDesc[] =
{ false, true, true, false }, // PO_HTTP_PROXY
{ true, true, true, false }, // PO_LLPROXY
{ true, true, true, false }, // PO_TRACE
- { true, true, false, true } // PO_ENABLE_PIPELINING
+ { true, true, false, true }, // PO_ENABLE_PIPELINING
+ { true, true, false, true } // PO_THROTTLE_RATE
};
HttpService * HttpService::sInstance(NULL);
volatile HttpService::EState HttpService::sState(NOT_INITIALIZED);
diff --git a/indra/llcorehttp/httprequest.h b/indra/llcorehttp/httprequest.h
index 5c54d35a21..651654844a 100755
--- a/indra/llcorehttp/httprequest.h
+++ b/indra/llcorehttp/httprequest.h
@@ -139,23 +139,33 @@ public:
/// Limits the number of connections used for a single
/// literal address/port pair within the class.
+ ///
+ /// Per-class only
PO_PER_HOST_CONNECTION_LIMIT,
/// String containing a system-appropriate directory name
/// where SSL certs are stored.
+ ///
+ /// Global only
PO_CA_PATH,
/// String giving a full path to a file containing SSL certs.
+ ///
+ /// Global only
PO_CA_FILE,
/// String of host/port to use as simple HTTP proxy. This is
/// going to change in the future into something more elaborate
/// that may support richer schemes.
+ ///
+ /// Global only
PO_HTTP_PROXY,
/// Long value that if non-zero enables the use of the
/// traditional LLProxy code for http/socks5 support. If
// enabled, has priority over GP_HTTP_PROXY.
+ ///
+ /// Global only
PO_LLPROXY,
/// Long value setting the logging trace level for the
@@ -169,12 +179,25 @@ public:
/// These values are also used in the trace modes for
/// individual requests in HttpOptions. Also be aware that
/// tracing tends to impact performance of the viewer.
+ ///
+ /// Global only
PO_TRACE,
/// Suitable requests are allowed to pipeline on their
/// connections when they ask for it.
+ ///
+ /// Per-class only
PO_ENABLE_PIPELINING,
+ /// Controls whether client-side throttling should be
+ /// performed on this policy class. Positive values
+ /// enable throttling and specify the request rate
+ /// (requests per second) that should be targeted.
+ /// A value of zero, the default, specifies no throttling.
+ ///
+ /// Per-class only
+ PO_THROTTLE_RATE,
+
PO_LAST // Always at end
};
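As a quick illustration of the option plumbing, the new value round-trips through HttpPolicyClass::set()/get() and is clamped as shown in _httppolicyclass.cpp above. HttpPolicyClass is internal to llcorehttp, so this snippet is illustrative only:

    // Illustration only: PO_THROTTLE_RATE is stored per policy class and
    // clamped to [0, 1000000] by HttpPolicyClass::set().
    LLCore::HttpPolicyClass options;
    LLCore::HttpStatus status;
    status = options.set(LLCore::HttpRequest::PO_THROTTLE_RATE, 100L);
    long rate(0L);
    status = options.get(LLCore::HttpRequest::PO_THROTTLE_RATE, &rate);   // rate is now 100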
diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp
index 104debe023..b90b9749f9 100755
--- a/indra/newview/llappcorehttp.cpp
+++ b/indra/newview/llappcorehttp.cpp
@@ -40,32 +40,33 @@ static const struct
U32 mMin;
U32 mMax;
U32 mDivisor;
+ U32 mRate;
std::string mKey;
const char * mUsage;
} init_data[] = // Default and dynamic values for classes
{
{
- LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 1,
+ LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 1, 0,
"TextureFetchConcurrency",
"texture fetch"
},
{
- LLAppCoreHttp::AP_MESH1, 32, 1, 128, 1,
+ LLAppCoreHttp::AP_MESH1, 32, 1, 128, 1, 100,
"MeshMaxConcurrentRequests",
"mesh fetch"
},
{
- LLAppCoreHttp::AP_MESH2, 8, 1, 32, 4,
+ LLAppCoreHttp::AP_MESH2, 8, 1, 32, 4, 100,
"MeshMaxConcurrentRequests",
"mesh2 fetch"
},
{
- LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 1,
+ LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 1, 0,
"",
"large mesh fetch"
},
{
- LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 1,
+ LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 1, 0,
"",
"asset upload"
}
@@ -267,10 +268,28 @@ void LLAppCoreHttp::cleanup()
void LLAppCoreHttp::refreshSettings(bool initial)
{
+ LLCore::HttpStatus status;
+
for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{
const EAppPolicy policy(init_data[i].mPolicy);
+ // Set any desired throttle
+ if (initial && init_data[i].mRate)
+ {
+ // Init-time only, can use the static setters here
+ status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE,
+ mPolicies[policy],
+ init_data[i].mRate,
+ NULL);
+ if (! status)
+ {
+ LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+ << " throttle rate. Reason: " << status.toString()
+ << LL_ENDL;
+ }
+ }
+
// Get target connection concurrency value
U32 setting(init_data[i].mDefault);
if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
@@ -299,7 +318,7 @@ void LLAppCoreHttp::refreshSettings(bool initial)
setting, NULL);
if (LLCORE_HTTP_HANDLE_INVALID == handle)
{
- LLCore::HttpStatus status(mRequest->getStatus());
+ status = mRequest->getStatus();
LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
<< " concurrency. Reason: " << status.toString()
<< LL_ENDL;
diff --git a/indra/newview/llmeshrepository.cpp b/indra/newview/llmeshrepository.cpp
index d02384f87c..e9b1a10e73 100755
--- a/indra/newview/llmeshrepository.cpp
+++ b/indra/newview/llmeshrepository.cpp
@@ -229,7 +229,6 @@
LLMeshRepository gMeshRepo;
const S32 MESH_HEADER_SIZE = 4096; // Important: assumption is that headers fit in this space
-const U32 MAX_MESH_REQUESTS_PER_SECOND = 100;
const S32 REQUEST_HIGH_WATER_MIN = 32;
const S32 REQUEST_HIGH_WATER_MAX = 80;
const S32 REQUEST_LOW_WATER_MIN = 16;
@@ -613,7 +612,6 @@ void log_upload_error(LLCore::HttpStatus status, const LLSD& content,
LLMeshRepoThread::LLMeshRepoThread()
: LLThread("mesh repo"),
mWaiting(false),
- mHttpRetries(0U),
mHttpRequest(NULL),
mHttpOptions(NULL),
mHttpLargeOptions(NULL),
@@ -701,23 +699,9 @@ void LLMeshRepoThread::run()
if (! LLApp::isQuitting())
{
- static U32 count = 0;
- static F32 last_hundred = gFrameTimeSeconds;
-
- if (gFrameTimeSeconds - last_hundred > 1.f)
- { //a second has gone by, clear count
- last_hundred = gFrameTimeSeconds;
- count = 0;
- }
- else
- {
- count += mHttpRetries;
- }
- mHttpRetries = 0U;
-
- // NOTE: throttling intentionally favors LOD requests over header requests
+ // NOTE: order of queue processing intentionally favors LOD requests over header requests
- while (!mLODReqQ.empty() && count < MAX_MESH_REQUESTS_PER_SECOND && mHttpRequestSet.size() < sRequestHighWater)
+ while (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
if (! mMutex)
{
@@ -728,7 +712,7 @@ void LLMeshRepoThread::run()
mLODReqQ.pop();
LLMeshRepository::sLODProcessing--;
mMutex->unlock();
- if (!fetchMeshLOD(req.mMeshParams, req.mLOD, count))//failed, resubmit
+ if (!fetchMeshLOD(req.mMeshParams, req.mLOD))//failed, resubmit
{
mMutex->lock();
mLODReqQ.push(req) ;
@@ -737,7 +721,7 @@ void LLMeshRepoThread::run()
}
}
- while (!mHeaderReqQ.empty() && count < MAX_MESH_REQUESTS_PER_SECOND && mHttpRequestSet.size() < sRequestHighWater)
+ while (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
if (! mMutex)
{
@@ -747,7 +731,7 @@ void LLMeshRepoThread::run()
HeaderRequest req = mHeaderReqQ.front();
mHeaderReqQ.pop();
mMutex->unlock();
- if (!fetchMeshHeader(req.mMeshParams, count))//failed, resubmit
+ if (!fetchMeshHeader(req.mMeshParams))//failed, resubmit
{
mMutex->lock();
mHeaderReqQ.push(req) ;
@@ -762,14 +746,14 @@ void LLMeshRepoThread::run()
// order will lose. Keep to the throttle enforcement and pay
// attention to the highwater level (enforced in each fetchXXX()
// method).
- if (! mSkinRequests.empty() && count < MAX_MESH_REQUESTS_PER_SECOND && mHttpRequestSet.size() < sRequestHighWater)
+ if (! mSkinRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
// *FIXME: this really does need a lock as do the following ones
std::set<LLUUID> incomplete;
for (std::set<LLUUID>::iterator iter = mSkinRequests.begin(); iter != mSkinRequests.end(); ++iter)
{
LLUUID mesh_id = *iter;
- if (!fetchMeshSkinInfo(mesh_id, count))
+ if (!fetchMeshSkinInfo(mesh_id))
{
incomplete.insert(mesh_id);
}
@@ -777,13 +761,13 @@ void LLMeshRepoThread::run()
mSkinRequests.swap(incomplete);
}
- if (! mDecompositionRequests.empty() && count < MAX_MESH_REQUESTS_PER_SECOND && mHttpRequestSet.size() < sRequestHighWater)
+ if (! mDecompositionRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
std::set<LLUUID> incomplete;
for (std::set<LLUUID>::iterator iter = mDecompositionRequests.begin(); iter != mDecompositionRequests.end(); ++iter)
{
LLUUID mesh_id = *iter;
- if (!fetchMeshDecomposition(mesh_id, count))
+ if (!fetchMeshDecomposition(mesh_id))
{
incomplete.insert(mesh_id);
}
@@ -791,13 +775,13 @@ void LLMeshRepoThread::run()
mDecompositionRequests.swap(incomplete);
}
- if (! mPhysicsShapeRequests.empty() && count < MAX_MESH_REQUESTS_PER_SECOND && mHttpRequestSet.size() < sRequestHighWater)
+ if (! mPhysicsShapeRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
{
std::set<LLUUID> incomplete;
for (std::set<LLUUID>::iterator iter = mPhysicsShapeRequests.begin(); iter != mPhysicsShapeRequests.end(); ++iter)
{
LLUUID mesh_id = *iter;
- if (!fetchMeshPhysicsShape(mesh_id, count))
+ if (!fetchMeshPhysicsShape(mesh_id))
{
incomplete.insert(mesh_id);
}
@@ -965,7 +949,7 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c
}
-bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id, U32& count)
+bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id)
{
if (!mHeaderMutex)
@@ -1023,7 +1007,7 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id, U32& count)
}
//reading from VFS failed for whatever reason, fetch from sim
- if (count >= MAX_MESH_REQUESTS_PER_SECOND || mHttpRequestSet.size() >= sRequestHighWater)
+ if (mHttpRequestSet.size() >= sRequestHighWater)
{
return false;
}
@@ -1060,7 +1044,7 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id, U32& count)
return ret;
}
-bool LLMeshRepoThread::fetchMeshDecomposition(const LLUUID& mesh_id, U32& count)
+bool LLMeshRepoThread::fetchMeshDecomposition(const LLUUID& mesh_id)
{
if (!mHeaderMutex)
{
@@ -1118,7 +1102,7 @@ bool LLMeshRepoThread::fetchMeshDecomposition(const LLUUID& mesh_id, U32& count)
}
//reading from VFS failed for whatever reason, fetch from sim
- if (count >= MAX_MESH_REQUESTS_PER_SECOND || mHttpRequestSet.size() >= sRequestHighWater)
+ if (mHttpRequestSet.size() >= sRequestHighWater)
{
return false;
}
@@ -1155,7 +1139,7 @@ bool LLMeshRepoThread::fetchMeshDecomposition(const LLUUID& mesh_id, U32& count)
return ret;
}
-bool LLMeshRepoThread::fetchMeshPhysicsShape(const LLUUID& mesh_id, U32& count)
+bool LLMeshRepoThread::fetchMeshPhysicsShape(const LLUUID& mesh_id)
{
if (!mHeaderMutex)
{
@@ -1212,7 +1196,7 @@ bool LLMeshRepoThread::fetchMeshPhysicsShape(const LLUUID& mesh_id, U32& count)
}
//reading from VFS failed for whatever reason, fetch from sim
- if (count >= MAX_MESH_REQUESTS_PER_SECOND || mHttpRequestSet.size() >= sRequestHighWater)
+ if (mHttpRequestSet.size() >= sRequestHighWater)
{
return false;
}
@@ -1282,7 +1266,7 @@ void LLMeshRepoThread::decActiveHeaderRequests()
}
//return false if failed to get header
-bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params, U32& count)
+bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params)
{
{
//look for mesh in asset in vfs
@@ -1331,7 +1315,6 @@ bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params, U32& c
handler->mHttpHandle = handle;
mHttpRequestSet.insert(handler);
++LLMeshRepository::sHTTPRequestCount;
- ++count;
}
}
@@ -1339,7 +1322,7 @@ bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params, U32& c
}
//return false if failed to get mesh lod.
-bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod, U32& count)
+bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
{
if (!mHeaderMutex)
{
@@ -1413,7 +1396,6 @@ bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod,
handler->mHttpHandle = handle;
mHttpRequestSet.insert(handler);
++LLMeshRepository::sHTTPRequestCount;
- ++count;
}
}
else
@@ -2374,11 +2356,8 @@ void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpRespo
{
mProcessed = true;
- // Accumulate retries, we'll use these to offset the HTTP
- // count and maybe hold to a throttle better.
unsigned int retries(0U);
response->getRetries(NULL, &retries);
- gMeshRepo.mThread->mHttpRetries += retries;
LLMeshRepository::sHTTPRetryCount += retries;
LLCore::HttpStatus status(response->getStatus());
diff --git a/indra/newview/llmeshrepository.h b/indra/newview/llmeshrepository.h
index 70079eed23..400ceb4ad7 100755
--- a/indra/newview/llmeshrepository.h
+++ b/indra/newview/llmeshrepository.h
@@ -322,7 +322,6 @@ public:
// llcorehttp library interface objects.
LLCore::HttpStatus mHttpStatus;
- unsigned int mHttpRetries;
LLCore::HttpRequest * mHttpRequest;
LLCore::HttpOptions * mHttpOptions;
LLCore::HttpOptions * mHttpLargeOptions;
@@ -345,8 +344,8 @@ public:
void lockAndLoadMeshLOD(const LLVolumeParams& mesh_params, S32 lod);
void loadMeshLOD(const LLVolumeParams& mesh_params, S32 lod);
- bool fetchMeshHeader(const LLVolumeParams& mesh_params, U32& count);
- bool fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod, U32& count);
+ bool fetchMeshHeader(const LLVolumeParams& mesh_params);
+ bool fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod);
bool headerReceived(const LLVolumeParams& mesh_params, U8* data, S32 data_size);
bool lodReceived(const LLVolumeParams& mesh_params, S32 lod, U8* data, S32 data_size);
bool skinInfoReceived(const LLUUID& mesh_id, U8* data, S32 data_size);
@@ -363,15 +362,15 @@ public:
//send request for skin info, returns true if header info exists
// (should hold onto mesh_id and try again later if header info does not exist)
- bool fetchMeshSkinInfo(const LLUUID& mesh_id, U32& count);
+ bool fetchMeshSkinInfo(const LLUUID& mesh_id);
//send request for decomposition, returns true if header info exists
// (should hold onto mesh_id and try again later if header info does not exist)
- bool fetchMeshDecomposition(const LLUUID& mesh_id, U32& count);
+ bool fetchMeshDecomposition(const LLUUID& mesh_id);
//send request for PhysicsShape, returns true if header info exists
// (should hold onto mesh_id and try again later if header info does not exist)
- bool fetchMeshPhysicsShape(const LLUUID& mesh_id, U32& count);
+ bool fetchMeshPhysicsShape(const LLUUID& mesh_id);
static void incActiveLODRequests();
static void decActiveLODRequests();