author     Monty Brandenberg <monty@lindenlab.com>    2014-09-04 16:57:44 -0400
committer  Monty Brandenberg <monty@lindenlab.com>    2014-09-04 16:57:44 -0400
commit     0c20beda6800149ee71a307ca4e943b5bba56908
tree       504ed9a9816a11022801e25334b5f4431f54744e
parent     7fa382937679a9937fd7b09e33b6c2f39ec680ff
Pipelining work. Extend transfer timeout by the pipeline depth.

Transfers can appear delayed with deep pipelining and more requests in the pool, so the transfer timeout is now scaled by the pipeline depth. Added the bad-HTTP-status error (typically an HTTP status of 0 reported by libcurl) to the list of retryable errors; there's a response-stream problem with libcurl and pipelining that induces it, and retrying helps but may not be entirely safe. Watch bug 1420 on the libcurl SourceForge bug tracker. Extended the options of the test/example program to include un-ranged requests, and documented the excessive data transfer induced when ranged requests are disabled. That is an abnormal mode for very rare users, so we'll just eat the cost for now.
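In code terms, the timeout change amounts to the sketch below. It uses libcurl calls that appear in the _httpoprequest.cpp hunk further down; the helper name and parameters are illustrative only, not part of llcorehttp, and the class policy's mPipelining member supplies the pipeline depth.

    // Hedged sketch of the transfer-timeout scaling; see the real call site
    // in _httpoprequest.cpp below. 'pipelining_depth' stands in for the class
    // policy's mPipelining and 'timeout' for the per-request timeout.
    #include <curl/curl.h>

    static void set_request_timeouts(CURL * handle, long timeout, long pipelining_depth)
    {
        long xfer_timeout = timeout;
        if (pipelining_depth > 1L)
        {
            // With a pipeline N deep, a response can wait behind up to N-1
            // earlier responses on the same connection, so give the transfer
            // N times the usual budget. The connect timeout is left unchanged.
            xfer_timeout *= pipelining_depth;
        }
        curl_easy_setopt(handle, CURLOPT_TIMEOUT, xfer_timeout);
        curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, timeout);
    }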
-rwxr-xr-x  indra/llcorehttp/_httplibcurl.cpp                |  15
-rwxr-xr-x  indra/llcorehttp/_httpoprequest.cpp              |  34
-rwxr-xr-x  indra/llcorehttp/examples/http_texture_load.cpp  |  21
-rwxr-xr-x  indra/llcorehttp/httpcommon.cpp                  |  13
-rwxr-xr-x  indra/newview/app_settings/settings.xml          |   4
-rwxr-xr-x  indra/newview/llmeshrepository.cpp               |  13
6 files changed, 78 insertions, 22 deletions
diff --git a/indra/llcorehttp/_httplibcurl.cpp b/indra/llcorehttp/_httplibcurl.cpp
index b46833a1f3..cfbe0fd2bb 100755
--- a/indra/llcorehttp/_httplibcurl.cpp
+++ b/indra/llcorehttp/_httplibcurl.cpp
@@ -217,8 +217,17 @@ void HttpLibcurl::addOp(HttpOpRequest * op)
}
// Make the request live
- curl_multi_add_handle(mMultiHandles[op->mReqPolicy], op->mCurlHandle);
+ CURLMcode code;
+ code = curl_multi_add_handle(mMultiHandles[op->mReqPolicy], op->mCurlHandle);
+ if (CURLM_OK != code)
+ {
+ // *TODO: Better cleanup and recovery but not much we can do here.
+ check_curl_multi_code(code);
+ return;
+ }
op->mCurlActive = true;
+ mActiveOps.insert(op);
+ ++mActiveHandles[op->mReqPolicy];
if (op->mTracing > HTTP_TRACE_OFF)
{
@@ -230,10 +239,6 @@ void HttpLibcurl::addOp(HttpOpRequest * op)
<< ", Readies: " << policy.getReadyCount(op->mReqPolicy)
<< LL_ENDL;
}
-
- // On success, make operation active
- mActiveOps.insert(op);
- ++mActiveHandles[op->mReqPolicy];
}
diff --git a/indra/llcorehttp/_httpoprequest.cpp b/indra/llcorehttp/_httpoprequest.cpp
index eb664fdced..38c1f1e78a 100755
--- a/indra/llcorehttp/_httpoprequest.cpp
+++ b/indra/llcorehttp/_httpoprequest.cpp
@@ -378,6 +378,7 @@ void HttpOpRequest::setupCommon(HttpRequest::policy_t policy_id,
// Junk may be left around from a failed request and that
// needs to be cleaned out.
//
+// *TODO: Move this to _httplibcurl where it belongs.
HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
{
CURLcode code;
@@ -411,8 +412,9 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
// *FIXME: better error handling later
HttpStatus status;
- // Get global policy options
- HttpPolicyGlobal & policy(service->getPolicy().getGlobalOptions());
+ // Get global and class policy options
+ HttpPolicyGlobal & gpolicy(service->getPolicy().getGlobalOptions());
+ HttpPolicyClass & cpolicy(service->getPolicy().getClassOptions(mReqPolicy));
mCurlHandle = LLCurl::createStandardCurlHandle();
if (! mCurlHandle)
@@ -462,30 +464,30 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
code = curl_easy_setopt(mCurlHandle, CURLOPT_SSL_VERIFYHOST, 0);
check_curl_easy_code(code, CURLOPT_SSL_VERIFYHOST);
- if (policy.mUseLLProxy)
+ if (gpolicy.mUseLLProxy)
{
// Use the viewer-based thread-safe API which has a
// fast/safe check for proxy enable. Would like to
// encapsulate this someway...
LLProxy::getInstance()->applyProxySettings(mCurlHandle);
}
- else if (policy.mHttpProxy.size())
+ else if (gpolicy.mHttpProxy.size())
{
// *TODO: This is fine for now but get fuller socks5/
// authentication thing going later....
- code = curl_easy_setopt(mCurlHandle, CURLOPT_PROXY, policy.mHttpProxy.c_str());
+ code = curl_easy_setopt(mCurlHandle, CURLOPT_PROXY, gpolicy.mHttpProxy.c_str());
check_curl_easy_code(code, CURLOPT_PROXY);
code = curl_easy_setopt(mCurlHandle, CURLOPT_PROXYTYPE, CURLPROXY_HTTP);
check_curl_easy_code(code, CURLOPT_PROXYTYPE);
}
- if (policy.mCAPath.size())
+ if (gpolicy.mCAPath.size())
{
- code = curl_easy_setopt(mCurlHandle, CURLOPT_CAPATH, policy.mCAPath.c_str());
+ code = curl_easy_setopt(mCurlHandle, CURLOPT_CAPATH, gpolicy.mCAPath.c_str());
check_curl_easy_code(code, CURLOPT_CAPATH);
}
- if (policy.mCAFile.size())
+ if (gpolicy.mCAFile.size())
{
- code = curl_easy_setopt(mCurlHandle, CURLOPT_CAINFO, policy.mCAFile.c_str());
+ code = curl_easy_setopt(mCurlHandle, CURLOPT_CAINFO, gpolicy.mCAFile.c_str());
check_curl_easy_code(code, CURLOPT_CAINFO);
}
@@ -594,6 +596,20 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
{
xfer_timeout = timeout;
}
+ if (cpolicy.mPipelining > 1L)
+ {
+ // Pipelining affects both connection and transfer timeout values.
+ // Requests that are added to a pipeline immediately have completed
+ // their connection so the connection delay tends to be less than
+ // the non-pipelined value. Transfers are the opposite. Transfer
+ // timeout starts once the connection is established and completion
+ // can be delayed due to the pipelined requests ahead. So, it's
+ // a handwave but bump the transfer timeout up by the pipelining
+ // depth to give some room.
+ //
+ // *TODO: Find a better scheme than timeouts to guarantee liveness.
+ xfer_timeout *= cpolicy.mPipelining;
+ }
code = curl_easy_setopt(mCurlHandle, CURLOPT_TIMEOUT, xfer_timeout);
check_curl_easy_code(code, CURLOPT_TIMEOUT);
code = curl_easy_setopt(mCurlHandle, CURLOPT_CONNECTTIMEOUT, timeout);
diff --git a/indra/llcorehttp/examples/http_texture_load.cpp b/indra/llcorehttp/examples/http_texture_load.cpp
index 88692c3f69..b76c874557 100755
--- a/indra/llcorehttp/examples/http_texture_load.cpp
+++ b/indra/llcorehttp/examples/http_texture_load.cpp
@@ -102,6 +102,7 @@ public:
public:
bool mVerbose;
bool mRandomRange;
+ bool mNoRange;
int mRequestLowWater;
int mRequestHighWater;
handle_set_t mHandles;
@@ -162,10 +163,11 @@ int main(int argc, char** argv)
{
LLCore::HttpStatus status;
bool do_random(false);
+ bool do_whole(false);
bool do_verbose(false);
int option(-1);
- while (-1 != (option = getopt(argc, argv, "u:c:h?RvH:p:t:")))
+ while (-1 != (option = getopt(argc, argv, "u:c:h?RwvH:p:t:")))
{
switch (option)
{
@@ -236,6 +238,12 @@ int main(int argc, char** argv)
case 'R':
do_random = true;
+ do_whole = false;
+ break;
+
+ case 'w':
+ do_whole = true;
+ do_random = false;
break;
case 'v':
@@ -307,6 +315,7 @@ int main(int argc, char** argv)
ws.mUrl = url_format;
ws.loadAssetUuids(uuids);
ws.mRandomRange = do_random;
+ ws.mNoRange = do_whole;
ws.mVerbose = do_verbose;
ws.mRequestHighWater = highwater;
ws.mRequestLowWater = ws.mRequestHighWater / 2;
@@ -381,6 +390,7 @@ void usage(std::ostream & out)
" -u <url_format> printf-style format string for URL generation\n"
" Default: " << url_format << "\n"
" -R Issue GETs with random Range: headers\n"
+ " -w Issue GETs without Range: headers to get whole object\n"
" -c <limit> Maximum connection concurrency. Range: [1..100]\n"
" Default: " << concurrency_limit << "\n"
" -H <limit> HTTP request highwater (requests fed to llcorehttp).\n"
@@ -400,6 +410,7 @@ WorkingSet::WorkingSet()
: LLCore::HttpHandler(),
mVerbose(false),
mRandomRange(false),
+ mNoRange(false),
mRemaining(200),
mLimit(200),
mAt(0),
@@ -449,8 +460,12 @@ bool WorkingSet::reload(LLCore::HttpRequest * hr, LLCore::HttpOptions * opt)
#else
snprintf(buffer, sizeof(buffer), mUrl.c_str(), mAssets[mAt].mUuid.c_str());
#endif
- int offset(mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mOffset);
- int length(mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mLength);
+ int offset(mNoRange
+ ? 0
+ : (mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mOffset));
+ int length(mNoRange
+ ? 0
+ : (mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mLength));
LLCore::HttpHandle handle;
if (offset || length)
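For readers skimming the diff, the nested conditionals above reduce to the selection logic sketched here. It is a restatement only: the parameter names stand in for the WorkingSet members mNoRange, mRandomRange and mAssets[mAt] shown in the hunk, and zero offset/length is what sends reload() down the un-ranged GET path.

    // Hedged restatement of the range selection above as a free function.
    // -w (no_range) wins over -R (random_range); zeros mean "no Range: header".
    #include <cstdlib>

    static void pick_range(bool no_range, bool random_range,
                           int asset_offset, int asset_length,
                           int & offset, int & length)
    {
        offset = 0;
        length = 0;                     // zeros take the un-ranged GET path
        if (no_range)
        {
            return;                     // -w: fetch the whole object
        }
        if (random_range)
        {
            offset = (int)(((unsigned long) std::rand()) % 1000000UL);   // -R
            length = (int)(((unsigned long) std::rand()) % 1000000UL);
        }
        else
        {
            offset = asset_offset;      // default: per-asset offset/length
            length = asset_length;
        }
    }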
diff --git a/indra/llcorehttp/httpcommon.cpp b/indra/llcorehttp/httpcommon.cpp
index c2f15155ac..9bcf7ac5e3 100755
--- a/indra/llcorehttp/httpcommon.cpp
+++ b/indra/llcorehttp/httpcommon.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -219,6 +219,13 @@ std::string HttpStatus::toTerseString() const
// Pass true on statuses that might actually be cleared by a
// retry. Library failures, calling problems, etc. aren't
// going to be fixed by squirting bits all over the Net.
+//
+// HE_INVALID_HTTP_STATUS is special. As of 7.37.0, there are
+// some scenarios where response processing in libcurl appears
+// to go wrong and response data is corrupted. A side-effect
+// of this is that the HTTP status is read as 0 from the library.
+// See libcurl bug report 1420 (https://sourceforge.net/p/curl/bugs/1420/)
+// for details.
bool HttpStatus::isRetryable() const
{
static const HttpStatus cant_connect(HttpStatus::EXT_CURL_EASY, CURLE_COULDNT_CONNECT);
@@ -231,6 +238,7 @@ bool HttpStatus::isRetryable() const
static const HttpStatus post_error(HttpStatus::EXT_CURL_EASY, CURLE_HTTP_POST_ERROR);
static const HttpStatus partial_file(HttpStatus::EXT_CURL_EASY, CURLE_PARTIAL_FILE);
static const HttpStatus inv_cont_range(HttpStatus::LLCORE, HE_INV_CONTENT_RANGE_HDR);
+ static const HttpStatus inv_status(HttpStatus::LLCORE, HE_INVALID_HTTP_STATUS);
return ((isHttpStatus() && mType >= 499 && mType <= 599) || // Include special 499 in retryables
*this == cant_connect || // Connection reset/endpoint problems
@@ -242,7 +250,8 @@ bool HttpStatus::isRetryable() const
*this == op_timedout || // Timer expired
*this == post_error || // Transport problem
*this == partial_file || // Data inconsistency in response
- *this == inv_cont_range); // Short data read disagrees with content-range
+ *this == inv_cont_range || // Short data read disagrees with content-range
+ *this == inv_status); // Inv status can reflect internal state problem in libcurl
}
} // end namespace LLCore
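A minimal sketch of how a caller is expected to consult the expanded predicate follows. HttpStatus and isRetryable() are the types from this file; the header name, retry budget, and helper function are assumptions for illustration and are not part of this commit.

    // Hedged sketch, assuming the matching httpcommon.h header and a
    // caller-side retry budget.
    #include "httpcommon.h"

    static bool should_retry(const LLCore::HttpStatus & status,
                             unsigned int attempts, unsigned int max_retries)
    {
        // isRetryable() now also returns true for HE_INVALID_HTTP_STATUS (an
        // HTTP status of 0 reported by libcurl), the symptom of the pipelining
        // response corruption tracked as libcurl bug 1420.
        return attempts < max_retries && status.isRetryable();
    }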
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml
index 0607579a08..ab15a03229 100755
--- a/indra/newview/app_settings/settings.xml
+++ b/indra/newview/app_settings/settings.xml
@@ -4459,7 +4459,7 @@
<key>HttpPipelining</key>
<map>
<key>Comment</key>
- <string>If true, viewer will pipeline HTTP requests to servers.</string>
+ <string>If true, viewer will attempt to pipeline HTTP requests.</string>
<key>Persist</key>
<integer>1</integer>
<key>Type</key>
@@ -4470,7 +4470,7 @@
<key>HttpRangeRequestsDisable</key>
<map>
<key>Comment</key>
- <string>If true, viewer will not issued range GET requests for meshes and textures. May resolve problems with certain ISPs and networking gear.</string>
+ <string>If true, viewer will not issue GET requests with 'Range:' headers for meshes and textures. May resolve problems with certain ISPs and networking gear.</string>
<key>Persist</key>
<integer>1</integer>
<key>Type</key>
diff --git a/indra/newview/llmeshrepository.cpp b/indra/newview/llmeshrepository.cpp
index 6477389d4c..a6707392fe 100755
--- a/indra/newview/llmeshrepository.cpp
+++ b/indra/newview/llmeshrepository.cpp
@@ -2644,6 +2644,17 @@ void LLMeshRepository::cacheOutgoingMesh(LLMeshUploadData& data, LLSD& header)
}
+// Handle failed or successful requests for mesh assets.
+//
+// Support for 200 responses was added for several reasons. One,
+// a service or cache can ignore range headers and give us a
+// 200 with the full asset should it elect to. We also support
+// a debug flag which disables range requests for those very
+// few users that have some sort of problem with their networking
+// services. But the 200 response handling is suboptimal: rather
+// than cache the whole asset, we just extract the part that would
+// have been sent in a 206 and process that. Inefficient but these
+// are cases far off the norm.
void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResponse * response)
{
mProcessed = true;
@@ -2685,7 +2696,7 @@ void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpRespo
if (par_status == status)
{
- // 216 case
+ // 206 case
response->getRange(&offset, &length, &full_length);
if (! offset && ! length)
{