Diffstat:
 -rwxr-xr-x  autobuild.xml                                    |  12
 -rw-r--r--  indra/cmake/00-COMPILE-LINK-RUN.txt              |  12
 -rwxr-xr-x  indra/llcorehttp/_httpinternal.h                 |   5
 -rwxr-xr-x  indra/llcorehttp/_httplibcurl.cpp                | 297
 -rwxr-xr-x  indra/llcorehttp/_httplibcurl.h                  |  88
 -rwxr-xr-x  indra/llcorehttp/_httpoperation.cpp              |  28
 -rwxr-xr-x  indra/llcorehttp/_httpoprequest.cpp              | 100
 -rwxr-xr-x  indra/llcorehttp/_httppolicy.cpp                 |  94
 -rwxr-xr-x  indra/llcorehttp/_httppolicy.h                   |   8
 -rwxr-xr-x  indra/llcorehttp/_httppolicyclass.cpp            |   8
 -rwxr-xr-x  indra/llcorehttp/_httpservice.cpp                |  30
 -rwxr-xr-x  indra/llcorehttp/examples/http_texture_load.cpp  |  83
 -rwxr-xr-x  indra/llcorehttp/httpcommon.cpp                  |  17
 -rwxr-xr-x  indra/llcorehttp/httprequest.h                   |  35
 -rwxr-xr-x  indra/newview/app_settings/settings.xml          |  22
 -rwxr-xr-x  indra/newview/llappcorehttp.cpp                  | 250
 -rwxr-xr-x  indra/newview/llappcorehttp.h                    |  40
 -rwxr-xr-x  indra/newview/llmeshrepository.cpp               | 462
 -rwxr-xr-x  indra/newview/lltexturefetch.cpp                 | 154
 -rwxr-xr-x  indra/newview/lltexturefetch.h                   |  10
 -rwxr-xr-x  indra/newview/tests/llslurl_test.cpp             |  23
 -rwxr-xr-x  indra/newview/tests/llviewernetwork_test.cpp     |  23
 22 files changed, 1301 insertions(+), 500 deletions(-)
diff --git a/autobuild.xml b/autobuild.xml
index b6586b25b7..52f718716c 100755
--- a/autobuild.xml
+++ b/autobuild.xml
@@ -282,9 +282,9 @@
<key>archive</key>
<map>
<key>hash</key>
- <string>f5a699c93beb1a854d0b51382b5cecc8</string>
+ <string>7e5385eedde808e51e602deca1879b22</string>
<key>url</key>
- <string>http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/3pl_3p-curl-update/rev/290664/arch/Darwin/installer/curl-7.37.0-darwin-20140605.tar.bz2</string>
+ <string>http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/http_3p-curl-experimental/rev/294357/arch/Darwin/installer/curl-7.38.0-darwin-20140918.tar.bz2</string>
</map>
<key>name</key>
<string>darwin</string>
@@ -294,9 +294,9 @@
<key>archive</key>
<map>
<key>hash</key>
- <string>2bc285edffd0e55e0cd6290f39854a89</string>
+ <string>c85406fb9cad19b5602b90c55a169a6a</string>
<key>url</key>
- <string>http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/3pl_3p-curl-update/rev/290664/arch/Linux/installer/curl-7.37.0-linux-20140605.tar.bz2</string>
+ <string>http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/http_3p-curl-experimental/rev/294357/arch/Linux/installer/curl-7.38.0-linux-20140918.tar.bz2</string>
</map>
<key>name</key>
<string>linux</string>
@@ -306,9 +306,9 @@
<key>archive</key>
<map>
<key>hash</key>
- <string>8d3b197d7a114d2b688d2831a0a59757</string>
+ <string>9fe81decac23f3179a2af58d23baf6ce</string>
<key>url</key>
- <string>http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/3pl_3p-curl-update/rev/290664/arch/CYGWIN/installer/curl-7.37.0-windows-20140605.tar.bz2</string>
+ <string>http://automated-builds-secondlife-com.s3.amazonaws.com/hg/repo/http_3p-curl-experimental/rev/294357/arch/CYGWIN/installer/curl-7.38.0-windows-20140918.tar.bz2</string>
</map>
<key>name</key>
<string>windows</string>
diff --git a/indra/cmake/00-COMPILE-LINK-RUN.txt b/indra/cmake/00-COMPILE-LINK-RUN.txt
index d08cc2dc0c..49b899c50d 100644
--- a/indra/cmake/00-COMPILE-LINK-RUN.txt
+++ b/indra/cmake/00-COMPILE-LINK-RUN.txt
@@ -115,12 +115,12 @@ Compilation
----------------------------------------------------------------------------
Notes:
- 1. We’re also building dylibs in a somewhat unusual way. They’re
+ 1. We're also building dylibs in a somewhat unusual way. They're
currently being generated with a link path of
- ‘@executable_path/../Resources/<library>’. If we were to follow
- the recommendations in dyld’s man page, we’d instead reference
- ‘@loader_path/<library>’, use -rpath on the executable link
- (pointing to the ‘Resources’ subdir of the main executable), and
+ '@executable_path/../Resources/<library>'. If we were to follow
+ the recommendations in dyld's man page, we'd instead reference
+ '@loader_path/<library>', use -rpath on the executable link
+ (pointing to the 'Resources' subdir of the main executable), and
be able to avoid some symlinking in the .app tree.
2. Use the -headerpad_max_install_names link option on all .dylibs.
@@ -184,7 +184,7 @@ Linking
second, incompatible version of the library. Switching colladadom
to a static library ended the re-export problem.
- * Preventing re-export is not sufficient. other libraries will
+ * Preventing re-export is not sufficient. Other libraries will
still be shipped as shared and they can still have Singleton and
Fragile Base Class issues. A DLL may be built with a static
archive of a library that has global data. That same static
diff --git a/indra/llcorehttp/_httpinternal.h b/indra/llcorehttp/_httpinternal.h
index f80d7f60f5..a2a60ca056 100755
--- a/indra/llcorehttp/_httpinternal.h
+++ b/indra/llcorehttp/_httpinternal.h
@@ -145,8 +145,11 @@ const int HTTP_CONNECTION_LIMIT_DEFAULT = 8;
const int HTTP_CONNECTION_LIMIT_MIN = 1;
const int HTTP_CONNECTION_LIMIT_MAX = 256;
-// Miscellaneous defaults
+// Pipelining limits
const long HTTP_PIPELINING_DEFAULT = 0L;
+const long HTTP_PIPELINING_MAX = 20L;
+
+// Miscellaneous defaults
const bool HTTP_USE_RETRY_AFTER_DEFAULT = true;
const long HTTP_THROTTLE_RATE_DEFAULT = 0L;
diff --git a/indra/llcorehttp/_httplibcurl.cpp b/indra/llcorehttp/_httplibcurl.cpp
index e56bc84174..81b44ab90b 100755
--- a/indra/llcorehttp/_httplibcurl.cpp
+++ b/indra/llcorehttp/_httplibcurl.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -33,6 +33,17 @@
#include "llhttpconstants.h"
+namespace
+{
+
+// Error testing and reporting for libcurl status codes
+void check_curl_multi_code(CURLMcode code);
+void check_curl_multi_code(CURLMcode code, int curl_setopt_option);
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
namespace LLCore
{
@@ -40,16 +51,18 @@ namespace LLCore
HttpLibcurl::HttpLibcurl(HttpService * service)
: mService(service),
+ mHandleCache(),
mPolicyCount(0),
mMultiHandles(NULL),
- mActiveHandles(NULL)
+ mActiveHandles(NULL),
+ mDirtyPolicy(NULL)
{}
HttpLibcurl::~HttpLibcurl()
{
shutdown();
-
+
mService = NULL;
}
@@ -81,6 +94,9 @@ void HttpLibcurl::shutdown()
delete [] mActiveHandles;
mActiveHandles = NULL;
+
+ delete [] mDirtyPolicy;
+ mDirtyPolicy = NULL;
}
mPolicyCount = 0;
@@ -95,11 +111,18 @@ void HttpLibcurl::start(int policy_count)
mPolicyCount = policy_count;
mMultiHandles = new CURLM * [mPolicyCount];
mActiveHandles = new int [mPolicyCount];
+ mDirtyPolicy = new bool [mPolicyCount];
for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
{
- mMultiHandles[policy_class] = curl_multi_init();
+ if (NULL == (mMultiHandles[policy_class] = curl_multi_init()))
+ {
+ LL_ERRS(LOG_CORE) << "Failed to allocate multi handle in libcurl."
+ << LL_ENDL;
+ }
mActiveHandles[policy_class] = 0;
+ mDirtyPolicy[policy_class] = false;
+ policyUpdated(policy_class);
}
}
@@ -117,8 +140,19 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
// Give libcurl some cycles to do I/O & callbacks
for (int policy_class(0); policy_class < mPolicyCount; ++policy_class)
{
- if (! mActiveHandles[policy_class] || ! mMultiHandles[policy_class])
+ if (! mMultiHandles[policy_class])
+ {
+ // No handle, nothing to do.
+ continue;
+ }
+ if (! mActiveHandles[policy_class])
{
+ // If we've gone quiet and there's a dirty update, apply it,
+ // otherwise we're done.
+ if (mDirtyPolicy[policy_class])
+ {
+ policyUpdated(policy_class);
+ }
continue;
}
@@ -153,9 +187,9 @@ HttpService::ELoopSpeed HttpLibcurl::processTransport()
}
else
{
- LL_WARNS_ONCE("CoreHttp") << "Unexpected message from libcurl. Msg code: "
- << msg->msg
- << LL_ENDL;
+ LL_WARNS_ONCE(LOG_CORE) << "Unexpected message from libcurl. Msg code: "
+ << msg->msg
+ << LL_ENDL;
}
msgs_in_queue = 0;
}
@@ -184,23 +218,28 @@ void HttpLibcurl::addOp(HttpOpRequest * op)
}
// Make the request live
- curl_multi_add_handle(mMultiHandles[op->mReqPolicy], op->mCurlHandle);
+ CURLMcode code;
+ code = curl_multi_add_handle(mMultiHandles[op->mReqPolicy], op->mCurlHandle);
+ if (CURLM_OK != code)
+ {
+ // *TODO: Better cleanup and recovery but not much we can do here.
+ check_curl_multi_code(code);
+ return;
+ }
op->mCurlActive = true;
+ mActiveOps.insert(op);
+ ++mActiveHandles[op->mReqPolicy];
if (op->mTracing > HTTP_TRACE_OFF)
{
HttpPolicy & policy(mService->getPolicy());
- LL_INFOS("CoreHttp") << "TRACE, ToActiveQueue, Handle: "
- << static_cast<HttpHandle>(op)
- << ", Actives: " << mActiveOps.size()
- << ", Readies: " << policy.getReadyCount(op->mReqPolicy)
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, ToActiveQueue, Handle: "
+ << static_cast<HttpHandle>(op)
+ << ", Actives: " << mActiveOps.size()
+ << ", Readies: " << policy.getReadyCount(op->mReqPolicy)
+ << LL_ENDL;
}
-
- // On success, make operation active
- mActiveOps.insert(op);
- ++mActiveHandles[op->mReqPolicy];
}
@@ -241,16 +280,16 @@ void HttpLibcurl::cancelRequest(HttpOpRequest * op)
// Detach from multi and recycle handle
curl_multi_remove_handle(mMultiHandles[op->mReqPolicy], op->mCurlHandle);
- curl_easy_cleanup(op->mCurlHandle);
+ mHandleCache.freeHandle(op->mCurlHandle);
op->mCurlHandle = NULL;
// Tracing
if (op->mTracing > HTTP_TRACE_OFF)
{
- LL_INFOS("CoreHttp") << "TRACE, RequestCanceled, Handle: "
- << static_cast<HttpHandle>(op)
- << ", Status: " << op->mStatus.toTerseString()
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, RequestCanceled, Handle: "
+ << static_cast<HttpHandle>(op)
+ << ", Status: " << op->mStatus.toTerseString()
+ << LL_ENDL;
}
// Cancel op and deliver for notification
@@ -267,18 +306,18 @@ bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode
if (handle != op->mCurlHandle || ! op->mCurlActive)
{
- LL_WARNS("CoreHttp") << "libcurl handle and HttpOpRequest handle in disagreement or inactive request."
- << " Handle: " << static_cast<HttpHandle>(handle)
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "libcurl handle and HttpOpRequest handle in disagreement or inactive request."
+ << " Handle: " << static_cast<HttpHandle>(handle)
+ << LL_ENDL;
return false;
}
active_set_t::iterator it(mActiveOps.find(op));
if (mActiveOps.end() == it)
{
- LL_WARNS("CoreHttp") << "libcurl completion for request not on active list. Continuing."
- << " Handle: " << static_cast<HttpHandle>(handle)
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "libcurl completion for request not on active list. Continuing."
+ << " Handle: " << static_cast<HttpHandle>(handle)
+ << LL_ENDL;
return false;
}
@@ -309,25 +348,25 @@ bool HttpLibcurl::completeRequest(CURLM * multi_handle, CURL * handle, CURLcode
}
else
{
- LL_WARNS("CoreHttp") << "Invalid HTTP response code ("
- << http_status << ") received from server."
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "Invalid HTTP response code ("
+ << http_status << ") received from server."
+ << LL_ENDL;
op->mStatus = HttpStatus(HttpStatus::LLCORE, HE_INVALID_HTTP_STATUS);
}
}
// Detach from multi and recycle handle
curl_multi_remove_handle(multi_handle, handle);
- curl_easy_cleanup(handle);
+ mHandleCache.freeHandle(op->mCurlHandle);
op->mCurlHandle = NULL;
// Tracing
if (op->mTracing > HTTP_TRACE_OFF)
{
- LL_INFOS("CoreHttp") << "TRACE, RequestComplete, Handle: "
- << static_cast<HttpHandle>(op)
- << ", Status: " << op->mStatus.toTerseString()
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, RequestComplete, Handle: "
+ << static_cast<HttpHandle>(op)
+ << ", Status: " << op->mStatus.toTerseString()
+ << LL_ENDL;
}
// Dispatch to next stage
@@ -351,6 +390,164 @@ int HttpLibcurl::getActiveCountInClass(int policy_class) const
return mActiveHandles ? mActiveHandles[policy_class] : 0;
}
+void HttpLibcurl::policyUpdated(int policy_class)
+{
+ if (policy_class < 0 || policy_class >= mPolicyCount || ! mMultiHandles)
+ {
+ return;
+ }
+
+ HttpPolicy & policy(mService->getPolicy());
+
+ if (! mActiveHandles[policy_class])
+ {
+ // Clear to set options. As of libcurl 7.37.0, if a pipelining
+ // multi handle has active requests and you try to set the
+ // multi handle to non-pipelining, the library gets very angry
+ // and goes off the rails corrupting memory. A clue that you're
+ // about to crash is that you'll get a missing server response
+ // error (curl code 9). So, if options are to be set, we let
+ // the multi handle run out of requests, then set options, and
+ // re-enable request processing.
+ //
+ // All of this stall mechanism exists for this reason. If
+ // libcurl becomes more resilient later, it should be possible
+ // to remove all of this. The connection limit settings are fine,
+ // it's just that pipelined-to-non-pipelined transition that
+ // is fatal at the moment.
+
+ HttpPolicyClass & options(policy.getClassOptions(policy_class));
+ CURLM * multi_handle(mMultiHandles[policy_class]);
+ CURLMcode code;
+
+ // Enable policy if stalled
+ policy.stallPolicy(policy_class, false);
+ mDirtyPolicy[policy_class] = false;
+
+ if (options.mPipelining > 1)
+ {
+ // We'll try to do pipelining on this multihandle
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_PIPELINING,
+ 1L);
+ check_curl_multi_code(code, CURLMOPT_PIPELINING);
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_MAX_PIPELINE_LENGTH,
+ long(options.mPipelining));
+ check_curl_multi_code(code, CURLMOPT_MAX_PIPELINE_LENGTH);
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_MAX_HOST_CONNECTIONS,
+ long(options.mPerHostConnectionLimit));
+ check_curl_multi_code(code, CURLMOPT_MAX_HOST_CONNECTIONS);
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_MAX_TOTAL_CONNECTIONS,
+ long(options.mConnectionLimit));
+ check_curl_multi_code(code, CURLMOPT_MAX_TOTAL_CONNECTIONS);
+ }
+ else
+ {
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_PIPELINING,
+ 0L);
+ check_curl_multi_code(code, CURLMOPT_PIPELINING);
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_MAX_HOST_CONNECTIONS,
+ 0L);
+ check_curl_multi_code(code, CURLMOPT_MAX_HOST_CONNECTIONS);
+ code = curl_multi_setopt(multi_handle,
+ CURLMOPT_MAX_TOTAL_CONNECTIONS,
+ long(options.mConnectionLimit));
+ check_curl_multi_code(code, CURLMOPT_MAX_TOTAL_CONNECTIONS);
+ }
+ }
+ else if (! mDirtyPolicy[policy_class])
+ {
+ // Mark policy dirty and request a stall in the policy.
+ // When policy goes idle, we'll re-invoke this method
+ // and perform the change. Don't allow this thread to
+ // sleep while we're waiting for quiescence, we'll just
+ // stop processing.
+ mDirtyPolicy[policy_class] = true;
+ policy.stallPolicy(policy_class, true);
+ }
+}
+
+// ---------------------------------------
+// HttpLibcurl::HandleCache
+// ---------------------------------------
+
+HttpLibcurl::HandleCache::HandleCache()
+ : mHandleTemplate(NULL)
+{
+ mCache.reserve(50);
+}
+
+
+HttpLibcurl::HandleCache::~HandleCache()
+{
+ if (mHandleTemplate)
+ {
+ curl_easy_cleanup(mHandleTemplate);
+ mHandleTemplate = NULL;
+ }
+
+ for (handle_cache_t::iterator it(mCache.begin()); mCache.end() != it; ++it)
+ {
+ curl_easy_cleanup(*it);
+ }
+ mCache.clear();
+}
+
+
+CURL * HttpLibcurl::HandleCache::getHandle()
+{
+ CURL * ret(NULL);
+
+ if (! mCache.empty())
+ {
+ // Fastest path to handle
+ ret = mCache.back();
+ mCache.pop_back();
+ }
+ else if (mHandleTemplate)
+ {
+ // Still fast path
+ ret = curl_easy_duphandle(mHandleTemplate);
+ }
+ else
+ {
+ // When all else fails
+ ret = curl_easy_init();
+ }
+
+ return ret;
+}
+
+
+void HttpLibcurl::HandleCache::freeHandle(CURL * handle)
+{
+ if (! handle)
+ {
+ return;
+ }
+
+ curl_easy_reset(handle);
+ if (! mHandleTemplate)
+ {
+ // Save the first freed handle as a template.
+ mHandleTemplate = handle;
+ }
+ else
+ {
+ // Otherwise add it to the cache
+ if (mCache.size() >= mCache.capacity())
+ {
+ mCache.reserve(mCache.capacity() + 50);
+ }
+ mCache.push_back(handle);
+ }
+}
+
// ---------------------------------------
// Free functions
@@ -376,3 +573,29 @@ struct curl_slist * append_headers_to_slist(const HttpHeaders * headers, struct
} // end namespace LLCore
+
+
+namespace
+{
+
+void check_curl_multi_code(CURLMcode code, int curl_setopt_option)
+{
+ if (CURLM_OK != code)
+ {
+ LL_WARNS(LOG_CORE) << "libcurl multi error detected: " << curl_multi_strerror(code)
+ << ", curl_multi_setopt option: " << curl_setopt_option
+ << LL_ENDL;
+ }
+}
+
+
+void check_curl_multi_code(CURLMcode code)
+{
+ if (CURLM_OK != code)
+ {
+ LL_WARNS(LOG_CORE) << "libcurl multi error detected: " << curl_multi_strerror(code)
+ << LL_ENDL;
+ }
+}
+
+} // end anonymous namespace
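
The policyUpdated() logic above only issues curl_multi_setopt() calls while a policy class has no requests in flight, working around the pipelined-to-non-pipelined transition crash described in the comment. For reference, a minimal standalone sketch of the same multi-handle options; the CURLMOPT_* symbols are the ones used in the hunk, but the numeric limits here are illustrative values, not the viewer's defaults:

    #include <curl/curl.h>

    // Sketch only: configure an idle CURLM handle for pipelining, mirroring
    // the options HttpLibcurl::policyUpdated() sets above. Limits are examples.
    void configure_pipelined_multi(CURLM * multi)
    {
        curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);             // enable HTTP/1.1 pipelining
        curl_multi_setopt(multi, CURLMOPT_MAX_PIPELINE_LENGTH, 5L);    // requests queued per connection
        curl_multi_setopt(multi, CURLMOPT_MAX_HOST_CONNECTIONS, 8L);   // connections per host
        curl_multi_setopt(multi, CURLMOPT_MAX_TOTAL_CONNECTIONS, 16L); // connections overall
    }
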
diff --git a/indra/llcorehttp/_httplibcurl.h b/indra/llcorehttp/_httplibcurl.h
index 67f98dd4f0..ffc24c63a8 100755
--- a/indra/llcorehttp/_httplibcurl.h
+++ b/indra/llcorehttp/_httplibcurl.h
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -116,6 +116,31 @@ public:
/// Threading: called by worker thread.
bool cancel(HttpHandle handle);
+ /// Informs transport that a particular policy class has had
+ /// options changed and so should effect any transport state
+ /// change necessary to effect those changes. Used mainly for
+ /// initialization and dynamic option setting.
+ ///
+ /// Threading: called by worker thread.
+ void policyUpdated(int policy_class);
+
+ /// Allocate a curl handle for caller. May be freed using
+ /// either the freeHandle() method or calling curl_easy_cleanup()
+ /// directly.
+ ///
+ /// @return Libcurl handle (CURL *) or NULL on allocation
+ /// problem. Handle will be in curl_easy_reset()
+ /// condition.
+ ///
+ /// Threading: callable by worker thread.
+ ///
+ /// Deprecation: Expect this to go away after _httpoprequest is
+ /// refactored bringing code into this class.
+ CURL * getHandle()
+ {
+ return mHandleCache.getHandle();
+ }
+
protected:
/// Invoked when libcurl has indicated a request has been processed
/// to completion and we need to move the request to a new state.
@@ -127,13 +152,68 @@ protected:
protected:
typedef std::set<HttpOpRequest *> active_set_t;
+
+ /// Simple request handle cache for libcurl.
+ ///
+ /// Handle creation is somewhat slow and chunky in libcurl and there's
+ /// a pretty good speedup to be had from handle re-use. So, a simple
+ /// vector is kept of 'freed' handles to be reused as needed. When
+ /// that is empty, the first freed handle is kept as a template for
+ /// handle duplication. This is still faster than creation from nothing.
+ /// And when that fails, we init fresh from curl_easy_init().
+ ///
+ /// Handles allocated with getHandle() may be freed with either
+ /// freeHandle() or curl_easy_cleanup(). Choice may be dictated
+ /// by thread constraints.
+ ///
+ /// Threading: Single-threaded. May only be used by a single thread,
+ /// typically the worker thread. If freeing requests' handles in an
+ /// unknown threading context, use curl_easy_cleanup() for safety.
+
+ class HandleCache
+ {
+ public:
+ HandleCache();
+ ~HandleCache();
+
+ private:
+ HandleCache(const HandleCache &); // Not defined
+ void operator=(const HandleCache &); // Not defined
+
+ public:
+ /// Allocate a curl handle for caller. May be freed using
+ /// either the freeHandle() method or calling curl_easy_cleanup()
+ /// directly.
+ ///
+ /// @return Libcurl handle (CURL *) or NULL on allocation
+ /// problem.
+ ///
+ /// Threading: Single-thread (worker) only.
+ CURL * getHandle();
+
+ /// Free a libcurl handle acquired by whatever means. Thread
+ /// safety is left to the caller.
+ ///
+ /// Threading: Single-thread (worker) only.
+ void freeHandle(CURL * handle);
+
+ protected:
+ typedef std::vector<CURL *> handle_cache_t;
+
+ protected:
+ CURL * mHandleTemplate; // Template for duplicating new handles
+ handle_cache_t mCache; // Cache of old handles
+ }; // end class HandleCache
protected:
- HttpService * mService; // Simple reference, not owner
+ HttpService * mService; // Simple reference, not owner
+ HandleCache mHandleCache; // Handle allocator, owner
active_set_t mActiveOps;
int mPolicyCount;
- CURLM ** mMultiHandles; // One handle per policy class
- int * mActiveHandles; // Active count per policy class
+ CURLM ** mMultiHandles; // One handle per policy class
+ int * mActiveHandles; // Active count per policy class
+ bool * mDirtyPolicy; // Dirty policy update waiting for stall (per pc)
+
}; // end class HttpLibcurl
} // end namespace LLCore
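
The public getHandle() accessor above exposes the cache to request-preparation code (see the matching change in _httpoprequest.cpp). A minimal sketch of the intended reuse pattern on the worker thread, assuming 'transport' names the HttpLibcurl instance and using a placeholder URL:

    // Sketch only: acquire a recycled easy handle, use it, release it.
    CURL * handle(transport.getHandle());            // from cache, duphandle, or curl_easy_init()
    if (handle)
    {
        curl_easy_setopt(handle, CURLOPT_URL, "http://example.com/asset");
        // ... attach to the multi handle and run the request to completion ...
        curl_easy_cleanup(handle);                   // or HandleCache::freeHandle() on the worker thread
    }
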
diff --git a/indra/llcorehttp/_httpoperation.cpp b/indra/llcorehttp/_httpoperation.cpp
index 5bb0654652..fefe561f80 100755
--- a/indra/llcorehttp/_httpoperation.cpp
+++ b/indra/llcorehttp/_httpoperation.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -38,6 +38,14 @@
#include "lltimer.h"
+namespace
+{
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
+
namespace LLCore
{
@@ -94,8 +102,8 @@ void HttpOperation::stageFromRequest(HttpService *)
// Default implementation should never be called. This
// indicates an operation making a transition that isn't
// defined.
- LL_ERRS("CoreHttp") << "Default stageFromRequest method may not be called."
- << LL_ENDL;
+ LL_ERRS(LOG_CORE) << "Default stageFromRequest method may not be called."
+ << LL_ENDL;
}
@@ -104,8 +112,8 @@ void HttpOperation::stageFromReady(HttpService *)
// Default implementation should never be called. This
// indicates an operation making a transition that isn't
// defined.
- LL_ERRS("CoreHttp") << "Default stageFromReady method may not be called."
- << LL_ENDL;
+ LL_ERRS(LOG_CORE) << "Default stageFromReady method may not be called."
+ << LL_ENDL;
}
@@ -114,8 +122,8 @@ void HttpOperation::stageFromActive(HttpService *)
// Default implementation should never be called. This
// indicates an operation making a transition that isn't
// defined.
- LL_ERRS("CoreHttp") << "Default stageFromActive method may not be called."
- << LL_ENDL;
+ LL_ERRS(LOG_CORE) << "Default stageFromActive method may not be called."
+ << LL_ENDL;
}
@@ -145,9 +153,9 @@ void HttpOperation::addAsReply()
{
if (mTracing > HTTP_TRACE_OFF)
{
- LL_INFOS("CoreHttp") << "TRACE, ToReplyQueue, Handle: "
- << static_cast<HttpHandle>(this)
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, ToReplyQueue, Handle: "
+ << static_cast<HttpHandle>(this)
+ << LL_ENDL;
}
if (mReplyQueue)
diff --git a/indra/llcorehttp/_httpoprequest.cpp b/indra/llcorehttp/_httpoprequest.cpp
index 43dd069bc6..bbda0b82fd 100755
--- a/indra/llcorehttp/_httpoprequest.cpp
+++ b/indra/llcorehttp/_httpoprequest.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -47,6 +47,19 @@
#include "llhttpconstants.h"
#include "llproxy.h"
+// *DEBUG: "[curl:bugs] #1420" problem and testing.
+//
+// A pipelining problem, https://sourceforge.net/p/curl/bugs/1420/,
+// was a source of Core_9 failures. Code related to this can be
+// identified and tested by:
+// * Looking for '[curl:bugs]' strings in source and following
+// instructions there.
+// * Set 'QAModeHttpTrace' to 2 or 3 in settings.xml and look for
+// 'timed out' events in the log.
+// * Enable the HttpRangeRequestsDisable debug setting which causes
+// full asset fetches. These slow the pipelines down a bit.
+//
+
namespace
{
@@ -94,6 +107,8 @@ void os_strlower(char * str);
void check_curl_easy_code(CURLcode code);
void check_curl_easy_code(CURLcode code, int curl_setopt_option);
+static const char * const LOG_CORE("CoreHttp");
+
} // end anonymous namespace
@@ -155,6 +170,8 @@ HttpOpRequest::~HttpOpRequest()
if (mCurlHandle)
{
+ // Uncertain of thread context so free using
+ // safest method.
curl_easy_cleanup(mCurlHandle);
mCurlHandle = NULL;
}
@@ -376,6 +393,7 @@ void HttpOpRequest::setupCommon(HttpRequest::policy_t policy_id,
// Junk may be left around from a failed request and that
// needs to be cleaned out.
//
+// *TODO: Move this to _httplibcurl where it belongs.
HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
{
CURLcode code;
@@ -409,17 +427,19 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
// *FIXME: better error handling later
HttpStatus status;
- // Get global policy options
- HttpPolicyGlobal & policy(service->getPolicy().getGlobalOptions());
+ // Get global and class policy options
+ HttpPolicyGlobal & gpolicy(service->getPolicy().getGlobalOptions());
+ HttpPolicyClass & cpolicy(service->getPolicy().getClassOptions(mReqPolicy));
- mCurlHandle = LLCurl::createStandardCurlHandle();
+ mCurlHandle = service->getTransport().getHandle();
if (! mCurlHandle)
{
// We're in trouble. We'll continue but it won't go well.
- LL_WARNS("CoreHttp") << "Failed to allocate libcurl easy handle. Continuing."
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "Failed to allocate libcurl easy handle. Continuing."
+ << LL_ENDL;
return HttpStatus(HttpStatus::LLCORE, HE_BAD_ALLOC);
}
+
code = curl_easy_setopt(mCurlHandle, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
check_curl_easy_code(code, CURLOPT_IPRESOLVE);
code = curl_easy_setopt(mCurlHandle, CURLOPT_NOSIGNAL, 1);
@@ -460,30 +480,30 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
code = curl_easy_setopt(mCurlHandle, CURLOPT_SSL_VERIFYHOST, 0);
check_curl_easy_code(code, CURLOPT_SSL_VERIFYHOST);
- if (policy.mUseLLProxy)
+ if (gpolicy.mUseLLProxy)
{
// Use the viewer-based thread-safe API which has a
// fast/safe check for proxy enable. Would like to
// encapsulate this someway...
LLProxy::getInstance()->applyProxySettings(mCurlHandle);
}
- else if (policy.mHttpProxy.size())
+ else if (gpolicy.mHttpProxy.size())
{
// *TODO: This is fine for now but get fuller socks5/
// authentication thing going later....
- code = curl_easy_setopt(mCurlHandle, CURLOPT_PROXY, policy.mHttpProxy.c_str());
+ code = curl_easy_setopt(mCurlHandle, CURLOPT_PROXY, gpolicy.mHttpProxy.c_str());
check_curl_easy_code(code, CURLOPT_PROXY);
code = curl_easy_setopt(mCurlHandle, CURLOPT_PROXYTYPE, CURLPROXY_HTTP);
check_curl_easy_code(code, CURLOPT_PROXYTYPE);
}
- if (policy.mCAPath.size())
+ if (gpolicy.mCAPath.size())
{
- code = curl_easy_setopt(mCurlHandle, CURLOPT_CAPATH, policy.mCAPath.c_str());
+ code = curl_easy_setopt(mCurlHandle, CURLOPT_CAPATH, gpolicy.mCAPath.c_str());
check_curl_easy_code(code, CURLOPT_CAPATH);
}
- if (policy.mCAFile.size())
+ if (gpolicy.mCAFile.size())
{
- code = curl_easy_setopt(mCurlHandle, CURLOPT_CAINFO, policy.mCAFile.c_str());
+ code = curl_easy_setopt(mCurlHandle, CURLOPT_CAINFO, gpolicy.mCAFile.c_str());
check_curl_easy_code(code, CURLOPT_CAINFO);
}
@@ -538,9 +558,9 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
break;
default:
- LL_ERRS("CoreHttp") << "Invalid HTTP method in request: "
- << int(mReqMethod) << ". Can't recover."
- << LL_ENDL;
+ LL_ERRS(LOG_CORE) << "Invalid HTTP method in request: "
+ << int(mReqMethod) << ". Can't recover."
+ << LL_ENDL;
break;
}
@@ -592,6 +612,22 @@ HttpStatus HttpOpRequest::prepareRequest(HttpService * service)
{
xfer_timeout = timeout;
}
+ if (cpolicy.mPipelining > 1L)
+ {
+ // Pipelining affects both connection and transfer timeout values.
+ // Requests that are added to a pipeling immediately have completed
+ // their connection so the connection delay tends to be less than
+ // the non-pipelined value. Transfers are the opposite. Transfer
+ // timeout starts once the connection is established and completion
+ // can be delayed due to the pipelined requests ahead. So, it's
+ // a handwave but bump the transfer timeout up by the pipelining
+ // depth to give some room.
+ //
+ // *TODO: Find a better scheme than timeouts to guarantee liveness.
+ xfer_timeout *= cpolicy.mPipelining;
+ }
+ // *DEBUG: Useful for timeout handling and "[curl:bugs] #1420" tests
+ // xfer_timeout = 3L;
code = curl_easy_setopt(mCurlHandle, CURLOPT_TIMEOUT, xfer_timeout);
check_curl_easy_code(code, CURLOPT_TIMEOUT);
code = curl_easy_setopt(mCurlHandle, CURLOPT_CONNECTTIMEOUT, timeout);
@@ -652,8 +688,8 @@ size_t HttpOpRequest::readCallback(void * data, size_t size, size_t nmemb, void
{
// Warn but continue if the read position moves beyond end-of-body
// for some reason.
- LL_WARNS("CoreHttp") << "Request body position beyond body size. Truncating request body."
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "Request body position beyond body size. Truncating request body."
+ << LL_ENDL;
}
return 0;
}
@@ -790,10 +826,10 @@ size_t HttpOpRequest::headerCallback(void * data, size_t size, size_t nmemb, voi
else
{
// Ignore the unparsable.
- LL_INFOS_ONCE("CoreHttp") << "Problem parsing odd Content-Range header: '"
- << std::string(hdr_data, wanted_hdr_size)
- << "'. Ignoring."
- << LL_ENDL;
+ LL_INFOS_ONCE(LOG_CORE) << "Problem parsing odd Content-Range header: '"
+ << std::string(hdr_data, wanted_hdr_size)
+ << "'. Ignoring."
+ << LL_ENDL;
}
}
@@ -895,11 +931,11 @@ int HttpOpRequest::debugCallback(CURL * handle, curl_infotype info, char * buffe
if (logit)
{
- LL_INFOS("CoreHttp") << "TRACE, LibcurlDebug, Handle: "
- << static_cast<HttpHandle>(op)
- << ", Type: " << tag
- << ", Data: " << safe_line
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, LibcurlDebug, Handle: "
+ << static_cast<HttpHandle>(op)
+ << ", Type: " << tag
+ << ", Data: " << safe_line
+ << LL_ENDL;
}
return 0;
@@ -1094,9 +1130,9 @@ void check_curl_easy_code(CURLcode code, int curl_setopt_option)
//
// linux appears to throw a curl error once per session for a bad initialization
// at a pretty random time (when enabling cookies).
- LL_WARNS("CoreHttp") << "libcurl error detected: " << curl_easy_strerror(code)
- << ", curl_easy_setopt option: " << curl_setopt_option
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "libcurl error detected: " << curl_easy_strerror(code)
+ << ", curl_easy_setopt option: " << curl_setopt_option
+ << LL_ENDL;
}
}
@@ -1109,8 +1145,8 @@ void check_curl_easy_code(CURLcode code)
//
// linux appears to throw a curl error once per session for a bad initialization
// at a pretty random time (when enabling cookies).
- LL_WARNS("CoreHttp") << "libcurl error detected: " << curl_easy_strerror(code)
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "libcurl error detected: " << curl_easy_strerror(code)
+ << LL_ENDL;
}
}
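
One hunk above scales the transfer timeout by the pipelining depth, since a request queued behind others on the same connection can legitimately take several transfer windows to finish. A small sketch of the effect with example numbers: a 30-second timeout and a depth of 5 give an effective 150-second CURLOPT_TIMEOUT, while CURLOPT_CONNECTTIMEOUT keeps the unscaled value:

    // Sketch only: mirrors the timeout adjustment in prepareRequest(); values are examples.
    void apply_timeouts(CURL * handle, long timeout /* seconds */, long pipelining_depth)
    {
        long xfer_timeout(timeout);
        if (pipelining_depth > 1L)
        {
            xfer_timeout *= pipelining_depth;        // e.g. 30s * 5 = 150s
        }
        curl_easy_setopt(handle, CURLOPT_TIMEOUT, xfer_timeout);
        curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, timeout);
    }
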
diff --git a/indra/llcorehttp/_httppolicy.cpp b/indra/llcorehttp/_httppolicy.cpp
index fd5a93e192..09b9206f63 100755
--- a/indra/llcorehttp/_httppolicy.cpp
+++ b/indra/llcorehttp/_httppolicy.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -35,6 +35,13 @@
#include "lltimer.h"
+namespace
+{
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
namespace LLCore
{
@@ -51,7 +58,8 @@ public:
ClassState()
: mThrottleEnd(0),
mThrottleLeft(0L),
- mRequestCount(0L)
+ mRequestCount(0L),
+ mStallStaging(false)
{}
HttpReadyQueue mReadyQueue;
@@ -61,6 +69,7 @@ public:
HttpTime mThrottleEnd;
long mThrottleLeft;
long mRequestCount;
+ bool mStallStaging;
};
@@ -128,7 +137,8 @@ void HttpPolicy::shutdown()
void HttpPolicy::start()
-{}
+{
+}
void HttpPolicy::addOp(HttpOpRequest * op)
@@ -170,19 +180,19 @@ void HttpPolicy::retryOp(HttpOpRequest * op)
{
++op->mPolicy503Retries;
}
- LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
- << " retry " << op->mPolicyRetries
- << " scheduled in " << (delta / HttpTime(1000))
- << " mS (" << (external_delta ? "external" : "internal")
- << "). Status: " << op->mStatus.toTerseString()
- << LL_ENDL;
+ LL_DEBUGS(LOG_CORE) << "HTTP request " << static_cast<HttpHandle>(op)
+ << " retry " << op->mPolicyRetries
+ << " scheduled in " << (delta / HttpTime(1000))
+ << " mS (" << (external_delta ? "external" : "internal")
+ << "). Status: " << op->mStatus.toTerseString()
+ << LL_ENDL;
if (op->mTracing > HTTP_TRACE_OFF)
{
- LL_INFOS("CoreHttp") << "TRACE, ToRetryQueue, Handle: "
- << static_cast<HttpHandle>(op)
- << ", Delta: " << (delta / HttpTime(1000))
- << ", Retries: " << op->mPolicyRetries
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, ToRetryQueue, Handle: "
+ << static_cast<HttpHandle>(op)
+ << ", Delta: " << (delta / HttpTime(1000))
+ << ", Retries: " << op->mPolicyRetries
+ << LL_ENDL;
}
mClasses[policy_class]->mRetryQueue.push(op);
}
@@ -218,6 +228,15 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
HttpRetryQueue & retryq(state.mRetryQueue);
HttpReadyQueue & readyq(state.mReadyQueue);
+ if (state.mStallStaging)
+ {
+ // Stalling but don't sleep. Need to complete operations
+ // and get back to servicing queues. Do this test before
+ // the retryq/readyq test or you'll get stalls until you
+ // click a setting or an asset request comes in.
+ result = HttpService::NORMAL;
+ continue;
+ }
if (retryq.empty() && readyq.empty())
{
continue;
@@ -234,7 +253,11 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
}
int active(transport.getActiveCountInClass(policy_class));
- int needed(state.mOptions.mConnectionLimit - active); // Expect negatives here
+ int active_limit(state.mOptions.mPipelining > 1L
+ ? (state.mOptions.mPerHostConnectionLimit
+ * state.mOptions.mPipelining)
+ : state.mOptions.mConnectionLimit);
+ int needed(active_limit - active); // Expect negatives here
if (needed > 0)
{
@@ -257,9 +280,9 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
if (now >= state.mThrottleEnd)
{
// Throttle expired, move to next window
- LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
- << " requests to go and " << state.mRequestCount
- << " requests issued." << LL_ENDL;
+ LL_DEBUGS(LOG_CORE) << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
state.mThrottleLeft = state.mOptions.mThrottleRate;
state.mThrottleEnd = now + HttpTime(1000000);
}
@@ -286,9 +309,9 @@ HttpService::ELoopSpeed HttpPolicy::processReadyQueue()
if (now >= state.mThrottleEnd)
{
// Throttle expired, move to next window
- LL_DEBUGS("CoreHttp") << "Throttle expired with " << state.mThrottleLeft
- << " requests to go and " << state.mRequestCount
- << " requests issued." << LL_ENDL;
+ LL_DEBUGS(LOG_CORE) << "Throttle expired with " << state.mThrottleLeft
+ << " requests to go and " << state.mRequestCount
+ << " requests issued." << LL_ENDL;
state.mThrottleLeft = state.mOptions.mThrottleRate;
state.mThrottleEnd = now + HttpTime(1000000);
}
@@ -403,17 +426,17 @@ bool HttpPolicy::stageAfterCompletion(HttpOpRequest * op)
// This op is done, finalize it delivering it to the reply queue...
if (! op->mStatus)
{
- LL_WARNS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
- << " failed after " << op->mPolicyRetries
- << " retries. Reason: " << op->mStatus.toString()
- << " (" << op->mStatus.toTerseString() << ")"
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "HTTP request " << static_cast<HttpHandle>(op)
+ << " failed after " << op->mPolicyRetries
+ << " retries. Reason: " << op->mStatus.toString()
+ << " (" << op->mStatus.toTerseString() << ")"
+ << LL_ENDL;
}
else if (op->mPolicyRetries)
{
- LL_DEBUGS("CoreHttp") << "HTTP request " << static_cast<HttpHandle>(op)
- << " succeeded on retry " << op->mPolicyRetries << "."
- << LL_ENDL;
+ LL_DEBUGS(LOG_CORE) << "HTTP request " << static_cast<HttpHandle>(op)
+ << " succeeded on retry " << op->mPolicyRetries << "."
+ << LL_ENDL;
}
op->stageFromActive(mService);
@@ -441,4 +464,17 @@ int HttpPolicy::getReadyCount(HttpRequest::policy_t policy_class) const
}
+bool HttpPolicy::stallPolicy(HttpRequest::policy_t policy_class, bool stall)
+{
+ bool ret(false);
+
+ if (policy_class < mClasses.size())
+ {
+ ret = mClasses[policy_class]->mStallStaging;
+ mClasses[policy_class]->mStallStaging = stall;
+ }
+ return ret;
+}
+
+
} // end namespace LLCore
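
The processReadyQueue() change above raises the in-flight ceiling for pipelined classes from the plain connection limit to per-host connections times pipeline depth. A short sketch of the arithmetic with illustrative values: 8 per-host connections and a depth of 5 allow up to 40 concurrent requests, while a non-pipelined class stays at its connection limit:

    // Sketch only: the active-request ceiling used when staging ready requests.
    long per_host_limit(8L), pipelining(5L), connection_limit(12L);
    int active_limit(pipelining > 1L
                     ? int(per_host_limit * pipelining)   // pipelined: 8 * 5 = 40
                     : int(connection_limit));            // non-pipelined: 12
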
diff --git a/indra/llcorehttp/_httppolicy.h b/indra/llcorehttp/_httppolicy.h
index bf1aa74267..11cd89bbd1 100755
--- a/indra/llcorehttp/_httppolicy.h
+++ b/indra/llcorehttp/_httppolicy.h
@@ -158,6 +158,14 @@ public:
/// Threading: called by worker thread
int getReadyCount(HttpRequest::policy_t policy_class) const;
+ /// Stall (or unstall) a policy class preventing requests from
+ /// transitioning to an active state. Used to allow an HTTP
+ /// request policy to empty prior to changing settings or state
+ /// that isn't tolerant of changes when work is outstanding.
+ ///
+ /// Threading: called by worker thread
+ bool stallPolicy(HttpRequest::policy_t policy_class, bool stall);
+
protected:
struct ClassState;
typedef std::vector<ClassState *> class_list_t;
diff --git a/indra/llcorehttp/_httppolicyclass.cpp b/indra/llcorehttp/_httppolicyclass.cpp
index f34a8e9f1e..2c0f650155 100755
--- a/indra/llcorehttp/_httppolicyclass.cpp
+++ b/indra/llcorehttp/_httppolicyclass.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -78,8 +78,8 @@ HttpStatus HttpPolicyClass::set(HttpRequest::EPolicyOption opt, long value)
mPerHostConnectionLimit = llclamp(value, long(HTTP_CONNECTION_LIMIT_MIN), mConnectionLimit);
break;
- case HttpRequest::PO_ENABLE_PIPELINING:
- mPipelining = llclamp(value, 0L, 1L);
+ case HttpRequest::PO_PIPELINING_DEPTH:
+ mPipelining = llclamp(value, 0L, HTTP_PIPELINING_MAX);
break;
case HttpRequest::PO_THROTTLE_RATE:
@@ -106,7 +106,7 @@ HttpStatus HttpPolicyClass::get(HttpRequest::EPolicyOption opt, long * value) co
*value = mPerHostConnectionLimit;
break;
- case HttpRequest::PO_ENABLE_PIPELINING:
+ case HttpRequest::PO_PIPELINING_DEPTH:
*value = mPipelining;
break;
diff --git a/indra/llcorehttp/_httpservice.cpp b/indra/llcorehttp/_httpservice.cpp
index c94249dc2d..c673e1be1d 100755
--- a/indra/llcorehttp/_httpservice.cpp
+++ b/indra/llcorehttp/_httpservice.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -40,6 +40,14 @@
#include "llthread.h"
+namespace
+{
+
+static const char * const LOG_CORE("CoreHttp");
+
+} // end anonymous namespace
+
+
namespace LLCore
{
@@ -87,8 +95,8 @@ HttpService::~HttpService()
// Failed to join, expect problems ahead so do a hard termination.
mThread->cancel();
- LL_WARNS("CoreHttp") << "Destroying HttpService with running thread. Expect problems."
- << LL_ENDL;
+ LL_WARNS(LOG_CORE) << "Destroying HttpService with running thread. Expect problems."
+ << LL_ENDL;
}
}
}
@@ -328,9 +336,9 @@ HttpService::ELoopSpeed HttpService::processRequestQueue(ELoopSpeed loop)
if (op->mTracing > HTTP_TRACE_OFF)
{
- LL_INFOS("CoreHttp") << "TRACE, FromRequestQueue, Handle: "
- << static_cast<HttpHandle>(op)
- << LL_ENDL;
+ LL_INFOS(LOG_CORE) << "TRACE, FromRequestQueue, Handle: "
+ << static_cast<HttpHandle>(op)
+ << LL_ENDL;
}
// Stage
@@ -437,9 +445,13 @@ HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
HttpPolicyClass & opts(mPolicy->getClassOptions(pclass));
status = opts.set(opt, value);
- if (status && ret_value)
+ if (status)
{
- status = opts.get(opt, ret_value);
+ mTransport->policyUpdated(pclass);
+ if (ret_value)
+ {
+ status = opts.get(opt, ret_value);
+ }
}
}
@@ -463,7 +475,7 @@ HttpStatus HttpService::setPolicyOption(HttpRequest::EPolicyOption opt, HttpRequ
return status;
}
- // Only string values are global at this time
+ // String values are always global (at this time).
if (pclass == HttpRequest::GLOBAL_POLICY_ID)
{
HttpPolicyGlobal & opts(mPolicy->getGlobalOptions());
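
With this change a successful per-class option update is also pushed to the transport via policyUpdated(). From application code the dynamic path goes through the queued request API rather than the static setters; a minimal sketch, with an illustrative policy id and depth and error handling trimmed:

    // Sketch only: a runtime option change through the queued request API;
    // the worker thread services it and the transport's policyUpdated() runs.
    LLCore::HttpRequest::policy_t policy_id(LLCore::HttpRequest::DEFAULT_POLICY_ID);
    LLCore::HttpRequest * request(new LLCore::HttpRequest());
    LLCore::HttpHandle handle(request->setPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
                                                       policy_id,
                                                       5L,        // new pipeline depth
                                                       NULL));    // no completion handler
    if (LLCORE_HTTP_HANDLE_INVALID == handle)
    {
        LL_WARNS("CoreHttp") << "Failed to queue policy option change: "
                             << request->getStatus().toString() << LL_ENDL;
    }
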
diff --git a/indra/llcorehttp/examples/http_texture_load.cpp b/indra/llcorehttp/examples/http_texture_load.cpp
index 73c49687d7..b76c874557 100755
--- a/indra/llcorehttp/examples/http_texture_load.cpp
+++ b/indra/llcorehttp/examples/http_texture_load.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -59,11 +59,13 @@ void usage(std::ostream & out);
// Default command line settings
static int concurrency_limit(40);
static int highwater(100);
+static int pipeline_depth(0);
+static int tracing(0);
static char url_format[1024] = "http://example.com/some/path?texture_id=%s.texture";
#if defined(WIN32)
-#define strncpy(_a, _b, _c) strncpy_s(_a, _b, _c)
+#define strncpy(_a, _b, _c) strncpy_s(_a, _b, _c)
#define strtok_r(_a, _b, _c) strtok_s(_a, _b, _c)
int getopt(int argc, char * const argv[], const char *optstring);
@@ -100,6 +102,7 @@ public:
public:
bool mVerbose;
bool mRandomRange;
+ bool mNoRange;
int mRequestLowWater;
int mRequestHighWater;
handle_set_t mHandles;
@@ -160,10 +163,11 @@ int main(int argc, char** argv)
{
LLCore::HttpStatus status;
bool do_random(false);
+ bool do_whole(false);
bool do_verbose(false);
int option(-1);
- while (-1 != (option = getopt(argc, argv, "u:c:h?RvH:")))
+ while (-1 != (option = getopt(argc, argv, "u:c:h?RwvH:p:t:")))
{
switch (option)
{
@@ -193,7 +197,7 @@ int main(int argc, char** argv)
char * end;
value = strtoul(optarg, &end, 10);
- if (value < 1 || value > 100 || *end != '\0')
+ if (value < 1 || value > 200 || *end != '\0')
{
usage(std::cerr);
return 1;
@@ -202,8 +206,44 @@ int main(int argc, char** argv)
}
break;
+ case 'p':
+ {
+ unsigned long value;
+ char * end;
+
+ value = strtoul(optarg, &end, 10);
+ if (value < 0 || value > 100 || *end != '\0')
+ {
+ usage(std::cerr);
+ return 1;
+ }
+ pipeline_depth = value;
+ }
+ break;
+
+ case 't':

+ {
+ unsigned long value;
+ char * end;
+
+ value = strtoul(optarg, &end, 10);
+ if (value < 0 || value > 3 || *end != '\0')
+ {
+ usage(std::cerr);
+ return 1;
+ }
+ tracing = value;
+ }
+ break;
+
case 'R':
do_random = true;
+ do_whole = false;
+ break;
+
+ case 'w':
+ do_whole = true;
+ do_random = false;
break;
case 'v':
@@ -240,6 +280,24 @@ int main(int argc, char** argv)
LLCore::HttpRequest::DEFAULT_POLICY_ID,
concurrency_limit,
NULL);
+ LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
+ LLCore::HttpRequest::DEFAULT_POLICY_ID,
+ concurrency_limit,
+ NULL);
+ if (pipeline_depth)
+ {
+ LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
+ LLCore::HttpRequest::DEFAULT_POLICY_ID,
+ pipeline_depth,
+ NULL);
+ }
+ if (tracing)
+ {
+ LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_TRACE,
+ LLCore::HttpRequest::DEFAULT_POLICY_ID,
+ tracing,
+ NULL);
+ }
LLCore::HttpRequest::startThread();
// Get service point
@@ -257,6 +315,7 @@ int main(int argc, char** argv)
ws.mUrl = url_format;
ws.loadAssetUuids(uuids);
ws.mRandomRange = do_random;
+ ws.mNoRange = do_whole;
ws.mVerbose = do_verbose;
ws.mRequestHighWater = highwater;
ws.mRequestLowWater = ws.mRequestHighWater / 2;
@@ -331,10 +390,15 @@ void usage(std::ostream & out)
" -u <url_format> printf-style format string for URL generation\n"
" Default: " << url_format << "\n"
" -R Issue GETs with random Range: headers\n"
+ " -w Issue GETs without Range: headers to get whole object\n"
" -c <limit> Maximum connection concurrency. Range: [1..100]\n"
" Default: " << concurrency_limit << "\n"
" -H <limit> HTTP request highwater (requests fed to llcorehttp).\n"
- " Range: [1..100] Default: " << highwater << "\n"
+ " Range: [1..200] Default: " << highwater << "\n"
+ " -p <depth> If <depth> is positive, enables and sets pipelineing\n"
+ " depth on HTTP requests. Default: " << pipeline_depth << "\n"
+ " -t <level> If <level> is positive ([1..3]), enables and sets HTTP\n"
+ " tracing on HTTP requests. Default: " << tracing << "\n"
" -v Verbose mode. Issue some chatter while running\n"
" -h print this help\n"
"\n"
@@ -346,6 +410,7 @@ WorkingSet::WorkingSet()
: LLCore::HttpHandler(),
mVerbose(false),
mRandomRange(false),
+ mNoRange(false),
mRemaining(200),
mLimit(200),
mAt(0),
@@ -395,8 +460,12 @@ bool WorkingSet::reload(LLCore::HttpRequest * hr, LLCore::HttpOptions * opt)
#else
snprintf(buffer, sizeof(buffer), mUrl.c_str(), mAssets[mAt].mUuid.c_str());
#endif
- int offset(mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mOffset);
- int length(mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mLength);
+ int offset(mNoRange
+ ? 0
+ : (mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mOffset));
+ int length(mNoRange
+ ? 0
+ : (mRandomRange ? ((unsigned long) rand()) % 1000000UL : mAssets[mAt].mLength));
LLCore::HttpHandle handle;
if (offset || length)
diff --git a/indra/llcorehttp/httpcommon.cpp b/indra/llcorehttp/httpcommon.cpp
index c2f15155ac..8714915fa2 100755
--- a/indra/llcorehttp/httpcommon.cpp
+++ b/indra/llcorehttp/httpcommon.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -219,6 +219,13 @@ std::string HttpStatus::toTerseString() const
// Pass true on statuses that might actually be cleared by a
// retry. Library failures, calling problems, etc. aren't
// going to be fixed by squirting bits all over the Net.
+//
+// HE_INVALID_HTTP_STATUS is special. As of 7.37.0, there are
+// some scenarios where response processing in libcurl appear
+// to go wrong and response data is corrupted. A side-effect
+// of this is that the HTTP status is read as 0 from the library.
+// See libcurl bug report 1420 (https://sourceforge.net/p/curl/bugs/1420/)
+// for details.
bool HttpStatus::isRetryable() const
{
static const HttpStatus cant_connect(HttpStatus::EXT_CURL_EASY, CURLE_COULDNT_CONNECT);
@@ -231,6 +238,11 @@ bool HttpStatus::isRetryable() const
static const HttpStatus post_error(HttpStatus::EXT_CURL_EASY, CURLE_HTTP_POST_ERROR);
static const HttpStatus partial_file(HttpStatus::EXT_CURL_EASY, CURLE_PARTIAL_FILE);
static const HttpStatus inv_cont_range(HttpStatus::LLCORE, HE_INV_CONTENT_RANGE_HDR);
+ static const HttpStatus inv_status(HttpStatus::LLCORE, HE_INVALID_HTTP_STATUS);
+
+ // *DEBUG: For "[curl:bugs] #1420" tests.
+ // Disable the '*this == inv_status' test and look for 'Core_9'
+ // failures in log files.
return ((isHttpStatus() && mType >= 499 && mType <= 599) || // Include special 499 in retryables
*this == cant_connect || // Connection reset/endpoint problems
@@ -242,7 +254,8 @@ bool HttpStatus::isRetryable() const
*this == op_timedout || // Timer expired
*this == post_error || // Transport problem
*this == partial_file || // Data inconsistency in response
- *this == inv_cont_range); // Short data read disagrees with content-range
+ *this == inv_cont_range || // Short data read disagrees with content-range
+ *this == inv_status); // Inv status can reflect internal state problem in libcurl
}
} // end namespace LLCore
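
The retryable set now includes HE_INVALID_HTTP_STATUS because the libcurl bug referenced above can surface as a zero HTTP status on an otherwise transient failure. A caller-side sketch of how the flag is typically consulted from a response handler (the handler class and requeue step are illustrative):

    // Sketch only: a completion handler deciding whether to requeue a request.
    void MyHandler::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResponse * response)
    {
        LLCore::HttpStatus status(response->getStatus());
        if (! status && status.isRetryable())
        {
            // Transient by policy (includes HE_INVALID_HTTP_STATUS now):
            // requeue the request rather than reporting a hard failure.
        }
    }
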
diff --git a/indra/llcorehttp/httprequest.h b/indra/llcorehttp/httprequest.h
index 651654844a..7f23723b0b 100755
--- a/indra/llcorehttp/httprequest.h
+++ b/indra/llcorehttp/httprequest.h
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -183,11 +183,38 @@ public:
/// Global only
PO_TRACE,
- /// Suitable requests are allowed to pipeline on their
- /// connections when they ask for it.
+ /// If greater than 1, suitable requests are allowed to
+ /// pipeline on their connections when they ask for it.
+ /// Value gives the maximum number of outstanding requests
+ /// on a connection.
+ ///
+ /// There is some interaction between PO_CONNECTION_LIMIT,
+ /// PO_PER_HOST_CONNECTION_LIMIT, and PO_PIPELINING_DEPTH.
+ /// When PIPELINING_DEPTH is 0 or 1 (no pipelining), this
+ /// library manages connection lifecycle and honors the
+ /// PO_CONNECTION_LIMIT setting as the maximum in-flight
+ /// request limit. Libcurl itself may be caching additional
+ /// connections under its connection cache policy.
+ ///
+ /// When PIPELINING_DEPTH is 2 or more, libcurl performs
+ /// connection management and both PO_CONNECTION_LIMIT and
+ /// PO_PER_HOST_CONNECTION_LIMIT should be set and non-zero.
+ /// In this case (as of libcurl 7.37.0), libcurl will
+ /// open new connections in preference to pipelining, up
+ /// to the above limits at which time pipelining begins.
+ /// And as usual, an additional cache of open but inactive
+ /// connections may still be maintained within libcurl.
+ /// For SL, a good rule-of-thumb is to set
+ /// PO_PER_HOST_CONNECTION_LIMIT to the user-visible
+ /// concurrency value and PO_CONNECTION_LIMIT to twice
+ /// that for baked texture loads and region crossings where
+ /// additional connection load will be tolerated. If
+ /// either limit is 0, libcurl will prefer pipelining
+ /// over connection creation, which is still interesting,
+ /// but won't be pursued at this time.
///
/// Per-class only
- PO_ENABLE_PIPELINING,
+ PO_PIPELINING_DEPTH,
/// Controls whether client-side throttling should be
/// performed on this policy class. Positive values
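
The rule-of-thumb at the end of the PO_PIPELINING_DEPTH comment can be expressed with the static setter at init time; the policy id and numbers below are illustrative only (per-host limit at the user-visible concurrency, total limit at twice that, depth 5):

    // Sketch only: init-time settings following the documented rule-of-thumb.
    LLCore::HttpRequest::policy_t policy(LLCore::HttpRequest::DEFAULT_POLICY_ID);
    long user_concurrency(8L);
    LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
                                               policy, user_concurrency, NULL);
    LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
                                               policy, 2L * user_concurrency, NULL);
    LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
                                               policy, 5L, NULL);
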
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml
index fbb1637090..96c43eb7a2 100755
--- a/indra/newview/app_settings/settings.xml
+++ b/indra/newview/app_settings/settings.xml
@@ -4456,6 +4456,28 @@
<key>Value</key>
<string />
</map>
+ <key>HttpPipelining</key>
+ <map>
+ <key>Comment</key>
+ <string>If true, viewer will attempt to pipeline HTTP requests.</string>
+ <key>Persist</key>
+ <integer>1</integer>
+ <key>Type</key>
+ <string>Boolean</string>
+ <key>Value</key>
+ <integer>1</integer>
+ </map>
+ <key>HttpRangeRequestsDisable</key>
+ <map>
+ <key>Comment</key>
+ <string>If true, viewer will not issue GET requests with 'Range:' headers for meshes and textures. May resolve problems with certain ISPs and networking gear.</string>
+ <key>Persist</key>
+ <integer>1</integer>
+ <key>Type</key>
+ <string>Boolean</string>
+ <key>Value</key>
+ <integer>0</integer>
+ </map>
<key>IMShowTimestamps</key>
<map>
<key>Comment</key>
diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp
index 70dcffefb2..e9274c5c1e 100755
--- a/indra/newview/llappcorehttp.cpp
+++ b/indra/newview/llappcorehttp.cpp
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -40,49 +40,52 @@
// be open at a time.
const F64 LLAppCoreHttp::MAX_THREAD_WAIT_TIME(10.0);
+const long LLAppCoreHttp::PIPELINING_DEPTH(5L);
+
+// Default and dynamic values for classes
static const struct
{
- LLAppCoreHttp::EAppPolicy mPolicy;
U32 mDefault;
U32 mMin;
U32 mMax;
U32 mRate;
+ bool mPipelined;
std::string mKey;
const char * mUsage;
-} init_data[] = // Default and dynamic values for classes
+} init_data[LLAppCoreHttp::AP_COUNT] =
{
- {
- LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0,
+ { // AP_DEFAULT
+ 8, 8, 8, 0, false,
"",
"other"
},
- {
- LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0,
+ { // AP_TEXTURE
+ 8, 1, 12, 0, true,
"TextureFetchConcurrency",
"texture fetch"
},
- {
- LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100,
+ { // AP_MESH1
+ 32, 1, 128, 0, false,
"MeshMaxConcurrentRequests",
"mesh fetch"
},
- {
- LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100,
+ { // AP_MESH2
+ 8, 1, 32, 0, true,
"Mesh2MaxConcurrentRequests",
"mesh2 fetch"
},
- {
- LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0,
+ { // AP_LARGE_MESH
+ 2, 1, 8, 0, false,
"",
"large mesh fetch"
},
- {
- LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0,
+ { // AP_UPLOADS
+ 2, 1, 8, 0, false,
"",
"asset upload"
},
- {
- LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0,
+ { // AP_LONG_POLL
+ 32, 32, 32, 0, false,
"",
"long poll"
}
@@ -91,18 +94,20 @@ static const struct
static void setting_changed();
+LLAppCoreHttp::HttpClass::HttpClass()
+ : mPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID),
+ mConnLimit(0U),
+ mPipelined(false)
+{}
+
+
LLAppCoreHttp::LLAppCoreHttp()
: mRequest(NULL),
mStopHandle(LLCORE_HTTP_HANDLE_INVALID),
mStopRequested(0.0),
- mStopped(false)
-{
- for (int i(0); i < LL_ARRAY_SIZE(mPolicies); ++i)
- {
- mPolicies[i] = LLCore::HttpRequest::DEFAULT_POLICY_ID;
- mSettings[i] = 0U;
- }
-}
+ mStopped(false),
+ mPipelined(true)
+{}
LLAppCoreHttp::~LLAppCoreHttp()
@@ -157,27 +162,28 @@ void LLAppCoreHttp::init()
}
// Setup default policy and constrain if directed to
- mPolicies[AP_DEFAULT] = LLCore::HttpRequest::DEFAULT_POLICY_ID;
+ mHttpClasses[AP_DEFAULT].mPolicy = LLCore::HttpRequest::DEFAULT_POLICY_ID;
// Setup additional policies based on table and some special rules
+ llassert(LL_ARRAY_SIZE(init_data) == AP_COUNT);
for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{
- const EAppPolicy policy(init_data[i].mPolicy);
+ const EAppPolicy app_policy(static_cast<EAppPolicy>(i));
- if (AP_DEFAULT == policy)
+ if (AP_DEFAULT == app_policy)
{
// Pre-created
continue;
}
- mPolicies[policy] = LLCore::HttpRequest::createPolicyClass();
- if (! mPolicies[policy])
+ mHttpClasses[app_policy].mPolicy = LLCore::HttpRequest::createPolicyClass();
+ if (! mHttpClasses[app_policy].mPolicy)
{
// Use default policy (but don't accidentally modify default)
LL_WARNS("Init") << "Failed to create HTTP policy class for " << init_data[i].mUsage
<< ". Using default policy."
<< LL_ENDL;
- mPolicies[policy] = mPolicies[AP_DEFAULT];
+ mHttpClasses[app_policy].mPolicy = mHttpClasses[AP_DEFAULT].mPolicy;
continue;
}
}
@@ -196,9 +202,27 @@ void LLAppCoreHttp::init()
<< LL_ENDL;
}
+ // Signal for global pipelining preference from settings
+ static const std::string http_pipelining("HttpPipelining");
+ if (gSavedSettings.controlExists(http_pipelining))
+ {
+ LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(http_pipelining);
+ if (cntrl_ptr.isNull())
+ {
+ LL_WARNS("Init") << "Unable to set signal on global setting '" << http_pipelining
+ << "'" << LL_ENDL;
+ }
+ else
+ {
+ mPipelinedSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
+ }
+ }
+
// Register signals for settings and state changes
for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{
+ const EAppPolicy app_policy(static_cast<EAppPolicy>(i));
+
if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
{
LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(init_data[i].mKey);
@@ -209,7 +233,7 @@ void LLAppCoreHttp::init()
}
else
{
- mSettingsSignal[i] = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
+ mHttpClasses[app_policy].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
}
}
}
@@ -261,10 +285,11 @@ void LLAppCoreHttp::cleanup()
}
}
- for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
+ for (int i(0); i < LL_ARRAY_SIZE(mHttpClasses); ++i)
{
- mSettingsSignal[i].disconnect();
+ mHttpClasses[i].mSettingsSignal.disconnect();
}
+ mPipelinedSignal.disconnect();
delete mRequest;
mRequest = NULL;
@@ -278,30 +303,84 @@ void LLAppCoreHttp::cleanup()
}
}
+
void LLAppCoreHttp::refreshSettings(bool initial)
{
LLCore::HttpStatus status;
+
+ // Global pipelining setting
+ bool pipeline_changed(false);
+ static const std::string http_pipelining("HttpPipelining");
+ if (gSavedSettings.controlExists(http_pipelining))
+ {
+ // Default to true (in ctor) if absent.
+ bool pipelined(gSavedSettings.getBOOL(http_pipelining));
+ if (pipelined != mPipelined)
+ {
+ mPipelined = pipelined;
+ pipeline_changed = true;
+ }
+ }
for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
{
- const EAppPolicy policy(init_data[i].mPolicy);
+ const EAppPolicy app_policy(static_cast<EAppPolicy>(i));
- // Set any desired throttle
- if (initial && init_data[i].mRate)
+ if (initial)
{
- // Init-time only, can use the static setters here
- status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE,
- mPolicies[policy],
- init_data[i].mRate,
- NULL);
- if (! status)
+ // Init-time only settings, can use the static setters here
+
+ if (init_data[i].mRate)
{
- LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
- << " throttle rate. Reason: " << status.toString()
- << LL_ENDL;
+ // Set any desired throttle
+ status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE,
+ mHttpClasses[app_policy].mPolicy,
+ init_data[i].mRate,
+ NULL);
+ if (! status)
+ {
+ LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+ << " throttle rate. Reason: " << status.toString()
+ << LL_ENDL;
+ }
}
+
}
+ // Init- or run-time settings. Must use the queued request API.
+
+ // Pipelining changes
+ if (initial || pipeline_changed)
+ {
+ const bool to_pipeline(mPipelined && init_data[i].mPipelined);
+ if (to_pipeline != mHttpClasses[app_policy].mPipelined)
+ {
+ // Pipeline election changing, set dynamic option via request
+
+ LLCore::HttpHandle handle;
+ const long new_depth(to_pipeline ? PIPELINING_DEPTH : 0);
+
+ handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
+ mHttpClasses[app_policy].mPolicy,
+ new_depth,
+ NULL);
+ if (LLCORE_HTTP_HANDLE_INVALID == handle)
+ {
+ status = mRequest->getStatus();
+ LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+ << " pipelining. Reason: " << status.toString()
+ << LL_ENDL;
+ }
+ else
+ {
+ LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
+ << " pipelining. New value: " << new_depth
+ << LL_ENDL;
+ mHttpClasses[app_policy].mPipelined = to_pipeline;
+ }
+ }
+ }
+
// Get target connection concurrency value
U32 setting(init_data[i].mDefault);
if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
@@ -314,38 +393,61 @@ void LLAppCoreHttp::refreshSettings(bool initial)
}
}
- if (! initial && setting == mSettings[policy])
+ if (initial || setting != mHttpClasses[app_policy].mConnLimit || pipeline_changed)
{
- // Unchanged, try next setting
- continue;
- }
-
- // Set it and report
- // *TODO: These are intended to be per-host limits when we can
- // support that in llcorehttp/libcurl.
- LLCore::HttpHandle handle;
- handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
- mPolicies[policy],
- setting, NULL);
- if (LLCORE_HTTP_HANDLE_INVALID == handle)
- {
- status = mRequest->getStatus();
- LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
- << " concurrency. Reason: " << status.toString()
- << LL_ENDL;
- }
- else
- {
- LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
- << " concurrency. New value: " << setting
- << LL_ENDL;
- mSettings[policy] = setting;
- if (initial && setting != init_data[i].mDefault)
+ // Set it and report. Strategies depend on pipelining:
+ //
+ // No Pipelining. Llcorehttp manages connections itself based
+ // on the PO_CONNECTION_LIMIT setting. Set both limits to the
+ // same value for logical consistency. In the future, may
+ // hand over connection management to libcurl after the
+ // connection cache has been better vetted.
+ //
+ // Pipelining. Libcurl is allowed to manage connections to a
+ // great degree. Steady state will be limited by the
+ // per-host connection setting. Transitions (region crossings, new
+ // avatars, etc.) can request additional outbound connections
+ // to other servers via the 2X total connection limit.
+ //
+ LLCore::HttpHandle handle;
+ handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
+ mHttpClasses[app_policy].mPolicy,
+ (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting),
+ NULL);
+ if (LLCORE_HTTP_HANDLE_INVALID == handle)
{
- LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
- << " concurrency. New value: " << setting
+ status = mRequest->getStatus();
+ LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+ << " concurrency. Reason: " << status.toString()
<< LL_ENDL;
}
+ else
+ {
+ handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
+ mHttpClasses[app_policy].mPolicy,
+ setting,
+ NULL);
+ if (LLCORE_HTTP_HANDLE_INVALID == handle)
+ {
+ status = mRequest->getStatus();
+ LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+ << " per-host concurrency. Reason: " << status.toString()
+ << LL_ENDL;
+ }
+ else
+ {
+ LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
+ << " concurrency. New value: " << setting
+ << LL_ENDL;
+ mHttpClasses[app_policy].mConnLimit = setting;
+ if (initial && setting != init_data[i].mDefault)
+ {
+ LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
+ << " concurrency. New value: " << setting
+ << LL_ENDL;
+ }
+ }
+ }
}
}
}
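The connection-limit strategy described in the comments above reduces to one branch per policy class. The following is a minimal sketch only, not part of the patch; apply_connection_limits and its parameters are hypothetical stand-ins for the members used in refreshSettings(), while the PO_* options and the setPolicyOption() calls mirror those already shown in the hunk.

    // Sketch: non-pipelined classes keep total and per-host limits equal;
    // pipelined classes allow libcurl 2X total connections for transitions.
    static void apply_connection_limits(LLCore::HttpRequest * request,
                                        LLCore::HttpRequest::policy_t policy,
                                        U32 per_host, bool pipelined)
    {
        const long total(pipelined ? 2 * long(per_host) : long(per_host));
        request->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
                                 policy, total, NULL);
        request->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
                                 policy, long(per_host), NULL);
    }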
diff --git a/indra/newview/llappcorehttp.h b/indra/newview/llappcorehttp.h
index 40e3042b84..9ad4eb4b30 100755
--- a/indra/newview/llappcorehttp.h
+++ b/indra/newview/llappcorehttp.h
@@ -4,7 +4,7 @@
*
* $LicenseInfo:firstyear=2012&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2012-2013, Linden Research, Inc.
+ * Copyright (C) 2012-2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -41,6 +41,8 @@
class LLAppCoreHttp : public LLCore::HttpHandler
{
public:
+ static const long PIPELINING_DEPTH;
+
typedef LLCore::HttpRequest::policy_t policy_t;
enum EAppPolicy
@@ -70,7 +72,7 @@ public:
/// Long poll: no
/// Concurrency: high
/// Request rate: high
- /// Pipelined: soon
+ /// Pipelined: yes
AP_TEXTURE,
/// Legacy mesh fetching policy class. Used to
@@ -98,7 +100,7 @@ public:
/// Long poll: no
/// Concurrency: high
/// Request rate: high
- /// Pipelined: soon
+ /// Pipelined: yes
AP_MESH2,
/// Large mesh fetching policy class. Used to
@@ -116,7 +118,7 @@ public:
/// Long poll: no
/// Concurrency: low
/// Request rate: low
- /// Pipelined: soon
+ /// Pipelined: no
AP_LARGE_MESH,
/// Asset upload policy class. Used to store
@@ -180,7 +182,13 @@ public:
// application function.
policy_t getPolicy(EAppPolicy policy) const
{
- return mPolicies[policy];
+ return mHttpClasses[policy].mPolicy;
+ }
+
+ // Return whether a policy is using pipelined operations.
+ bool isPipelined(EAppPolicy policy) const
+ {
+ return mHttpClasses[policy].mPipelined;
}
// Apply initial or new settings from the environment.
@@ -190,13 +198,27 @@ private:
static const F64 MAX_THREAD_WAIT_TIME;
private:
- LLCore::HttpRequest * mRequest; // Request queue to issue shutdowns
+
+ // PODish container for per-class settings and state.
+ struct HttpClass
+ {
+ public:
+ HttpClass();
+
+ public:
+ policy_t mPolicy; // Policy class id for the class
+ U32 mConnLimit;
+ bool mPipelined;
+ boost::signals2::connection mSettingsSignal; // Signal to global setting that affect this class (if any)
+ };
+
+ LLCore::HttpRequest * mRequest; // Request queue to issue shutdowns
LLCore::HttpHandle mStopHandle;
F64 mStopRequested;
bool mStopped;
- policy_t mPolicies[AP_COUNT]; // Policy class id for each connection set
- U32 mSettings[AP_COUNT];
- boost::signals2::connection mSettingsSignal[AP_COUNT]; // Signals to global settings that affect us
+ HttpClass mHttpClasses[AP_COUNT];
+ bool mPipelined; // Global setting
+ boost::signals2::connection mPipelinedSignal; // Signal for 'HttpPipelining' setting
};
diff --git a/indra/newview/llmeshrepository.cpp b/indra/newview/llmeshrepository.cpp
index 8f50555a73..a6707392fe 100755
--- a/indra/newview/llmeshrepository.cpp
+++ b/indra/newview/llmeshrepository.cpp
@@ -1,4 +1,3 @@
-
/**
* @file llmeshrepository.cpp
* @brief Mesh repository implementation.
@@ -338,14 +337,17 @@ static LLFastTimer::DeclareTimer FTM_MESH_FETCH("Mesh Fetch");
LLMeshRepository gMeshRepo;
const S32 MESH_HEADER_SIZE = 4096; // Important: assumption is that headers fit in this space
+
const S32 REQUEST_HIGH_WATER_MIN = 32; // Limits for GetMesh regions
const S32 REQUEST_HIGH_WATER_MAX = 150; // Should remain under 2X throttle
const S32 REQUEST_LOW_WATER_MIN = 16;
const S32 REQUEST_LOW_WATER_MAX = 75;
+
const S32 REQUEST2_HIGH_WATER_MIN = 32; // Limits for GetMesh2 regions
-const S32 REQUEST2_HIGH_WATER_MAX = 80;
+const S32 REQUEST2_HIGH_WATER_MAX = 100;
const S32 REQUEST2_LOW_WATER_MIN = 16;
-const S32 REQUEST2_LOW_WATER_MAX = 40;
+const S32 REQUEST2_LOW_WATER_MAX = 50;
+
const U32 LARGE_MESH_FETCH_THRESHOLD = 1U << 21; // Size at which requests goes to narrow/slow queue
const long SMALL_MESH_XFER_TIMEOUT = 120L; // Seconds to complete xfer, small mesh downloads
const long LARGE_MESH_XFER_TIMEOUT = 600L; // Seconds to complete xfer, large downloads
@@ -518,11 +520,13 @@ class LLMeshHandlerBase : public LLCore::HttpHandler
{
public:
LOG_CLASS(LLMeshHandlerBase);
- LLMeshHandlerBase()
+ LLMeshHandlerBase(U32 offset, U32 requested_bytes)
: LLCore::HttpHandler(),
mMeshParams(),
mProcessed(false),
- mHttpHandle(LLCORE_HTTP_HANDLE_INVALID)
+ mHttpHandle(LLCORE_HTTP_HANDLE_INVALID),
+ mOffset(offset),
+ mRequestedBytes(requested_bytes)
{}
virtual ~LLMeshHandlerBase()
@@ -534,13 +538,15 @@ protected:
public:
virtual void onCompleted(LLCore::HttpHandle handle, LLCore::HttpResponse * response);
- virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size) = 0;
+ virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size) = 0;
virtual void processFailure(LLCore::HttpStatus status) = 0;
public:
LLVolumeParams mMeshParams;
bool mProcessed;
- LLCore::HttpHandle mHttpHandle;
+ LLCore::HttpHandle mHttpHandle;
+ U32 mOffset;
+ U32 mRequestedBytes;
};
@@ -551,8 +557,8 @@ class LLMeshHeaderHandler : public LLMeshHandlerBase
{
public:
LOG_CLASS(LLMeshHeaderHandler);
- LLMeshHeaderHandler(const LLVolumeParams & mesh_params)
- : LLMeshHandlerBase()
+ LLMeshHeaderHandler(const LLVolumeParams & mesh_params, U32 offset, U32 requested_bytes)
+ : LLMeshHandlerBase(offset, requested_bytes)
{
mMeshParams = mesh_params;
LLMeshRepoThread::incActiveHeaderRequests();
@@ -564,7 +570,7 @@ protected:
void operator=(const LLMeshHeaderHandler &); // Not defined
public:
- virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size);
+ virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size);
virtual void processFailure(LLCore::HttpStatus status);
};
@@ -573,17 +579,16 @@ public:
//
// Thread: repo
class LLMeshLODHandler : public LLMeshHandlerBase
- {
+{
public:
+ LOG_CLASS(LLMeshLODHandler);
LLMeshLODHandler(const LLVolumeParams & mesh_params, S32 lod, U32 offset, U32 requested_bytes)
- : LLMeshHandlerBase(),
- mLOD(lod),
- mRequestedBytes(requested_bytes),
- mOffset(offset)
+ : LLMeshHandlerBase(offset, requested_bytes),
+ mLOD(lod)
{
- mMeshParams = mesh_params;
- LLMeshRepoThread::incActiveLODRequests();
- }
+ mMeshParams = mesh_params;
+ LLMeshRepoThread::incActiveLODRequests();
+ }
virtual ~LLMeshLODHandler();
protected:
@@ -591,13 +596,11 @@ protected:
void operator=(const LLMeshLODHandler &); // Not defined
public:
- virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size);
+ virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size);
virtual void processFailure(LLCore::HttpStatus status);
public:
S32 mLOD;
- U32 mRequestedBytes;
- U32 mOffset;
};
@@ -605,14 +608,12 @@ public:
//
// Thread: repo
class LLMeshSkinInfoHandler : public LLMeshHandlerBase
- {
+{
public:
LOG_CLASS(LLMeshSkinInfoHandler);
- LLMeshSkinInfoHandler(const LLUUID& id, U32 offset, U32 size)
- : LLMeshHandlerBase(),
- mMeshID(id),
- mRequestedBytes(size),
- mOffset(offset)
+ LLMeshSkinInfoHandler(const LLUUID& id, U32 offset, U32 requested_bytes)
+ : LLMeshHandlerBase(offset, requested_bytes),
+ mMeshID(id)
{}
virtual ~LLMeshSkinInfoHandler();
@@ -621,13 +622,11 @@ protected:
void operator=(const LLMeshSkinInfoHandler &); // Not defined
public:
- virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size);
+ virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size);
virtual void processFailure(LLCore::HttpStatus status);
public:
LLUUID mMeshID;
- U32 mRequestedBytes;
- U32 mOffset;
};
@@ -635,14 +634,12 @@ public:
//
// Thread: repo
class LLMeshDecompositionHandler : public LLMeshHandlerBase
- {
+{
public:
LOG_CLASS(LLMeshDecompositionHandler);
- LLMeshDecompositionHandler(const LLUUID& id, U32 offset, U32 size)
- : LLMeshHandlerBase(),
- mMeshID(id),
- mRequestedBytes(size),
- mOffset(offset)
+ LLMeshDecompositionHandler(const LLUUID& id, U32 offset, U32 requested_bytes)
+ : LLMeshHandlerBase(offset, requested_bytes),
+ mMeshID(id)
{}
virtual ~LLMeshDecompositionHandler();
@@ -651,13 +648,11 @@ protected:
void operator=(const LLMeshDecompositionHandler &); // Not defined
public:
- virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size);
+ virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size);
virtual void processFailure(LLCore::HttpStatus status);
public:
LLUUID mMeshID;
- U32 mRequestedBytes;
- U32 mOffset;
};
@@ -665,14 +660,12 @@ public:
//
// Thread: repo
class LLMeshPhysicsShapeHandler : public LLMeshHandlerBase
- {
+{
public:
LOG_CLASS(LLMeshPhysicsShapeHandler);
- LLMeshPhysicsShapeHandler(const LLUUID& id, U32 offset, U32 size)
- : LLMeshHandlerBase(),
- mMeshID(id),
- mRequestedBytes(size),
- mOffset(offset)
+ LLMeshPhysicsShapeHandler(const LLUUID& id, U32 offset, U32 requested_bytes)
+ : LLMeshHandlerBase(offset, requested_bytes),
+ mMeshID(id)
{}
virtual ~LLMeshPhysicsShapeHandler();
@@ -681,13 +674,11 @@ protected:
void operator=(const LLMeshPhysicsShapeHandler &); // Not defined
public:
- virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size);
+ virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size);
virtual void processFailure(LLCore::HttpStatus status);
public:
LLUUID mMeshID;
- U32 mRequestedBytes;
- U32 mOffset;
};
@@ -713,8 +704,8 @@ void log_upload_error(LLCore::HttpStatus status, const LLSD& content,
LL_WARNS(LOG_MESH) << "error: " << err << LL_ENDL;
LL_WARNS(LOG_MESH) << " mesh upload failed, stage '" << stage
<< "', error '" << err["error"].asString()
- << "', message '" << err["message"].asString()
- << "', id '" << err["identifier"].asString()
+ << "', message '" << err["message"].asString()
+ << "', id '" << err["identifier"].asString()
<< "'" << LL_ENDL;
if (err.has("errors"))
{
@@ -754,7 +745,9 @@ LLMeshRepoThread::LLMeshRepoThread()
mHttpLargePolicyClass(LLCore::HttpRequest::DEFAULT_POLICY_ID),
mHttpPriority(0),
mGetMeshVersion(2)
- {
+{
+ LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
+
mMutex = new LLMutex(NULL);
mHeaderMutex = new LLMutex(NULL);
mSignal = new LLCondition(NULL);
@@ -767,14 +760,14 @@ LLMeshRepoThread::LLMeshRepoThread()
mHttpLargeOptions->setUseRetryAfter(gSavedSettings.getBOOL("MeshUseHttpRetryAfter"));
mHttpHeaders = new LLCore::HttpHeaders;
mHttpHeaders->append("Accept", "application/vnd.ll.mesh");
- mHttpPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_MESH2);
- mHttpLegacyPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_MESH1);
- mHttpLargePolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_LARGE_MESH);
- }
+ mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_MESH2);
+ mHttpLegacyPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_MESH1);
+ mHttpLargePolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_LARGE_MESH);
+}
LLMeshRepoThread::~LLMeshRepoThread()
- {
+{
LL_INFOS(LOG_MESH) << "Small GETs issued: " << LLMeshRepository::sHTTPRequestCount
<< ", Large GETs issued: " << LLMeshRepository::sHTTPLargeRequestCount
<< ", Max Lock Holdoffs: " << LLMeshRepository::sMaxLockHoldoffs
@@ -785,23 +778,23 @@ LLMeshRepoThread::~LLMeshRepoThread()
++iter)
{
delete *iter;
- }
+ }
mHttpRequestSet.clear();
if (mHttpHeaders)
- {
+ {
mHttpHeaders->release();
mHttpHeaders = NULL;
- }
+ }
if (mHttpOptions)
- {
+ {
mHttpOptions->release();
mHttpOptions = NULL;
- }
+ }
if (mHttpLargeOptions)
-{
+ {
mHttpLargeOptions->release();
mHttpLargeOptions = NULL;
-}
+ }
delete mHttpRequest;
mHttpRequest = NULL;
delete mMutex;
@@ -846,48 +839,49 @@ void LLMeshRepoThread::run()
{
// Dispatch all HttpHandler notifications
mHttpRequest->update(0L);
- }
+ }
sRequestWaterLevel = mHttpRequestSet.size(); // Stats data update
// NOTE: order of queue processing intentionally favors LOD requests over header requests
while (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
+ {
if (! mMutex)
- {
+ {
break;
}
- mMutex->lock();
- LODRequest req = mLODReqQ.front();
- mLODReqQ.pop();
- LLMeshRepository::sLODProcessing--;
- mMutex->unlock();
+ mMutex->lock();
+ LODRequest req = mLODReqQ.front();
+ mLODReqQ.pop();
+ LLMeshRepository::sLODProcessing--;
+ mMutex->unlock();
+
if (!fetchMeshLOD(req.mMeshParams, req.mLOD)) // failed, resubmit
- {
- mMutex->lock();
- mLODReqQ.push(req);
+ {
+ mMutex->lock();
+ mLODReqQ.push(req);
++LLMeshRepository::sLODProcessing;
- mMutex->unlock();
- }
- }
+ mMutex->unlock();
+ }
+ }
while (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
+ {
if (! mMutex)
- {
+ {
break;
}
- mMutex->lock();
- HeaderRequest req = mHeaderReqQ.front();
- mHeaderReqQ.pop();
- mMutex->unlock();
+ mMutex->lock();
+ HeaderRequest req = mHeaderReqQ.front();
+ mHeaderReqQ.pop();
+ mMutex->unlock();
if (!fetchMeshHeader(req.mMeshParams))//failed, resubmit
- {
- mMutex->lock();
- mHeaderReqQ.push(req) ;
- mMutex->unlock();
- }
- }
+ {
+ mMutex->lock();
+ mHeaderReqQ.push(req);
+ mMutex->unlock();
+ }
+ }
// For the final three request lists, similar goal to above but
// slightly different queue structures. Stay off the mutex when
@@ -983,7 +977,7 @@ void LLMeshRepoThread::run()
}
}
mMutex->unlock();
- }
+ }
// For dev purposes only. A dynamic change could make this false
// and that shouldn't assert.
@@ -1131,6 +1125,9 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c
size_t offset, size_t len,
LLCore::HttpHandler * handler)
{
+ // Also used in lltexturefetch.cpp
+ static LLCachedControl<bool> disable_range_req(gSavedSettings, "HttpRangeRequestsDisable", false);
+
LLCore::HttpHandle handle(LLCORE_HTTP_HANDLE_INVALID);
if (len < LARGE_MESH_FETCH_THRESHOLD)
@@ -1140,8 +1137,8 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c
: mHttpLegacyPolicyClass),
mHttpPriority,
url,
- offset,
- len,
+ (disable_range_req ? size_t(0) : offset),
+ (disable_range_req ? size_t(0) : len),
mHttpOptions,
mHttpHeaders,
handler);
@@ -1155,8 +1152,8 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c
handle = mHttpRequest->requestGetByteRange(mHttpLargePolicyClass,
mHttpPriority,
url,
- offset,
- len,
+ (disable_range_req ? size_t(0) : offset),
+ (disable_range_req ? size_t(0) : len),
mHttpLargeOptions,
mHttpHeaders,
handler);
@@ -1250,7 +1247,6 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id)
<< LL_ENDL;
delete handler;
ret = false;
-
}
else
{
@@ -1527,7 +1523,7 @@ bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params)
//within the first 4KB
//NOTE -- this will break if headers ever exceed 4KB
- LLMeshHeaderHandler * handler = new LLMeshHeaderHandler(mesh_params);
+ LLMeshHeaderHandler * handler = new LLMeshHeaderHandler(mesh_params, 0, MESH_HEADER_SIZE);
LLCore::HttpHandle handle = getByteRange(http_url, cap_version, 0, MESH_HEADER_SIZE, handler);
if (LLCORE_HTTP_HANDLE_INVALID == handle)
{
@@ -1860,7 +1856,7 @@ LLMeshUploadThread::LLMeshUploadThread(LLMeshUploadThread::instance_list& data,
bool upload_skin, bool upload_joints, const std::string & upload_url, bool do_upload,
LLHandle<LLWholeModelFeeObserver> fee_observer,
LLHandle<LLWholeModelUploadObserver> upload_observer)
-: LLThread("mesh upload"),
+ : LLThread("mesh upload"),
LLCore::HttpHandler(),
mDiscarded(false),
mDoUpload(do_upload),
@@ -2271,7 +2267,7 @@ void LLMeshUploadThread::doWholeModelUpload()
mHttpRequest->update(0);
while (! LLApp::isQuitting() && ! finished() && ! isDiscarded())
- {
+ {
ms_sleep(sleep_time);
sleep_time = llmin(250U, sleep_time + sleep_time);
mHttpRequest->update(0);
@@ -2287,7 +2283,7 @@ void LLMeshUploadThread::doWholeModelUpload()
}
}
}
- }
+}
void LLMeshUploadThread::requestWholeModelFee()
{
@@ -2318,7 +2314,7 @@ void LLMeshUploadThread::requestWholeModelFee()
LL_WARNS(LOG_MESH) << "Couldn't issue request for model fee. Reason: " << mHttpStatus.toString()
<< " (" << mHttpStatus.toTerseString() << ")"
<< LL_ENDL;
- }
+ }
else
{
U32 sleep_time(10);
@@ -2335,7 +2331,7 @@ void LLMeshUploadThread::requestWholeModelFee()
LL_DEBUGS(LOG_MESH) << "Mesh fee query operation discarded." << LL_ENDL;
}
}
- }
+}
// Does completion duty for both fee queries and actual uploads.
@@ -2388,12 +2384,12 @@ void LLMeshUploadThread::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResp
{
LLCore::BufferArrayStream bas(ba);
LLSDSerialize::fromXML(body, bas);
-}
+ }
}
dump_llsd_to_file(body, make_dump_name("whole_model_upload_response_", dump_num));
if (body["state"].asString() == "complete")
-{
+ {
// requested "mesh" asset type isn't actually the type
// of the resultant object, fix it up here.
mModelData["asset_type"] = "object";
@@ -2446,18 +2442,18 @@ void LLMeshUploadThread::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResp
body = llsd_from_file("fake_upload_error.xml");
}
else
- {
+ {
LLCore::BufferArray * ba(response->getBody());
if (ba && ba->size())
- {
+ {
LLCore::BufferArrayStream bas(ba);
LLSDSerialize::fromXML(body, bas);
- }
- }
+ }
+ }
dump_llsd_to_file(body, make_dump_name("whole_model_fee_response_", dump_num));
if (body["state"].asString() == "upload")
- {
+ {
mWholeModelUploadURL = body["uploader"].asString();
if (observer)
@@ -2543,18 +2539,18 @@ void LLMeshRepoThread::notifyLoadedMeshes()
skin_info_q.swap(mSkinInfoQ);
}
if (! mDecompositionQ.empty())
- {
+ {
decomp_q.swap(mDecompositionQ);
- }
+ }
mMutex->unlock();
// Process the elements free of the lock
while (! skin_info_q.empty())
- {
+ {
gMeshRepo.notifySkinInfoReceived(skin_info_q.front());
skin_info_q.pop_front();
- }
+ }
while (! decomp_q.empty())
{
@@ -2648,6 +2644,17 @@ void LLMeshRepository::cacheOutgoingMesh(LLMeshUploadData& data, LLSD& header)
}
+// Handle failed or successful requests for mesh assets.
+//
+// Support for 200 responses was added for several reasons. One,
+// a service or cache can ignore range headers and return a
+// 200 with the full asset if it elects to. We also support
+// a debug flag which disables range requests for the very
+// few users whose networking services mishandle them.
+// But the 200 response handling is suboptimal: rather
+// than cache the whole asset, we just extract the part that would
+// have been sent in a 206 and process that. Inefficient, but these
+// cases are far off the norm.
void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResponse * response)
{
mProcessed = true;
@@ -2676,35 +2683,78 @@ void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpRespo
// rather than partial) and 416 (request completely unsatisfiable).
// Always been exposed to these but are less likely here where
// speculative loads aren't done.
- static const LLCore::HttpStatus par_status(HTTP_PARTIAL_CONTENT);
+ LLCore::BufferArray * body(response->getBody());
+ S32 body_offset(0);
+ U8 * data(NULL);
+ S32 data_size(body ? body->size() : 0);
- if (par_status != status)
+ if (data_size > 0)
{
- LL_WARNS_ONCE(LOG_MESH) << "Non-206 successful status received for fetch: "
- << status.toTerseString() << LL_ENDL;
- }
+ static const LLCore::HttpStatus par_status(HTTP_PARTIAL_CONTENT);
+
+ unsigned int offset(0), length(0), full_length(0);
+
+ if (par_status == status)
+ {
+ // 206 case
+ response->getRange(&offset, &length, &full_length);
+ if (! offset && ! length)
+ {
+ // This is the case where we receive a 206 status but
+ // there wasn't a useful Content-Range header in the response.
+ // This could be because it was badly formatted but is more
+ // likely due to capabilities services which scrub headers
+ // from responses. Assume we got what we asked for...
+ // length = data_size;
+ offset = mOffset;
+ }
+ }
+ else
+ {
+ // 200 case, typically
+ offset = 0;
+ }
- LLCore::BufferArray * body(response->getBody());
- S32 data_size(body ? body->size() : 0);
- U8 * data(NULL);
+ // *DEBUG: To test validation below
+ // offset += 1;
- if (data_size > 0)
- {
+ // Validate that what we think we received is consistent with
+ // what we've asked for, i.e. the first byte we wanted lies somewhere
+ // in the response.
+ if (offset > mOffset
+ || (offset + data_size) <= mOffset
+ || (mOffset - offset) >= data_size)
+ {
+ // No overlap with requested range. Fail request with
+ // suitable error. Shouldn't happen unless server/cache/ISP
+ // is doing something awful.
+ LL_WARNS(LOG_MESH) << "Mesh response (bytes ["
+ << offset << ".." << (offset + data_size - 1)
+ << "]) didn't overlap with the requested range (bytes ["
+ << mOffset << ".." << (mOffset + mRequestedBytes - 1)
+ << "])." << LL_ENDL;
+ processFailure(LLCore::HttpStatus(LLCore::HttpStatus::LLCORE, LLCore::HE_INV_CONTENT_RANGE_HDR));
+ ++LLMeshRepository::sHTTPErrorCount;
+ goto common_exit;
+ }
+
// *TODO: Try to get rid of data copying and add interfaces
// that support BufferArray directly. Introduce a two-phase
// handler, optional first that takes a body, fallback second
// that requires a temporary allocation and data copy.
- data = new U8[data_size];
- body->read(0, (char *) data, data_size);
+ body_offset = mOffset - offset;
+ data = new U8[data_size - body_offset];
+ body->read(body_offset, (char *) data, data_size - body_offset);
LLMeshRepository::sBytesReceived += data_size;
}
- processData(body, data, data_size);
+ processData(body, body_offset, data, data_size - body_offset);
delete [] data;
}
// Release handler
+common_exit:
gMeshRepo.mThread->mHttpRequestSet.erase(this);
delete this; // Must be last statement
}
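The byte-range bookkeeping in the hunk above reduces to a small piece of arithmetic. The sketch below is illustrative only, with hypothetical names; it mirrors the overlap test and slice computation performed in LLMeshHandlerBase::onCompleted() for 200 and 206 responses.

    // Sketch: compute the usable slice of a response against the requested
    // range.  A full 200 reply (offset 0, 8192 bytes) for a request of bytes
    // [4096, 8192) yields body_offset 4096 and 4096 usable bytes.
    static bool compute_usable_slice(S32 offset, S32 data_size,     // received
                                     S32 req_offset,                // requested
                                     S32 & body_offset, S32 & usable)
    {
        if (offset > req_offset || offset + data_size <= req_offset)
        {
            return false;                   // no overlap, fail the request
        }
        body_offset = req_offset - offset;  // first requested byte in the body
        usable = data_size - body_offset;   // bytes handed to processData()
        return true;
    }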
@@ -2739,9 +2789,10 @@ void LLMeshHeaderHandler::processFailure(LLCore::HttpStatus status)
{
gMeshRepo.mThread->mUnavailableQ.push(LLMeshRepoThread::LODRequest(mMeshParams, i));
}
- }
+}
-void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size)
+void LLMeshHeaderHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */,
+ U8 * data, S32 data_size)
{
LLUUID mesh_id = mMeshParams.getSculptID();
bool success = (! MESH_HEADER_PROCESS_FAILED) && gMeshRepo.mThread->headerReceived(mMeshParams, data, data_size);
@@ -2756,12 +2807,12 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32
// Can't get the header so none of the LODs will be available
LLMutexLock lock(gMeshRepo.mThread->mMutex);
for (int i(0); i < 4; ++i)
- {
+ {
gMeshRepo.mThread->mUnavailableQ.push(LLMeshRepoThread::LODRequest(mMeshParams, i));
- }
}
+ }
else if (data && data_size > 0)
- {
+ {
// header was successfully retrieved from sim, cache in vfs
LLSD header = gMeshRepo.mThread->mMeshHeader[mesh_id];
@@ -2774,11 +2825,11 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32
S32 lod_bytes = 0;
for (U32 i = 0; i < LLModel::LOD_PHYSICS; ++i)
- {
+ {
// figure out how many bytes we'll need to reserve in the file
const std::string & lod_name = header_lod[i];
lod_bytes = llmax(lod_bytes, header[lod_name]["offset"].asInteger()+header[lod_name]["size"].asInteger());
- }
+ }
// just in case skin info or decomposition is at the end of the file (which it shouldn't be)
lod_bytes = llmax(lod_bytes, header["skin"]["offset"].asInteger() + header["skin"]["size"].asInteger());
@@ -2794,7 +2845,7 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32
LLVFile file(gVFS, mesh_id, LLAssetType::AT_MESH, LLVFile::WRITE);
if (file.getMaxSize() >= bytes || file.setMaxSize(bytes))
- {
+ {
LLMeshRepository::sCacheBytesWritten += data_size;
++LLMeshRepository::sCacheWrites;
@@ -2805,19 +2856,19 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32
memset(block, 0, sizeof(block));
while (bytes-file.tell() > sizeof(block))
- {
+ {
file.write(block, sizeof(block));
- }
+ }
S32 remaining = bytes-file.tell();
if (remaining > 0)
- {
+ {
file.write(block, remaining);
}
}
}
}
- }
+}
LLMeshLODHandler::~LLMeshLODHandler()
{
@@ -2843,8 +2894,9 @@ void LLMeshLODHandler::processFailure(LLCore::HttpStatus status)
gMeshRepo.mThread->mUnavailableQ.push(LLMeshRepoThread::LODRequest(mMeshParams, mLOD));
}
-void LLMeshLODHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size)
- {
+void LLMeshLODHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */,
+ U8 * data, S32 data_size)
+{
if ((! MESH_LOD_PROCESS_FAILED) && gMeshRepo.mThread->lodReceived(mMeshParams, mLOD, data, data_size))
{
//good fetch from sim, write to VFS for caching
@@ -2860,7 +2912,7 @@ void LLMeshLODHandler::processData(LLCore::BufferArray * body, U8 * data, S32 da
LLMeshRepository::sCacheBytesWritten += size;
++LLMeshRepository::sCacheWrites;
}
- }
+ }
else
{
LL_WARNS(LOG_MESH) << "Error during mesh LOD processing. ID: " << mMeshParams.getSculptID()
@@ -2872,12 +2924,12 @@ void LLMeshLODHandler::processData(LLCore::BufferArray * body, U8 * data, S32 da
}
LLMeshSkinInfoHandler::~LLMeshSkinInfoHandler()
- {
- llassert(mProcessed);
- }
+{
+ llassert(mProcessed);
+}
void LLMeshSkinInfoHandler::processFailure(LLCore::HttpStatus status)
- {
+{
LL_WARNS(LOG_MESH) << "Error during mesh skin info handling. ID: " << mMeshID
<< ", Reason: " << status.toString()
<< " (" << status.toTerseString() << "). Not retrying."
@@ -2885,10 +2937,11 @@ void LLMeshSkinInfoHandler::processFailure(LLCore::HttpStatus status)
// *TODO: Mark mesh unavailable on error. For now, simply leave
// request unfulfilled rather than retry forever.
- }
+}
-void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size)
- {
+void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */,
+ U8 * data, S32 data_size)
+{
if ((! MESH_SKIN_INFO_PROCESS_FAILED) && gMeshRepo.mThread->skinInfoReceived(mMeshID, data, data_size))
{
//good fetch from sim, write to VFS for caching
@@ -2916,20 +2969,21 @@ void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * body, U8 * data, S
LLMeshDecompositionHandler::~LLMeshDecompositionHandler()
{
- llassert(mProcessed);
+ llassert(mProcessed);
}
void LLMeshDecompositionHandler::processFailure(LLCore::HttpStatus status)
- {
+{
LL_WARNS(LOG_MESH) << "Error during mesh decomposition handling. ID: " << mMeshID
<< ", Reason: " << status.toString()
<< " (" << status.toTerseString() << "). Not retrying."
<< LL_ENDL;
// *TODO: Mark mesh unavailable on error. For now, simply leave
// request unfulfilled rather than retry forever.
- }
+}
-void LLMeshDecompositionHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size)
+void LLMeshDecompositionHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */,
+ U8 * data, S32 data_size)
{
if ((! MESH_DECOMP_PROCESS_FAILED) && gMeshRepo.mThread->decompositionReceived(mMeshID, data, data_size))
{
@@ -2946,34 +3000,35 @@ void LLMeshDecompositionHandler::processData(LLCore::BufferArray * body, U8 * da
file.seek(offset);
file.write(data, size);
}
- }
- else
- {
+ }
+ else
+ {
LL_WARNS(LOG_MESH) << "Error during mesh decomposition processing. ID: " << mMeshID
<< ", Unknown reason. Not retrying."
<< LL_ENDL;
// *TODO: Mark mesh unavailable on error
- }
}
+}
LLMeshPhysicsShapeHandler::~LLMeshPhysicsShapeHandler()
- {
- llassert(mProcessed);
- }
+{
+ llassert(mProcessed);
+}
void LLMeshPhysicsShapeHandler::processFailure(LLCore::HttpStatus status)
- {
+{
LL_WARNS(LOG_MESH) << "Error during mesh physics shape handling. ID: " << mMeshID
<< ", Reason: " << status.toString()
<< " (" << status.toTerseString() << "). Not retrying."
<< LL_ENDL;
// *TODO: Mark mesh unavailable on error
- }
+}
-void LLMeshPhysicsShapeHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size)
- {
+void LLMeshPhysicsShapeHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */,
+ U8 * data, S32 data_size)
+{
if ((! MESH_PHYS_SHAPE_PROCESS_FAILED) && gMeshRepo.mThread->physicsShapeReceived(mMeshID, data, data_size))
- {
+ {
// good fetch from sim, write to VFS for caching
LLVFile file(gVFS, mMeshID, LLAssetType::AT_MESH, LLVFile::WRITE);
@@ -2981,13 +3036,13 @@ void LLMeshPhysicsShapeHandler::processData(LLCore::BufferArray * body, U8 * dat
S32 size = mRequestedBytes;
if (file.getSize() >= offset+size)
- {
+ {
LLMeshRepository::sCacheBytesWritten += size;
++LLMeshRepository::sCacheWrites;
file.seek(offset);
file.write(data, size);
- }
}
+ }
else
{
LL_WARNS(LOG_MESH) << "Error during mesh physics shape processing. ID: " << mMeshID
@@ -3187,7 +3242,7 @@ void LLMeshRepository::notifyLoadedMeshes()
if (1 == mGetMeshVersion)
{
// Legacy GetMesh operation with high connection concurrency
- LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("MeshMaxConcurrentRequests");
+ LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("MeshMaxConcurrentRequests");
LLMeshRepoThread::sRequestHighWater = llclamp(2 * S32(LLMeshRepoThread::sMaxConcurrentRequests),
REQUEST_HIGH_WATER_MIN,
REQUEST_HIGH_WATER_MAX);
@@ -3198,9 +3253,15 @@ void LLMeshRepository::notifyLoadedMeshes()
else
{
// GetMesh2 operation with keepalives, etc. With pipelining,
- // we'll increase this.
+ // we'll increase this. See llappcorehttp and llcorehttp for
+ // discussion on connection strategies.
+ LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
+ S32 scale(app_core_http.isPipelined(LLAppCoreHttp::AP_MESH2)
+ ? (2 * LLAppCoreHttp::PIPELINING_DEPTH)
+ : 5);
+
LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("Mesh2MaxConcurrentRequests");
- LLMeshRepoThread::sRequestHighWater = llclamp(5 * S32(LLMeshRepoThread::sMaxConcurrentRequests),
+ LLMeshRepoThread::sRequestHighWater = llclamp(scale * S32(LLMeshRepoThread::sMaxConcurrentRequests),
REQUEST2_HIGH_WATER_MIN,
REQUEST2_HIGH_WATER_MAX);
LLMeshRepoThread::sRequestLowWater = llclamp(LLMeshRepoThread::sRequestHighWater / 2,
@@ -3300,18 +3361,18 @@ void LLMeshRepository::notifyLoadedMeshes()
// If we can't get the locks, skip and pick this up later.
++hold_offs;
sMaxLockHoldoffs = llmax(sMaxLockHoldoffs, hold_offs);
- return;
- }
+ return;
+ }
hold_offs = 0;
if (gAgent.getRegion())
{
// Update capability urls
- static std::string region_name("never name a region this");
+ static std::string region_name("never name a region this");
- if (gAgent.getRegion()->getName() != region_name && gAgent.getRegion()->capabilitiesReceived())
- {
- region_name = gAgent.getRegion()->getName();
+ if (gAgent.getRegion()->getName() != region_name && gAgent.getRegion()->capabilitiesReceived())
+ {
+ region_name = gAgent.getRegion()->getName();
const bool use_v1(gSavedSettings.getBOOL("MeshUseGetMesh1"));
const std::string mesh1(gAgent.getRegion()->getCapability("GetMesh"));
const std::string mesh2(gAgent.getRegion()->getCapability("GetMesh2"));
@@ -3322,8 +3383,8 @@ void LLMeshRepository::notifyLoadedMeshes()
<< ", GetMesh: " << mesh1
<< ", using version: " << mGetMeshVersion
<< LL_ENDL;
+ }
}
- }
//popup queued error messages from background threads
while (!mUploadErrorQ.empty())
@@ -3338,46 +3399,46 @@ void LLMeshRepository::notifyLoadedMeshes()
S32 push_count = LLMeshRepoThread::sRequestHighWater - active_count;
if (mPendingRequests.size() > push_count)
- {
+ {
// More requests than the high-water limit allows so
// sort and forward the most important.
- //calculate "score" for pending requests
+ //calculate "score" for pending requests
- //create score map
- std::map<LLUUID, F32> score_map;
+ //create score map
+ std::map<LLUUID, F32> score_map;
- for (U32 i = 0; i < 4; ++i)
- {
- for (mesh_load_map::iterator iter = mLoadingMeshes[i].begin(); iter != mLoadingMeshes[i].end(); ++iter)
+ for (U32 i = 0; i < 4; ++i)
{
- F32 max_score = 0.f;
- for (std::set<LLUUID>::iterator obj_iter = iter->second.begin(); obj_iter != iter->second.end(); ++obj_iter)
+ for (mesh_load_map::iterator iter = mLoadingMeshes[i].begin(); iter != mLoadingMeshes[i].end(); ++iter)
{
- LLViewerObject* object = gObjectList.findObject(*obj_iter);
-
- if (object)
+ F32 max_score = 0.f;
+ for (std::set<LLUUID>::iterator obj_iter = iter->second.begin(); obj_iter != iter->second.end(); ++obj_iter)
{
- LLDrawable* drawable = object->mDrawable;
- if (drawable)
+ LLViewerObject* object = gObjectList.findObject(*obj_iter);
+
+ if (object)
{
- F32 cur_score = drawable->getRadius()/llmax(drawable->mDistanceWRTCamera, 1.f);
- max_score = llmax(max_score, cur_score);
+ LLDrawable* drawable = object->mDrawable;
+ if (drawable)
+ {
+ F32 cur_score = drawable->getRadius()/llmax(drawable->mDistanceWRTCamera, 1.f);
+ max_score = llmax(max_score, cur_score);
+ }
}
}
- }
- score_map[iter->first.getSculptID()] = max_score;
+ score_map[iter->first.getSculptID()] = max_score;
+ }
}
- }
- //set "score" for pending requests
- for (std::vector<LLMeshRepoThread::LODRequest>::iterator iter = mPendingRequests.begin(); iter != mPendingRequests.end(); ++iter)
- {
- iter->mScore = score_map[iter->mMeshParams.getSculptID()];
- }
+ //set "score" for pending requests
+ for (std::vector<LLMeshRepoThread::LODRequest>::iterator iter = mPendingRequests.begin(); iter != mPendingRequests.end(); ++iter)
+ {
+ iter->mScore = score_map[iter->mMeshParams.getSculptID()];
+ }
- //sort by "score"
+ //sort by "score"
std::partial_sort(mPendingRequests.begin(), mPendingRequests.begin() + push_count,
mPendingRequests.end(), LLMeshRepoThread::CompareScoreGreater());
}
@@ -3588,7 +3649,6 @@ void LLMeshRepository::fetchPhysicsShape(const LLUUID& mesh_id)
}
}
}
-
}
LLModel::Decomposition* LLMeshRepository::getDecomposition(const LLUUID& mesh_id)
diff --git a/indra/newview/lltexturefetch.cpp b/indra/newview/lltexturefetch.cpp
index d9a874be49..a64a6ee091 100755
--- a/indra/newview/lltexturefetch.cpp
+++ b/indra/newview/lltexturefetch.cpp
@@ -241,8 +241,10 @@ LLTrace::EventStatHandle<F64Milliseconds > LLTextureFetch::sCacheReadLatency("te
// Tuning/Parameterization Constants
-static const S32 HTTP_REQUESTS_IN_QUEUE_HIGH_WATER = 40; // Maximum requests to have active in HTTP
-static const S32 HTTP_REQUESTS_IN_QUEUE_LOW_WATER = 20; // Active level at which to refill
+static const S32 HTTP_PIPE_REQUESTS_HIGH_WATER = 100; // Maximum requests to have active in HTTP (pipelined)
+static const S32 HTTP_PIPE_REQUESTS_LOW_WATER = 50; // Active level at which to refill
+static const S32 HTTP_NONPIPE_REQUESTS_HIGH_WATER = 40;
+static const S32 HTTP_NONPIPE_REQUESTS_LOW_WATER = 20;
// BUG-3323/SH-4375
// *NOTE: This is a heuristic value. Texture fetches have a habit of using a
@@ -481,12 +483,12 @@ private:
bool acquireHttpSemaphore()
{
llassert(! mHttpHasResource);
- if (mFetcher->mHttpSemaphore <= 0)
+ if (mFetcher->mHttpSemaphore >= mFetcher->mHttpHighWater)
{
return false;
}
mHttpHasResource = true;
- mFetcher->mHttpSemaphore--;
+ mFetcher->mHttpSemaphore++;
return true;
}
@@ -496,7 +498,8 @@ private:
{
llassert(mHttpHasResource);
mHttpHasResource = false;
- mFetcher->mHttpSemaphore++;
+ mFetcher->mHttpSemaphore--;
+ llassert_always(mFetcher->mHttpSemaphore >= 0);
}
private:
@@ -608,16 +611,16 @@ private:
LLCore::HttpHandle mHttpHandle; // Handle of any active request
LLCore::BufferArray * mHttpBufferArray; // Refcounted pointer to response data
- S32 mHttpPolicyClass;
+ S32 mHttpPolicyClass;
bool mHttpActive; // Active request to http library
- U32 mHttpReplySize, // Actual received data size
- mHttpReplyOffset; // Actual received data offset
+ U32 mHttpReplySize, // Actual received data size
+ mHttpReplyOffset; // Actual received data offset
bool mHttpHasResource; // Counts against Fetcher's mHttpSemaphore
// State history
- U32 mCacheReadCount,
- mCacheWriteCount,
- mResourceWaitCount; // Requests entering WAIT_HTTP_RESOURCE2
+ U32 mCacheReadCount,
+ mCacheWriteCount,
+ mResourceWaitCount; // Requests entering WAIT_HTTP_RESOURCE2
};
//////////////////////////////////////////////////////////////////////////////
@@ -1325,7 +1328,7 @@ bool LLTextureFetchWorker::doWork(S32 param)
}
}
- static LLCachedControl<bool> use_http(gSavedSettings,"ImagePipelineUseHTTP", true);
+ static LLCachedControl<bool> use_http(gSavedSettings, "ImagePipelineUseHTTP", true);
// if (mHost != LLHost::invalid) get_url = false;
if ( use_http && mCanUseHTTP && mUrl.empty())//get http url.
@@ -1473,6 +1476,9 @@ bool LLTextureFetchWorker::doWork(S32 param)
if (mState == SEND_HTTP_REQ)
{
+ // Also used in llmeshrepository
+ static LLCachedControl<bool> disable_range_req(gSavedSettings, "HttpRangeRequestsDisable", false);
+
if (! mCanUseHTTP)
{
releaseHttpSemaphore();
@@ -1528,22 +1534,47 @@ bool LLTextureFetchWorker::doWork(S32 param)
mRequestedOffset -= 1;
mRequestedSize += 1;
}
-
mHttpHandle = LLCORE_HTTP_HANDLE_INVALID;
- if (!mUrl.empty())
- {
- mRequestedTimer.reset();
- mLoaded = FALSE;
- mGetStatus = LLCore::HttpStatus();
- mGetReason.clear();
- LL_DEBUGS(LOG_TXT) << "HTTP GET: " << mID << " Offset: " << mRequestedOffset
- << " Bytes: " << mRequestedSize
- << " Bandwidth(kbps): " << mFetcher->getTextureBandwidth() << "/" << mFetcher->mMaxBandwidth
- << LL_ENDL;
- // Will call callbackHttpGet when curl request completes
- // Only server bake images use the returned headers currently, for getting retry-after field.
- LLCore::HttpOptions *options = (mFTType == FTT_SERVER_BAKE) ? mFetcher->mHttpOptionsWithHeaders: mFetcher->mHttpOptions;
+ if (mUrl.empty())
+ {
+ // *FIXME: This should not be reachable, but it has become
+ // so after some recent 'work'. Need to track this down
+ // and illuminate the unenlightened.
+ LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID
+ << " on empty URL." << LL_ENDL;
+ resetFormattedData();
+ releaseHttpSemaphore();
+ return true; // failed
+ }
+
+ mRequestedTimer.reset();
+ mLoaded = FALSE;
+ mGetStatus = LLCore::HttpStatus();
+ mGetReason.clear();
+ LL_DEBUGS(LOG_TXT) << "HTTP GET: " << mID << " Offset: " << mRequestedOffset
+ << " Bytes: " << mRequestedSize
+ << " Bandwidth(kbps): " << mFetcher->getTextureBandwidth() << "/" << mFetcher->mMaxBandwidth
+ << LL_ENDL;
+
+ // Will call callbackHttpGet when curl request completes
+ // Only server bake images use the returned headers currently, for getting retry-after field.
+ LLCore::HttpOptions *options = (mFTType == FTT_SERVER_BAKE) ? mFetcher->mHttpOptionsWithHeaders: mFetcher->mHttpOptions;
+ if (disable_range_req)
+ {
+ // 'Range:' requests may be disabled, in which case all HTTP
+ // texture fetches result in full fetches. This can be used
+ // by people with questionable ISPs or networking gear that
+ // doesn't handle these well.
+ mHttpHandle = mFetcher->mHttpRequest->requestGet(mHttpPolicyClass,
+ mWorkPriority,
+ mUrl,
+ options,
+ mFetcher->mHttpHeaders,
+ this);
+ }
+ else
+ {
mHttpHandle = mFetcher->mHttpRequest->requestGetByteRange(mHttpPolicyClass,
mWorkPriority,
mUrl,
@@ -1557,7 +1588,11 @@ bool LLTextureFetchWorker::doWork(S32 param)
}
if (LLCORE_HTTP_HANDLE_INVALID == mHttpHandle)
{
- LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID << LL_ENDL;
+ LLCore::HttpStatus status(mFetcher->mHttpRequest->getStatus());
+ LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID
+ << ", Status: " << status.toTerseString()
+ << " Reason: '" << status.toString() << "'"
+ << LL_ENDL;
resetFormattedData();
releaseHttpSemaphore();
return true; // failed
@@ -1613,10 +1648,6 @@ bool LLTextureFetchWorker::doWork(S32 param)
else if (http_service_unavail == mGetStatus)
{
LL_INFOS_ONCE(LOG_TXT) << "Texture server busy (503): " << mUrl << LL_ENDL;
- LL_INFOS(LOG_TXT) << "503: HTTP GET failed for: " << mUrl
- << " Status: " << mGetStatus.toHex()
- << " Reason: '" << mGetReason << "'"
- << LL_ENDL;
}
else if (http_not_sat == mGetStatus)
{
@@ -1774,7 +1805,7 @@ bool LLTextureFetchWorker::doWork(S32 param)
if (mState == DECODE_IMAGE)
{
- static LLCachedControl<bool> textures_decode_disabled(gSavedSettings,"TextureDecodeDisabled", false);
+ static LLCachedControl<bool> textures_decode_disabled(gSavedSettings, "TextureDecodeDisabled", false);
setPriority(LLWorkerThread::PRIORITY_LOW | mWorkPriority); // Set priority first since Responder may change it
if (textures_decode_disabled)
@@ -2485,7 +2516,6 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
mHttpHeaders(NULL),
mHttpMetricsHeaders(NULL),
mHttpPolicyClass(LLCore::HttpRequest::DEFAULT_POLICY_ID),
- mHttpSemaphore(HTTP_REQUESTS_IN_QUEUE_HIGH_WATER),
mTotalCacheReadCount(0U),
mTotalCacheWriteCount(0U),
mTotalResourceWaitCount(0U),
@@ -2497,6 +2527,22 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
mMaxBandwidth = gSavedSettings.getF32("ThrottleBandwidthKBPS");
mTextureInfo.setUpLogging(gSavedSettings.getBOOL("LogTextureDownloadsToViewerLog"), gSavedSettings.getBOOL("LogTextureDownloadsToSimulator"), U32Bytes(gSavedSettings.getU32("TextureLoggingThreshold")));
+ LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
+ mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE);
+ mHttpRequest = new LLCore::HttpRequest;
+ mHttpOptions = new LLCore::HttpOptions;
+ mHttpOptionsWithHeaders = new LLCore::HttpOptions;
+ mHttpOptionsWithHeaders->setWantHeaders(true);
+ mHttpHeaders = new LLCore::HttpHeaders;
+ mHttpHeaders->append("Accept", "image/x-j2c");
+ mHttpMetricsHeaders = new LLCore::HttpHeaders;
+ mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml");
+ mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
+ mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
+ mHttpSemaphore = 0;
+
+ // Conditionally construct debugger object after 'this' is
+ // fully initialized.
LLTextureFetchDebugger::sDebuggerEnabled = gSavedSettings.getBOOL("TextureFetchDebuggerEnabled");
if(LLTextureFetchDebugger::isEnabled())
{
@@ -2509,16 +2555,6 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
}
mOriginFetchSource = mFetchSource;
}
-
- mHttpRequest = new LLCore::HttpRequest;
- mHttpOptions = new LLCore::HttpOptions;
- mHttpOptionsWithHeaders = new LLCore::HttpOptions;
- mHttpOptionsWithHeaders->setWantHeaders(true);
- mHttpHeaders = new LLCore::HttpHeaders;
- mHttpHeaders->append("Accept", "image/x-j2c");
- mHttpMetricsHeaders = new LLCore::HttpHeaders;
- mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml");
- mHttpPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_TEXTURE);
}
LLTextureFetch::~LLTextureFetch()
@@ -2990,6 +3026,20 @@ bool LLTextureFetch::runCondition()
// Threads: Ttf
void LLTextureFetch::commonUpdate()
{
+ // Update low/high water levels based on pipelining. The setting
+ // is only picked up eventually, so the semaphore/request level can
+ // fall outside the [0..HIGH_WATER] range. Expect that.
+ if (LLAppViewer::instance()->getAppCoreHttp().isPipelined(LLAppCoreHttp::AP_TEXTURE))
+ {
+ mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER;
+ mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER;
+ }
+ else
+ {
+ mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
+ mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
+ }
+
// Release waiters
releaseHttpWaiters();
@@ -3651,8 +3701,16 @@ void LLTextureFetch::releaseHttpWaiters()
{
// Use mHttpSemaphore rather than mHTTPTextureQueue.size()
// to avoid a lock.
- if (mHttpSemaphore < (HTTP_REQUESTS_IN_QUEUE_HIGH_WATER - HTTP_REQUESTS_IN_QUEUE_LOW_WATER))
+ if (mHttpSemaphore >= mHttpLowWater)
return;
+ S32 needed(mHttpHighWater - mHttpSemaphore);
+ if (needed <= 0)
+ {
+ // Would only happen if High/LowWater were changed behind
+ // our back. In that case, defer fill until usage falls within
+ // limits.
+ return;
+ }
// Quickly make a copy of all the LLUIDs. Get off the
// mutex as early as possible.
@@ -3701,10 +3759,10 @@ void LLTextureFetch::releaseHttpWaiters()
tids.clear();
// Sort into priority order, if necessary and only as much as needed
- if (tids2.size() > mHttpSemaphore)
+ if (tids2.size() > needed)
{
LLTextureFetchWorker::Compare compare;
- std::partial_sort(tids2.begin(), tids2.begin() + mHttpSemaphore, tids2.end(), compare);
+ std::partial_sort(tids2.begin(), tids2.begin() + needed, tids2.end(), compare);
}
// Release workers up to the high water mark. Since we aren't
@@ -4544,7 +4602,7 @@ S32 LLTextureFetchDebugger::fillCurlQueue()
mNbCurlCompleted = mFetchingHistory.size();
return 0;
}
- if (mNbCurlRequests > HTTP_REQUESTS_IN_QUEUE_LOW_WATER)
+ if (mNbCurlRequests > HTTP_NONPIPE_REQUESTS_LOW_WATER)
{
return mNbCurlRequests;
}
@@ -4577,7 +4635,7 @@ S32 LLTextureFetchDebugger::fillCurlQueue()
mFetchingHistory[i].mHttpHandle = handle;
mFetchingHistory[i].mCurlState = FetchEntry::CURL_IN_PROGRESS;
mNbCurlRequests++;
- if (mNbCurlRequests >= HTTP_REQUESTS_IN_QUEUE_HIGH_WATER) // emulate normal pipeline
+ if (mNbCurlRequests >= HTTP_NONPIPE_REQUESTS_HIGH_WATER) // emulate normal pipeline
{
break;
}
diff --git a/indra/newview/lltexturefetch.h b/indra/newview/lltexturefetch.h
index c4da2e8685..89d18e2c67 100755
--- a/indra/newview/lltexturefetch.h
+++ b/indra/newview/lltexturefetch.h
@@ -356,7 +356,9 @@ private:
LLCore::HttpHeaders * mHttpHeaders; // Ttf
LLCore::HttpHeaders * mHttpMetricsHeaders; // Ttf
LLCore::HttpRequest::policy_t mHttpPolicyClass; // T*
-
+ S32 mHttpHighWater; // Ttf
+ S32 mHttpLowWater; // Ttf
+
// We use a resource semaphore to keep HTTP requests in
// WAIT_HTTP_RESOURCE2 if there aren't sufficient slots in the
// transport. This keeps them near where they can be cheaply
@@ -364,7 +366,11 @@ private:
// where it's more expensive to get at them. Requests in either
// SEND_HTTP_REQ or WAIT_HTTP_REQ charge against the semaphore
// and tracking state transitions is critical to liveness.
- LLAtomicS32 mHttpSemaphore; // Ttf + Tmain
+ //
+ // Originally implemented as a traditional semaphore (heading towards
+ // zero), it now is an outstanding request count that is allowed to
+ // exceed the high water level (but not go below zero).
+ LLAtomicS32 mHttpSemaphore; // Ttf
typedef std::set<LLUUID> wait_http_res_queue_t;
wait_http_res_queue_t mHttpWaitResource; // Mfnq
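The inverted semaphore described above amounts to a simple counting protocol against the high-water mark. This is a sketch only, using a plain S32 where the fetcher uses LLAtomicS32; acquire_slot and release_slot are hypothetical names mirroring acquireHttpSemaphore()/releaseHttpSemaphore().

    static bool acquire_slot(S32 & outstanding, S32 high_water)
    {
        if (outstanding >= high_water)
        {
            return false;            // transport full; keep waiting
        }
        ++outstanding;               // charge one outstanding request
        return true;
    }

    static void release_slot(S32 & outstanding)
    {
        --outstanding;               // request completed or abandoned
        llassert_always(outstanding >= 0);
    }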
diff --git a/indra/newview/tests/llslurl_test.cpp b/indra/newview/tests/llslurl_test.cpp
index 86229ad636..2bc0d5a086 100755
--- a/indra/newview/tests/llslurl_test.cpp
+++ b/indra/newview/tests/llslurl_test.cpp
@@ -6,7 +6,7 @@
*
* $LicenseInfo:firstyear=2009&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2010, Linden Research, Inc.
+ * Copyright (C) 2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -31,6 +31,15 @@
#include "../llslurl.h"
#include "../../llxml/llcontrol.h"
#include "llsdserialize.h"
+
+namespace
+{
+
+// Should not collide with other test programs creating temp files.
+static const char * const TEST_FILENAME("llslurl_test.xml");
+
+}
+
//----------------------------------------------------------------------------
// Mock objects for the dependencies of the code we're testing
@@ -143,11 +152,11 @@ namespace tut
template<> template<>
void slurlTestObject::test<1>()
{
- llofstream gridfile("grid_test.xml");
+ llofstream gridfile(TEST_FILENAME);
gridfile << gSampleGridFile;
gridfile.close();
- LLGridManager::getInstance()->initialize("grid_test.xml");
+ LLGridManager::getInstance()->initialize(TEST_FILENAME);
LLGridManager::getInstance()->setGridChoice("util.agni.lindenlab.com");
@@ -260,11 +269,11 @@ namespace tut
template<> template<>
void slurlTestObject::test<2>()
{
- llofstream gridfile("grid_test.xml");
+ llofstream gridfile(TEST_FILENAME);
gridfile << gSampleGridFile;
gridfile.close();
- LLGridManager::getInstance()->initialize("grid_test.xml");
+ LLGridManager::getInstance()->initialize(TEST_FILENAME);
LLSLURL slurl = LLSLURL("my.grid.com", "my region");
ensure_equals("grid/region - type", slurl.getType(), LLSLURL::LOCATION);
@@ -293,11 +302,11 @@ namespace tut
template<> template<>
void slurlTestObject::test<3>()
{
- llofstream gridfile("grid_test.xml");
+ llofstream gridfile(TEST_FILENAME);
gridfile << gSampleGridFile;
gridfile.close();
- LLGridManager::getInstance()->initialize("grid_test.xml");
+ LLGridManager::getInstance()->initialize(TEST_FILENAME);
LLGridManager::getInstance()->setGridChoice("my.grid.com");
LLSLURL slurl = LLSLURL("https://my.grid.com/region/my%20region/1/2/3");
diff --git a/indra/newview/tests/llviewernetwork_test.cpp b/indra/newview/tests/llviewernetwork_test.cpp
index 7ad7947ca4..0eb0ab6500 100755
--- a/indra/newview/tests/llviewernetwork_test.cpp
+++ b/indra/newview/tests/llviewernetwork_test.cpp
@@ -6,7 +6,7 @@
*
* $LicenseInfo:firstyear=2009&license=viewerlgpl$
* Second Life Viewer Source Code
- * Copyright (C) 2010, Linden Research, Inc.
+ * Copyright (C) 2014, Linden Research, Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -31,6 +31,13 @@
#include "../../llxml/llcontrol.h"
#include "llfile.h"
+namespace
+{
+
+// Should not collide with other test programs creating temp files.
+static const char * const TEST_FILENAME("llviewernetwork_test.xml");
+
+}
//----------------------------------------------------------------------------
// Mock objects for the dependencies of the code we're testing
@@ -143,7 +150,7 @@ namespace tut
{
viewerNetworkTest()
{
- LLFile::remove("grid_test.xml");
+ LLFile::remove(TEST_FILENAME);
gCmdLineLoginURI.clear();
gCmdLineGridChoice.clear();
gCmdLineHelperURI.clear();
@@ -152,7 +159,7 @@ namespace tut
}
~viewerNetworkTest()
{
- LLFile::remove("grid_test.xml");
+ LLFile::remove(TEST_FILENAME);
}
};
@@ -170,7 +177,7 @@ namespace tut
{
LLGridManager *manager = LLGridManager::getInstance();
// grid file doesn't exist
- manager->initialize("grid_test.xml");
+ manager->initialize(TEST_FILENAME);
// validate that some of the defaults are available.
std::map<std::string, std::string> known_grids = manager->getKnownGrids();
ensure_equals("Known grids is a string-string map of size 2", known_grids.size(), 2);
@@ -238,11 +245,11 @@ namespace tut
template<> template<>
void viewerNetworkTestObject::test<2>()
{
- llofstream gridfile("grid_test.xml");
+ llofstream gridfile(TEST_FILENAME);
gridfile << gSampleGridFile;
gridfile.close();
- LLGridManager::getInstance()->initialize("grid_test.xml");
+ LLGridManager::getInstance()->initialize(TEST_FILENAME);
std::map<std::string, std::string> known_grids = LLGridManager::getInstance()->getKnownGrids();
ensure_equals("adding a grid via a grid file increases known grid size",4,
known_grids.size());
@@ -369,11 +376,11 @@ namespace tut
void viewerNetworkTestObject::test<7>()
{
// adding a grid with simply a name will populate the values.
- llofstream gridfile("grid_test.xml");
+ llofstream gridfile(TEST_FILENAME);
gridfile << gSampleGridFile;
gridfile.close();
- LLGridManager::getInstance()->initialize("grid_test.xml");
+ LLGridManager::getInstance()->initialize(TEST_FILENAME);
LLGridManager::getInstance()->setGridChoice("util.agni.lindenlab.com");
ensure_equals("getGridLabel",