Diffstat (limited to 'indra/newview')
-rwxr-xr-x  indra/newview/app_settings/settings.xml |  22
-rwxr-xr-x  indra/newview/llappcorehttp.cpp         | 250
-rwxr-xr-x  indra/newview/llappcorehttp.h           |  40
-rwxr-xr-x  indra/newview/llmeshrepository.cpp      | 462
-rwxr-xr-x  indra/newview/lltexturefetch.cpp        | 154
-rwxr-xr-x  indra/newview/lltexturefetch.h          |  10
6 files changed, 604 insertions(+), 334 deletions(-)
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml index cab7af8a32..baccaffb3f 100755 --- a/indra/newview/app_settings/settings.xml +++ b/indra/newview/app_settings/settings.xml @@ -4456,6 +4456,28 @@ <key>Value</key> <string /> </map> + <key>HttpPipelining</key> + <map> + <key>Comment</key> + <string>If true, viewer will attempt to pipeline HTTP requests.</string> + <key>Persist</key> + <integer>1</integer> + <key>Type</key> + <string>Boolean</string> + <key>Value</key> + <integer>1</integer> + </map> + <key>HttpRangeRequestsDisable</key> + <map> + <key>Comment</key> + <string>If true, viewer will not issue GET requests with 'Range:' headers for meshes and textures. May resolve problems with certain ISPs and networking gear.</string> + <key>Persist</key> + <integer>1</integer> + <key>Type</key> + <string>Boolean</string> + <key>Value</key> + <integer>0</integer> + </map> <key>IMShowTimestamps</key> <map> <key>Comment</key> diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp index 70dcffefb2..e9274c5c1e 100755 --- a/indra/newview/llappcorehttp.cpp +++ b/indra/newview/llappcorehttp.cpp @@ -4,7 +4,7 @@ * * $LicenseInfo:firstyear=2012&license=viewerlgpl$ * Second Life Viewer Source Code - * Copyright (C) 2012-2013, Linden Research, Inc. + * Copyright (C) 2012-2014, Linden Research, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -40,49 +40,52 @@ // be open at a time. const F64 LLAppCoreHttp::MAX_THREAD_WAIT_TIME(10.0); +const long LLAppCoreHttp::PIPELINING_DEPTH(5L); + +// Default and dynamic values for classes static const struct { - LLAppCoreHttp::EAppPolicy mPolicy; U32 mDefault; U32 mMin; U32 mMax; U32 mRate; + bool mPipelined; std::string mKey; const char * mUsage; -} init_data[] = // Default and dynamic values for classes +} init_data[LLAppCoreHttp::AP_COUNT] = { - { - LLAppCoreHttp::AP_DEFAULT, 8, 8, 8, 0, + { // AP_DEFAULT + 8, 8, 8, 0, false, "", "other" }, - { - LLAppCoreHttp::AP_TEXTURE, 8, 1, 12, 0, + { // AP_TEXTURE + 8, 1, 12, 0, true, "TextureFetchConcurrency", "texture fetch" }, - { - LLAppCoreHttp::AP_MESH1, 32, 1, 128, 100, + { // AP_MESH1 + 32, 1, 128, 0, false, "MeshMaxConcurrentRequests", "mesh fetch" }, - { - LLAppCoreHttp::AP_MESH2, 8, 1, 32, 100, + { // AP_MESH2 + 8, 1, 32, 0, true, "Mesh2MaxConcurrentRequests", "mesh2 fetch" }, - { - LLAppCoreHttp::AP_LARGE_MESH, 2, 1, 8, 0, + { // AP_LARGE_MESH + 2, 1, 8, 0, false, "", "large mesh fetch" }, - { - LLAppCoreHttp::AP_UPLOADS, 2, 1, 8, 0, + { // AP_UPLOADS + 2, 1, 8, 0, false, "", "asset upload" }, - { - LLAppCoreHttp::AP_LONG_POLL, 32, 32, 32, 0, + { // AP_LONG_POLL + 32, 32, 32, 0, false, "", "long poll" } @@ -91,18 +94,20 @@ static const struct static void setting_changed(); +LLAppCoreHttp::HttpClass::HttpClass() + : mPolicy(LLCore::HttpRequest::DEFAULT_POLICY_ID), + mConnLimit(0U), + mPipelined(false) +{} + + LLAppCoreHttp::LLAppCoreHttp() : mRequest(NULL), mStopHandle(LLCORE_HTTP_HANDLE_INVALID), mStopRequested(0.0), - mStopped(false) -{ - for (int i(0); i < LL_ARRAY_SIZE(mPolicies); ++i) - { - mPolicies[i] = LLCore::HttpRequest::DEFAULT_POLICY_ID; - mSettings[i] = 0U; - } -} + mStopped(false), + mPipelined(true) +{} LLAppCoreHttp::~LLAppCoreHttp() @@ -157,27 +162,28 @@ void LLAppCoreHttp::init() } // Setup default policy and constrain if directed to - mPolicies[AP_DEFAULT] = LLCore::HttpRequest::DEFAULT_POLICY_ID; + mHttpClasses[AP_DEFAULT].mPolicy = 
LLCore::HttpRequest::DEFAULT_POLICY_ID; // Setup additional policies based on table and some special rules + llassert(LL_ARRAY_SIZE(init_data) == AP_COUNT); for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { - const EAppPolicy policy(init_data[i].mPolicy); + const EAppPolicy app_policy(static_cast<EAppPolicy>(i)); - if (AP_DEFAULT == policy) + if (AP_DEFAULT == app_policy) { // Pre-created continue; } - mPolicies[policy] = LLCore::HttpRequest::createPolicyClass(); - if (! mPolicies[policy]) + mHttpClasses[app_policy].mPolicy = LLCore::HttpRequest::createPolicyClass(); + if (! mHttpClasses[app_policy].mPolicy) { // Use default policy (but don't accidentally modify default) LL_WARNS("Init") << "Failed to create HTTP policy class for " << init_data[i].mUsage << ". Using default policy." << LL_ENDL; - mPolicies[policy] = mPolicies[AP_DEFAULT]; + mHttpClasses[app_policy].mPolicy = mHttpClasses[AP_DEFAULT].mPolicy; continue; } } @@ -196,9 +202,27 @@ void LLAppCoreHttp::init() << LL_ENDL; } + // Signal for global pipelining preference from settings + static const std::string http_pipelining("HttpPipelining"); + if (gSavedSettings.controlExists(http_pipelining)) + { + LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(http_pipelining); + if (cntrl_ptr.isNull()) + { + LL_WARNS("Init") << "Unable to set signal on global setting '" << http_pipelining + << "'" << LL_ENDL; + } + else + { + mPipelinedSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); + } + } + // Register signals for settings and state changes for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { + const EAppPolicy app_policy(static_cast<EAppPolicy>(i)); + if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey)) { LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(init_data[i].mKey); @@ -209,7 +233,7 @@ void LLAppCoreHttp::init() } else { - mSettingsSignal[i] = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); + mHttpClasses[app_policy].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed)); } } } @@ -261,10 +285,11 @@ void LLAppCoreHttp::cleanup() } } - for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) + for (int i(0); i < LL_ARRAY_SIZE(mHttpClasses); ++i) { - mSettingsSignal[i].disconnect(); + mHttpClasses[i].mSettingsSignal.disconnect(); } + mPipelinedSignal.disconnect(); delete mRequest; mRequest = NULL; @@ -278,30 +303,84 @@ void LLAppCoreHttp::cleanup() } } + void LLAppCoreHttp::refreshSettings(bool initial) { LLCore::HttpStatus status; + + // Global pipelining setting + bool pipeline_changed(false); + static const std::string http_pipelining("HttpPipelining"); + if (gSavedSettings.controlExists(http_pipelining)) + { + // Default to true (in ctor) if absent. + bool pipelined(gSavedSettings.getBOOL(http_pipelining)); + if (pipelined != mPipelined) + { + mPipelined = pipelined; + pipeline_changed = true; + } + } for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i) { - const EAppPolicy policy(init_data[i].mPolicy); + const EAppPolicy app_policy(static_cast<EAppPolicy>(i)); - // Set any desired throttle - if (initial && init_data[i].mRate) + if (initial) { - // Init-time only, can use the static setters here - status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE, - mPolicies[policy], - init_data[i].mRate, - NULL); - if (! 
status) + // Init-time only settings, can use the static setters here + + if (init_data[i].mRate) { - LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage - << " throttle rate. Reason: " << status.toString() - << LL_ENDL; + // Set any desired throttle + status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_THROTTLE_RATE, + mHttpClasses[app_policy].mPolicy, + init_data[i].mRate, + NULL); + if (! status) + { + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " throttle rate. Reason: " << status.toString() + << LL_ENDL; + } } + } + // Init- or run-time settings. Must use the queued request API. + + // Pipelining changes + if (initial || pipeline_changed) + { + const bool to_pipeline(mPipelined && init_data[i].mPipelined); + if (to_pipeline != mHttpClasses[app_policy].mPipelined) + { + // Pipeline election changing, set dynamic option via request + + LLCore::HttpHandle handle; + const long new_depth(to_pipeline ? PIPELINING_DEPTH : 0); + + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH, + mHttpClasses[app_policy].mPolicy, + new_depth, + NULL); + if (LLCORE_HTTP_HANDLE_INVALID == handle) + { + status = mRequest->getStatus(); + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " pipelining. Reason: " << status.toString() + << LL_ENDL; + } + else + { + LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage + << " pipelining. New value: " << new_depth + << LL_ENDL; + mHttpClasses[app_policy].mPipelined = to_pipeline; + } + } + } + // Get target connection concurrency value U32 setting(init_data[i].mDefault); if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey)) @@ -314,38 +393,61 @@ void LLAppCoreHttp::refreshSettings(bool initial) } } - if (! initial && setting == mSettings[policy]) + if (initial || setting != mHttpClasses[app_policy].mConnLimit || pipeline_changed) { - // Unchanged, try next setting - continue; - } - - // Set it and report - // *TODO: These are intended to be per-host limits when we can - // support that in llcorehttp/libcurl. - LLCore::HttpHandle handle; - handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT, - mPolicies[policy], - setting, NULL); - if (LLCORE_HTTP_HANDLE_INVALID == handle) - { - status = mRequest->getStatus(); - LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage - << " concurrency. Reason: " << status.toString() - << LL_ENDL; - } - else - { - LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage - << " concurrency. New value: " << setting - << LL_ENDL; - mSettings[policy] = setting; - if (initial && setting != init_data[i].mDefault) + // Set it and report. Strategies depend on pipelining: + // + // No Pipelining. Llcorehttp manages connections itself based + // on the PO_CONNECTION_LIMIT setting. Set both limits to the + // same value for logical consistency. In the future, may + // hand over connection management to libcurl after the + // connection cache has been better vetted. + // + // Pipelining. Libcurl is allowed to manage connections to a + // great degree. Steady state will connection limit based on + // the per-host setting. Transitions (region crossings, new + // avatars, etc.) can request additional outbound connections + // to other servers via 2X total connection limit. + // + LLCore::HttpHandle handle; + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT, + mHttpClasses[app_policy].mPolicy, + (mHttpClasses[app_policy].mPipelined ? 
2 * setting : setting), + NULL); + if (LLCORE_HTTP_HANDLE_INVALID == handle) { - LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage - << " concurrency. New value: " << setting + status = mRequest->getStatus(); + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " concurrency. Reason: " << status.toString() << LL_ENDL; } + else + { + handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT, + mHttpClasses[app_policy].mPolicy, + setting, + NULL); + if (LLCORE_HTTP_HANDLE_INVALID == handle) + { + status = mRequest->getStatus(); + LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage + << " per-host concurrency. Reason: " << status.toString() + << LL_ENDL; + } + else + { + LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage + << " concurrency. New value: " << setting + << LL_ENDL; + mHttpClasses[app_policy].mConnLimit = setting; + if (initial && setting != init_data[i].mDefault) + { + LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage + << " concurrency. New value: " << setting + << LL_ENDL; + } + } + } } } } diff --git a/indra/newview/llappcorehttp.h b/indra/newview/llappcorehttp.h index 40e3042b84..9ad4eb4b30 100755 --- a/indra/newview/llappcorehttp.h +++ b/indra/newview/llappcorehttp.h @@ -4,7 +4,7 @@ * * $LicenseInfo:firstyear=2012&license=viewerlgpl$ * Second Life Viewer Source Code - * Copyright (C) 2012-2013, Linden Research, Inc. + * Copyright (C) 2012-2014, Linden Research, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -41,6 +41,8 @@ class LLAppCoreHttp : public LLCore::HttpHandler { public: + static const long PIPELINING_DEPTH; + typedef LLCore::HttpRequest::policy_t policy_t; enum EAppPolicy @@ -70,7 +72,7 @@ public: /// Long poll: no /// Concurrency: high /// Request rate: high - /// Pipelined: soon + /// Pipelined: yes AP_TEXTURE, /// Legacy mesh fetching policy class. Used to @@ -98,7 +100,7 @@ public: /// Long poll: no /// Concurrency: high /// Request rate: high - /// Pipelined: soon + /// Pipelined: yes AP_MESH2, /// Large mesh fetching policy class. Used to @@ -116,7 +118,7 @@ public: /// Long poll: no /// Concurrency: low /// Request rate: low - /// Pipelined: soon + /// Pipelined: no AP_LARGE_MESH, /// Asset upload policy class. Used to store @@ -180,7 +182,13 @@ public: // application function. policy_t getPolicy(EAppPolicy policy) const { - return mPolicies[policy]; + return mHttpClasses[policy].mPolicy; + } + + // Return whether a policy is using pipelined operations. + bool isPipelined(EAppPolicy policy) const + { + return mHttpClasses[policy].mPipelined; } // Apply initial or new settings from the environment. @@ -190,13 +198,27 @@ private: static const F64 MAX_THREAD_WAIT_TIME; private: - LLCore::HttpRequest * mRequest; // Request queue to issue shutdowns + + // PODish container for per-class settings and state. 
+ struct HttpClass + { + public: + HttpClass(); + + public: + policy_t mPolicy; // Policy class id for the class + U32 mConnLimit; + bool mPipelined; + boost::signals2::connection mSettingsSignal; // Signal to global setting that affect this class (if any) + }; + + LLCore::HttpRequest * mRequest; // Request queue to issue shutdowns LLCore::HttpHandle mStopHandle; F64 mStopRequested; bool mStopped; - policy_t mPolicies[AP_COUNT]; // Policy class id for each connection set - U32 mSettings[AP_COUNT]; - boost::signals2::connection mSettingsSignal[AP_COUNT]; // Signals to global settings that affect us + HttpClass mHttpClasses[AP_COUNT]; + bool mPipelined; // Global setting + boost::signals2::connection mPipelinedSignal; // Signal for 'HttpPipelining' setting }; diff --git a/indra/newview/llmeshrepository.cpp b/indra/newview/llmeshrepository.cpp index 8f50555a73..a6707392fe 100755 --- a/indra/newview/llmeshrepository.cpp +++ b/indra/newview/llmeshrepository.cpp @@ -1,4 +1,3 @@ - /** * @file llmeshrepository.cpp * @brief Mesh repository implementation. @@ -338,14 +337,17 @@ static LLFastTimer::DeclareTimer FTM_MESH_FETCH("Mesh Fetch"); LLMeshRepository gMeshRepo; const S32 MESH_HEADER_SIZE = 4096; // Important: assumption is that headers fit in this space + const S32 REQUEST_HIGH_WATER_MIN = 32; // Limits for GetMesh regions const S32 REQUEST_HIGH_WATER_MAX = 150; // Should remain under 2X throttle const S32 REQUEST_LOW_WATER_MIN = 16; const S32 REQUEST_LOW_WATER_MAX = 75; + const S32 REQUEST2_HIGH_WATER_MIN = 32; // Limits for GetMesh2 regions -const S32 REQUEST2_HIGH_WATER_MAX = 80; +const S32 REQUEST2_HIGH_WATER_MAX = 100; const S32 REQUEST2_LOW_WATER_MIN = 16; -const S32 REQUEST2_LOW_WATER_MAX = 40; +const S32 REQUEST2_LOW_WATER_MAX = 50; + const U32 LARGE_MESH_FETCH_THRESHOLD = 1U << 21; // Size at which requests goes to narrow/slow queue const long SMALL_MESH_XFER_TIMEOUT = 120L; // Seconds to complete xfer, small mesh downloads const long LARGE_MESH_XFER_TIMEOUT = 600L; // Seconds to complete xfer, large downloads @@ -518,11 +520,13 @@ class LLMeshHandlerBase : public LLCore::HttpHandler { public: LOG_CLASS(LLMeshHandlerBase); - LLMeshHandlerBase() + LLMeshHandlerBase(U32 offset, U32 requested_bytes) : LLCore::HttpHandler(), mMeshParams(), mProcessed(false), - mHttpHandle(LLCORE_HTTP_HANDLE_INVALID) + mHttpHandle(LLCORE_HTTP_HANDLE_INVALID), + mOffset(offset), + mRequestedBytes(requested_bytes) {} virtual ~LLMeshHandlerBase() @@ -534,13 +538,15 @@ protected: public: virtual void onCompleted(LLCore::HttpHandle handle, LLCore::HttpResponse * response); - virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size) = 0; + virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size) = 0; virtual void processFailure(LLCore::HttpStatus status) = 0; public: LLVolumeParams mMeshParams; bool mProcessed; - LLCore::HttpHandle mHttpHandle; + LLCore::HttpHandle mHttpHandle; + U32 mOffset; + U32 mRequestedBytes; }; @@ -551,8 +557,8 @@ class LLMeshHeaderHandler : public LLMeshHandlerBase { public: LOG_CLASS(LLMeshHeaderHandler); - LLMeshHeaderHandler(const LLVolumeParams & mesh_params) - : LLMeshHandlerBase() + LLMeshHeaderHandler(const LLVolumeParams & mesh_params, U32 offset, U32 requested_bytes) + : LLMeshHandlerBase(offset, requested_bytes) { mMeshParams = mesh_params; LLMeshRepoThread::incActiveHeaderRequests(); @@ -564,7 +570,7 @@ protected: void operator=(const LLMeshHeaderHandler &); // Not defined public: - virtual void 
processData(LLCore::BufferArray * body, U8 * data, S32 data_size); + virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size); virtual void processFailure(LLCore::HttpStatus status); }; @@ -573,17 +579,16 @@ public: // // Thread: repo class LLMeshLODHandler : public LLMeshHandlerBase - { +{ public: + LOG_CLASS(LLMeshLODHandler); LLMeshLODHandler(const LLVolumeParams & mesh_params, S32 lod, U32 offset, U32 requested_bytes) - : LLMeshHandlerBase(), - mLOD(lod), - mRequestedBytes(requested_bytes), - mOffset(offset) + : LLMeshHandlerBase(offset, requested_bytes), + mLOD(lod) { - mMeshParams = mesh_params; - LLMeshRepoThread::incActiveLODRequests(); - } + mMeshParams = mesh_params; + LLMeshRepoThread::incActiveLODRequests(); + } virtual ~LLMeshLODHandler(); protected: @@ -591,13 +596,11 @@ protected: void operator=(const LLMeshLODHandler &); // Not defined public: - virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size); + virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size); virtual void processFailure(LLCore::HttpStatus status); public: S32 mLOD; - U32 mRequestedBytes; - U32 mOffset; }; @@ -605,14 +608,12 @@ public: // // Thread: repo class LLMeshSkinInfoHandler : public LLMeshHandlerBase - { +{ public: LOG_CLASS(LLMeshSkinInfoHandler); - LLMeshSkinInfoHandler(const LLUUID& id, U32 offset, U32 size) - : LLMeshHandlerBase(), - mMeshID(id), - mRequestedBytes(size), - mOffset(offset) + LLMeshSkinInfoHandler(const LLUUID& id, U32 offset, U32 requested_bytes) + : LLMeshHandlerBase(offset, requested_bytes), + mMeshID(id) {} virtual ~LLMeshSkinInfoHandler(); @@ -621,13 +622,11 @@ protected: void operator=(const LLMeshSkinInfoHandler &); // Not defined public: - virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size); + virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size); virtual void processFailure(LLCore::HttpStatus status); public: LLUUID mMeshID; - U32 mRequestedBytes; - U32 mOffset; }; @@ -635,14 +634,12 @@ public: // // Thread: repo class LLMeshDecompositionHandler : public LLMeshHandlerBase - { +{ public: LOG_CLASS(LLMeshDecompositionHandler); - LLMeshDecompositionHandler(const LLUUID& id, U32 offset, U32 size) - : LLMeshHandlerBase(), - mMeshID(id), - mRequestedBytes(size), - mOffset(offset) + LLMeshDecompositionHandler(const LLUUID& id, U32 offset, U32 requested_bytes) + : LLMeshHandlerBase(offset, requested_bytes), + mMeshID(id) {} virtual ~LLMeshDecompositionHandler(); @@ -651,13 +648,11 @@ protected: void operator=(const LLMeshDecompositionHandler &); // Not defined public: - virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size); + virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size); virtual void processFailure(LLCore::HttpStatus status); public: LLUUID mMeshID; - U32 mRequestedBytes; - U32 mOffset; }; @@ -665,14 +660,12 @@ public: // // Thread: repo class LLMeshPhysicsShapeHandler : public LLMeshHandlerBase - { +{ public: LOG_CLASS(LLMeshPhysicsShapeHandler); - LLMeshPhysicsShapeHandler(const LLUUID& id, U32 offset, U32 size) - : LLMeshHandlerBase(), - mMeshID(id), - mRequestedBytes(size), - mOffset(offset) + LLMeshPhysicsShapeHandler(const LLUUID& id, U32 offset, U32 requested_bytes) + : LLMeshHandlerBase(offset, requested_bytes), + mMeshID(id) {} virtual ~LLMeshPhysicsShapeHandler(); @@ -681,13 +674,11 @@ protected: void operator=(const 
LLMeshPhysicsShapeHandler &); // Not defined public: - virtual void processData(LLCore::BufferArray * body, U8 * data, S32 data_size); + virtual void processData(LLCore::BufferArray * body, S32 body_offset, U8 * data, S32 data_size); virtual void processFailure(LLCore::HttpStatus status); public: LLUUID mMeshID; - U32 mRequestedBytes; - U32 mOffset; }; @@ -713,8 +704,8 @@ void log_upload_error(LLCore::HttpStatus status, const LLSD& content, LL_WARNS(LOG_MESH) << "error: " << err << LL_ENDL; LL_WARNS(LOG_MESH) << " mesh upload failed, stage '" << stage << "', error '" << err["error"].asString() - << "', message '" << err["message"].asString() - << "', id '" << err["identifier"].asString() + << "', message '" << err["message"].asString() + << "', id '" << err["identifier"].asString() << "'" << LL_ENDL; if (err.has("errors")) { @@ -754,7 +745,9 @@ LLMeshRepoThread::LLMeshRepoThread() mHttpLargePolicyClass(LLCore::HttpRequest::DEFAULT_POLICY_ID), mHttpPriority(0), mGetMeshVersion(2) - { +{ + LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp()); + mMutex = new LLMutex(NULL); mHeaderMutex = new LLMutex(NULL); mSignal = new LLCondition(NULL); @@ -767,14 +760,14 @@ LLMeshRepoThread::LLMeshRepoThread() mHttpLargeOptions->setUseRetryAfter(gSavedSettings.getBOOL("MeshUseHttpRetryAfter")); mHttpHeaders = new LLCore::HttpHeaders; mHttpHeaders->append("Accept", "application/vnd.ll.mesh"); - mHttpPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_MESH2); - mHttpLegacyPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_MESH1); - mHttpLargePolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_LARGE_MESH); - } + mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_MESH2); + mHttpLegacyPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_MESH1); + mHttpLargePolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_LARGE_MESH); +} LLMeshRepoThread::~LLMeshRepoThread() - { +{ LL_INFOS(LOG_MESH) << "Small GETs issued: " << LLMeshRepository::sHTTPRequestCount << ", Large GETs issued: " << LLMeshRepository::sHTTPLargeRequestCount << ", Max Lock Holdoffs: " << LLMeshRepository::sMaxLockHoldoffs @@ -785,23 +778,23 @@ LLMeshRepoThread::~LLMeshRepoThread() ++iter) { delete *iter; - } + } mHttpRequestSet.clear(); if (mHttpHeaders) - { + { mHttpHeaders->release(); mHttpHeaders = NULL; - } + } if (mHttpOptions) - { + { mHttpOptions->release(); mHttpOptions = NULL; - } + } if (mHttpLargeOptions) -{ + { mHttpLargeOptions->release(); mHttpLargeOptions = NULL; -} + } delete mHttpRequest; mHttpRequest = NULL; delete mMutex; @@ -846,48 +839,49 @@ void LLMeshRepoThread::run() { // Dispatch all HttpHandler notifications mHttpRequest->update(0L); - } + } sRequestWaterLevel = mHttpRequestSet.size(); // Stats data update // NOTE: order of queue processing intentionally favors LOD requests over header requests while (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater) - { + { if (! 
mMutex) - { + { break; } - mMutex->lock(); - LODRequest req = mLODReqQ.front(); - mLODReqQ.pop(); - LLMeshRepository::sLODProcessing--; - mMutex->unlock(); + mMutex->lock(); + LODRequest req = mLODReqQ.front(); + mLODReqQ.pop(); + LLMeshRepository::sLODProcessing--; + mMutex->unlock(); + if (!fetchMeshLOD(req.mMeshParams, req.mLOD)) // failed, resubmit - { - mMutex->lock(); - mLODReqQ.push(req); + { + mMutex->lock(); + mLODReqQ.push(req); ++LLMeshRepository::sLODProcessing; - mMutex->unlock(); - } - } + mMutex->unlock(); + } + } while (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater) - { + { if (! mMutex) - { + { break; } - mMutex->lock(); - HeaderRequest req = mHeaderReqQ.front(); - mHeaderReqQ.pop(); - mMutex->unlock(); + mMutex->lock(); + HeaderRequest req = mHeaderReqQ.front(); + mHeaderReqQ.pop(); + mMutex->unlock(); if (!fetchMeshHeader(req.mMeshParams))//failed, resubmit - { - mMutex->lock(); - mHeaderReqQ.push(req) ; - mMutex->unlock(); - } - } + { + mMutex->lock(); + mHeaderReqQ.push(req) ; + mMutex->unlock(); + } + } // For the final three request lists, similar goal to above but // slightly different queue structures. Stay off the mutex when @@ -983,7 +977,7 @@ void LLMeshRepoThread::run() } } mMutex->unlock(); - } + } // For dev purposes only. A dynamic change could make this false // and that shouldn't assert. @@ -1131,6 +1125,9 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c size_t offset, size_t len, LLCore::HttpHandler * handler) { + // Also used in lltexturefetch.cpp + static LLCachedControl<bool> disable_range_req(gSavedSettings, "HttpRangeRequestsDisable", false); + LLCore::HttpHandle handle(LLCORE_HTTP_HANDLE_INVALID); if (len < LARGE_MESH_FETCH_THRESHOLD) @@ -1140,8 +1137,8 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c : mHttpLegacyPolicyClass), mHttpPriority, url, - offset, - len, + (disable_range_req ? size_t(0) : offset), + (disable_range_req ? size_t(0) : len), mHttpOptions, mHttpHeaders, handler); @@ -1155,8 +1152,8 @@ LLCore::HttpHandle LLMeshRepoThread::getByteRange(const std::string & url, int c handle = mHttpRequest->requestGetByteRange(mHttpLargePolicyClass, mHttpPriority, url, - offset, - len, + (disable_range_req ? size_t(0) : offset), + (disable_range_req ? size_t(0) : len), mHttpLargeOptions, mHttpHeaders, handler); @@ -1250,7 +1247,6 @@ bool LLMeshRepoThread::fetchMeshSkinInfo(const LLUUID& mesh_id) << LL_ENDL; delete handler; ret = false; - } else { @@ -1527,7 +1523,7 @@ bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params) //within the first 4KB //NOTE -- this will break of headers ever exceed 4KB - LLMeshHeaderHandler * handler = new LLMeshHeaderHandler(mesh_params); + LLMeshHeaderHandler * handler = new LLMeshHeaderHandler(mesh_params, 0, MESH_HEADER_SIZE); LLCore::HttpHandle handle = getByteRange(http_url, cap_version, 0, MESH_HEADER_SIZE, handler); if (LLCORE_HTTP_HANDLE_INVALID == handle) { @@ -1860,7 +1856,7 @@ LLMeshUploadThread::LLMeshUploadThread(LLMeshUploadThread::instance_list& data, bool upload_skin, bool upload_joints, const std::string & upload_url, bool do_upload, LLHandle<LLWholeModelFeeObserver> fee_observer, LLHandle<LLWholeModelUploadObserver> upload_observer) -: LLThread("mesh upload"), + : LLThread("mesh upload"), LLCore::HttpHandler(), mDiscarded(false), mDoUpload(do_upload), @@ -2271,7 +2267,7 @@ void LLMeshUploadThread::doWholeModelUpload() mHttpRequest->update(0); while (! LLApp::isQuitting() && ! finished() && ! 
isDiscarded()) - { + { ms_sleep(sleep_time); sleep_time = llmin(250U, sleep_time + sleep_time); mHttpRequest->update(0); @@ -2287,7 +2283,7 @@ void LLMeshUploadThread::doWholeModelUpload() } } } - } +} void LLMeshUploadThread::requestWholeModelFee() { @@ -2318,7 +2314,7 @@ void LLMeshUploadThread::requestWholeModelFee() LL_WARNS(LOG_MESH) << "Couldn't issue request for model fee. Reason: " << mHttpStatus.toString() << " (" << mHttpStatus.toTerseString() << ")" << LL_ENDL; - } + } else { U32 sleep_time(10); @@ -2335,7 +2331,7 @@ void LLMeshUploadThread::requestWholeModelFee() LL_DEBUGS(LOG_MESH) << "Mesh fee query operation discarded." << LL_ENDL; } } - } +} // Does completion duty for both fee queries and actual uploads. @@ -2388,12 +2384,12 @@ void LLMeshUploadThread::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResp { LLCore::BufferArrayStream bas(ba); LLSDSerialize::fromXML(body, bas); -} + } } dump_llsd_to_file(body, make_dump_name("whole_model_upload_response_", dump_num)); if (body["state"].asString() == "complete") -{ + { // requested "mesh" asset type isn't actually the type // of the resultant object, fix it up here. mModelData["asset_type"] = "object"; @@ -2446,18 +2442,18 @@ void LLMeshUploadThread::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResp body = llsd_from_file("fake_upload_error.xml"); } else - { + { LLCore::BufferArray * ba(response->getBody()); if (ba && ba->size()) - { + { LLCore::BufferArrayStream bas(ba); LLSDSerialize::fromXML(body, bas); - } - } + } + } dump_llsd_to_file(body, make_dump_name("whole_model_fee_response_", dump_num)); if (body["state"].asString() == "upload") - { + { mWholeModelUploadURL = body["uploader"].asString(); if (observer) @@ -2543,18 +2539,18 @@ void LLMeshRepoThread::notifyLoadedMeshes() skin_info_q.swap(mSkinInfoQ); } if (! mDecompositionQ.empty()) - { + { decomp_q.swap(mDecompositionQ); - } + } mMutex->unlock(); // Process the elements free of the lock while (! skin_info_q.empty()) - { + { gMeshRepo.notifySkinInfoReceived(skin_info_q.front()); skin_info_q.pop_front(); - } + } while (! decomp_q.empty()) { @@ -2648,6 +2644,17 @@ void LLMeshRepository::cacheOutgoingMesh(LLMeshUploadData& data, LLSD& header) } +// Handle failed or successful requests for mesh assets. +// +// Support for 200 responses was added for several reasons. One, +// a service or cache can ignore range headers and give us a +// 200 with full asset should it elect to. We also support +// a debug flag which disables range requests for those very +// few users that have some sort of problem with their networking +// services. But the 200 response handling is suboptimal: rather +// than cache the whole asset, we just extract the part that would +// have been sent in a 206 and process that. Inefficient but these +// are cases far off the norm. void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpResponse * response) { mProcessed = true; @@ -2676,35 +2683,78 @@ void LLMeshHandlerBase::onCompleted(LLCore::HttpHandle handle, LLCore::HttpRespo // rather than partial) and 416 (request completely unsatisfyable). // Always been exposed to these but are less likely here where // speculative loads aren't done. - static const LLCore::HttpStatus par_status(HTTP_PARTIAL_CONTENT); + LLCore::BufferArray * body(response->getBody()); + S32 body_offset(0); + U8 * data(NULL); + S32 data_size(body ? 
body->size() : 0); - if (par_status != status) + if (data_size > 0) { - LL_WARNS_ONCE(LOG_MESH) << "Non-206 successful status received for fetch: " - << status.toTerseString() << LL_ENDL; - } + static const LLCore::HttpStatus par_status(HTTP_PARTIAL_CONTENT); + + unsigned int offset(0), length(0), full_length(0); + + if (par_status == status) + { + // 206 case + response->getRange(&offset, &length, &full_length); + if (! offset && ! length) + { + // This is the case where we receive a 206 status but + // there wasn't a useful Content-Range header in the response. + // This could be because it was badly formatted but is more + // likely due to capabilities services which scrub headers + // from responses. Assume we got what we asked for...` + // length = data_size; + offset = mOffset; + } + } + else + { + // 200 case, typically + offset = 0; + } - LLCore::BufferArray * body(response->getBody()); - S32 data_size(body ? body->size() : 0); - U8 * data(NULL); + // *DEBUG: To test validation below + // offset += 1; - if (data_size > 0) - { + // Validate that what we think we received is consistent with + // what we've asked for. I.e. first byte we wanted lies somewhere + // in the response. + if (offset > mOffset + || (offset + data_size) <= mOffset + || (mOffset - offset) >= data_size) + { + // No overlap with requested range. Fail request with + // suitable error. Shouldn't happen unless server/cache/ISP + // is doing something awful. + LL_WARNS(LOG_MESH) << "Mesh response (bytes [" + << offset << ".." << (offset + length - 1) + << "]) didn't overlap with request's origin (bytes [" + << mOffset << ".." << (mOffset + mRequestedBytes - 1) + << "])." << LL_ENDL; + processFailure(LLCore::HttpStatus(LLCore::HttpStatus::LLCORE, LLCore::HE_INV_CONTENT_RANGE_HDR)); + ++LLMeshRepository::sHTTPErrorCount; + goto common_exit; + } + // *TODO: Try to get rid of data copying and add interfaces // that support BufferArray directly. Introduce a two-phase // handler, optional first that takes a body, fallback second // that requires a temporary allocation and data copy. - data = new U8[data_size]; - body->read(0, (char *) data, data_size); + body_offset = mOffset - offset; + data = new U8[data_size - body_offset]; + body->read(body_offset, (char *) data, data_size - body_offset); LLMeshRepository::sBytesReceived += data_size; } - processData(body, data, data_size); + processData(body, body_offset, data, data_size - body_offset); delete [] data; } // Release handler +common_exit: gMeshRepo.mThread->mHttpRequestSet.erase(this); delete this; // Must be last statement } @@ -2739,9 +2789,10 @@ void LLMeshHeaderHandler::processFailure(LLCore::HttpStatus status) { gMeshRepo.mThread->mUnavailableQ.push(LLMeshRepoThread::LODRequest(mMeshParams, i)); } - } +} -void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size) +void LLMeshHeaderHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */, + U8 * data, S32 data_size) { LLUUID mesh_id = mMeshParams.getSculptID(); bool success = (! 
MESH_HEADER_PROCESS_FAILED) && gMeshRepo.mThread->headerReceived(mMeshParams, data, data_size); @@ -2756,12 +2807,12 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32 // Can't get the header so none of the LODs will be available LLMutexLock lock(gMeshRepo.mThread->mMutex); for (int i(0); i < 4; ++i) - { + { gMeshRepo.mThread->mUnavailableQ.push(LLMeshRepoThread::LODRequest(mMeshParams, i)); - } } + } else if (data && data_size > 0) - { + { // header was successfully retrieved from sim, cache in vfs LLSD header = gMeshRepo.mThread->mMeshHeader[mesh_id]; @@ -2774,11 +2825,11 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32 S32 lod_bytes = 0; for (U32 i = 0; i < LLModel::LOD_PHYSICS; ++i) - { + { // figure out how many bytes we'll need to reserve in the file const std::string & lod_name = header_lod[i]; lod_bytes = llmax(lod_bytes, header[lod_name]["offset"].asInteger()+header[lod_name]["size"].asInteger()); - } + } // just in case skin info or decomposition is at the end of the file (which it shouldn't be) lod_bytes = llmax(lod_bytes, header["skin"]["offset"].asInteger() + header["skin"]["size"].asInteger()); @@ -2794,7 +2845,7 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32 LLVFile file(gVFS, mesh_id, LLAssetType::AT_MESH, LLVFile::WRITE); if (file.getMaxSize() >= bytes || file.setMaxSize(bytes)) - { + { LLMeshRepository::sCacheBytesWritten += data_size; ++LLMeshRepository::sCacheWrites; @@ -2805,19 +2856,19 @@ void LLMeshHeaderHandler::processData(LLCore::BufferArray * body, U8 * data, S32 memset(block, 0, sizeof(block)); while (bytes-file.tell() > sizeof(block)) - { + { file.write(block, sizeof(block)); - } + } S32 remaining = bytes-file.tell(); if (remaining > 0) - { + { file.write(block, remaining); } } } } - } +} LLMeshLODHandler::~LLMeshLODHandler() { @@ -2843,8 +2894,9 @@ void LLMeshLODHandler::processFailure(LLCore::HttpStatus status) gMeshRepo.mThread->mUnavailableQ.push(LLMeshRepoThread::LODRequest(mMeshParams, mLOD)); } -void LLMeshLODHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size) - { +void LLMeshLODHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */, + U8 * data, S32 data_size) +{ if ((! MESH_LOD_PROCESS_FAILED) && gMeshRepo.mThread->lodReceived(mMeshParams, mLOD, data, data_size)) { //good fetch from sim, write to VFS for caching @@ -2860,7 +2912,7 @@ void LLMeshLODHandler::processData(LLCore::BufferArray * body, U8 * data, S32 da LLMeshRepository::sCacheBytesWritten += size; ++LLMeshRepository::sCacheWrites; } - } + } else { LL_WARNS(LOG_MESH) << "Error during mesh LOD processing. ID: " << mMeshParams.getSculptID() @@ -2872,12 +2924,12 @@ void LLMeshLODHandler::processData(LLCore::BufferArray * body, U8 * data, S32 da } LLMeshSkinInfoHandler::~LLMeshSkinInfoHandler() - { - llassert(mProcessed); - } +{ + llassert(mProcessed); +} void LLMeshSkinInfoHandler::processFailure(LLCore::HttpStatus status) - { +{ LL_WARNS(LOG_MESH) << "Error during mesh skin info handling. ID: " << mMeshID << ", Reason: " << status.toString() << " (" << status.toTerseString() << "). Not retrying." @@ -2885,10 +2937,11 @@ void LLMeshSkinInfoHandler::processFailure(LLCore::HttpStatus status) // *TODO: Mark mesh unavailable on error. For now, simply leave // request unfulfilled rather than retry forever. 
- } +} -void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size) - { +void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */, + U8 * data, S32 data_size) +{ if ((! MESH_SKIN_INFO_PROCESS_FAILED) && gMeshRepo.mThread->skinInfoReceived(mMeshID, data, data_size)) { //good fetch from sim, write to VFS for caching @@ -2916,20 +2969,21 @@ void LLMeshSkinInfoHandler::processData(LLCore::BufferArray * body, U8 * data, S LLMeshDecompositionHandler::~LLMeshDecompositionHandler() { - llassert(mProcessed); + llassert(mProcessed); } void LLMeshDecompositionHandler::processFailure(LLCore::HttpStatus status) - { +{ LL_WARNS(LOG_MESH) << "Error during mesh decomposition handling. ID: " << mMeshID << ", Reason: " << status.toString() << " (" << status.toTerseString() << "). Not retrying." << LL_ENDL; // *TODO: Mark mesh unavailable on error. For now, simply leave // request unfulfilled rather than retry forever. - } +} -void LLMeshDecompositionHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size) +void LLMeshDecompositionHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */, + U8 * data, S32 data_size) { if ((! MESH_DECOMP_PROCESS_FAILED) && gMeshRepo.mThread->decompositionReceived(mMeshID, data, data_size)) { @@ -2946,34 +3000,35 @@ void LLMeshDecompositionHandler::processData(LLCore::BufferArray * body, U8 * da file.seek(offset); file.write(data, size); } - } - else - { + } + else + { LL_WARNS(LOG_MESH) << "Error during mesh decomposition processing. ID: " << mMeshID << ", Unknown reason. Not retrying." << LL_ENDL; // *TODO: Mark mesh unavailable on error - } } +} LLMeshPhysicsShapeHandler::~LLMeshPhysicsShapeHandler() - { - llassert(mProcessed); - } +{ + llassert(mProcessed); +} void LLMeshPhysicsShapeHandler::processFailure(LLCore::HttpStatus status) - { +{ LL_WARNS(LOG_MESH) << "Error during mesh physics shape handling. ID: " << mMeshID << ", Reason: " << status.toString() << " (" << status.toTerseString() << "). Not retrying." << LL_ENDL; // *TODO: Mark mesh unavailable on error - } +} -void LLMeshPhysicsShapeHandler::processData(LLCore::BufferArray * body, U8 * data, S32 data_size) - { +void LLMeshPhysicsShapeHandler::processData(LLCore::BufferArray * /* body */, S32 /* body_offset */, + U8 * data, S32 data_size) +{ if ((! MESH_PHYS_SHAPE_PROCESS_FAILED) && gMeshRepo.mThread->physicsShapeReceived(mMeshID, data, data_size)) - { + { // good fetch from sim, write to VFS for caching LLVFile file(gVFS, mMeshID, LLAssetType::AT_MESH, LLVFile::WRITE); @@ -2981,13 +3036,13 @@ void LLMeshPhysicsShapeHandler::processData(LLCore::BufferArray * body, U8 * dat S32 size = mRequestedBytes; if (file.getSize() >= offset+size) - { + { LLMeshRepository::sCacheBytesWritten += size; ++LLMeshRepository::sCacheWrites; file.seek(offset); file.write(data, size); - } } + } else { LL_WARNS(LOG_MESH) << "Error during mesh physics shape processing. 
ID: " << mMeshID @@ -3187,7 +3242,7 @@ void LLMeshRepository::notifyLoadedMeshes() if (1 == mGetMeshVersion) { // Legacy GetMesh operation with high connection concurrency - LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("MeshMaxConcurrentRequests"); + LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("MeshMaxConcurrentRequests"); LLMeshRepoThread::sRequestHighWater = llclamp(2 * S32(LLMeshRepoThread::sMaxConcurrentRequests), REQUEST_HIGH_WATER_MIN, REQUEST_HIGH_WATER_MAX); @@ -3198,9 +3253,15 @@ void LLMeshRepository::notifyLoadedMeshes() else { // GetMesh2 operation with keepalives, etc. With pipelining, - // we'll increase this. + // we'll increase this. See llappcorehttp and llcorehttp for + // discussion on connection strategies. + LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp()); + S32 scale(app_core_http.isPipelined(LLAppCoreHttp::AP_MESH2) + ? (2 * LLAppCoreHttp::PIPELINING_DEPTH) + : 5); + LLMeshRepoThread::sMaxConcurrentRequests = gSavedSettings.getU32("Mesh2MaxConcurrentRequests"); - LLMeshRepoThread::sRequestHighWater = llclamp(5 * S32(LLMeshRepoThread::sMaxConcurrentRequests), + LLMeshRepoThread::sRequestHighWater = llclamp(scale * S32(LLMeshRepoThread::sMaxConcurrentRequests), REQUEST2_HIGH_WATER_MIN, REQUEST2_HIGH_WATER_MAX); LLMeshRepoThread::sRequestLowWater = llclamp(LLMeshRepoThread::sRequestHighWater / 2, @@ -3300,18 +3361,18 @@ void LLMeshRepository::notifyLoadedMeshes() // If we can't get the locks, skip and pick this up later. ++hold_offs; sMaxLockHoldoffs = llmax(sMaxLockHoldoffs, hold_offs); - return; - } + return; + } hold_offs = 0; if (gAgent.getRegion()) { // Update capability urls - static std::string region_name("never name a region this"); + static std::string region_name("never name a region this"); - if (gAgent.getRegion()->getName() != region_name && gAgent.getRegion()->capabilitiesReceived()) - { - region_name = gAgent.getRegion()->getName(); + if (gAgent.getRegion()->getName() != region_name && gAgent.getRegion()->capabilitiesReceived()) + { + region_name = gAgent.getRegion()->getName(); const bool use_v1(gSavedSettings.getBOOL("MeshUseGetMesh1")); const std::string mesh1(gAgent.getRegion()->getCapability("GetMesh")); const std::string mesh2(gAgent.getRegion()->getCapability("GetMesh2")); @@ -3322,8 +3383,8 @@ void LLMeshRepository::notifyLoadedMeshes() << ", GetMesh: " << mesh1 << ", using version: " << mGetMeshVersion << LL_ENDL; + } } - } //popup queued error messages from background threads while (!mUploadErrorQ.empty()) @@ -3338,46 +3399,46 @@ void LLMeshRepository::notifyLoadedMeshes() S32 push_count = LLMeshRepoThread::sRequestHighWater - active_count; if (mPendingRequests.size() > push_count) - { + { // More requests than the high-water limit allows so // sort and forward the most important. 
- //calculate "score" for pending requests + //calculate "score" for pending requests - //create score map - std::map<LLUUID, F32> score_map; + //create score map + std::map<LLUUID, F32> score_map; - for (U32 i = 0; i < 4; ++i) - { - for (mesh_load_map::iterator iter = mLoadingMeshes[i].begin(); iter != mLoadingMeshes[i].end(); ++iter) + for (U32 i = 0; i < 4; ++i) { - F32 max_score = 0.f; - for (std::set<LLUUID>::iterator obj_iter = iter->second.begin(); obj_iter != iter->second.end(); ++obj_iter) + for (mesh_load_map::iterator iter = mLoadingMeshes[i].begin(); iter != mLoadingMeshes[i].end(); ++iter) { - LLViewerObject* object = gObjectList.findObject(*obj_iter); - - if (object) + F32 max_score = 0.f; + for (std::set<LLUUID>::iterator obj_iter = iter->second.begin(); obj_iter != iter->second.end(); ++obj_iter) { - LLDrawable* drawable = object->mDrawable; - if (drawable) + LLViewerObject* object = gObjectList.findObject(*obj_iter); + + if (object) { - F32 cur_score = drawable->getRadius()/llmax(drawable->mDistanceWRTCamera, 1.f); - max_score = llmax(max_score, cur_score); + LLDrawable* drawable = object->mDrawable; + if (drawable) + { + F32 cur_score = drawable->getRadius()/llmax(drawable->mDistanceWRTCamera, 1.f); + max_score = llmax(max_score, cur_score); + } } } - } - score_map[iter->first.getSculptID()] = max_score; + score_map[iter->first.getSculptID()] = max_score; + } } - } - //set "score" for pending requests - for (std::vector<LLMeshRepoThread::LODRequest>::iterator iter = mPendingRequests.begin(); iter != mPendingRequests.end(); ++iter) - { - iter->mScore = score_map[iter->mMeshParams.getSculptID()]; - } + //set "score" for pending requests + for (std::vector<LLMeshRepoThread::LODRequest>::iterator iter = mPendingRequests.begin(); iter != mPendingRequests.end(); ++iter) + { + iter->mScore = score_map[iter->mMeshParams.getSculptID()]; + } - //sort by "score" + //sort by "score" std::partial_sort(mPendingRequests.begin(), mPendingRequests.begin() + push_count, mPendingRequests.end(), LLMeshRepoThread::CompareScoreGreater()); } @@ -3588,7 +3649,6 @@ void LLMeshRepository::fetchPhysicsShape(const LLUUID& mesh_id) } } } - } LLModel::Decomposition* LLMeshRepository::getDecomposition(const LLUUID& mesh_id) diff --git a/indra/newview/lltexturefetch.cpp b/indra/newview/lltexturefetch.cpp index d9a874be49..a64a6ee091 100755 --- a/indra/newview/lltexturefetch.cpp +++ b/indra/newview/lltexturefetch.cpp @@ -241,8 +241,10 @@ LLTrace::EventStatHandle<F64Milliseconds > LLTextureFetch::sCacheReadLatency("te // Tuning/Parameterization Constants -static const S32 HTTP_REQUESTS_IN_QUEUE_HIGH_WATER = 40; // Maximum requests to have active in HTTP -static const S32 HTTP_REQUESTS_IN_QUEUE_LOW_WATER = 20; // Active level at which to refill +static const S32 HTTP_PIPE_REQUESTS_HIGH_WATER = 100; // Maximum requests to have active in HTTP (pipelined) +static const S32 HTTP_PIPE_REQUESTS_LOW_WATER = 50; // Active level at which to refill +static const S32 HTTP_NONPIPE_REQUESTS_HIGH_WATER = 40; +static const S32 HTTP_NONPIPE_REQUESTS_LOW_WATER = 20; // BUG-3323/SH-4375 // *NOTE: This is a heuristic value. Texture fetches have a habit of using a @@ -481,12 +483,12 @@ private: bool acquireHttpSemaphore() { llassert(! 
mHttpHasResource); - if (mFetcher->mHttpSemaphore <= 0) + if (mFetcher->mHttpSemaphore >= mFetcher->mHttpHighWater) { return false; } mHttpHasResource = true; - mFetcher->mHttpSemaphore--; + mFetcher->mHttpSemaphore++; return true; } @@ -496,7 +498,8 @@ private: { llassert(mHttpHasResource); mHttpHasResource = false; - mFetcher->mHttpSemaphore++; + mFetcher->mHttpSemaphore--; + llassert_always(mFetcher->mHttpSemaphore >= 0); } private: @@ -608,16 +611,16 @@ private: LLCore::HttpHandle mHttpHandle; // Handle of any active request LLCore::BufferArray * mHttpBufferArray; // Refcounted pointer to response data - S32 mHttpPolicyClass; + S32 mHttpPolicyClass; bool mHttpActive; // Active request to http library - U32 mHttpReplySize, // Actual received data size - mHttpReplyOffset; // Actual received data offset + U32 mHttpReplySize, // Actual received data size + mHttpReplyOffset; // Actual received data offset bool mHttpHasResource; // Counts against Fetcher's mHttpSemaphore // State history - U32 mCacheReadCount, - mCacheWriteCount, - mResourceWaitCount; // Requests entering WAIT_HTTP_RESOURCE2 + U32 mCacheReadCount, + mCacheWriteCount, + mResourceWaitCount; // Requests entering WAIT_HTTP_RESOURCE2 }; ////////////////////////////////////////////////////////////////////////////// @@ -1325,7 +1328,7 @@ bool LLTextureFetchWorker::doWork(S32 param) } } - static LLCachedControl<bool> use_http(gSavedSettings,"ImagePipelineUseHTTP", true); + static LLCachedControl<bool> use_http(gSavedSettings, "ImagePipelineUseHTTP", true); // if (mHost != LLHost::invalid) get_url = false; if ( use_http && mCanUseHTTP && mUrl.empty())//get http url. @@ -1473,6 +1476,9 @@ bool LLTextureFetchWorker::doWork(S32 param) if (mState == SEND_HTTP_REQ) { + // Also used in llmeshrepository + static LLCachedControl<bool> disable_range_req(gSavedSettings, "HttpRangeRequestsDisable", false); + if (! mCanUseHTTP) { releaseHttpSemaphore(); @@ -1528,22 +1534,47 @@ bool LLTextureFetchWorker::doWork(S32 param) mRequestedOffset -= 1; mRequestedSize += 1; } - mHttpHandle = LLCORE_HTTP_HANDLE_INVALID; - if (!mUrl.empty()) - { - mRequestedTimer.reset(); - mLoaded = FALSE; - mGetStatus = LLCore::HttpStatus(); - mGetReason.clear(); - LL_DEBUGS(LOG_TXT) << "HTTP GET: " << mID << " Offset: " << mRequestedOffset - << " Bytes: " << mRequestedSize - << " Bandwidth(kbps): " << mFetcher->getTextureBandwidth() << "/" << mFetcher->mMaxBandwidth - << LL_ENDL; - // Will call callbackHttpGet when curl request completes - // Only server bake images use the returned headers currently, for getting retry-after field. - LLCore::HttpOptions *options = (mFTType == FTT_SERVER_BAKE) ? mFetcher->mHttpOptionsWithHeaders: mFetcher->mHttpOptions; + if (mUrl.empty()) + { + // *FIXME: This should not be reachable except it has become + // so after some recent 'work'. Need to track this down + // and illuminate the unenlightened. + LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID + << " on empty URL." 
<< LL_ENDL; + resetFormattedData(); + releaseHttpSemaphore(); + return true; // failed + } + + mRequestedTimer.reset(); + mLoaded = FALSE; + mGetStatus = LLCore::HttpStatus(); + mGetReason.clear(); + LL_DEBUGS(LOG_TXT) << "HTTP GET: " << mID << " Offset: " << mRequestedOffset + << " Bytes: " << mRequestedSize + << " Bandwidth(kbps): " << mFetcher->getTextureBandwidth() << "/" << mFetcher->mMaxBandwidth + << LL_ENDL; + + // Will call callbackHttpGet when curl request completes + // Only server bake images use the returned headers currently, for getting retry-after field. + LLCore::HttpOptions *options = (mFTType == FTT_SERVER_BAKE) ? mFetcher->mHttpOptionsWithHeaders: mFetcher->mHttpOptions; + if (disable_range_req) + { + // 'Range:' requests may be disabled in which case all HTTP + // texture fetches result in full fetches. This can be used + // by people with questionable ISPs or networking gear that + // doesn't handle these well. + mHttpHandle = mFetcher->mHttpRequest->requestGet(mHttpPolicyClass, + mWorkPriority, + mUrl, + options, + mFetcher->mHttpHeaders, + this); + } + else + { mHttpHandle = mFetcher->mHttpRequest->requestGetByteRange(mHttpPolicyClass, mWorkPriority, mUrl, @@ -1557,7 +1588,11 @@ bool LLTextureFetchWorker::doWork(S32 param) } if (LLCORE_HTTP_HANDLE_INVALID == mHttpHandle) { - LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID << LL_ENDL; + LLCore::HttpStatus status(mFetcher->mHttpRequest->getStatus()); + LL_WARNS(LOG_TXT) << "HTTP GET request failed for " << mID + << ", Status: " << status.toTerseString() + << " Reason: '" << status.toString() << "'" + << LL_ENDL; resetFormattedData(); releaseHttpSemaphore(); return true; // failed @@ -1613,10 +1648,6 @@ bool LLTextureFetchWorker::doWork(S32 param) else if (http_service_unavail == mGetStatus) { LL_INFOS_ONCE(LOG_TXT) << "Texture server busy (503): " << mUrl << LL_ENDL; - LL_INFOS(LOG_TXT) << "503: HTTP GET failed for: " << mUrl - << " Status: " << mGetStatus.toHex() - << " Reason: '" << mGetReason << "'" - << LL_ENDL; } else if (http_not_sat == mGetStatus) { @@ -1774,7 +1805,7 @@ bool LLTextureFetchWorker::doWork(S32 param) if (mState == DECODE_IMAGE) { - static LLCachedControl<bool> textures_decode_disabled(gSavedSettings,"TextureDecodeDisabled", false); + static LLCachedControl<bool> textures_decode_disabled(gSavedSettings, "TextureDecodeDisabled", false); setPriority(LLWorkerThread::PRIORITY_LOW | mWorkPriority); // Set priority first since Responder may change it if (textures_decode_disabled) @@ -2485,7 +2516,6 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image mHttpHeaders(NULL), mHttpMetricsHeaders(NULL), mHttpPolicyClass(LLCore::HttpRequest::DEFAULT_POLICY_ID), - mHttpSemaphore(HTTP_REQUESTS_IN_QUEUE_HIGH_WATER), mTotalCacheReadCount(0U), mTotalCacheWriteCount(0U), mTotalResourceWaitCount(0U), @@ -2497,6 +2527,22 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image mMaxBandwidth = gSavedSettings.getF32("ThrottleBandwidthKBPS"); mTextureInfo.setUpLogging(gSavedSettings.getBOOL("LogTextureDownloadsToViewerLog"), gSavedSettings.getBOOL("LogTextureDownloadsToSimulator"), U32Bytes(gSavedSettings.getU32("TextureLoggingThreshold"))); + LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp()); + mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE); + mHttpRequest = new LLCore::HttpRequest; + mHttpOptions = new LLCore::HttpOptions; + mHttpOptionsWithHeaders = new LLCore::HttpOptions; + 
mHttpOptionsWithHeaders->setWantHeaders(true); + mHttpHeaders = new LLCore::HttpHeaders; + mHttpHeaders->append("Accept", "image/x-j2c"); + mHttpMetricsHeaders = new LLCore::HttpHeaders; + mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml"); + mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER; + mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER; + mHttpSemaphore = 0; + + // Conditionally construct debugger object after 'this' is + // fully initialized. LLTextureFetchDebugger::sDebuggerEnabled = gSavedSettings.getBOOL("TextureFetchDebuggerEnabled"); if(LLTextureFetchDebugger::isEnabled()) { @@ -2509,16 +2555,6 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image } mOriginFetchSource = mFetchSource; } - - mHttpRequest = new LLCore::HttpRequest; - mHttpOptions = new LLCore::HttpOptions; - mHttpOptionsWithHeaders = new LLCore::HttpOptions; - mHttpOptionsWithHeaders->setWantHeaders(true); - mHttpHeaders = new LLCore::HttpHeaders; - mHttpHeaders->append("Accept", "image/x-j2c"); - mHttpMetricsHeaders = new LLCore::HttpHeaders; - mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml"); - mHttpPolicyClass = LLAppViewer::instance()->getAppCoreHttp().getPolicy(LLAppCoreHttp::AP_TEXTURE); } LLTextureFetch::~LLTextureFetch() @@ -2990,6 +3026,20 @@ bool LLTextureFetch::runCondition() // Threads: Ttf void LLTextureFetch::commonUpdate() { + // Update low/high water levels based on pipelining. We pick + // up setting eventually, so the semaphore/request level can + // fall outside the [0..HIGH_WATER] range. Expect that. + if (LLAppViewer::instance()->getAppCoreHttp().isPipelined(LLAppCoreHttp::AP_TEXTURE)) + { + mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER; + mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER; + } + else + { + mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER; + mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER; + } + // Release waiters releaseHttpWaiters(); @@ -3651,8 +3701,16 @@ void LLTextureFetch::releaseHttpWaiters() { // Use mHttpSemaphore rather than mHTTPTextureQueue.size() // to avoid a lock. - if (mHttpSemaphore < (HTTP_REQUESTS_IN_QUEUE_HIGH_WATER - HTTP_REQUESTS_IN_QUEUE_LOW_WATER)) + if (mHttpSemaphore >= mHttpLowWater) return; + S32 needed(mHttpHighWater - mHttpSemaphore); + if (needed <= 0) + { + // Would only happen if High/LowWater were changed behind + // our back. In that case, defer fill until usage falls within + // limits. + return; + } // Quickly make a copy of all the LLUIDs. Get off the // mutex as early as possible. @@ -3701,10 +3759,10 @@ void LLTextureFetch::releaseHttpWaiters() tids.clear(); // Sort into priority order, if necessary and only as much as needed - if (tids2.size() > mHttpSemaphore) + if (tids2.size() > needed) { LLTextureFetchWorker::Compare compare; - std::partial_sort(tids2.begin(), tids2.begin() + mHttpSemaphore, tids2.end(), compare); + std::partial_sort(tids2.begin(), tids2.begin() + needed, tids2.end(), compare); } // Release workers up to the high water mark. 
Since we aren't @@ -4544,7 +4602,7 @@ S32 LLTextureFetchDebugger::fillCurlQueue() mNbCurlCompleted = mFetchingHistory.size(); return 0; } - if (mNbCurlRequests > HTTP_REQUESTS_IN_QUEUE_LOW_WATER) + if (mNbCurlRequests > HTTP_NONPIPE_REQUESTS_LOW_WATER) { return mNbCurlRequests; } @@ -4577,7 +4635,7 @@ S32 LLTextureFetchDebugger::fillCurlQueue() mFetchingHistory[i].mHttpHandle = handle; mFetchingHistory[i].mCurlState = FetchEntry::CURL_IN_PROGRESS; mNbCurlRequests++; - if (mNbCurlRequests >= HTTP_REQUESTS_IN_QUEUE_HIGH_WATER) // emulate normal pipeline + if (mNbCurlRequests >= HTTP_NONPIPE_REQUESTS_HIGH_WATER) // emulate normal pipeline { break; } diff --git a/indra/newview/lltexturefetch.h b/indra/newview/lltexturefetch.h index c4da2e8685..89d18e2c67 100755 --- a/indra/newview/lltexturefetch.h +++ b/indra/newview/lltexturefetch.h @@ -356,7 +356,9 @@ private: LLCore::HttpHeaders * mHttpHeaders; // Ttf LLCore::HttpHeaders * mHttpMetricsHeaders; // Ttf LLCore::HttpRequest::policy_t mHttpPolicyClass; // T* - + S32 mHttpHighWater; // Ttf + S32 mHttpLowWater; // Ttf + // We use a resource semaphore to keep HTTP requests in // WAIT_HTTP_RESOURCE2 if there aren't sufficient slots in the // transport. This keeps them near where they can be cheaply @@ -364,7 +366,11 @@ private: // where it's more expensive to get at them. Requests in either // SEND_HTTP_REQ or WAIT_HTTP_REQ charge against the semaphore // and tracking state transitions is critical to liveness. - LLAtomicS32 mHttpSemaphore; // Ttf + Tmain + // + // Originally implemented as a traditional semaphore (heading towards + // zero), it now is an outstanding request count that is allowed to + // exceed the high water level (but not go below zero). + LLAtomicS32 mHttpSemaphore; // Ttf typedef std::set<LLUUID> wait_http_res_queue_t; wait_http_res_queue_t mHttpWaitResource; // Mfnq |
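Note on the range handling introduced in llmeshrepository.cpp: the reworked LLMeshHandlerBase::onCompleted accepts both 206 (partial content) and 200 (full asset) replies, then verifies that the bytes actually returned cover the first byte that was requested before extracting that portion for processing. The standalone sketch below restates that overlap check outside the viewer code base; the function name and the use of std::optional are illustrative stand-ins, not the viewer's API, and the patch's third failure condition is folded into the second (they are equivalent once the first has passed).

#include <cstdint>
#include <cstdio>
#include <optional>

// Given the byte range actually returned by the server (reply_offset,
// reply_size) and the first byte the caller asked for (req_offset),
// return the offset into the reply body where the requested data begins,
// or nothing if the reply does not cover the requested start byte.
std::optional<std::size_t> requested_data_start(std::size_t reply_offset,
                                                std::size_t reply_size,
                                                std::size_t req_offset)
{
    // The reply must start at or before the requested byte...
    if (reply_offset > req_offset)
        return std::nullopt;
    // ...and must extend past it (at least one usable byte).
    if (reply_offset + reply_size <= req_offset)
        return std::nullopt;
    return req_offset - reply_offset;   // bytes of the reply to skip
}

int main()
{
    // 206 reply starting exactly where we asked: use the whole body.
    std::printf("%zu\n", *requested_data_start(4096, 1000, 4096));        // 0
    // 200 reply carrying the full asset: skip the first 4096 bytes.
    std::printf("%zu\n", *requested_data_start(0, 8192, 4096));           // 4096
    // Reply that ends before the requested range: no overlap, fail.
    std::printf("%d\n", requested_data_start(0, 100, 4096).has_value());  // 0
    return 0;
}

When a cache, or the new HttpRangeRequestsDisable setting, causes a full 200 response, the computed offset simply skips the leading bytes so that only the originally requested span is processed, matching the patch comment about extracting the 206-equivalent portion rather than caching the whole asset.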
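Note on the concurrency scaling in llmeshrepository.cpp and lltexturefetch.cpp: when a policy class is pipelined, the patch doubles the total connection limit relative to the per-host limit and raises the request high-water marks (100/50 for textures instead of 40/20, and a scale of 2 * PIPELINING_DEPTH instead of 5 for GetMesh2). A minimal sketch of the GetMesh2 high-water calculation, assuming the constants shown in the patch; the identifiers here are shortened stand-ins, not the viewer's own:

#include <algorithm>
#include <cstdio>

// Constants mirroring the values in the patch (illustrative names only).
constexpr int kPipeliningDepth   = 5;    // LLAppCoreHttp::PIPELINING_DEPTH
constexpr int kMesh2HighWaterMin = 32;   // REQUEST2_HIGH_WATER_MIN
constexpr int kMesh2HighWaterMax = 100;  // REQUEST2_HIGH_WATER_MAX

// Sketch of how the mesh repository derives its request high-water mark
// from the Mesh2MaxConcurrentRequests setting, given whether the MESH2
// policy class is currently pipelined.
int mesh2_high_water(int max_concurrent_requests, bool pipelined)
{
    const int scale = pipelined ? 2 * kPipeliningDepth : 5;
    return std::clamp(scale * max_concurrent_requests,
                      kMesh2HighWaterMin, kMesh2HighWaterMax);
}

int main()
{
    std::printf("pipelined, 8 concurrent:     %d\n", mesh2_high_water(8, true));   // 80
    std::printf("non-pipelined, 8 concurrent: %d\n", mesh2_high_water(8, false));  // 40
    return 0;
}

The same idea drives the texture fetcher's switch between HTTP_PIPE_REQUESTS_HIGH_WATER/LOW_WATER and the non-pipelined pair in commonUpdate(): deeper pipelines allow more requests to be outstanding per connection, so the water levels scale up when pipelining is enabled.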