Diffstat (limited to 'indra/newview')
-rwxr-xr-x | indra/newview/app_settings/settings.xml |   4 |
-rwxr-xr-x | indra/newview/llappcorehttp.cpp         | 174 |
-rwxr-xr-x | indra/newview/llappcorehttp.h           |   1 |
-rwxr-xr-x | indra/newview/lltexturefetch.cpp        |  55 |
-rwxr-xr-x | indra/newview/lltexturefetch.h          |  10 |
5 files changed, 145 insertions, 99 deletions
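
The llappcorehttp.cpp changes below make the HttpPipelining election dynamic: the setting now carries a commit signal, refreshSettings() detects an on/off transition, and each policy class has its PO_PIPELINING_DEPTH pushed through the queued request API (mRequest->setPolicyOption) instead of the init-time static setter. A minimal, self-contained sketch of that flow in plain C++ follows; Setting, PolicyClass, and the depth constant are illustrative stand-ins for LLControlVariable and the llcorehttp policy classes, not viewer code.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a saved setting with a commit signal.
struct Setting
{
    bool value = true;
    std::vector<std::function<void()>> commit_handlers;

    void set(bool v)                       // commit a new value and fire handlers
    {
        value = v;
        for (auto& h : commit_handlers) h();
    }
};

// Hypothetical stand-in for one HTTP policy class.
struct PolicyClass
{
    std::string usage;
    bool        supports_pipelining = false;   // analogue of init_data[i].mPipelined
    long        depth = 0;                     // currently applied pipelining depth
};

int main()
{
    const long PIPELINING_DEPTH = 5;       // illustrative depth, not the viewer's constant
    Setting http_pipelining;               // plays the role of "HttpPipelining"
    bool dirty = false;                    // setting_changed() just marks a refresh as needed

    http_pipelining.commit_handlers.push_back([&] { dirty = true; });

    std::vector<PolicyClass> classes = {
        { "texture fetch", true },
        { "mesh2 fetch",   true },
        { "other",         false },
    };

    auto refresh = [&]()                   // models a refreshSettings() pass
    {
        for (auto& pc : classes)
        {
            const long new_depth =
                (http_pipelining.value && pc.supports_pipelining) ? PIPELINING_DEPTH : 0;
            if (new_depth != pc.depth)     // only act on a real transition
            {
                pc.depth = new_depth;
                std::cout << "Changed " << pc.usage << " pipelining depth to "
                          << new_depth << "\n";
            }
        }
        dirty = false;
    };

    refresh();                             // initial election
    http_pipelining.set(false);            // user toggles the debug setting at runtime
    if (dirty) refresh();                  // next pass applies the change
}

The same shape explains why the patch tracks pipeline_changed separately from the per-class concurrency settings: only an actual transition should generate a new policy-option request.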
diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml
index 50f56cf6ff..0607579a08 100755
--- a/indra/newview/app_settings/settings.xml
+++ b/indra/newview/app_settings/settings.xml
@@ -4459,7 +4459,7 @@
     <key>HttpPipelining</key>
     <map>
       <key>Comment</key>
-      <string>If true, viewer will pipeline HTTP requests to servers. Static.</string>
+      <string>If true, viewer will pipeline HTTP requests to servers.</string>
       <key>Persist</key>
       <integer>1</integer>
       <key>Type</key>
@@ -4470,7 +4470,7 @@
     <key>HttpRangeRequestsDisable</key>
     <map>
       <key>Comment</key>
-      <string>If true, viewer will not issued range GET requests for meshes and textures.</string>
+      <string>If true, viewer will not issued range GET requests for meshes and textures. May resolve problems with certain ISPs and networking gear.</string>
       <key>Persist</key>
       <integer>1</integer>
       <key>Type</key>
diff --git a/indra/newview/llappcorehttp.cpp b/indra/newview/llappcorehttp.cpp
index d097f18d61..464e60948a 100755
--- a/indra/newview/llappcorehttp.cpp
+++ b/indra/newview/llappcorehttp.cpp
@@ -60,7 +60,7 @@ static const struct
         "other"
     },
     { // AP_TEXTURE
-        4,      1,      12,     0,      true,
+        8,      1,      12,     0,      true,
         "TextureFetchConcurrency",
         "texture fetch"
     },
@@ -70,7 +70,7 @@
         "mesh fetch"
     },
     { // AP_MESH2
-        4,      1,      32,     100,    true,
+        8,      1,      32,     100,    true,
         "Mesh2MaxConcurrentRequests",
         "mesh2 fetch"
     },
@@ -126,14 +126,6 @@ void LLAppCoreHttp::init()
                          << LL_ENDL;
     }
 
-    // Global pipelining preference from settings
-    static const std::string http_pipelining("HttpPipelining");
-    if (gSavedSettings.controlExists(http_pipelining))
-    {
-        // Default to true if absent.
-        mPipelined = gSavedSettings.getBOOL(http_pipelining);
-    }
-
     // Point to our certs or SSH/https: will fail on connect
     status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_CA_FILE,
                                                         LLCore::HttpRequest::GLOBAL_POLICY_ID,
@@ -210,12 +202,27 @@ void LLAppCoreHttp::init()
                          << LL_ENDL;
     }
 
-    // *NOTE: Pipelining isn't dynamic yet. When it is, add a global
-    // signal for the setting here.
-
+    // Signal for global pipelining preference from settings
+    static const std::string http_pipelining("HttpPipelining");
+    if (gSavedSettings.controlExists(http_pipelining))
+    {
+        LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(http_pipelining);
+        if (cntrl_ptr.isNull())
+        {
+            LL_WARNS("Init") << "Unable to set signal on global setting '" << http_pipelining
+                             << "'" << LL_ENDL;
+        }
+        else
+        {
+            mPipelinedSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
+        }
+    }
+
     // Register signals for settings and state changes
     for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
     {
+        const EAppPolicy app_policy(static_cast<EAppPolicy>(i));
+
         if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
         {
             LLPointer<LLControlVariable> cntrl_ptr = gSavedSettings.getControl(init_data[i].mKey);
@@ -226,7 +233,7 @@ void LLAppCoreHttp::init()
             }
             else
             {
-                mHttpClasses[i].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
+                mHttpClasses[app_policy].mSettingsSignal = cntrl_ptr->getCommitSignal()->connect(boost::bind(&setting_changed));
             }
         }
     }
@@ -282,6 +289,7 @@
     {
         mHttpClasses[i].mSettingsSignal.disconnect();
     }
+    mPipelinedSignal.disconnect();
 
     delete mRequest;
     mRequest = NULL;
@@ -299,6 +307,20 @@
 void LLAppCoreHttp::refreshSettings(bool initial)
 {
     LLCore::HttpStatus status;
+
+    // Global pipelining setting
+    bool pipeline_changed(false);
+    static const std::string http_pipelining("HttpPipelining");
+    if (gSavedSettings.controlExists(http_pipelining))
+    {
+        // Default to true (in ctor) if absent.
+        bool pipelined(gSavedSettings.getBOOL(http_pipelining));
+        if (pipelined != mPipelined)
+        {
+            mPipelined = pipelined;
+            pipeline_changed = true;
+        }
+    }
 
     for (int i(0); i < LL_ARRAY_SIZE(init_data); ++i)
     {
@@ -323,33 +345,42 @@ void LLAppCoreHttp::refreshSettings(bool initial)
                 }
             }
-            mHttpClasses[app_policy].mPipelined = false;
-            if (mPipelined && init_data[i].mPipelined)
+        }
+
+        // Init- or run-time settings. Must use the queued request API.
+
+        // Pipelining changes
+        if (initial || pipeline_changed)
+        {
+            const bool to_pipeline(mPipelined && init_data[i].mPipelined);
+            if (to_pipeline != mHttpClasses[app_policy].mPipelined)
             {
-                // Pipelining election is currently static (init-time).
-                // Making it dynamic isn't too hard in the SL code but verifying
-                // that libcurl handles the on-to-off transition while holding
-                // outstanding requests is something that should be tested.
+                // Pipeline election changing, set dynamic option via request
+
+                LLCore::HttpHandle handle;
+                const long new_depth(to_pipeline ? PIPELINING_DEPTH : 0);
 
-                status = LLCore::HttpRequest::setStaticPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
-                                                                    mHttpClasses[app_policy].mPolicy,
-                                                                    PIPELINING_DEPTH,
-                                                                    NULL);
-                if (! status)
+                handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PIPELINING_DEPTH,
+                                                   mHttpClasses[app_policy].mPolicy,
+                                                   new_depth,
+                                                   NULL);
+                if (LLCORE_HTTP_HANDLE_INVALID == handle)
                 {
+                    status = mRequest->getStatus();
                     LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
-                                     << " to pipelined mode. Reason: " << status.toString()
+                                     << " pipelining. Reason: " << status.toString()
                                      << LL_ENDL;
                 }
                 else
                 {
-                    mHttpClasses[app_policy].mPipelined = true;
+                    LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
+                                      << " pipelining. New value: " << new_depth
+                                      << LL_ENDL;
+                    mHttpClasses[app_policy].mPipelined = to_pipeline;
                 }
             }
         }
-
-        // Init- or run-time settings
-
+        // Get target connection concurrency value
         U32 setting(init_data[i].mDefault);
         if (! init_data[i].mKey.empty() && gSavedSettings.controlExists(init_data[i].mKey))
         {
@@ -362,63 +393,60 @@ void LLAppCoreHttp::refreshSettings(bool initial)
             }
         }
 
-        if (! initial && setting == mHttpClasses[app_policy].mConnLimit)
+        if (initial || setting != mHttpClasses[app_policy].mConnLimit || pipeline_changed)
         {
-            // Unchanged, try next setting
-            continue;
-        }
-
-        // Set it and report. Strategies depend on pipelining:
-        //
-        // No Pipelining. Llcorehttp manages connections itself based
-        // on the PO_CONNECTION_LIMIT setting. Set both limits to the
-        // same value for logical consistency. In the future, may
-        // hand over connection management to libcurl after the
-        // connection cache has been better vetted.
-        //
-        // Pipelining. Libcurl is allowed to manage connections to a
-        // great degree. Steady state will connection limit based on
-        // the per-host setting. Transitions (region crossings, new
-        // avatars, etc.) can request additional outbound connections
-        // to other servers via 2X total connection limit.
-        //
-        LLCore::HttpHandle handle;
-        handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
-                                           mHttpClasses[app_policy].mPolicy,
-                                           (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting),
-                                           NULL);
-        if (LLCORE_HTTP_HANDLE_INVALID == handle)
-        {
-            status = mRequest->getStatus();
-            LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
-                             << " concurrency. Reason: " << status.toString()
-                             << LL_ENDL;
-        }
-        else
-        {
-            handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
+            // Set it and report. Strategies depend on pipelining:
+            //
+            // No Pipelining. Llcorehttp manages connections itself based
+            // on the PO_CONNECTION_LIMIT setting. Set both limits to the
+            // same value for logical consistency. In the future, may
+            // hand over connection management to libcurl after the
+            // connection cache has been better vetted.
+            //
+            // Pipelining. Libcurl is allowed to manage connections to a
+            // great degree. Steady state will connection limit based on
+            // the per-host setting. Transitions (region crossings, new
+            // avatars, etc.) can request additional outbound connections
+            // to other servers via 2X total connection limit.
+            //
+            LLCore::HttpHandle handle;
+            handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_CONNECTION_LIMIT,
                                                mHttpClasses[app_policy].mPolicy,
-                                               setting,
+                                               (mHttpClasses[app_policy].mPipelined ? 2 * setting : setting),
                                                NULL);
             if (LLCORE_HTTP_HANDLE_INVALID == handle)
             {
                 status = mRequest->getStatus();
                 LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
-                                 << " per-host concurrency. Reason: " << status.toString()
+                                 << " concurrency. Reason: " << status.toString()
                                  << LL_ENDL;
             }
             else
             {
-                LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
-                                  << " concurrency. New value: " << setting
-                                  << LL_ENDL;
-                mHttpClasses[app_policy].mConnLimit = setting;
-                if (initial && setting != init_data[i].mDefault)
+                handle = mRequest->setPolicyOption(LLCore::HttpRequest::PO_PER_HOST_CONNECTION_LIMIT,
+                                                   mHttpClasses[app_policy].mPolicy,
+                                                   setting,
+                                                   NULL);
+                if (LLCORE_HTTP_HANDLE_INVALID == handle)
                 {
-                    LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
-                                     << " concurrency. New value: " << setting
+                    status = mRequest->getStatus();
+                    LL_WARNS("Init") << "Unable to set " << init_data[i].mUsage
+                                     << " per-host concurrency. Reason: " << status.toString()
                                      << LL_ENDL;
                 }
+                else
+                {
+                    LL_DEBUGS("Init") << "Changed " << init_data[i].mUsage
+                                      << " concurrency. New value: " << setting
+                                      << LL_ENDL;
+                    mHttpClasses[app_policy].mConnLimit = setting;
+                    if (initial && setting != init_data[i].mDefault)
+                    {
+                        LL_INFOS("Init") << "Application settings overriding default " << init_data[i].mUsage
+                                         << " concurrency. New value: " << setting
+                                         << LL_ENDL;
+                    }
+                }
             }
         }
     }
diff --git a/indra/newview/llappcorehttp.h b/indra/newview/llappcorehttp.h
index 63c8a11180..9ad4eb4b30 100755
--- a/indra/newview/llappcorehttp.h
+++ b/indra/newview/llappcorehttp.h
@@ -218,6 +218,7 @@ private:
     bool                        mStopped;
     HttpClass                   mHttpClasses[AP_COUNT];
    bool                        mPipelined;             // Global setting
+    boost::signals2::connection mPipelinedSignal;       // Signal for 'HttpPipelining' setting
 };
 
diff --git a/indra/newview/lltexturefetch.cpp b/indra/newview/lltexturefetch.cpp
index 4008a6948d..097a7b374f 100755
--- a/indra/newview/lltexturefetch.cpp
+++ b/indra/newview/lltexturefetch.cpp
@@ -483,12 +483,12 @@ private:
     bool acquireHttpSemaphore()
     {
         llassert(! mHttpHasResource);
-        if (mFetcher->mHttpSemaphore <= 0)
+        if (mFetcher->mHttpSemaphore >= mFetcher->mHttpHighWater)
         {
             return false;
         }
         mHttpHasResource = true;
-        mFetcher->mHttpSemaphore--;
+        mFetcher->mHttpSemaphore++;
         return true;
     }
 
@@ -498,7 +498,8 @@ private:
     {
         llassert(mHttpHasResource);
         mHttpHasResource = false;
-        mFetcher->mHttpSemaphore++;
+        mFetcher->mHttpSemaphore--;
+        llassert_always(mFetcher->mHttpSemaphore >= 0);
     }
 
 private:
@@ -2523,6 +2524,8 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
     mMaxBandwidth = gSavedSettings.getF32("ThrottleBandwidthKBPS");
     mTextureInfo.setUpLogging(gSavedSettings.getBOOL("LogTextureDownloadsToViewerLog"), gSavedSettings.getBOOL("LogTextureDownloadsToSimulator"), U32Bytes(gSavedSettings.getU32("TextureLoggingThreshold")));
 
+    LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
+    mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE);
     mHttpRequest = new LLCore::HttpRequest;
     mHttpOptions = new LLCore::HttpOptions;
     mHttpOptionsWithHeaders = new LLCore::HttpOptions;
@@ -2531,21 +2534,9 @@ LLTextureFetch::LLTextureFetch(LLTextureCache* cache, LLImageDecodeThread* image
     mHttpHeaders->append("Accept", "image/x-j2c");
     mHttpMetricsHeaders = new LLCore::HttpHeaders;
     mHttpMetricsHeaders->append("Content-Type", "application/llsd+xml");
-    LLAppCoreHttp & app_core_http(LLAppViewer::instance()->getAppCoreHttp());
-    mHttpPolicyClass = app_core_http.getPolicy(LLAppCoreHttp::AP_TEXTURE);
-    if (app_core_http.isPipelined(LLAppCoreHttp::AP_TEXTURE))
-    {
-        // Init-time election that will have to change for
-        // support of dynamic changes to the pipelining enable flag.
-        mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER;
-        mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER;
-    }
-    else
-    {
-        mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
-        mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
-    }
-    mHttpSemaphore = mHttpHighWater;
+    mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
+    mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
+    mHttpSemaphore = 0;
 
     // Conditionally construct debugger object after 'this' is
     // fully initialized.
@@ -3032,6 +3023,20 @@ bool LLTextureFetch::runCondition()
 // Threads: Ttf
 void LLTextureFetch::commonUpdate()
 {
+    // Update low/high water levels based on pipelining. We pick
+    // up setting eventually, so the semaphore/request level can
+    // fall outside the [0..HIGH_WATER] range. Expect that.
+    if (LLAppViewer::instance()->getAppCoreHttp().isPipelined(LLAppCoreHttp::AP_TEXTURE))
+    {
+        mHttpHighWater = HTTP_PIPE_REQUESTS_HIGH_WATER;
+        mHttpLowWater = HTTP_PIPE_REQUESTS_LOW_WATER;
+    }
+    else
+    {
+        mHttpHighWater = HTTP_NONPIPE_REQUESTS_HIGH_WATER;
+        mHttpLowWater = HTTP_NONPIPE_REQUESTS_LOW_WATER;
+    }
+
     // Release waiters
     releaseHttpWaiters();
 
@@ -3693,8 +3698,16 @@ void LLTextureFetch::releaseHttpWaiters()
 {
     // Use mHttpSemaphore rather than mHTTPTextureQueue.size()
     // to avoid a lock.
-    if (mHttpSemaphore < (mHttpHighWater - mHttpLowWater))
+    if (mHttpSemaphore >= mHttpLowWater)
         return;
+    S32 needed(mHttpHighWater - mHttpSemaphore);
+    if (needed <= 0)
+    {
+        // Would only happen if High/LowWater were changed behind
+        // our back. In that case, defer fill until usage falls within
+        // limits.
+        return;
+    }
 
     // Quickly make a copy of all the LLUIDs. Get off the
     // mutex as early as possible.
@@ -3743,10 +3756,10 @@ void LLTextureFetch::releaseHttpWaiters()
     tids.clear();
 
     // Sort into priority order, if necessary and only as much as needed
-    if (tids2.size() > mHttpSemaphore)
+    if (tids2.size() > needed)
     {
         LLTextureFetchWorker::Compare compare;
-        std::partial_sort(tids2.begin(), tids2.begin() + mHttpSemaphore, tids2.end(), compare);
+        std::partial_sort(tids2.begin(), tids2.begin() + needed, tids2.end(), compare);
     }
 
     // Release workers up to the high water mark. Since we aren't
diff --git a/indra/newview/lltexturefetch.h b/indra/newview/lltexturefetch.h
index d13736997f..89d18e2c67 100755
--- a/indra/newview/lltexturefetch.h
+++ b/indra/newview/lltexturefetch.h
@@ -356,8 +356,8 @@ private:
     LLCore::HttpHeaders *           mHttpHeaders;           // Ttf
     LLCore::HttpHeaders *           mHttpMetricsHeaders;    // Ttf
     LLCore::HttpRequest::policy_t   mHttpPolicyClass;       // T*
-    S32                             mHttpHighWater;         // T* (ro)
-    S32                             mHttpLowWater;          // T* (ro)
+    S32                             mHttpHighWater;         // Ttf
+    S32                             mHttpLowWater;          // Ttf
 
     // We use a resource semaphore to keep HTTP requests in
     // WAIT_HTTP_RESOURCE2 if there aren't sufficient slots in the
@@ -366,7 +366,11 @@ private:
     // where it's more expensive to get at them. Requests in either
     // SEND_HTTP_REQ or WAIT_HTTP_REQ charge against the semaphore
     // and tracking state transitions is critical to liveness.
-    LLAtomicS32                     mHttpSemaphore;         // Ttf + Tmain
+    //
+    // Originally implemented as a traditional semaphore (heading towards
+    // zero), it now is an outstanding request count that is allowed to
+    // exceed the high water level (but not go below zero).
+    LLAtomicS32                     mHttpSemaphore;         // Ttf
 
     typedef std::set<LLUUID> wait_http_res_queue_t;
     wait_http_res_queue_t           mHttpWaitResource;      // Mfnq
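
The lltexturefetch changes invert mHttpSemaphore: rather than a count-down semaphore seeded at the high-water mark, it becomes a count-up outstanding-request counter that commonUpdate() compares against water marks which can be retargeted whenever the pipelining election changes. The count may therefore sit above a newly lowered high-water mark for a while, but must never go negative. A small, self-contained sketch of that counter-with-water-marks pattern follows; RequestGate and its numbers are illustrative stand-ins, not viewer code.

#include <algorithm>
#include <atomic>
#include <cassert>
#include <iostream>

// Hypothetical stand-in for the texture fetcher's request gate. The counter tracks
// outstanding requests (counting up) rather than remaining slots (counting down),
// so the high/low water marks can be changed at any time without reseeding it.
struct RequestGate
{
    std::atomic<int> outstanding{0};       // analogue of mHttpSemaphore
    int high_water = 40;                   // analogues of mHttpHighWater / mHttpLowWater
    int low_water  = 20;

    bool acquire()                         // like acquireHttpSemaphore()
    {
        if (outstanding >= high_water)
            return false;                  // over the cap, caller must wait
        ++outstanding;
        return true;
    }

    void release()                         // like releaseHttpSemaphore()
    {
        --outstanding;
        assert(outstanding >= 0);          // must never go negative
    }

    int refill_count(int waiting) const    // like the releaseHttpWaiters() sizing logic
    {
        if (outstanding >= low_water)
            return 0;                      // still busy enough, defer the refill
        const int needed = high_water - outstanding;
        return needed > 0 ? std::min(needed, waiting) : 0;
    }
};

int main()
{
    RequestGate gate;
    for (int i = 0; i < 25; ++i) gate.acquire();   // 25 requests in flight

    // Water marks can shrink behind the counter's back (e.g. pipelining turned off);
    // the count then sits above high_water until enough requests complete.
    gate.high_water = 10;
    gate.low_water  = 5;
    std::cout << "refill now: " << gate.refill_count(100) << "\n";   // 0, over the limits

    for (int i = 0; i < 22; ++i) gate.release();   // drain below the new low water
    std::cout << "refill now: " << gate.refill_count(100) << "\n";   // 7 = 10 - 3
}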