author     Andrey Kleshchev <andreykproductengine@lindenlab.com>  2018-05-03 18:44:10 +0000
committer  Andrey Kleshchev <andreykproductengine@lindenlab.com>  2018-05-03 18:44:10 +0000
commit     7b5c0dd39a0928601a0e0ae51d4a75ac7f254ab3 (patch)
tree       d25700841aef0145fca34ae19b1395642cf94ec3 /indra/newview/llmeshrepository.cpp
parent     43335283bd2aaa9934f152cf95840f722a9d841e (diff)
MAINT-8593 Viewer should not repeat loads indefinitely
Diffstat (limited to 'indra/newview/llmeshrepository.cpp')
-rw-r--r--  indra/newview/llmeshrepository.cpp | 378
1 file changed, 243 insertions, 135 deletions
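
The change gives every queued mesh request an exponential backoff: each failed attempt doubles the wait before the next try, and after DOWNLOAD_RETRY_LIMIT failures the request is abandoned instead of being resubmitted forever. Below is a minimal standalone sketch of the schedule implied by the two new constants; it is an illustration only (the LLTimer-based delay is replaced by a printed value), not the viewer's actual code.

    #include <cstdio>

    // Constants as introduced by this commit.
    const unsigned DOWNLOAD_RETRY_LIMIT = 8;
    const float    DOWNLOAD_RETRY_DELAY = 0.5f; // seconds

    int main()
    {
        float total = 0.f;
        for (unsigned retries = 0; retries < DOWNLOAD_RETRY_LIMIT; ++retries)
        {
            // Mirrors RequestStats::updateTime(): modifier = 1 << mRetries, taken before the increment.
            float delay = DOWNLOAD_RETRY_DELAY * (float)(1u << retries);
            total += delay;
            std::printf("after failure %u: wait %.1fs (%.1fs cumulative)\n", retries + 1, delay, total);
        }
        // Once canRetry() turns false the request is dropped; failed LOD requests
        // additionally land in mUnavailableQ so the object stops waiting for that mesh.
        return 0;
    }
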
diff --git a/indra/newview/llmeshrepository.cpp b/indra/newview/llmeshrepository.cpp
index bb64201fef..4011328104 100644
--- a/indra/newview/llmeshrepository.cpp
+++ b/indra/newview/llmeshrepository.cpp
@@ -353,6 +353,9 @@ const U32 LARGE_MESH_FETCH_THRESHOLD = 1U << 21; // Size at which requests goes
const long SMALL_MESH_XFER_TIMEOUT = 120L; // Seconds to complete xfer, small mesh downloads
const long LARGE_MESH_XFER_TIMEOUT = 600L; // Seconds to complete xfer, large downloads
+const U32 DOWNLOAD_RETRY_LIMIT = 8;
+const F32 DOWNLOAD_RETRY_DELAY = 0.5f; // seconds
+
// Would normally like to retry on uploads as some
// retryable failures would be recoverable. Unfortunately,
// the mesh service is using 500 (retryable) rather than
@@ -516,6 +519,24 @@ void get_vertex_buffer_from_mesh(LLCDMeshData& mesh, LLModel::PhysicsMesh& res,
}
}
+void RequestStats::updateTime()
+{
+ U32 modifier = 1 << mRetries; // before ++
+ mRetries++;
+ mTimer.reset();
+ mTimer.setTimerExpirySec(DOWNLOAD_RETRY_DELAY * (F32)modifier); // up to 32s, 64 total wait
+}
+
+bool RequestStats::canRetry() const
+{
+ return mRetries < DOWNLOAD_RETRY_LIMIT;
+}
+
+bool RequestStats::isDelayed() const
+{
+ return mTimer.getStarted() && !mTimer.hasExpired();
+}
+
LLViewerFetchedTexture* LLMeshUploadThread::FindViewerTexture(const LLImportMaterial& material)
{
LLPointer< LLViewerFetchedTexture > * ppTex = static_cast< LLPointer< LLViewerFetchedTexture > * >(material.mOpaqueData);
@@ -890,142 +911,225 @@ void LLMeshRepoThread::run()
sRequestWaterLevel = mHttpRequestSet.size(); // Stats data update
// NOTE: order of queue processing intentionally favors LOD requests over header requests
+ // Todo: we are processing mLODReqQ, mHeaderReqQ, mSkinRequests, mDecompositionRequests and mPhysicsShapeRequests
+ // in relatively similar manners, remake code to simplify/unify the process,
+ // like processRequests(&requestQ, fetchFunction); which does same thing for each element
- while (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
- if (! mMutex)
- {
- break;
- }
- mMutex->lock();
- LODRequest req = mLODReqQ.front();
- mLODReqQ.pop();
- LLMeshRepository::sLODProcessing--;
- mMutex->unlock();
-
- // Todo: this and other cases shouldn't retry indefinitely, at the very least do as with mDecompositionRequests
- if (!fetchMeshLOD(req.mMeshParams, req.mLOD)) // failed, resubmit
- {
- mMutex->lock();
- mLODReqQ.push(req);
- ++LLMeshRepository::sLODProcessing;
- mMutex->unlock();
- }
- }
-
- while (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
- if (! mMutex)
- {
- break;
- }
- mMutex->lock();
- HeaderRequest req = mHeaderReqQ.front();
- mHeaderReqQ.pop();
- mMutex->unlock();
- if (!fetchMeshHeader(req.mMeshParams))//failed, resubmit
- {
- mMutex->lock();
- mHeaderReqQ.push(req) ;
- mMutex->unlock();
- }
- }
-
- // For the final three request lists, similar goal to above but
- // slightly different queue structures. Stay off the mutex when
- // performing long-duration actions.
-
- if (mHttpRequestSet.size() < sRequestHighWater
- && (! mSkinRequests.empty()
- || ! mDecompositionRequests.empty()
- || ! mPhysicsShapeRequests.empty()))
- {
- // Something to do probably, lock and double-check. We don't want
- // to hold the lock long here. That will stall main thread activities
- // so we bounce it.
-
- mMutex->lock();
- if (! mSkinRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
- std::set<LLUUID> incomplete;
- std::set<LLUUID>::iterator iter(mSkinRequests.begin());
- while (iter != mSkinRequests.end() && mHttpRequestSet.size() < sRequestHighWater)
- {
- LLUUID mesh_id = *iter;
- mSkinRequests.erase(iter);
- mMutex->unlock();
-
- if (! fetchMeshSkinInfo(mesh_id))
- {
- incomplete.insert(mesh_id);
- }
+ if (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ std::list<LODRequest> incomplete;
+ while (!mLODReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ if (!mMutex)
+ {
+ break;
+ }
+
+ mMutex->lock();
+ LODRequest req = mLODReqQ.front();
+ mLODReqQ.pop();
+ LLMeshRepository::sLODProcessing--;
+ mMutex->unlock();
+ if (req.isDelayed())
+ {
+ // failed to load before, wait a bit
+ incomplete.push_front(req);
+ }
+ else if (!fetchMeshLOD(req.mMeshParams, req.mLOD, req.canRetry()))
+ {
+ if (req.canRetry())
+ {
+ // failed, resubmit
+ req.updateTime();
+ incomplete.push_front(req);
+ }
+ else
+ {
+ // too many fails
+ mUnavailableQ.push(req);
+ LL_WARNS() << "Failed to load " << req.mMeshParams << " , skip" << LL_ENDL;
+ }
+ }
+ }
- mMutex->lock();
- iter = mSkinRequests.begin();
- }
+ if (!incomplete.empty())
+ {
+ LLMutexLock locker(mMutex);
+ for (std::list<LODRequest>::iterator iter = incomplete.begin(); iter != incomplete.end(); iter++)
+ {
+ mLODReqQ.push(*iter);
+ ++LLMeshRepository::sLODProcessing;
+ }
+ }
+ }
- if (! incomplete.empty())
- {
- mSkinRequests.insert(incomplete.begin(), incomplete.end());
- }
- }
+ if (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ std::list<HeaderRequest> incomplete;
+ while (!mHeaderReqQ.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ if (!mMutex)
+ {
+ break;
+ }
+
+ mMutex->lock();
+ HeaderRequest req = mHeaderReqQ.front();
+ mHeaderReqQ.pop();
+ mMutex->unlock();
+ if (req.isDelayed())
+ {
+ // failed to load before, wait a bit
+ incomplete.push_front(req);
+ }
+ else if (!fetchMeshHeader(req.mMeshParams, req.getRetries()))
+ {
+ if (req.canRetry())
+ {
+ //failed, resubmit
+ req.updateTime();
+ incomplete.push_front(req);
+ }
+ else
+ {
+ LL_DEBUGS() << "mHeaderReqQ failed: " << req.mMeshParams << LL_ENDL;
+ }
+ }
+ }
- // holding lock, try next list
- // *TODO: For UI/debug-oriented lists, we might drop the fine-
- // grained locking as there's a lowered expectation of smoothness
- // in these cases.
- if (! mDecompositionRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
- std::set<LLUUID> incomplete;
- std::set<LLUUID>::iterator iter(mDecompositionRequests.begin());
- while (iter != mDecompositionRequests.end() && mHttpRequestSet.size() < sRequestHighWater)
- {
- LLUUID mesh_id = *iter;
- mDecompositionRequests.erase(iter);
- mMutex->unlock();
-
- if (! fetchMeshDecomposition(mesh_id))
- {
- incomplete.insert(mesh_id);
- }
+ if (!incomplete.empty())
+ {
+ LLMutexLock locker(mMutex);
+ for (std::list<HeaderRequest>::iterator iter = incomplete.begin(); iter != incomplete.end(); iter++)
+ {
+ mHeaderReqQ.push(*iter);
+ }
+ }
+ }
- mMutex->lock();
- iter = mDecompositionRequests.begin();
- }
+ // For the final three request lists, similar goal to above but
+ // slightly different queue structures. Stay off the mutex when
+ // performing long-duration actions.
- if (! incomplete.empty())
- {
- mDecompositionRequests.insert(incomplete.begin(), incomplete.end());
- }
- }
+ if (mHttpRequestSet.size() < sRequestHighWater
+ && (!mSkinRequests.empty()
+ || !mDecompositionRequests.empty()
+ || !mPhysicsShapeRequests.empty()))
+ {
+ // Something to do probably, lock and double-check. We don't want
+ // to hold the lock long here. That will stall main thread activities
+ // so we bounce it.
- // holding lock, final list
- if (! mPhysicsShapeRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
- {
- std::set<LLUUID> incomplete;
- std::set<LLUUID>::iterator iter(mPhysicsShapeRequests.begin());
- while (iter != mPhysicsShapeRequests.end() && mHttpRequestSet.size() < sRequestHighWater)
- {
- LLUUID mesh_id = *iter;
- mPhysicsShapeRequests.erase(iter);
- mMutex->unlock();
-
- if (! fetchMeshPhysicsShape(mesh_id))
- {
- incomplete.insert(mesh_id);
- }
+ if (!mSkinRequests.empty())
+ {
+ std::set<UUIDBasedRequest> incomplete;
+ while (!mSkinRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ mMutex->lock();
+ std::set<UUIDBasedRequest>::iterator iter = mSkinRequests.begin();
+ UUIDBasedRequest req = *iter;
+ mSkinRequests.erase(iter);
+ mMutex->unlock();
+ if (req.isDelayed())
+ {
+ incomplete.insert(req);
+ }
+ else if (!fetchMeshSkinInfo(req.mId))
+ {
+ if (req.canRetry())
+ {
+ req.updateTime();
+ incomplete.insert(req);
+ }
+ else
+ {
+ LL_DEBUGS() << "mSkinRequests failed: " << req.mId << LL_ENDL;
+ }
+ }
+ }
+
+ if (!incomplete.empty())
+ {
+ LLMutexLock locker(mMutex);
+ mSkinRequests.insert(incomplete.begin(), incomplete.end());
+ }
+ }
- mMutex->lock();
- iter = mPhysicsShapeRequests.begin();
- }
+ // holding lock, try next list
+ // *TODO: For UI/debug-oriented lists, we might drop the fine-
+ // grained locking as there's a lowered expectation of smoothness
+ // in these cases.
+ if (!mDecompositionRequests.empty())
+ {
+ std::set<UUIDBasedRequest> incomplete;
+ while (!mDecompositionRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ mMutex->lock();
+ std::set<UUIDBasedRequest>::iterator iter = mDecompositionRequests.begin();
+ UUIDBasedRequest req = *iter;
+ mDecompositionRequests.erase(iter);
+ mMutex->unlock();
+ if (req.isDelayed())
+ {
+ incomplete.insert(req.mId);
+ }
+ else if (!fetchMeshDecomposition(req.mId))
+ {
+ if (req.canRetry())
+ {
+ req.updateTime();
+ incomplete.insert(req.mId);
+ }
+ else
+ {
+ LL_DEBUGS() << "mDecompositionRequests failed: " << req.mId << LL_ENDL;
+ }
+ }
+ }
+
+ if (!incomplete.empty())
+ {
+ LLMutexLock locker(mMutex);
+ mDecompositionRequests.insert(incomplete.begin(), incomplete.end());
+ }
+ }
- if (! incomplete.empty())
- {
- mPhysicsShapeRequests.insert(incomplete.begin(), incomplete.end());
- }
- }
- mMutex->unlock();
- }
+ // holding lock, final list
+ if (!mPhysicsShapeRequests.empty())
+ {
+ std::set<LLUUID> incomplete;
+ while (!mPhysicsShapeRequests.empty() && mHttpRequestSet.size() < sRequestHighWater)
+ {
+ mMutex->lock();
+ std::set<UUIDBasedRequest>::iterator iter = mPhysicsShapeRequests.begin();
+ UUIDBasedRequest req = *iter;
+ mPhysicsShapeRequests.erase(iter);
+ mMutex->unlock();
+ if (req.isDelayed())
+ {
+ incomplete.insert(req.mId);
+ }
+ else if (!fetchMeshPhysicsShape(req.mId))
+ {
+ if (req.canRetry())
+ {
+ req.updateTime();
+ incomplete.insert(req.mId);
+ }
+ else
+ {
+ LL_DEBUGS() << "mPhysicsShapeRequests failed: " << req.mId << LL_ENDL;
+ }
+ }
+ }
+
+ if (!incomplete.empty())
+ {
+ LLMutexLock locker(mMutex);
+ mPhysicsShapeRequests.insert(incomplete.begin(), incomplete.end());
+ }
+ }
+ }
// For dev purposes only. A dynamic change could make this false
// and that shouldn't assert.
@@ -1047,19 +1151,19 @@ void LLMeshRepoThread::run()
// Mutex: LLMeshRepoThread::mMutex must be held on entry
void LLMeshRepoThread::loadMeshSkinInfo(const LLUUID& mesh_id)
{
- mSkinRequests.insert(mesh_id);
+ mSkinRequests.insert(UUIDBasedRequest(mesh_id));
}
// Mutex: LLMeshRepoThread::mMutex must be held on entry
void LLMeshRepoThread::loadMeshDecomposition(const LLUUID& mesh_id)
{
- mDecompositionRequests.insert(mesh_id);
+ mDecompositionRequests.insert(UUIDBasedRequest(mesh_id));
}
// Mutex: LLMeshRepoThread::mMutex must be held on entry
void LLMeshRepoThread::loadMeshPhysicsShape(const LLUUID& mesh_id)
{
- mPhysicsShapeRequests.insert(mesh_id);
+ mPhysicsShapeRequests.insert(UUIDBasedRequest(mesh_id));
}
void LLMeshRepoThread::lockAndLoadMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
@@ -1525,7 +1629,7 @@ void LLMeshRepoThread::decActiveHeaderRequests()
}
//return false if failed to get header
-bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params)
+bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params, bool can_retry)
{
++LLMeshRepository::sMeshRequestCount;
@@ -1572,7 +1676,7 @@ bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params)
<< LL_ENDL;
retval = false;
}
- else
+ else if (can_retry)
{
handler->mHttpHandle = handle;
mHttpRequestSet.insert(handler);
@@ -1583,7 +1687,7 @@ bool LLMeshRepoThread::fetchMeshHeader(const LLVolumeParams& mesh_params)
}
//return false if failed to get mesh lod.
-bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
+bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod, bool can_retry)
{
if (!mHeaderMutex)
{
@@ -1616,7 +1720,7 @@ bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
U8* buffer = new(std::nothrow) U8[size];
if (!buffer)
{
- LL_WARNS_ONCE(LOG_MESH) << "Can't allocate memory for mesh LOD, size: " << size << LL_ENDL;
+ LL_WARNS_ONCE(LOG_MESH) << "Can't allocate memory for mesh " << mesh_id << " LOD " << lod << ", size: " << size << LL_ENDL;
// todo: for now it will result in indefinite constant retries, should result in timeout
// or in retry-count and disabling mesh. (but usually viewer is beyond saving at this point)
return false;
@@ -1661,12 +1765,16 @@ bool LLMeshRepoThread::fetchMeshLOD(const LLVolumeParams& mesh_params, S32 lod)
<< LL_ENDL;
retval = false;
}
- else
+ else if (can_retry)
{
handler->mHttpHandle = handle;
mHttpRequestSet.insert(handler);
// *NOTE: Allowing a re-request, not marking as unavailable. Is that correct?
}
+ else
+ {
+ mUnavailableQ.push(LODRequest(mesh_params, lod));
+ }
}
else
{
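
The same dequeue/retry/requeue pattern is repeated above for the LOD, header, skin, decomposition and physics-shape queues, and the in-diff TODO suggests folding it into a single helper along the lines of processRequests(&requestQ, fetchFunction). The following is a hedged sketch of what that shared loop does, with placeholder names (Request, fetch, give_up) rather than the viewer's real types, and without the sRequestHighWater throttle the real loops also check.

    #include <list>

    // Simplified stand-in for a RequestStats-derived request; the real class
    // tracks an LLTimer-based backoff instead of the bare counter used here.
    struct Request
    {
        int  mRetries = 0;
        bool canRetry() const  { return mRetries < 8; }   // DOWNLOAD_RETRY_LIMIT
        bool isDelayed() const { return false; }          // backoff timer check in the real code
        void updateTime()      { ++mRetries; }            // also resets/extends the timer
    };

    template <typename FetchFn, typename GiveUpFn>
    void process_requests(std::list<Request>& queue, FetchFn fetch, GiveUpFn give_up)
    {
        std::list<Request> incomplete;
        while (!queue.empty())
        {
            Request req = queue.front();
            queue.pop_front();

            if (req.isDelayed())
            {
                incomplete.push_front(req);     // previous failure, backoff still running
            }
            else if (!fetch(req))
            {
                if (req.canRetry())
                {
                    req.updateTime();           // grow the delay and try again later
                    incomplete.push_front(req);
                }
                else
                {
                    give_up(req);               // e.g. push to mUnavailableQ and log a warning
                }
            }
        }
        queue.splice(queue.end(), incomplete);  // resubmit survivors for the next pass
    }
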