Diffstat (limited to 'indra/llmessage')
-rw-r--r--  indra/llmessage/llavatarnamecache.cpp |  20
-rw-r--r--  indra/llmessage/llcurl.cpp            | 218
-rw-r--r--  indra/llmessage/llcurl.h              |  65
3 files changed, 292 insertions, 11 deletions
diff --git a/indra/llmessage/llavatarnamecache.cpp b/indra/llmessage/llavatarnamecache.cpp
index ff31f7665e..32d9d8bfc3 100644
--- a/indra/llmessage/llavatarnamecache.cpp
+++ b/indra/llmessage/llavatarnamecache.cpp
@@ -87,6 +87,9 @@ namespace LLAvatarNameCache
/// Time when unrefreshed cached names were checked last
static F64 sLastExpireCheck;
+ /// Time-to-live for a temp cache entry.
+ const F64 TEMP_CACHE_ENTRY_LIFETIME = 60.0;
+
//-----------------------------------------------------------------------
// Internal methods
//-----------------------------------------------------------------------
@@ -274,7 +277,7 @@ void LLAvatarNameCache::handleAgentError(const LLUUID& agent_id)
{
// there is no existing cache entry, so make a temporary name from legacy
LL_WARNS("AvNameCache") << "LLAvatarNameCache get legacy for agent "
- << agent_id << LL_ENDL;
+ << agent_id << LL_ENDL;
gCacheName->get(agent_id, false, // legacy compatibility
boost::bind(&LLAvatarNameCache::legacyNameCallback,
_1, _2, _3));
@@ -287,13 +290,14 @@ void LLAvatarNameCache::handleAgentError(const LLUUID& agent_id)
// Clear this agent from the pending list
LLAvatarNameCache::sPendingQueue.erase(agent_id);
- const LLAvatarName& av_name = existing->second;
+ LLAvatarName& av_name = existing->second;
LL_DEBUGS("AvNameCache") << "LLAvatarNameCache use cache for agent "
<< agent_id
<< "user '" << av_name.mUsername << "' "
<< "display '" << av_name.mDisplayName << "' "
<< "expires in " << av_name.mExpires - LLFrameTimer::getTotalSeconds() << " seconds"
<< LL_ENDL;
+ av_name.mExpires = LLFrameTimer::getTotalSeconds() + TEMP_CACHE_ENTRY_LIFETIME; // reset expiry time so we don't constantly rerequest.
}
}
@@ -402,10 +406,12 @@ void LLAvatarNameCache::legacyNameCallback(const LLUUID& agent_id,
<< LL_ENDL;
buildLegacyName(full_name, &av_name);
- // Don't add to cache, the data already exists in the legacy name system
- // cache and we don't want or need duplicate storage, because keeping the
- // two copies in sync is complex.
- processName(agent_id, av_name, false);
+ // Add to cache, because if we don't we'll keep rerequesting the
+ // same record forever. buildLegacyName should always guarantee
+ // that these records expire reasonably soon
+ // (in TEMP_CACHE_ENTRY_LIFETIME seconds), so if the failure was due
+ // to something temporary we will eventually request and get the right data.
+ processName(agent_id, av_name, true);
}
void LLAvatarNameCache::requestNamesViaLegacy()
@@ -583,7 +589,7 @@ void LLAvatarNameCache::buildLegacyName(const std::string& full_name,
av_name->mDisplayName = full_name;
av_name->mIsDisplayNameDefault = true;
av_name->mIsTemporaryName = true;
- av_name->mExpires = F64_MAX; // not used because these are not cached
+ av_name->mExpires = LLFrameTimer::getTotalSeconds() + TEMP_CACHE_ENTRY_LIFETIME;
LL_DEBUGS("AvNameCache") << "LLAvatarNameCache::buildLegacyName "
<< full_name
<< LL_ENDL;
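The net effect of the llavatarnamecache.cpp changes: names built from the legacy fallback are now cached for a short time instead of never being stored, so a failed lookup stops being re-requested constantly but is retried after about a minute. A minimal sketch of the resulting expiry behaviour, assuming the field names shown in the diff (the helper itself is hypothetical, not part of the patch):

// Sketch only: illustrates the short-lived cache entries introduced above.
static bool temp_entry_expired(const LLAvatarName& av_name)
{
    // buildLegacyName() marks the entry temporary and stamps it to expire
    // TEMP_CACHE_ENTRY_LIFETIME (60 s) after creation, so a temporary
    // failure only suppresses re-requests briefly before the correct data
    // is fetched again.
    return av_name.mIsTemporaryName
        && LLFrameTimer::getTotalSeconds() > av_name.mExpires;
}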
diff --git a/indra/llmessage/llcurl.cpp b/indra/llmessage/llcurl.cpp
index 5ea9b58300..b4ac984d57 100644
--- a/indra/llmessage/llcurl.cpp
+++ b/indra/llmessage/llcurl.cpp
@@ -935,8 +935,8 @@ bool LLCurlThread::CurlRequest::processRequest()
if(!completed)
{
- setPriority(LLQueuedThread::PRIORITY_LOW) ;
- }
+ setPriority(LLQueuedThread::PRIORITY_LOW) ;
+ }
}
return completed ;
@@ -946,7 +946,7 @@ void LLCurlThread::CurlRequest::finishRequest(bool completed)
{
if(mMulti->isDead())
{
- mCurlThread->deleteMulti(mMulti) ;
+ mCurlThread->deleteMulti(mMulti) ;
}
else
{
@@ -990,6 +990,7 @@ void LLCurlThread::killMulti(LLCurl::Multi* multi)
return ;
}
+
multi->markDead() ;
}
@@ -1095,7 +1096,9 @@ void LLCurlRequest::get(const std::string& url, LLCurl::ResponderPtr responder)
{
getByteRange(url, headers_t(), 0, -1, responder);
}
-
+
+// Note: (length==0) is interpreted as "the rest of the file", i.e. the whole file if (offset==0) or
+// the remainder of the file if not.
bool LLCurlRequest::getByteRange(const std::string& url,
const headers_t& headers,
S32 offset, S32 length,
@@ -1113,6 +1116,11 @@ bool LLCurlRequest::getByteRange(const std::string& url,
std::string range = llformat("Range: bytes=%d-%d", offset,offset+length-1);
easy->slist_append(range.c_str());
}
+ else if (offset > 0)
+ {
+ std::string range = llformat("Range: bytes=%d-", offset);
+ easy->slist_append(range.c_str());
+ }
easy->setHeaders();
bool res = addEasy(easy);
return res;
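For reference, the Range headers produced by the updated getByteRange() logic (illustrative values; the first case sends no Range header at all):

// getByteRange(url, headers,   0,  0, responder)  -> no Range header (whole file)
// getByteRange(url, headers, 100,  0, responder)  -> "Range: bytes=100-"     (new branch above)
// getByteRange(url, headers, 100, 50, responder)  -> "Range: bytes=100-149"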
@@ -1238,6 +1246,208 @@ S32 LLCurlRequest::getQueued()
return queued;
}
+LLCurlTextureRequest::LLCurlTextureRequest(S32 concurrency) :
+ LLCurlRequest(),
+ mConcurrency(concurrency),
+ mInQueue(0),
+ mMutex(NULL),
+ mHandleCounter(1),
+ mTotalIssuedRequests(0),
+ mTotalReceivedBits(0)
+{
+ mGlobalTimer.reset();
+}
+
+LLCurlTextureRequest::~LLCurlTextureRequest()
+{
+ mRequestMap.clear();
+
+ for(req_queue_t::iterator iter = mCachedRequests.begin(); iter != mCachedRequests.end(); ++iter)
+ {
+ delete *iter;
+ }
+ mCachedRequests.clear();
+}
+
+//return 0: success
+// > 0: cached handle
+U32 LLCurlTextureRequest::getByteRange(const std::string& url,
+ const headers_t& headers,
+ S32 offset, S32 length, U32 pri,
+ LLCurl::ResponderPtr responder, F32 delay_time)
+{
+ U32 ret_val = 0;
+ bool success = false;
+
+ if(mInQueue < mConcurrency && delay_time < 0.f)
+ {
+ success = LLCurlRequest::getByteRange(url, headers, offset, length, responder);
+ }
+
+ LLMutexLock lock(&mMutex);
+
+ if(success)
+ {
+ mInQueue++;
+ mTotalIssuedRequests++;
+ }
+ else
+ {
+ request_t* request = new request_t(mHandleCounter, url, headers, offset, length, pri, responder);
+ if(delay_time > 0.f)
+ {
+ request->mStartTime = mGlobalTimer.getElapsedTimeF32() + delay_time;
+ }
+
+ mCachedRequests.insert(request);
+ mRequestMap[mHandleCounter] = request;
+ ret_val = mHandleCounter;
+ mHandleCounter++;
+
+ if(!mHandleCounter)
+ {
+ mHandleCounter = 1;
+ }
+ }
+
+ return ret_val;
+}
+
+void LLCurlTextureRequest::completeRequest(S32 received_bytes)
+{
+ LLMutexLock lock(&mMutex);
+
+ llassert_always(mInQueue > 0);
+
+ mInQueue--;
+ mTotalReceivedBits += received_bytes * 8;
+}
+
+void LLCurlTextureRequest::nextRequests()
+{
+ if(mCachedRequests.empty() || mInQueue >= mConcurrency)
+ {
+ return;
+ }
+
+ F32 cur_time = mGlobalTimer.getElapsedTimeF32();
+
+ req_queue_t::iterator iter;
+ {
+ LLMutexLock lock(&mMutex);
+ iter = mCachedRequests.begin();
+ }
+ while(1)
+ {
+ request_t* request = *iter;
+ if(request->mStartTime < cur_time)
+ {
+ if(!LLCurlRequest::getByteRange(request->mUrl, request->mHeaders, request->mOffset, request->mLength, request->mResponder))
+ {
+ break;
+ }
+
+ LLMutexLock lock(&mMutex);
+ ++iter;
+ mInQueue++;
+ mTotalIssuedRequests++;
+ mCachedRequests.erase(request);
+ mRequestMap.erase(request->mHandle);
+ delete request;
+
+ if(iter == mCachedRequests.end() || mInQueue >= mConcurrency)
+ {
+ break;
+ }
+ }
+ else
+ {
+ LLMutexLock lock(&mMutex);
+ ++iter;
+ if(iter == mCachedRequests.end() || mInQueue >= mConcurrency)
+ {
+ break;
+ }
+ }
+ }
+
+ return;
+}
+
+void LLCurlTextureRequest::updatePriority(U32 handle, U32 pri)
+{
+ if(!handle)
+ {
+ return;
+ }
+
+ LLMutexLock lock(&mMutex);
+
+ std::map<S32, request_t*>::iterator iter = mRequestMap.find(handle);
+ if(iter != mRequestMap.end())
+ {
+ request_t* req = iter->second;
+
+ if(req->mPriority != pri)
+ {
+ mCachedRequests.erase(req);
+ req->mPriority = pri;
+ mCachedRequests.insert(req);
+ }
+ }
+}
+
+void LLCurlTextureRequest::removeRequest(U32 handle)
+{
+ if(!handle)
+ {
+ return;
+ }
+
+ LLMutexLock lock(&mMutex);
+
+ std::map<S32, request_t*>::iterator iter = mRequestMap.find(handle);
+ if(iter != mRequestMap.end())
+ {
+ request_t* req = iter->second;
+ mRequestMap.erase(iter);
+ mCachedRequests.erase(req);
+ delete req;
+ }
+}
+
+bool LLCurlTextureRequest::isWaiting(U32 handle)
+{
+ if(!handle)
+ {
+ return false;
+ }
+
+ LLMutexLock lock(&mMutex);
+ return mRequestMap.find(handle) != mRequestMap.end();
+}
+
+U32 LLCurlTextureRequest::getTotalReceivedBits()
+{
+ LLMutexLock lock(&mMutex);
+
+ U32 bits = mTotalReceivedBits;
+ mTotalReceivedBits = 0;
+ return bits;
+}
+
+U32 LLCurlTextureRequest::getTotalIssuedRequests()
+{
+ LLMutexLock lock(&mMutex);
+ return mTotalIssuedRequests;
+}
+
+S32 LLCurlTextureRequest::getNumRequests()
+{
+ LLMutexLock lock(&mMutex);
+ return mInQueue;
+}
+
////////////////////////////////////////////////////////////////////////////
// For generating one easy request
// associated with a single multi request
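A hedged usage sketch of the new LLCurlTextureRequest class (illustrative only; the real caller is the texture fetcher, which is not part of this change. Method names and semantics come from the code above; everything else is a placeholder):

// Sketch: how a fetcher might drive LLCurlTextureRequest.
static void fetch_one(LLCurlTextureRequest& req_list,
                      const std::string& url,
                      const LLCurlRequest::headers_t& headers,
                      LLCurl::ResponderPtr responder,
                      U32 priority)
{
    // Returns 0 when the request is issued immediately, or a non-zero handle
    // when it is queued (concurrency limit reached or a delay was requested).
    U32 handle = req_list.getByteRange(url, headers, 0, 0, priority, responder);

    // Called periodically: issues queued requests as slots free up and their
    // start times pass.
    req_list.nextRequests();

    // While a request is still queued, its handle can be reprioritized or dropped.
    if (handle && req_list.isWaiting(handle))
    {
        req_list.updatePriority(handle, priority + 1);
        // req_list.removeRequest(handle);
    }

    // The responder's completion path is expected to call
    // req_list.completeRequest(received_bytes) so the in-flight count and
    // bandwidth statistics stay accurate.
}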
diff --git a/indra/llmessage/llcurl.h b/indra/llmessage/llcurl.h
index d6a7714d4c..20ebd86c06 100644
--- a/indra/llmessage/llcurl.h
+++ b/indra/llmessage/llcurl.h
@@ -414,6 +414,71 @@ private:
BOOL mProcessing;
};
+//for texture fetch only
+class LLCurlTextureRequest : public LLCurlRequest
+{
+public:
+ LLCurlTextureRequest(S32 concurrency);
+ ~LLCurlTextureRequest();
+
+ U32 getByteRange(const std::string& url, const headers_t& headers, S32 offset, S32 length, U32 pri, LLCurl::ResponderPtr responder, F32 delay_time = -1.f);
+ void nextRequests();
+ void completeRequest(S32 received_bytes);
+
+ void updatePriority(U32 handle, U32 pri);
+ void removeRequest(U32 handle);
+
+ U32 getTotalReceivedBits();
+ U32 getTotalIssuedRequests();
+ S32 getNumRequests();
+ bool isWaiting(U32 handle);
+
+private:
+ LLMutex mMutex;
+ S32 mConcurrency;
+ S32 mInQueue; //request currently in queue.
+ U32 mHandleCounter;
+ U32 mTotalIssuedRequests;
+ U32 mTotalReceivedBits;
+
+ typedef struct _request_t
+ {
+ _request_t(U32 handle, const std::string& url, const headers_t& headers, S32 offset, S32 length, U32 pri, LLCurl::ResponderPtr responder) :
+ mHandle(handle), mUrl(url), mHeaders(headers), mOffset(offset), mLength(length), mPriority(pri), mResponder(responder), mStartTime(0.f)
+ {}
+
+ U32 mHandle;
+ std::string mUrl;
+ LLCurlRequest::headers_t mHeaders;
+ S32 mOffset;
+ S32 mLength;
+ LLCurl::ResponderPtr mResponder;
+ U32 mPriority;
+ F32 mStartTime; //start time to issue this request
+ } request_t;
+
+ struct request_compare
+ {
+ bool operator()(const request_t* lhs, const request_t* rhs) const
+ {
+ if(lhs->mPriority != rhs->mPriority)
+ {
+ return lhs->mPriority > rhs->mPriority; // higher priority in front of queue (set)
+ }
+ else
+ {
+ return (U32)lhs < (U32)rhs;
+ }
+ }
+ };
+
+ typedef std::set<request_t*, request_compare> req_queue_t;
+ req_queue_t mCachedRequests;
+ std::map<S32, request_t*> mRequestMap;
+
+ LLFrameTimer mGlobalTimer;
+};
+
class LLCurlEasyRequest
{
public:
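The request_compare functor gives the std::set priority-queue semantics: iteration starts at the highest-priority request, and equal priorities fall back to comparing the pointers so distinct requests never compare equal. A standalone sketch of the same ordering (toy types, not viewer code; it uses uintptr_t for the tie-break, the portable form of the (U32) pointer cast in the patch):

#include <set>
#include <cstdint>

struct Req { unsigned pri; };

struct ReqCompare
{
    bool operator()(const Req* lhs, const Req* rhs) const
    {
        if (lhs->pri != rhs->pri)
            return lhs->pri > rhs->pri;  // highest priority sorts first
        return reinterpret_cast<std::uintptr_t>(lhs)
             < reinterpret_cast<std::uintptr_t>(rhs);  // tie-break by address
    }
};

// Iterating a std::set<Req*, ReqCompare> therefore visits requests from
// highest to lowest priority, which is what nextRequests() relies on when it
// walks mCachedRequests front to back.
typedef std::set<Req*, ReqCompare> ReqQueue;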