Diffstat (limited to 'indra/newview/lltexturecache.cpp')
-rw-r--r--  indra/newview/lltexturecache.cpp | 236
1 file changed, 117 insertions(+), 119 deletions(-)
diff --git a/indra/newview/lltexturecache.cpp b/indra/newview/lltexturecache.cpp
index 651070a2ea..9ad2322765 100644
--- a/indra/newview/lltexturecache.cpp
+++ b/indra/newview/lltexturecache.cpp
@@ -401,7 +401,8 @@ bool LLTextureCacheRemoteWorker::doRead()
// Second state / stage : identify the cache or not...
if (!done && (mState == CACHE))
{
- idx = mCache->getHeaderCacheEntry(mID, mImageSize);
+ LLTextureCache::Entry entry ;
+ idx = mCache->getHeaderCacheEntry(mID, entry);
if (idx < 0)
{
// The texture is *not* cached. We're done here...
@@ -410,6 +411,7 @@ bool LLTextureCacheRemoteWorker::doRead()
}
else
{
+ mImageSize = entry.mImageSize ;
// If the read offset is bigger than the header cache, we read directly from the body
// Note that currently, we *never* read with offset from the cache, so the result is *always* HEADER
mState = mOffset < TEXTURE_CACHE_ENTRY_SIZE ? HEADER : BODY;
@@ -531,13 +533,14 @@ bool LLTextureCacheRemoteWorker::doRead()
bool LLTextureCacheRemoteWorker::doWrite()
{
bool done = false;
- S32 idx = -1;
+ S32 idx = -1;
// First state / stage : check that what we're trying to cache is in an OK shape
if (mState == INIT)
{
llassert_always(mOffset == 0); // We currently do not support write offsets
llassert_always(mDataSize > 0); // Things will go badly wrong if mDataSize is nul or negative...
+ llassert_always(mImageSize >= mDataSize);
mState = CACHE;
}
@@ -547,14 +550,19 @@ bool LLTextureCacheRemoteWorker::doWrite()
if (!done && (mState == CACHE))
{
bool alreadyCached = false;
- S32 cur_imagesize = 0;
+ LLTextureCache::Entry entry ;
+
// Checks if this image is already in the entry list
- idx = mCache->getHeaderCacheEntry(mID, cur_imagesize);
- if (idx >= 0 && (cur_imagesize > 0))
+ idx = mCache->getHeaderCacheEntry(mID, entry);
+ if(idx < 0)
+ {
+ idx = mCache->setHeaderCacheEntry(mID, entry, mImageSize, mDataSize); // create the new entry.
+ }
+ else
{
- alreadyCached = true; // already there and non empty
+ alreadyCached = mCache->updateEntry(idx, entry, mImageSize, mDataSize); // update the existing entry.
}
- idx = mCache->setHeaderCacheEntry(mID, mImageSize); // create or touch the entry
+
if (idx < 0)
{
llwarns << "LLTextureCacheWorker: " << mID
@@ -564,10 +572,6 @@ bool LLTextureCacheRemoteWorker::doWrite()
}
else
{
- if (cur_imagesize > 0 && (mImageSize != cur_imagesize))
- {
- alreadyCached = false; // re-write the header if the size changed in all cases
- }
if (alreadyCached && (mDataSize <= TEXTURE_CACHE_ENTRY_SIZE))
{
// Small texture already cached case: we're done with writing
@@ -630,7 +634,7 @@ bool LLTextureCacheRemoteWorker::doWrite()
{
llassert(mDataSize > TEXTURE_CACHE_ENTRY_SIZE); // wouldn't make sense to be here otherwise...
S32 file_size = mDataSize - TEXTURE_CACHE_ENTRY_SIZE;
- if ((file_size > 0) && mCache->updateTextureEntryList(mID, file_size))
+
{
// build the cache file name from the UUID
std::string filename = mCache->getTextureFileName(mID);
@@ -648,10 +652,7 @@ bool LLTextureCacheRemoteWorker::doWrite()
done = true;
}
}
- else
- {
- mDataSize = 0; // no data written
- }
+
// Nothing else to do at that point...
done = true;
}
@@ -742,7 +743,7 @@ LLTextureCache::LLTextureCache(bool threaded)
mHeaderMutex(NULL),
mListMutex(NULL),
mHeaderAPRFile(NULL),
- mReadOnly(FALSE),
+ mReadOnly(TRUE), //do not allow changes to the texture cache until setReadOnly() is called.
mTexturesSizeTotal(0),
mDoPurge(FALSE)
{
@@ -825,53 +826,6 @@ std::string LLTextureCache::getTextureFileName(const LLUUID& id)
return filename;
}
-bool LLTextureCache::updateTextureEntryList(const LLUUID& id, S32 bodysize)
-{
- bool res = false;
- bool purge = false;
- {
- LLMutexLock lock(&mHeaderMutex);
- size_map_t::iterator iter1 = mTexturesSizeMap.find(id);
- if (iter1 == mTexturesSizeMap.end() || iter1->second < bodysize)
- {
- llassert_always(bodysize > 0);
-
- S32 oldbodysize = 0;
- if (iter1 != mTexturesSizeMap.end())
- {
- oldbodysize = iter1->second;
- }
-
- Entry entry;
- S32 idx = openAndReadEntry(id, entry, false);
- if (idx < 0)
- {
- llwarns << "Failed to open entry: " << id << llendl;
- removeCachedTexture(id) ;
- return false;
- }
- else if (oldbodysize != entry.mBodySize)
- {
- // only happens to 64 bits systems, do not know why.
- llwarns << "Entry mismatch in mTextureSizeMap / mHeaderIDMap"
- << " idx=" << idx << " oldsize=" << oldbodysize << " entrysize=" << entry.mBodySize << llendl;
- }
- updateEntry(idx, entry, entry.mImageSize, bodysize);
-
- if (mTexturesSizeTotal > sCacheMaxTexturesSize)
- {
- purge = true;
- }
- res = true;
- }
- }
- if (purge)
- {
- mDoPurge = TRUE;
- }
- return res;
-}
-
//debug
BOOL LLTextureCache::isInCache(const LLUUID& id)
{
@@ -929,13 +883,16 @@ U32 LLTextureCache::sCacheMaxEntries = MAX_REASONABLE_FILE_SIZE / TEXTURE_CACHE_
S64 LLTextureCache::sCacheMaxTexturesSize = 0; // no limit
const char* entries_filename = "texture.entries";
const char* cache_filename = "texture.cache";
-const char* textures_dirname = "textures";
+const char* old_textures_dirname = "textures";
+//change the location of the texture cache to prevent it from being deleted by older viewers.
+const char* textures_dirname = "texturecache";
void LLTextureCache::setDirNames(ELLPath location)
{
std::string delem = gDirUtilp->getDirDelimiter();
- mHeaderEntriesFileName = gDirUtilp->getExpandedFilename(location, entries_filename);
- mHeaderDataFileName = gDirUtilp->getExpandedFilename(location, cache_filename);
+
+ mHeaderEntriesFileName = gDirUtilp->getExpandedFilename(location, textures_dirname, entries_filename);
+ mHeaderDataFileName = gDirUtilp->getExpandedFilename(location, textures_dirname, cache_filename);
mTexturesDirName = gDirUtilp->getExpandedFilename(location, textures_dirname);
}
@@ -947,16 +904,38 @@ void LLTextureCache::purgeCache(ELLPath location)
{
setDirNames(location);
llassert_always(mHeaderAPRFile == NULL);
- LLAPRFile::remove(mHeaderEntriesFileName, getLocalAPRFilePool());
- LLAPRFile::remove(mHeaderDataFileName, getLocalAPRFilePool());
+
+ //remove the legacy cache if it exists
+ std::string texture_dir = mTexturesDirName ;
+ mTexturesDirName = gDirUtilp->getExpandedFilename(location, old_textures_dirname);
+ if(LLFile::isdir(mTexturesDirName))
+ {
+ std::string file_name = gDirUtilp->getExpandedFilename(location, entries_filename);
+ LLAPRFile::remove(file_name, getLocalAPRFilePool());
+
+ file_name = gDirUtilp->getExpandedFilename(location, cache_filename);
+ LLAPRFile::remove(file_name, getLocalAPRFilePool());
+
+ purgeAllTextures(true);
+ }
+ mTexturesDirName = texture_dir ;
}
+
+ //remove the current texture cache.
purgeAllTextures(true);
}
-S64 LLTextureCache::initCache(ELLPath location, S64 max_size, BOOL read_only)
+//called in the main thread before initCache(...) is called.
+void LLTextureCache::setReadOnly(BOOL read_only)
{
- mReadOnly = read_only;
-
+ mReadOnly = read_only ;
+}
+
+//called in the main thread.
+S64 LLTextureCache::initCache(ELLPath location, S64 max_size, BOOL disable_texture_cache)
+{
+ llassert_always(getPending() == 0) ; //should not start accessing the texture cache before it is initialized.
+
S64 header_size = (max_size * 2) / 10;
S64 max_entries = header_size / TEXTURE_CACHE_ENTRY_SIZE;
sCacheMaxEntries = (S32)(llmin((S64)sCacheMaxEntries, max_entries));
@@ -968,6 +947,15 @@ S64 LLTextureCache::initCache(ELLPath location, S64 max_size, BOOL read_only)
sCacheMaxTexturesSize = max_size;
max_size -= sCacheMaxTexturesSize;
+ if(disable_texture_cache) //the texture cache is disabled
+ {
+ llinfos << "The texture cache is disabled!" << llendl ;
+ setReadOnly(TRUE) ;
+ purgeAllTextures(true);
+
+ return max_size ;
+ }
+
LL_INFOS("TextureCache") << "Headers: " << sCacheMaxEntries
<< " Textures size: " << sCacheMaxTexturesSize/(1024*1024) << " MB" << LL_ENDL;
@@ -976,6 +964,7 @@ S64 LLTextureCache::initCache(ELLPath location, S64 max_size, BOOL read_only)
if (!mReadOnly)
{
LLFile::mkdir(mTexturesDirName);
+
const char* subdirs = "0123456789abcdef";
for (S32 i=0; i<16; i++)
{
@@ -986,6 +975,8 @@ S64 LLTextureCache::initCache(ELLPath location, S64 max_size, BOOL read_only)
readHeaderCache();
purgeTextures(true); // calc mTexturesSize and make some room in the texture cache if we need it
+ llassert_always(getPending() == 0) ; //should not start accessing the texture cache before it is initialized.
+
return max_size; // unused cache space
}
@@ -1170,55 +1161,64 @@ void LLTextureCache::updateEntryTimeStamp(S32 idx, Entry& entry)
{
if (!mReadOnly)
{
- llassert_always(entry.mImageSize > entry.mBodySize);
-
entry.mTime = time(NULL);
mUpdatedEntryMap[idx] = entry ;
}
}
}
-//mHeaderMutex is locked before calling this.
//update an existing entry, write to header file immediately.
-void LLTextureCache::updateEntry(S32 idx, Entry& entry, S32 new_image_size, S32 new_body_size)
+bool LLTextureCache::updateEntry(S32 idx, Entry& entry, S32 new_image_size, S32 new_data_size)
{
- llassert_always(new_image_size > -1) ;
-
+ S32 new_body_size = llmax(0, new_data_size - TEXTURE_CACHE_ENTRY_SIZE) ;
+
if(new_image_size == entry.mImageSize && new_body_size == entry.mBodySize)
{
- updateEntryTimeStamp(idx, entry) ; //nothing changed.
+ return true ; //nothing changed.
}
- else if (idx >= 0)
+ else
{
- if (!mReadOnly)
- {
- llassert_always(new_image_size > new_body_size) ;
+ bool purge = false ;
- bool update_header = false ;
- if(entry.mImageSize < 0) //is a brand-new entry
- {
- mHeaderIDMap[entry.mID] = idx;
- mTexturesSizeMap[entry.mID] = new_body_size ;
- mTexturesSizeTotal += new_body_size ;
-
- // Update Header
- update_header = true ;
- }
- else if (entry.mBodySize != new_body_size)
- {
- //already in mHeaderIDMap.
- mTexturesSizeMap[entry.mID] = new_body_size ;
- mTexturesSizeTotal -= entry.mBodySize ;
- mTexturesSizeTotal += new_body_size ;
- }
- entry.mTime = time(NULL);
- entry.mImageSize = new_image_size ;
- entry.mBodySize = new_body_size ;
+ lockHeaders() ;
+
+ bool update_header = false ;
+ if(entry.mImageSize < 0) //is a brand-new entry
+ {
+ mHeaderIDMap[entry.mID] = idx;
+ mTexturesSizeMap[entry.mID] = new_body_size ;
+ mTexturesSizeTotal += new_body_size ;
-// llinfos << "Updating TE: " << idx << ": " << id << " Size: " << entry.mBodySize << " Time: " << entry.mTime << llendl;
- writeEntryToHeaderImmediately(idx, entry, update_header) ;
+ // Update Header
+ update_header = true ;
+ }
+ else if (entry.mBodySize != new_body_size)
+ {
+ //already in mHeaderIDMap.
+ mTexturesSizeMap[entry.mID] = new_body_size ;
+ mTexturesSizeTotal -= entry.mBodySize ;
+ mTexturesSizeTotal += new_body_size ;
+ }
+ entry.mTime = time(NULL);
+ entry.mImageSize = new_image_size ;
+ entry.mBodySize = new_body_size ;
+
+ writeEntryToHeaderImmediately(idx, entry, update_header) ;
+
+ if (mTexturesSizeTotal > sCacheMaxTexturesSize)
+ {
+ purge = true;
+ }
+
+ unlockHeaders() ;
+
+ if (purge)
+ {
+ mDoPurge = TRUE;
}
}
+
+ return false ;
}
U32 LLTextureCache::openAndReadEntries(std::vector<Entry>& entries)
@@ -1462,9 +1462,9 @@ void LLTextureCache::purgeAllTextures(bool purge_directories)
}
if (purge_directories)
{
- gDirUtilp->deleteFilesInDir(mTexturesDirName,mask);
+ gDirUtilp->deleteFilesInDir(mTexturesDirName, mask);
LLFile::rmdir(mTexturesDirName);
- }
+ }
}
mHeaderIDMap.clear();
mTexturesSizeMap.clear();
@@ -1621,39 +1621,37 @@ LLTextureCacheWorker* LLTextureCache::getWriter(handle_t handle)
// Called from work thread
// Reads imagesize from the header, updates timestamp
-S32 LLTextureCache::getHeaderCacheEntry(const LLUUID& id, S32& imagesize)
+S32 LLTextureCache::getHeaderCacheEntry(const LLUUID& id, Entry& entry)
{
- LLMutexLock lock(&mHeaderMutex);
- Entry entry;
+ LLMutexLock lock(&mHeaderMutex);
S32 idx = openAndReadEntry(id, entry, false);
if (idx >= 0)
- {
- imagesize = entry.mImageSize;
+ {
updateEntryTimeStamp(idx, entry); // updates time
}
return idx;
}
// Writes imagesize to the header, updates timestamp
-S32 LLTextureCache::setHeaderCacheEntry(const LLUUID& id, S32 imagesize)
+S32 LLTextureCache::setHeaderCacheEntry(const LLUUID& id, Entry& entry, S32 imagesize, S32 datasize)
{
mHeaderMutex.lock();
- llassert_always(imagesize >= 0);
- Entry entry;
S32 idx = openAndReadEntry(id, entry, true);
+ mHeaderMutex.unlock();
+
if (idx >= 0)
{
- updateEntry(idx, entry, imagesize, entry.mBodySize);
- mHeaderMutex.unlock();
+ updateEntry(idx, entry, imagesize, datasize);
}
else // retry
{
- mHeaderMutex.unlock();
readHeaderCache(); // We couldn't write an entry, so refresh the LRU
+
mHeaderMutex.lock();
llassert_always(!mLRU.empty() || mHeaderEntriesInfo.mEntries < sCacheMaxEntries);
mHeaderMutex.unlock();
- idx = setHeaderCacheEntry(id, imagesize); // assert above ensures no inf. recursion
+
+ idx = setHeaderCacheEntry(id, entry, imagesize, datasize); // assert above ensures no inf. recursion
}
return idx;
}
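
For reference, a minimal caller-side sketch (not part of this commit) of how the reworked header-entry API above fits together, mirroring the new doWrite() flow; the wrapper function and its parameters are hypothetical, only the LLTextureCache methods and the Entry type come from this diff.

S32 writeHeaderEntry(LLTextureCache* cache, const LLUUID& id, S32 image_size, S32 data_size)
{
	LLTextureCache::Entry entry;

	// Look the texture up; on a hit the entry is filled in and its timestamp refreshed.
	S32 idx = cache->getHeaderCacheEntry(id, entry);
	if (idx < 0)
	{
		// Cache miss: create a brand-new entry carrying both sizes.
		idx = cache->setHeaderCacheEntry(id, entry, image_size, data_size);
	}
	else
	{
		// Cache hit: updateEntry() returns true only when nothing changed,
		// i.e. the texture is already cached at the same image and body size.
		bool already_cached = cache->updateEntry(idx, entry, image_size, data_size);
		(void)already_cached; // doWrite() uses this to skip rewriting small textures
	}
	return idx; // negative if the entry could not be created, as checked in doWrite()
}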