Diffstat (limited to 'indra/llimage/llimagej2c.cpp')
-rwxr-xr-x [-rw-r--r--]  indra/llimage/llimagej2c.cpp  98
1 file changed, 48 insertions, 50 deletions
diff --git a/indra/llimage/llimagej2c.cpp b/indra/llimage/llimagej2c.cpp
index 44e6b89dd3..452aad25cb 100644..100755
--- a/indra/llimage/llimagej2c.cpp
+++ b/indra/llimage/llimagej2c.cpp
@@ -29,6 +29,7 @@
#include "llmemtype.h"
#include "lltimer.h"
#include "llmath.h"
+#include "llmemory.h"
typedef LLImageJ2CImpl* (*CreateLLImageJ2CFunction)();
typedef void (*DestroyLLImageJ2CFunction)(LLImageJ2CImpl*);
@@ -55,7 +56,7 @@ std::string LLImageJ2C::getEngineInfo()
LLImageJ2C::LLImageJ2C() : LLImageFormatted(IMG_CODEC_J2C),
mMaxBytes(0),
mRawDiscardLevel(-1),
- mRate(0.0f),
+ mRate(DEFAULT_COMPRESSION_RATE),
mReversible(FALSE),
mAreaUsedForDataSizeCalcs(0)
{
@@ -141,6 +142,7 @@ BOOL LLImageJ2C::updateData()
BOOL LLImageJ2C::initDecode(LLImageRaw &raw_image, int discard_level, int* region)
{
+ setDiscardLevel(discard_level != -1 ? discard_level : 0);
return mImpl->initDecode(*this,raw_image,discard_level,region);
}
@@ -260,19 +262,34 @@ S32 LLImageJ2C::calcHeaderSizeJ2C()
//static
S32 LLImageJ2C::calcDataSizeJ2C(S32 w, S32 h, S32 comp, S32 discard_level, F32 rate)
{
- // Note: this only provides an *estimate* of the size in bytes of an image level
- // *TODO: find a way to read the true size (when available) and convey the fact
- // that the result is an estimate in the other cases
- if (rate <= 0.f) rate = .125f;
- while (discard_level > 0)
+ // Note: This estimates the size of the first through last quality layer of a given discard level.
+ // It is an efficient approximation though: the true discard level boundary would in general
+ // be too big for fast fetching.
+ // For details about the equation used here, see https://wiki.lindenlab.com/wiki/THX1138_KDU_Improvements#Byte_Range_Study
+
+ // Estimate the number of layers. This is consistent with what's done for j2c encoding in LLImageJ2CKDU::encodeImpl().
+ S32 nb_layers = 1;
+ S32 surface = w*h;
+ S32 s = 64*64;
+ while (surface > s)
{
- if (w < 1 || h < 1)
- break;
- w >>= 1;
- h >>= 1;
- discard_level--;
+ nb_layers++;
+ s *= 4;
}
- S32 bytes = (S32)((F32)(w*h*comp)*rate);
+ F32 layer_factor = 3.0f * (7 - llclamp(nb_layers,1,6));
+
+ // Compute w/pow(2,discard_level) and h/pow(2,discard_level)
+ w >>= discard_level;
+ h >>= discard_level;
+ w = llmax(w, 1);
+ h = llmax(h, 1);
+
+ // Temporary: compute both the new and old ranges and pick one according to the TextureNewByteRange setting
+ // *TODO: Take the old code out once we have enough tests done
+ S32 bytes;
+ S32 new_bytes = (S32) (sqrt((F32)(w*h))*(F32)(comp)*rate*1000.f/layer_factor);
+ S32 old_bytes = (S32)((F32)(w*h*comp)*rate);
+ bytes = (LLImage::useNewByteRange() && (new_bytes < old_bytes) ? new_bytes : old_bytes);
bytes = llmax(bytes, calcHeaderSizeJ2C());
return bytes;
}
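For reference, a minimal standalone sketch of the new estimate above (illustrative only, not part of the commit; the helper name and the hard-coded 0.125f rate are assumptions):

// Standalone sketch of the byte-range estimate above (assumption-laden, for illustration).
#include <algorithm>
#include <cmath>
#include <cstdio>

static int estimate_j2c_bytes(int w, int h, int comp, int discard_level, float rate)
{
    // Estimate the number of quality layers from the full-resolution surface,
    // starting from a 64x64 reference and quadrupling it per layer, as above.
    int nb_layers = 1;
    int s = 64 * 64;
    while (w * h > s)
    {
        nb_layers++;
        s *= 4;
    }
    float layer_factor = 3.0f * (7 - std::min(std::max(nb_layers, 1), 6));

    // Resolution of the requested discard level (each level halves both dimensions).
    int dw = std::max(w >> discard_level, 1);
    int dh = std::max(h >> discard_level, 1);

    int new_bytes = (int)(std::sqrt((float)(dw * dh)) * (float)comp * rate * 1000.f / layer_factor);
    int old_bytes = (int)((float)(dw * dh * comp) * rate);
    // The commit picks new_bytes only when LLImage::useNewByteRange() is set and it is
    // smaller than old_bytes; this sketch simply returns the smaller candidate.
    return std::min(new_bytes, old_bytes);
}

int main()
{
    // Worked example: 512x512 RGB at rate 0.125 gives nb_layers = 4, layer_factor = 9.
    // Old estimate at discard 0: 512*512*3*0.125 = 98304 bytes.
    // New estimate at discard 0: 512*3*0.125*1000/9 ~= 21333 bytes.
    std::printf("%d\n", estimate_j2c_bytes(512, 512, 3, 0, 0.125f));
    return 0;
}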
@@ -282,15 +299,12 @@ S32 LLImageJ2C::calcHeaderSize()
return calcHeaderSizeJ2C();
}
-
-// calcDataSize() returns how many bytes to read
-// to load discard_level (including header and higher discard levels)
+// calcDataSize() returns how many bytes to read to load discard_level (including header)
S32 LLImageJ2C::calcDataSize(S32 discard_level)
{
discard_level = llclamp(discard_level, 0, MAX_DISCARD_LEVEL);
-
if ( mAreaUsedForDataSizeCalcs != (getHeight() * getWidth())
- || mDataSizes[0] == 0)
+ || (mDataSizes[0] == 0))
{
mAreaUsedForDataSizeCalcs = getHeight() * getWidth();
@@ -300,25 +314,6 @@ S32 LLImageJ2C::calcDataSize(S32 discard_level)
mDataSizes[level] = calcDataSizeJ2C(getWidth(), getHeight(), getComponents(), level, mRate);
level--;
}
-
- /* This is technically a more correct way to calculate the size required
- for each discard level, since they should include the size needed for
- lower levels. Unfortunately, this doesn't work well and will lead to
- download stalls. The true correct way is to parse the header. This will
- all go away with http textures at some point.
-
- // Calculate the size for each discard level. Lower levels (higher quality)
- // contain the cumulative size of higher levels
- S32 total_size = calcHeaderSizeJ2C();
-
- S32 level = MAX_DISCARD_LEVEL; // Start at the highest discard
- while ( level >= 0 )
- { // Add in this discard level and all before it
- total_size += calcDataSizeJ2C(getWidth(), getHeight(), getComponents(), level, mRate);
- mDataSizes[level] = total_size;
- level--;
- }
- */
}
return mDataSizes[discard_level];
}
@@ -333,8 +328,9 @@ S32 LLImageJ2C::calcDiscardLevelBytes(S32 bytes)
}
while (1)
{
- S32 bytes_needed = calcDataSize(discard_level); // virtual
- if (bytes >= bytes_needed - (bytes_needed>>2)) // For J2c, up the res at 75% of the optimal number of bytes
+ S32 bytes_needed = calcDataSize(discard_level);
+ // Use TextureReverseByteRange percent (see settings.xml) of the optimal size as the threshold for correct rendering of the given discard level
+ if (bytes >= (bytes_needed*LLImage::getReverseByteRangePercent()/100))
{
break;
}
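Illustrative only (not part of the commit): a small sketch contrasting the old fixed 75% threshold with the new percent-driven check; a TextureReverseByteRange value of 75 reproduces the old behaviour, other values are shown purely as examples.

// Sketch of the discard-level acceptance test, old vs. new (helper names are not viewer API).
#include <cstdio>

static bool enough_bytes_old(int bytes, int bytes_needed)
{
    return bytes >= bytes_needed - (bytes_needed >> 2);   // fixed 75% of the optimal size
}

static bool enough_bytes_new(int bytes, int bytes_needed, int percent)
{
    return bytes >= bytes_needed * percent / 100;          // TextureReverseByteRange percent
}

int main()
{
    // With bytes_needed = 20000: the old rule accepts at 15000 bytes; the new rule with
    // percent = 75 agrees, while percent = 50 would already accept at 10000 bytes.
    std::printf("%d %d %d\n",
                enough_bytes_old(15000, 20000),
                enough_bytes_new(15000, 20000, 75),
                enough_bytes_new(10000, 20000, 50));
    return 0;
}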
@@ -347,11 +343,6 @@ S32 LLImageJ2C::calcDiscardLevelBytes(S32 bytes)
return discard_level;
}
-void LLImageJ2C::setRate(F32 rate)
-{
- mRate = rate;
-}
-
void LLImageJ2C::setMaxBytes(S32 max_bytes)
{
mMaxBytes = max_bytes;
@@ -385,14 +376,14 @@ BOOL LLImageJ2C::loadAndValidate(const std::string &filename)
}
else
{
- U8 *data = new U8[file_size];
+ U8 *data = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), file_size);
apr_size_t bytes_read = file_size;
apr_status_t s = apr_file_read(apr_file, data, &bytes_read); // modifies bytes_read
infile.close() ;
if (s != APR_SUCCESS || (S32)bytes_read != file_size)
{
- delete[] data;
+ FREE_MEM(LLImageBase::getPrivatePool(), data);
setLastError("Unable to read entire file");
res = FALSE;
}
@@ -473,6 +464,7 @@ LLImageCompressionTester::LLImageCompressionTester() : LLMetricPerformanceTester
addMetric("Perf Compression (kB/s)");
mRunBytesInDecompression = 0;
+ mRunBytesOutDecompression = 0;
mRunBytesInCompression = 0;
mTotalBytesInDecompression = 0;
@@ -482,6 +474,7 @@ LLImageCompressionTester::LLImageCompressionTester() : LLMetricPerformanceTester
mTotalTimeDecompression = 0.0f;
mTotalTimeCompression = 0.0f;
+ mRunTimeDecompression = 0.0f;
}
LLImageCompressionTester::~LLImageCompressionTester()
@@ -500,10 +493,10 @@ void LLImageCompressionTester::outputTestRecord(LLSD *sd)
F32 decompressionRate = 0.0f;
F32 compressionRate = 0.0f;
- F32 totalkBInDecompression = (F32)(mTotalBytesInDecompression) / 1000.0;
- F32 totalkBOutDecompression = (F32)(mTotalBytesOutDecompression) / 1000.0;
- F32 totalkBInCompression = (F32)(mTotalBytesInCompression) / 1000.0;
- F32 totalkBOutCompression = (F32)(mTotalBytesOutCompression) / 1000.0;
+ F32 totalkBInDecompression = (F32)(mTotalBytesInDecompression) / 1000.f;
+ F32 totalkBOutDecompression = (F32)(mTotalBytesOutDecompression) / 1000.f;
+ F32 totalkBInCompression = (F32)(mTotalBytesInCompression) / 1000.f;
+ F32 totalkBOutCompression = (F32)(mTotalBytesOutCompression) / 1000.f;
if (!is_approx_zero(mTotalTimeDecompression))
{
@@ -564,12 +557,17 @@ void LLImageCompressionTester::updateDecompressionStats(const S32 bytesIn, const
mTotalBytesInDecompression += bytesIn;
mRunBytesInDecompression += bytesIn;
mTotalBytesOutDecompression += bytesOut;
- if (mRunBytesInDecompression > (1000000))
+ mRunBytesOutDecompression += bytesOut;
+ //if (mRunBytesInDecompression > (1000000))
+ if (mRunBytesOutDecompression > (10000000))
+ //if ((mTotalTimeDecompression - mRunTimeDecompression) >= (5.0f))
{
// Output everything
outputTestResults();
// Reset the decompression data of the run
mRunBytesInDecompression = 0;
+ mRunBytesOutDecompression = 0;
+ mRunTimeDecompression = mTotalTimeDecompression;
}
}