Diffstat (limited to 'indra/llrender/llimagegl.cpp')
-rw-r--r--  indra/llrender/llimagegl.cpp  62
1 file changed, 50 insertions(+), 12 deletions(-)
diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp
index 03ac10c00a..67b4ada62f 100644
--- a/indra/llrender/llimagegl.cpp
+++ b/indra/llrender/llimagegl.cpp
@@ -70,6 +70,7 @@ static U64 sTextureBytes = 0;
void LLImageGLMemory::alloc_tex_image(U32 width, U32 height, U32 intformat, U32 count)
{
U32 texUnit = gGL.getCurrentTexUnitIndex();
+ llassert(texUnit == 0); // allocations should always be done on tex unit 0
U32 texName = gGL.getTexUnit(texUnit)->getCurrTexture();
U64 size = LLImageGL::dataFormatBytes(intformat, width, height);
size *= count;
@@ -77,6 +78,8 @@ void LLImageGLMemory::alloc_tex_image(U32 width, U32 height, U32 intformat, U32
llassert(size >= 0);
sTexMemMutex.lock();
+
+ // it is a precondition that no existing allocation exists for this texture
llassert(sTextureAllocs.find(texName) == sTextureAllocs.end());
sTextureAllocs[texName] = size;
@@ -90,7 +93,7 @@ void LLImageGLMemory::free_tex_image(U32 texName)
{
sTexMemMutex.lock();
auto iter = sTextureAllocs.find(texName);
- if (iter != sTextureAllocs.end())
+ if (iter != sTextureAllocs.end()) // sometimes a texName will be "freed" before it is allocated (e.g. first call to setManualImage for a given texName)
{
llassert(iter->second <= sTextureBytes); // sTextureBytes MUST NOT go below zero
@@ -115,6 +118,7 @@ void LLImageGLMemory::free_tex_images(U32 count, const U32* texNames)
void LLImageGLMemory::free_cur_tex_image()
{
U32 texUnit = gGL.getCurrentTexUnitIndex();
+ llassert(texUnit == 0); // frees should always be done on tex unit 0
U32 texName = gGL.getTexUnit(texUnit)->getCurrTexture();
free_tex_image(texName);
}
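
For orientation, the bookkeeping these four hunks harden amounts to a mutex-guarded map from GL texture name to allocated bytes, plus a global byte counter. A minimal standalone sketch of that pattern, assuming nothing beyond the standard library (sTextureAllocs, sTextureBytes, and sTexMemMutex mirror names in this file; the function names are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <mutex>
    #include <unordered_map>

    // Minimal sketch of the accounting pattern above, not the viewer's code.
    static std::mutex sTexMemMutex;
    static std::unordered_map<uint32_t, uint64_t> sTextureAllocs; // texName -> bytes
    static uint64_t sTextureBytes = 0;

    void alloc_tex_image_sketch(uint32_t texName, uint64_t size)
    {
        std::lock_guard<std::mutex> lock(sTexMemMutex);
        // precondition asserted in the patch: no live allocation for this name
        assert(sTextureAllocs.find(texName) == sTextureAllocs.end());
        sTextureAllocs[texName] = size;
        sTextureBytes += size;
    }

    void free_tex_image_sketch(uint32_t texName)
    {
        std::lock_guard<std::mutex> lock(sTexMemMutex);
        auto iter = sTextureAllocs.find(texName);
        if (iter != sTextureAllocs.end()) // a name may be "freed" before it was ever tracked
        {
            assert(iter->second <= sTextureBytes); // the global counter must not underflow
            sTextureBytes -= iter->second;
            sTextureAllocs.erase(iter);
        }
    }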
@@ -1041,15 +1045,47 @@ void sub_image_lines(U32 target, S32 miplevel, S32 x_offset, S32 y_offset, S32 w
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
+ LL_PROFILE_ZONE_NUM(width);
+ LL_PROFILE_ZONE_NUM(height);
+
U32 components = LLImageGL::dataFormatComponents(pixformat);
U32 type_width = type_width_from_pixtype(pixtype);
const U32 line_width = data_width * components * type_width;
const U32 y_offset_end = y_offset + height;
- for (U32 y_pos = y_offset; y_pos < y_offset_end; ++y_pos)
+
+ if (width == data_width && height % 32 == 0)
+ {
+ LL_PROFILE_ZONE_NAMED_CATEGORY_TEXTURE("subimage - batched lines");
+
+ // full width, batch multiple lines at a time
+ // set batch size based on width
+ U32 batch_size = 32;
+
+ if (width > 1024)
+ {
+ batch_size = 8;
+ }
+ else if (width > 512)
+ {
+ batch_size = 16;
+ }
+
+ // full width texture, upload batch_size lines at a time
+ for (U32 y_pos = y_offset; y_pos < y_offset_end; y_pos += batch_size)
+ {
+ glTexSubImage2D(target, miplevel, x_offset, y_pos, width, batch_size, pixformat, pixtype, src);
+ src += line_width * batch_size;
+ }
+ }
+ else
{
- glTexSubImage2D(target, miplevel, x_offset, y_pos, width, 1, pixformat, pixtype, src);
- src += line_width;
+ // partial width or strange height
+ for (U32 y_pos = y_offset; y_pos < y_offset_end; y_pos += 1)
+ {
+ glTexSubImage2D(target, miplevel, x_offset, y_pos, width, 1, pixformat, pixtype, src);
+ src += line_width;
+ }
}
}
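
The new branch above trades many one-row glTexSubImage2D calls for a few multi-row ones whenever the upload covers the full data width; for thin uploads the per-call driver overhead, not the byte count, is what dominates. A hedged sketch of the same batching logic in isolation, assuming a current GL context and the guard conditions from the hunk (width == data_width, and height a multiple of 32, which every batch size divides):

    #include <cstddef>
    #include <cstdint>
    #include <GL/gl.h> // or the engine's GL header of choice

    // Sketch of the row batching above; the free function and its name are illustrative.
    static void upload_rows_batched(GLenum target, GLint mip, GLint x, GLint y,
                                    GLsizei width, GLsizei height,
                                    GLenum format, GLenum type,
                                    const uint8_t* src, size_t line_bytes)
    {
        // wider rows carry more bytes per call, so batch fewer of them
        GLsizei batch = 32;
        if (width > 1024)     { batch = 8; }
        else if (width > 512) { batch = 16; }

        for (GLint row = y; row < y + height; row += batch)
        {
            glTexSubImage2D(target, mip, x, row, width, batch, format, type, src);
            src += line_bytes * batch;
        }
    }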
@@ -1240,8 +1276,8 @@ void LLImageGL::deleteTextures(S32 numTextures, const U32 *textures)
if (!sFreeList[idx].empty())
{
- glDeleteTextures((GLsizei) sFreeList[idx].size(), sFreeList[idx].data());
free_tex_images((GLsizei) sFreeList[idx].size(), sFreeList[idx].data());
+ glDeleteTextures((GLsizei)sFreeList[idx].size(), sFreeList[idx].data());
sFreeList[idx].resize(0);
}
}
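
Worth noting on this hunk: the accounting release now runs before glDeleteTextures rather than after. Presumably this closes a window in which a just-deleted name could be recycled by glGenTextures and re-tracked while its stale entry was still in sTextureAllocs, which would trip the no-existing-allocation assert added to alloc_tex_image above.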
@@ -2135,6 +2171,8 @@ void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
return ;
}
+ LL_PROFILE_ZONE_SCOPED_CATEGORY_TEXTURE;
+
U32 length = w * h;
U32 alphatotal = 0;
@@ -2146,15 +2184,15 @@ void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
// this will mid-skew the data (and thus increase the chances of not
// being used as a mask) from high-frequency alpha maps which
// suffer the worst from aliasing when used as alpha masks.
- if (w >= 2 && h >= 2)
+ if (w >= 4 && h >= 4)
{
- llassert(w%2 == 0);
- llassert(h%2 == 0);
+ llassert(w%4 == 0);
+ llassert(h%4 == 0);
const GLubyte* rowstart = ((const GLubyte*) data_in) + mAlphaOffset;
- for (U32 y = 0; y < h; y+=2)
+ for (U32 y = 0; y < h; y+=4)
{
const GLubyte* current = rowstart;
- for (U32 x = 0; x < w; x+=2)
+ for (U32 x = 0; x < w; x+=4)
{
const U32 s1 = current[0];
alphatotal += s1;
@@ -2178,7 +2216,7 @@ void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
}
- rowstart += 2 * w * mAlphaStride;
+ rowstart += 4 * w * mAlphaStride;
}
length *= 2; // we sampled everything twice, essentially
}
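
This hunk coarsens analyzeAlpha's sampling from a 2-pixel to a 4-pixel stride on both axes, visiting a quarter as many cells, under the profile zone added above. A stripped-down sketch of the traversal after the change; 'stride' stands in for mAlphaStride (bytes per pixel), the inner pointer advance is assumed to match the new stride, and the extra neighbor and mid-point samples elided from the hunk's context are omitted:

    #include <cstdint>

    // Illustrative traversal only, not the full sampling logic.
    static uint32_t sample_alpha_coarse(const uint8_t* alpha, uint32_t w, uint32_t h,
                                        uint32_t stride)
    {
        uint32_t alphatotal = 0;
        const uint8_t* rowstart = alpha;
        for (uint32_t y = 0; y < h; y += 4)       // was y += 2
        {
            const uint8_t* current = rowstart;
            for (uint32_t x = 0; x < w; x += 4)   // was x += 2
            {
                alphatotal += current[0];         // s1 in the original loop
                current += 4 * stride;
            }
            rowstart += 4 * w * stride;           // was 2 * w * stride
        }
        return alphatotal;
    }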
@@ -2461,7 +2499,7 @@ bool LLImageGL::scaleDown(S32 desired_discard)
{ // use a PBO to downscale the texture
U64 size = getBytes(desired_discard);
llassert(size <= 2048 * 2048 * 4); // we shouldn't be using this method to downscale huge textures, but it'll work
- gGL.getTexUnit(0)->bind(this);
+ gGL.getTexUnit(0)->bind(this, false, true);
if (sScratchPBO == 0)
{
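
Finally, the one-line change in this last hunk passes an extra argument to LLTexUnit::bind; given the PBO comment above it, this plausibly forces an actual glBindTexture even when the unit's cached state believes the texture is already bound, so the readback that follows targets the right object. A hedged sketch of a PBO round-trip downscale in the spirit of this function, assuming the texture already has mipmaps to read from and that PBO entry points are available via a loader (sScratchPBO mirrors the static referenced in this file; everything else, including formats and the mip choice, is illustrative rather than the viewer's exact sequence):

    #include <GL/gl.h> // desktop GL with PBO support assumed (e.g. via a loader)

    static GLuint sScratchPBO = 0; // mirrors the static referenced above

    void downscale_via_pbo_sketch(GLuint tex, GLint src_mip, GLsizei new_w, GLsizei new_h)
    {
        if (sScratchPBO == 0)
        {
            glGenBuffers(1, &sScratchPBO);
        }
        glBindTexture(GL_TEXTURE_2D, tex); // needs a real bind, hence the forced bind above
        glBindBuffer(GL_PIXEL_PACK_BUFFER, sScratchPBO);
        glBufferData(GL_PIXEL_PACK_BUFFER, (GLsizeiptr)new_w * new_h * 4, nullptr, GL_STREAM_COPY);
        glGetTexImage(GL_TEXTURE_2D, src_mip, GL_RGBA, GL_UNSIGNED_BYTE, nullptr); // pack the smaller mip into the PBO
        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

        // re-create level 0 at the reduced size, sourcing pixels from the PBO
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, sScratchPBO);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, new_w, new_h, 0,
                     GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    }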