Diffstat (limited to 'indra/llrender')
-rw-r--r--  indra/llrender/llfontgl.cpp       |  1
-rw-r--r--  indra/llrender/llimagegl.cpp      | 62
-rw-r--r--  indra/llrender/llimagegl.h        |  2
-rw-r--r--  indra/llrender/llrender.cpp       |  1
-rw-r--r--  indra/llrender/llrender.h         |  1
-rw-r--r--  indra/llrender/llrendertarget.cpp | 11
-rw-r--r--  indra/llrender/llvertexbuffer.cpp | 10
7 files changed, 61 insertions(+), 27 deletions(-)
diff --git a/indra/llrender/llfontgl.cpp b/indra/llrender/llfontgl.cpp
index 768f042e69..867d14c855 100644
--- a/indra/llrender/llfontgl.cpp
+++ b/indra/llrender/llfontgl.cpp
@@ -247,7 +247,6 @@ S32 LLFontGL::render(const LLWString &wstr, S32 begin_offset, F32 x, F32 y, cons
}
}
-
const LLFontGlyphInfo* next_glyph = NULL;
for (i = begin_offset; i < begin_offset + length; i++)
diff --git a/indra/llrender/llimagegl.cpp b/indra/llrender/llimagegl.cpp
index 36ac3ff119..4251392546 100644
--- a/indra/llrender/llimagegl.cpp
+++ b/indra/llrender/llimagegl.cpp
@@ -1639,7 +1639,7 @@ void LLImageGL::calcAlphaChannelOffsetAndStride()
}
}
-void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
+void LLImageGL::analyzeAlpha(const void* data_in, U32 w, U32 h)
{
if(!mNeedsAlphaAndPickMask)
{
@@ -1647,24 +1647,64 @@ void LLImageGL::analyzeAlpha(const void* data_in, S32 w, S32 h)
}
U32 length = w * h;
- const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset ;
- S32 sample[16];
- memset(sample, 0, sizeof(S32)*16);
-
- for (U32 i = 0; i < length; i++)
+ U32 sample[16];
+ memset(sample, 0, sizeof(U32)*16);
+
+ // Generate a histogram of quantized alpha.
+ // Also add in the histogram of a 2x2 box-sampled version. The idea is
+ // that this skews the data toward the mid-range (and thus increases the
+ // chance the texture is not treated as a mask) for high-frequency
+ // alpha maps, which suffer the worst aliasing when used as alpha masks.
+ if (w >= 2 && h >= 2)
+ {
+ llassert(w%2 == 0);
+ llassert(h%2 == 0);
+ const GLubyte* rowstart = ((const GLubyte*) data_in) + mAlphaOffset;
+ for (U32 y = 0; y < h; y+=2)
+ {
+ const GLubyte* current = rowstart;
+ for (U32 x = 0; x < w; x+=2)
+ {
+ U32 s1 = current[0];
+ U32 s2 = current[w * mAlphaStride];
+ current += mAlphaStride;
+ U32 s3 = current[0];
+ U32 s4 = current[w * mAlphaStride];
+ current += mAlphaStride;
+
+ ++sample[s1/16];
+ ++sample[s2/16];
+ ++sample[s3/16];
+ ++sample[s4/16];
+
+ sample[(s1+s2+s3+s4)/(16 * 4)] += 4;
+ }
+
+ rowstart += 2 * w * mAlphaStride;
+ }
+ length += length;
+ }
+ else
{
- ++sample[*current/16];
- current += mAlphaStride ;
+ const GLubyte* current = ((const GLubyte*) data_in) + mAlphaOffset;
+ for (U32 i = 0; i < length; i++)
+ {
+ ++sample[*current/16];
+ current += mAlphaStride;
+ }
}
- U32 total = 0;
+
+ // if more than 1/16th of alpha samples are mid-range, this
+ // shouldn't be treated as a 1-bit mask
+ U32 midrangetotal = 0;
for (U32 i = 4; i < 11; i++)
{
- total += sample[i];
+ midrangetotal += sample[i];
}
- if (total > length/16)
+ if (midrangetotal > length/16)
{
mIsMask = FALSE;
}
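The hunk above replaces the flat alpha histogram with one that also folds in a 2x2 box-sampled pass, then keeps the existing mid-range test. Reduced to its core, the test quantizes each 8-bit alpha value into one of 16 buckets and refuses to treat the texture as a 1-bit mask when more than 1/16th of the samples land in buckets 4..10. A minimal standalone sketch of that check (hypothetical function name, plain stdint types standing in for the viewer's U32/GLubyte typedefs):

    #include <cstdint>
    #include <cstring>

    // Returns true when the alpha channel looks like a clean 1-bit mask:
    // no more than 1/16th of the samples fall in the mid-range buckets 4..10.
    bool looksLikeOneBitMask(const uint8_t* alpha, uint32_t count)
    {
        uint32_t sample[16];
        std::memset(sample, 0, sizeof(sample));

        for (uint32_t i = 0; i < count; ++i)
        {
            ++sample[alpha[i] / 16]; // quantize 0..255 into 16 buckets
        }

        uint32_t midrangetotal = 0;
        for (uint32_t i = 4; i < 11; ++i)
        {
            midrangetotal += sample[i];
        }
        return midrangetotal <= count / 16;
    }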
diff --git a/indra/llrender/llimagegl.h b/indra/llrender/llimagegl.h
index f0870c3fc4..1b303307f6 100644
--- a/indra/llrender/llimagegl.h
+++ b/indra/llrender/llimagegl.h
@@ -91,7 +91,7 @@ public:
protected:
virtual ~LLImageGL();
- void analyzeAlpha(const void* data_in, S32 w, S32 h);
+ void analyzeAlpha(const void* data_in, U32 w, U32 h);
void calcAlphaChannelOffsetAndStride();
public:
diff --git a/indra/llrender/llrender.cpp b/indra/llrender/llrender.cpp
index 656f690db5..57e7dbe77d 100644
--- a/indra/llrender/llrender.cpp
+++ b/indra/llrender/llrender.cpp
@@ -765,6 +765,7 @@ LLRender::LLRender()
mCurrAlphaFunc = CF_DEFAULT;
mCurrAlphaFuncVal = 0.01f;
+
mCurrBlendSFactor = BF_UNDEF;
mCurrBlendDFactor = BF_UNDEF;
}
diff --git a/indra/llrender/llrender.h b/indra/llrender/llrender.h
index a90fbd4a5c..a73a128368 100644
--- a/indra/llrender/llrender.h
+++ b/indra/llrender/llrender.h
@@ -364,7 +364,6 @@ private:
eBlendFactor mCurrBlendSFactor;
eBlendFactor mCurrBlendDFactor;
-
F32 mMaxAnisotropy;
std::list<LLVector3> mUIOffset;
diff --git a/indra/llrender/llrendertarget.cpp b/indra/llrender/llrendertarget.cpp
index d9520b3bf6..3f2558f1f5 100644
--- a/indra/llrender/llrendertarget.cpp
+++ b/indra/llrender/llrendertarget.cpp
@@ -390,8 +390,6 @@ void LLRenderTarget::flush(BOOL fetch_depth)
}
else
{
-#if !LL_DARWIN
-
stop_glerror();
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
@@ -435,7 +433,6 @@ void LLRenderTarget::flush(BOOL fetch_depth)
}
}
}
-#endif
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
}
@@ -444,7 +441,6 @@ void LLRenderTarget::flush(BOOL fetch_depth)
void LLRenderTarget::copyContents(LLRenderTarget& source, S32 srcX0, S32 srcY0, S32 srcX1, S32 srcY1,
S32 dstX0, S32 dstY0, S32 dstX1, S32 dstY1, U32 mask, U32 filter)
{
-#if !LL_DARWIN
gGL.flush();
if (!source.mFBO || !mFBO)
{
@@ -483,14 +479,12 @@ void LLRenderTarget::copyContents(LLRenderTarget& source, S32 srcX0, S32 srcY0,
stop_glerror();
}
}
-#endif
}
//static
void LLRenderTarget::copyContentsToFramebuffer(LLRenderTarget& source, S32 srcX0, S32 srcY0, S32 srcX1, S32 srcY1,
S32 dstX0, S32 dstY0, S32 dstX1, S32 dstY1, U32 mask, U32 filter)
{
-#if !LL_DARWIN
if (!source.mFBO)
{
llerrs << "Cannot copy framebuffer contents for non FBO render targets." << llendl;
@@ -507,7 +501,6 @@ void LLRenderTarget::copyContentsToFramebuffer(LLRenderTarget& source, S32 srcX0
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
stop_glerror();
}
-#endif
}
BOOL LLRenderTarget::isComplete() const
@@ -652,7 +645,6 @@ void LLMultisampleBuffer::allocate(U32 resx, U32 resy, U32 color_fmt, BOOL depth
void LLMultisampleBuffer::addColorAttachment(U32 color_fmt)
{
-#if !LL_DARWIN
if (color_fmt == 0)
{
return;
@@ -693,12 +685,10 @@ void LLMultisampleBuffer::addColorAttachment(U32 color_fmt)
}
mTex.push_back(tex);
-#endif
}
void LLMultisampleBuffer::allocateDepth()
{
-#if !LL_DARWIN
glGenRenderbuffersEXT(1, (GLuint* ) &mDepth);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, mDepth);
if (mStencil)
@@ -709,6 +699,5 @@ void LLMultisampleBuffer::allocateDepth()
{
glRenderbufferStorageMultisampleEXT(GL_RENDERBUFFER_EXT, mSamples, GL_DEPTH_COMPONENT16_ARB, mResX, mResY);
}
-#endif
}
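All of the llrendertarget.cpp hunks do the same thing: they delete the #if !LL_DARWIN guards, so the EXT framebuffer and multisample paths now compile on Mac builds as well. Where a platform might still lack FBO support, the conventional replacement for a compile-time guard is a runtime capability check; a hedged sketch of that pattern, assuming a gGLManager.mHasFramebufferObject flag like the one the viewer's GL layer exposes elsewhere:

    // Sketch only: runtime capability check in place of the removed
    // compile-time #if !LL_DARWIN guard. Assumes gGLManager exposes an
    // mHasFramebufferObject flag; not part of this commit.
    if (gGLManager.mHasFramebufferObject)
    {
        glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
        stop_glerror();
    }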
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index bf5eda21eb..415d2f603b 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -579,7 +579,7 @@ void LLVertexBuffer::destroyGLBuffer()
}
mGLBuffer = 0;
- unbind();
+ //unbind();
}
void LLVertexBuffer::destroyGLIndices()
@@ -606,7 +606,7 @@ void LLVertexBuffer::destroyGLIndices()
}
mGLIndices = 0;
- unbind();
+ //unbind();
}
void LLVertexBuffer::updateNumVerts(S32 nverts)
@@ -668,6 +668,12 @@ void LLVertexBuffer::allocateBuffer(S32 nverts, S32 nindices, bool create)
{
LLMemType mt2(LLMemType::MTYPE_VERTEX_ALLOCATE_BUFFER);
+ if (nverts < 0 || nindices < 0 ||
+ nverts > 65536)
+ {
+ llerrs << "Bad vertex buffer allocation: " << nverts << " : " << nindices << llendl;
+ }
+
updateNumVerts(nverts);
updateNumIndices(nindices);
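The new guard in allocateBuffer() rejects negative sizes and caps nverts at 65536. That cap is consistent with 16-bit index buffers: a GL_UNSIGNED_SHORT index runs 0..65535, so it can address at most 65536 distinct vertices. A small illustration of the same bound (hypothetical helper, not viewer code):

    #include <cstdint>

    // A 16-bit index runs 0..65535, so a buffer drawn with
    // GL_UNSIGNED_SHORT indices can reference at most 65536 vertices.
    // This mirrors the allocateBuffer() guard above.
    bool fitsInU16Indices(int32_t nverts, int32_t nindices)
    {
        return nverts >= 0 && nindices >= 0 && nverts <= 65536;
    }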