summary | refs | log | tree | commit | diff
path: root/indra
diff options
context:
space:
mode:
author    Dave Parks <davep@lindenlab.com>  2022-02-01 22:28:02 +0000
committer Dave Parks <davep@lindenlab.com>  2022-02-01 22:28:02 +0000
commit 0b850360f5f3e520fa3bd321e2e105c24d1f46f0 (patch)
tree   88c7d1d5c3d7cba8395dd493ff158a310219683e /indra
parent 19281510bce123bfa85a0ec8f564a7ccb8f31aa9 (diff)
SL-16714 and SL-16750 Break rigged alpha into its own pass (restore release like behavior) and fix rigged alpha emissive not rendering.
Diffstat (limited to 'indra')
-rw-r--r--  indra/newview/lldrawpool.h        |   1
-rw-r--r--  indra/newview/lldrawpoolalpha.cpp | 168
-rw-r--r--  indra/newview/lldrawpoolalpha.h   |   4
-rw-r--r--  indra/newview/llspatialpartition.cpp | 31
-rw-r--r--  indra/newview/llspatialpartition.h   |   9
-rw-r--r--  indra/newview/llvovolume.cpp      |   2
-rw-r--r--  indra/newview/pipeline.cpp        |  20
-rw-r--r--  indra/newview/pipeline.h          |   2
8 files changed, 171 insertions, 66 deletions
diff --git a/indra/newview/lldrawpool.h b/indra/newview/lldrawpool.h
index d4f30fc51a..fd1b022e5b 100644
--- a/indra/newview/lldrawpool.h
+++ b/indra/newview/lldrawpool.h
@@ -183,6 +183,7 @@ public:
PASS_GLOW,
PASS_GLOW_RIGGED,
PASS_ALPHA,
+ PASS_ALPHA_RIGGED,
PASS_ALPHA_MASK,
PASS_ALPHA_MASK_RIGGED,
PASS_FULLBRIGHT_ALPHA_MASK, // Diffuse texture used as alpha mask and fullbright
diff --git a/indra/newview/lldrawpoolalpha.cpp b/indra/newview/lldrawpoolalpha.cpp
index 9da20cc375..963ea6ff8b 100644
--- a/indra/newview/lldrawpoolalpha.cpp
+++ b/indra/newview/lldrawpoolalpha.cpp
@@ -129,23 +129,26 @@ void LLDrawPoolAlpha::renderPostDeferred(S32 pass)
LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWPOOL;
deferred_render = TRUE;
- // first pass, regular forward alpha rendering
- {
- emissive_shader = (LLPipeline::sUnderWaterRender) ? &gObjectEmissiveWaterProgram : &gObjectEmissiveProgram;
- prepare_alpha_shader(emissive_shader, true, false);
-
- fullbright_shader = (LLPipeline::sImpostorRender) ? &gDeferredFullbrightProgram :
- (LLPipeline::sUnderWaterRender) ? &gDeferredFullbrightWaterProgram : &gDeferredFullbrightProgram;
- prepare_alpha_shader(fullbright_shader, true, false);
-
- simple_shader = (LLPipeline::sImpostorRender) ? &gDeferredAlphaImpostorProgram :
- (LLPipeline::sUnderWaterRender) ? &gDeferredAlphaWaterProgram : &gDeferredAlphaProgram;
- prepare_alpha_shader(simple_shader, false, true); //prime simple shader (loads shadow relevant uniforms)
-
- forwardRender();
- }
+ // prepare shaders
+ emissive_shader = (LLPipeline::sUnderWaterRender) ? &gObjectEmissiveWaterProgram : &gObjectEmissiveProgram;
+ prepare_alpha_shader(emissive_shader, true, false);
+
+ fullbright_shader = (LLPipeline::sImpostorRender) ? &gDeferredFullbrightProgram :
+ (LLPipeline::sUnderWaterRender) ? &gDeferredFullbrightWaterProgram : &gDeferredFullbrightProgram;
+ prepare_alpha_shader(fullbright_shader, true, false);
+
+ simple_shader = (LLPipeline::sImpostorRender) ? &gDeferredAlphaImpostorProgram :
+ (LLPipeline::sUnderWaterRender) ? &gDeferredAlphaWaterProgram : &gDeferredAlphaProgram;
+ prepare_alpha_shader(simple_shader, false, true); //prime simple shader (loads shadow relevant uniforms)
+
- // second pass, render to depth for depth of field effects
+ // first pass, render rigged objects only and render to depth buffer
+ forwardRender(true);
+
+ // second pass, regular forward alpha rendering
+ forwardRender();
+
+ // final pass, render to depth for depth of field effects
if (!LLPipeline::sImpostorRender && gSavedSettings.getBOOL("RenderDepthOfField"))
{
//update depth buffer sampler
@@ -209,10 +212,14 @@ void LLDrawPoolAlpha::render(S32 pass)
prepare_forward_shader(fullbright_shader, minimum_alpha);
prepare_forward_shader(simple_shader, minimum_alpha);
+ //first pass -- rigged only and drawn to depth buffer
+ forwardRender(true);
+
+ //second pass -- non-rigged, no depth buffer writes
forwardRender();
}
-void LLDrawPoolAlpha::forwardRender()
+void LLDrawPoolAlpha::forwardRender(bool rigged)
{
gPipeline.enableLightsDynamic();
@@ -221,7 +228,8 @@ void LLDrawPoolAlpha::forwardRender()
//enable writing to alpha for emissive effects
gGL.setColorMask(true, true);
- bool write_depth = LLDrawPoolWater::sSkipScreenCopy
+ bool write_depth = rigged
+ || LLDrawPoolWater::sSkipScreenCopy
// we want depth written so that rendered alpha will
// contribute to the alpha mask used for impostors
|| LLPipeline::sImpostorRenderAlphaDepthPass;
@@ -236,11 +244,17 @@ void LLDrawPoolAlpha::forwardRender()
// If the face is more than 90% transparent, then don't update the Depth buffer for Dof
// We don't want the nearly invisible objects to cause of DoF effects
- renderAlpha(getVertexDataMask() | LLVertexBuffer::MAP_TEXTURE_INDEX | LLVertexBuffer::MAP_TANGENT | LLVertexBuffer::MAP_TEXCOORD1 | LLVertexBuffer::MAP_TEXCOORD2);
+ renderAlpha(getVertexDataMask() | LLVertexBuffer::MAP_TEXTURE_INDEX | LLVertexBuffer::MAP_TANGENT | LLVertexBuffer::MAP_TEXCOORD1 | LLVertexBuffer::MAP_TEXCOORD2, false, rigged);
gGL.setColorMask(true, false);
- renderDebugAlpha();
+ if (!rigged)
+ { //render "highlight alpha" on final non-rigged pass
+ // NOTE -- hacky call here protected by !rigged instead of alongside "forwardRender"
+ // so renderDebugAlpha is executed while gls_pipeline_alpha and depth GL state
+ // variables above are still in scope
+ renderDebugAlpha();
+ }
}
void LLDrawPoolAlpha::renderDebugAlpha()
@@ -291,54 +305,60 @@ void LLDrawPoolAlpha::renderDebugAlpha()
void LLDrawPoolAlpha::renderAlphaHighlight(U32 mask)
{
- LLVOAvatar* lastAvatar = nullptr;
- U64 lastMeshId = 0;
+ for (int pass = 0; pass < 2; ++pass)
+ { //two passes, one rigged and one not
+ LLVOAvatar* lastAvatar = nullptr;
+ U64 lastMeshId = 0;
- for (LLCullResult::sg_iterator i = gPipeline.beginAlphaGroups(); i != gPipeline.endAlphaGroups(); ++i)
- {
- LLSpatialGroup* group = *i;
- if (group->getSpatialPartition()->mRenderByGroup &&
- !group->isDead())
- {
- LLSpatialGroup::drawmap_elem_t& draw_info = group->mDrawMap[LLRenderPass::PASS_ALPHA];
+ LLCullResult::sg_iterator begin = pass == 0 ? gPipeline.beginAlphaGroups() : gPipeline.beginRiggedAlphaGroups();
+ LLCullResult::sg_iterator end = pass == 0 ? gPipeline.endAlphaGroups() : gPipeline.endRiggedAlphaGroups();
- for (LLSpatialGroup::drawmap_elem_t::iterator k = draw_info.begin(); k != draw_info.end(); ++k)
- {
- LLDrawInfo& params = **k;
-
- if (params.mParticle)
- {
- continue;
- }
-
- bool rigged = (params.mAvatar != nullptr);
- gHighlightProgram.bind(rigged);
- gGL.diffuseColor4f(1, 0, 0, 1);
+ for (LLCullResult::sg_iterator i = begin; i != end; ++i)
+ {
+ LLSpatialGroup* group = *i;
+ if (group->getSpatialPartition()->mRenderByGroup &&
+ !group->isDead())
+ {
+ LLSpatialGroup::drawmap_elem_t& draw_info = group->mDrawMap[LLRenderPass::PASS_ALPHA+pass]; // <-- hacky + pass to use PASS_ALPHA_RIGGED on second pass
- if (rigged)
+ for (LLSpatialGroup::drawmap_elem_t::iterator k = draw_info.begin(); k != draw_info.end(); ++k)
{
- if (lastAvatar != params.mAvatar ||
- lastMeshId != params.mSkinInfo->mHash)
+ LLDrawInfo& params = **k;
+
+ if (params.mParticle)
{
- if (!uploadMatrixPalette(params))
+ continue;
+ }
+
+ bool rigged = (params.mAvatar != nullptr);
+ gHighlightProgram.bind(rigged);
+ gGL.diffuseColor4f(1, 0, 0, 1);
+
+ if (rigged)
+ {
+ if (lastAvatar != params.mAvatar ||
+ lastMeshId != params.mSkinInfo->mHash)
{
- continue;
+ if (!uploadMatrixPalette(params))
+ {
+ continue;
+ }
+ lastAvatar = params.mAvatar;
+ lastMeshId = params.mSkinInfo->mHash;
}
- lastAvatar = params.mAvatar;
- lastMeshId = params.mSkinInfo->mHash;
}
- }
- LLRenderPass::applyModelMatrix(params);
- if (params.mGroup)
- {
- params.mGroup->rebuildMesh();
- }
- params.mVertexBuffer->setBufferFast(rigged ? mask | LLVertexBuffer::MAP_WEIGHT4 : mask);
- params.mVertexBuffer->drawRangeFast(params.mDrawMode, params.mStart, params.mEnd, params.mCount, params.mOffset);
- }
- }
- }
+ LLRenderPass::applyModelMatrix(params);
+ if (params.mGroup)
+ {
+ params.mGroup->rebuildMesh();
+ }
+ params.mVertexBuffer->setBufferFast(rigged ? mask | LLVertexBuffer::MAP_WEIGHT4 : mask);
+ params.mVertexBuffer->drawRangeFast(params.mDrawMode, params.mStart, params.mEnd, params.mCount, params.mOffset);
+ }
+ }
+ }
+ }
// make sure static version of highlight shader is bound before returning
gHighlightProgram.bind();
@@ -471,6 +491,8 @@ void LLDrawPoolAlpha::renderRiggedEmissives(U32 mask, std::vector<LLDrawInfo*>&
LLVOAvatar* lastAvatar = nullptr;
U64 lastMeshId = 0;
+ mask |= LLVertexBuffer::MAP_WEIGHT4;
+
for (LLDrawInfo* draw : emissives)
{
bool tex_setup = TexSetup(draw, false);
@@ -488,7 +510,7 @@ void LLDrawPoolAlpha::renderRiggedEmissives(U32 mask, std::vector<LLDrawInfo*>&
}
}
-void LLDrawPoolAlpha::renderAlpha(U32 mask, bool depth_only)
+void LLDrawPoolAlpha::renderAlpha(U32 mask, bool depth_only, bool rigged)
{
LL_PROFILE_ZONE_SCOPED_CATEGORY_DRAWPOOL;
BOOL initialized_lighting = FALSE;
@@ -498,7 +520,21 @@ void LLDrawPoolAlpha::renderAlpha(U32 mask, bool depth_only)
U64 lastMeshId = 0;
LLGLSLShader* lastAvatarShader = nullptr;
- for (LLCullResult::sg_iterator i = gPipeline.beginAlphaGroups(); i != gPipeline.endAlphaGroups(); ++i)
+ LLCullResult::sg_iterator begin;
+ LLCullResult::sg_iterator end;
+
+ if (rigged)
+ {
+ begin = gPipeline.beginRiggedAlphaGroups();
+ end = gPipeline.endRiggedAlphaGroups();
+ }
+ else
+ {
+ begin = gPipeline.beginAlphaGroups();
+ end = gPipeline.endAlphaGroups();
+ }
+
+ for (LLCullResult::sg_iterator i = begin; i != end; ++i)
{
LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL("renderAlpha - group");
LLSpatialGroup* group = *i;
@@ -521,12 +557,18 @@ void LLDrawPoolAlpha::renderAlpha(U32 mask, bool depth_only)
bool disable_cull = is_particle_or_hud_particle;
LLGLDisable cull(disable_cull ? GL_CULL_FACE : 0);
- LLSpatialGroup::drawmap_elem_t& draw_info = group->mDrawMap[LLRenderPass::PASS_ALPHA];
+ LLSpatialGroup::drawmap_elem_t& draw_info = rigged ? group->mDrawMap[LLRenderPass::PASS_ALPHA_RIGGED] : group->mDrawMap[LLRenderPass::PASS_ALPHA];
for (LLSpatialGroup::drawmap_elem_t::iterator k = draw_info.begin(); k != draw_info.end(); ++k)
{
- LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL("ra - push batch")
LLDrawInfo& params = **k;
+ if ((bool)params.mAvatar != rigged)
+ {
+ continue;
+ }
+
+ LL_PROFILE_ZONE_NAMED_CATEGORY_DRAWPOOL("ra - push batch")
+
U32 have_mask = params.mVertexBuffer->getTypeMask() & mask;
if (have_mask != mask)
{ //FIXME!
diff --git a/indra/newview/lldrawpoolalpha.h b/indra/newview/lldrawpoolalpha.h
index 1f6909e282..fa8ef0f227 100644
--- a/indra/newview/lldrawpoolalpha.h
+++ b/indra/newview/lldrawpoolalpha.h
@@ -55,13 +55,13 @@ public:
/*virtual*/ S32 getNumPasses() { return 1; }
virtual void render(S32 pass = 0);
- void forwardRender();
+ void forwardRender(bool write_depth = false);
/*virtual*/ void prerender();
void renderDebugAlpha();
void renderGroupAlpha(LLSpatialGroup* group, U32 type, U32 mask, BOOL texture = TRUE);
- void renderAlpha(U32 mask, bool depth_only = false);
+ void renderAlpha(U32 mask, bool depth_only = false, bool rigged = false);
void renderAlphaHighlight(U32 mask);
bool uploadMatrixPalette(const LLDrawInfo& params);
diff --git a/indra/newview/llspatialpartition.cpp b/indra/newview/llspatialpartition.cpp
index eaf6186dae..5c648c11e1 100644
--- a/indra/newview/llspatialpartition.cpp
+++ b/indra/newview/llspatialpartition.cpp
@@ -4022,6 +4022,7 @@ LLCullResult::LLCullResult()
{
mVisibleGroupsAllocated = 0;
mAlphaGroupsAllocated = 0;
+ mRiggedAlphaGroupsAllocated = 0;
mOcclusionGroupsAllocated = 0;
mDrawableGroupsAllocated = 0;
mVisibleListAllocated = 0;
@@ -4033,6 +4034,9 @@ LLCullResult::LLCullResult()
mAlphaGroups.clear();
mAlphaGroups.push_back(NULL);
mAlphaGroupsEnd = &mAlphaGroups[0];
+ mRiggedAlphaGroups.clear();
+ mRiggedAlphaGroups.push_back(NULL);
+ mRiggedAlphaGroupsEnd = &mRiggedAlphaGroups[0];
mOcclusionGroups.clear();
mOcclusionGroups.push_back(NULL);
mOcclusionGroupsEnd = &mOcclusionGroups[0];
@@ -4073,6 +4077,9 @@ void LLCullResult::clear()
mAlphaGroupsSize = 0;
mAlphaGroupsEnd = &mAlphaGroups[0];
+ mRiggedAlphaGroupsSize = 0;
+ mRiggedAlphaGroupsEnd = &mRiggedAlphaGroups[0];
+
mOcclusionGroupsSize = 0;
mOcclusionGroupsEnd = &mOcclusionGroups[0];
@@ -4117,6 +4124,16 @@ LLCullResult::sg_iterator LLCullResult::endAlphaGroups()
return mAlphaGroupsEnd;
}
+LLCullResult::sg_iterator LLCullResult::beginRiggedAlphaGroups()
+{
+ return &mRiggedAlphaGroups[0];
+}
+
+LLCullResult::sg_iterator LLCullResult::endRiggedAlphaGroups()
+{
+ return mRiggedAlphaGroupsEnd;
+}
+
LLCullResult::sg_iterator LLCullResult::beginOcclusionGroups()
{
return &mOcclusionGroups[0];
@@ -4195,6 +4212,20 @@ void LLCullResult::pushAlphaGroup(LLSpatialGroup* group)
mAlphaGroupsEnd = &mAlphaGroups[mAlphaGroupsSize];
}
+void LLCullResult::pushRiggedAlphaGroup(LLSpatialGroup* group)
+{
+ if (mRiggedAlphaGroupsSize < mRiggedAlphaGroupsAllocated)
+ {
+ mRiggedAlphaGroups[mRiggedAlphaGroupsSize] = group;
+ }
+ else
+ {
+ pushBack(mRiggedAlphaGroups, mRiggedAlphaGroupsAllocated, group);
+ }
+ ++mRiggedAlphaGroupsSize;
+ mRiggedAlphaGroupsEnd = &mRiggedAlphaGroups[mRiggedAlphaGroupsSize];
+}
+
void LLCullResult::pushOcclusionGroup(LLSpatialGroup* group)
{
if (mOcclusionGroupsSize < mOcclusionGroupsAllocated)
diff --git a/indra/newview/llspatialpartition.h b/indra/newview/llspatialpartition.h
index afe24d7d1f..eefb5b0eba 100644
--- a/indra/newview/llspatialpartition.h
+++ b/indra/newview/llspatialpartition.h
@@ -470,6 +470,9 @@ public:
sg_iterator beginAlphaGroups();
sg_iterator endAlphaGroups();
+ sg_iterator beginRiggedAlphaGroups();
+ sg_iterator endRiggedAlphaGroups();
+
bool hasOcclusionGroups() { return mOcclusionGroupsSize > 0; }
sg_iterator beginOcclusionGroups();
sg_iterator endOcclusionGroups();
@@ -488,6 +491,7 @@ public:
void pushVisibleGroup(LLSpatialGroup* group);
void pushAlphaGroup(LLSpatialGroup* group);
+ void pushRiggedAlphaGroup(LLSpatialGroup* group);
void pushOcclusionGroup(LLSpatialGroup* group);
void pushDrawableGroup(LLSpatialGroup* group);
void pushDrawable(LLDrawable* drawable);
@@ -496,6 +500,7 @@ public:
U32 getVisibleGroupsSize() { return mVisibleGroupsSize; }
U32 getAlphaGroupsSize() { return mAlphaGroupsSize; }
+ U32 getRiggedAlphaGroupsSize() { return mRiggedAlphaGroupsSize; }
U32 getDrawableGroupsSize() { return mDrawableGroupsSize; }
U32 getVisibleListSize() { return mVisibleListSize; }
U32 getVisibleBridgeSize() { return mVisibleBridgeSize; }
@@ -509,6 +514,7 @@ private:
U32 mVisibleGroupsSize;
U32 mAlphaGroupsSize;
+ U32 mRiggedAlphaGroupsSize;
U32 mOcclusionGroupsSize;
U32 mDrawableGroupsSize;
U32 mVisibleListSize;
@@ -516,6 +522,7 @@ private:
U32 mVisibleGroupsAllocated;
U32 mAlphaGroupsAllocated;
+ U32 mRiggedAlphaGroupsAllocated;
U32 mOcclusionGroupsAllocated;
U32 mDrawableGroupsAllocated;
U32 mVisibleListAllocated;
@@ -527,6 +534,8 @@ private:
sg_iterator mVisibleGroupsEnd;
sg_list_t mAlphaGroups;
sg_iterator mAlphaGroupsEnd;
+ sg_list_t mRiggedAlphaGroups;
+ sg_iterator mRiggedAlphaGroupsEnd;
sg_list_t mOcclusionGroups;
sg_iterator mOcclusionGroupsEnd;
sg_list_t mDrawableGroups;
diff --git a/indra/newview/llvovolume.cpp b/indra/newview/llvovolume.cpp
index 8bc570311c..a60bece037 100644
--- a/indra/newview/llvovolume.cpp
+++ b/indra/newview/llvovolume.cpp
@@ -5143,7 +5143,7 @@ void LLVolumeGeometryManager::registerFace(LLSpatialGroup* group, LLFace* facep,
bool rigged = facep->isState(LLFace::RIGGED);
- if (rigged && type != LLRenderPass::PASS_ALPHA)
+ if (rigged)
{
// hacky, should probably clean up -- if this face is rigged, put it in "type + 1"
// See LLRenderPass PASS_foo enum
diff --git a/indra/newview/pipeline.cpp b/indra/newview/pipeline.cpp
index 117766afea..8d45e64bf8 100644
--- a/indra/newview/pipeline.cpp
+++ b/indra/newview/pipeline.cpp
@@ -3822,6 +3822,16 @@ void LLPipeline::postSort(LLCamera& camera)
sCull->pushAlphaGroup(group);
}
}
+
+ LLSpatialGroup::draw_map_t::iterator rigged_alpha = group->mDrawMap.find(LLRenderPass::PASS_ALPHA_RIGGED);
+
+ if (rigged_alpha != group->mDrawMap.end())
+ { //store rigged alpha groups for LLDrawPoolAlpha prepass (skip distance update, rigged attachments use depth buffer)
+ if (hasRenderType(LLDrawPool::POOL_ALPHA))
+ {
+ sCull->pushRiggedAlphaGroup(group);
+ }
+ }
}
}
@@ -11153,6 +11163,16 @@ LLCullResult::sg_iterator LLPipeline::endAlphaGroups()
return sCull->endAlphaGroups();
}
+LLCullResult::sg_iterator LLPipeline::beginRiggedAlphaGroups()
+{
+ return sCull->beginRiggedAlphaGroups();
+}
+
+LLCullResult::sg_iterator LLPipeline::endRiggedAlphaGroups()
+{
+ return sCull->endRiggedAlphaGroups();
+}
+
bool LLPipeline::hasRenderType(const U32 type) const
{
// STORM-365 : LLViewerJointAttachment::setAttachmentVisibility() is setting type to 0 to actually mean "do not render"
diff --git a/indra/newview/pipeline.h b/indra/newview/pipeline.h
index fdc3738472..6114aa4f6c 100644
--- a/indra/newview/pipeline.h
+++ b/indra/newview/pipeline.h
@@ -338,6 +338,8 @@ public:
LLCullResult::drawinfo_iterator endRenderMap(U32 type);
LLCullResult::sg_iterator beginAlphaGroups();
LLCullResult::sg_iterator endAlphaGroups();
+ LLCullResult::sg_iterator beginRiggedAlphaGroups();
+ LLCullResult::sg_iterator endRiggedAlphaGroups();
void addTrianglesDrawn(S32 index_count, U32 render_type = LLRender::TRIANGLES);