| author | Dave Parks <davep@lindenlab.com> | 2024-05-29 16:56:39 -0500 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-05-29 16:56:39 -0500 |
| commit | 15fd13f83036ff781160957a21bb2d59771044bc (patch) | |
| tree | 984601482bc6d7384796123bce39b7e50074ec5a /indra/llrender | |
| parent | 2d0fe5ca7bf3bda62bf10a37a65f5859e6d1b095 (diff) | |
#1530 Increase joint limit for GLTF Assets (#1582)
* Migrate GLTF scene rendering to its own shaders
* Add support for an ambient occlusion map that is separate from the metallic-roughness map (or absent entirely)
* Use UBOs for GLTF joints (see the sketch after this list)
* Better error handling when downloading GLTF assets
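
Of these, the UBO change is what lifts the joint limit: a plain uniform array is capped at a few hundred floats on many drivers, while a uniform block is guaranteed at least 16 KB by core OpenGL (and is typically 64 KB on desktop). A minimal sketch of the general technique, assuming an OpenGL 3.1+ context; the function name and loader header are illustrative, not from this patch:

```cpp
// Hypothetical sketch (not the viewer's code): feed GLTF joint matrices to a
// shader through a uniform buffer object instead of a plain uniform array.
#include <epoxy/gl.h>   // any GL 3.1+ loader works; epoxy is an assumption here
#include <vector>

GLuint upload_joints_ubo(const std::vector<float>& joint_matrices, GLuint binding_point)
{
    GLuint ubo = 0;
    glGenBuffers(1, &ubo);
    glBindBuffer(GL_UNIFORM_BUFFER, ubo);
    // orphan-and-fill; joint data changes every frame for animated characters
    glBufferData(GL_UNIFORM_BUFFER,
                 joint_matrices.size() * sizeof(float),
                 joint_matrices.data(),
                 GL_STREAM_DRAW);
    // attach to the same binding point the shader's block index was mapped to
    // with glUniformBlockBinding (see the llglslshader.cpp hunk below)
    glBindBufferBase(GL_UNIFORM_BUFFER, binding_point, ubo);
    return ubo;
}
```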
Diffstat (limited to 'indra/llrender')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | indra/llrender/llgl.cpp | 5 |
| -rw-r--r-- | indra/llrender/llgl.h | 1 |
| -rw-r--r-- | indra/llrender/llglslshader.cpp | 25 |
| -rw-r--r-- | indra/llrender/llglslshader.h | 28 |
| -rw-r--r-- | indra/llrender/llshadermgr.cpp | 4 |
| -rw-r--r-- | indra/llrender/llshadermgr.h | 4 |
| -rw-r--r-- | indra/llrender/llvertexbuffer.cpp | 5 |
7 files changed, 52 insertions, 20 deletions
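
The llgl.cpp hunk below clamps the queried GL_MAX_UNIFORM_BLOCK_SIZE to 64 KB. Assuming joints are packed as 3x4 float matrices (three std140 vec4 rows, 48 bytes each — an assumption; the packing is not shown in this patch), the clamp implies a per-block joint capacity:

```cpp
// Sketch: joint capacity implied by the 64 KB uniform block clamp.
#include <cstdio>

int main()
{
    const int max_block_size  = 65536;                  // after the llmin() clamp
    const int bytes_per_joint = 3 * 4 * sizeof(float);  // 48: three vec4 rows
    std::printf("joints per block: %d\n", max_block_size / bytes_per_joint); // 1365
    return 0;
}
```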
```diff
diff --git a/indra/llrender/llgl.cpp b/indra/llrender/llgl.cpp
index ac697b72be..0586c34e50 100644
--- a/indra/llrender/llgl.cpp
+++ b/indra/llrender/llgl.cpp
@@ -1238,6 +1238,11 @@ bool LLGLManager::initGL()
     glGetIntegerv(GL_MAX_INTEGER_SAMPLES, &mMaxIntegerSamples);
     glGetIntegerv(GL_MAX_SAMPLE_MASK_WORDS, &mMaxSampleMaskWords);
     glGetIntegerv(GL_MAX_SAMPLES, &mMaxSamples);
+    glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &mMaxUniformBlockSize);
+
+    // sanity clamp max uniform block size to 64k just in case
+    // there's some implementation that reports a crazy value
+    mMaxUniformBlockSize = llmin(mMaxUniformBlockSize, 65536);

     if (mGLVersion >= 4.59f)
     {
diff --git a/indra/llrender/llgl.h b/indra/llrender/llgl.h
index 75a7c5d3b2..2f538d0844 100644
--- a/indra/llrender/llgl.h
+++ b/indra/llrender/llgl.h
@@ -87,6 +87,7 @@ public:
     S32 mGLMaxIndexRange;
     S32 mGLMaxTextureSize;
     F32 mMaxAnisotropy = 0.f;
+    S32 mMaxUniformBlockSize = 0;

     // GL 4.x capabilities
     bool mHasCubeMapArray = false;
diff --git a/indra/llrender/llglslshader.cpp b/indra/llrender/llglslshader.cpp
index ecd0c6908b..8ea134393a 100644
--- a/indra/llrender/llglslshader.cpp
+++ b/indra/llrender/llglslshader.cpp
@@ -983,17 +983,25 @@ bool LLGLSLShader::mapUniforms(const vector<LLStaticHashedString>* uniforms)
     }

     //........................................................................................................................................
-    if (mFeatures.hasReflectionProbes) // Set up block binding, in a way supported by Apple (rather than binding = 1 in .glsl).
-    {   // See slide 35 and more of https://docs.huihoo.com/apple/wwdc/2011/session_420__advances_in_opengl_for_mac_os_x_lion.pdf
-        static const GLuint BLOCKBINDING = 1; //picked by us
-        //Get the index, similar to a uniform location
-        GLuint UBOBlockIndex = glGetUniformBlockIndex(mProgramObject, "ReflectionProbes");
+    // Set up block binding, in a way supported by Apple (rather than binding = 1 in .glsl).
+    // See slide 35 and more of https://docs.huihoo.com/apple/wwdc/2011/session_420__advances_in_opengl_for_mac_os_x_lion.pdf
+    const char* ubo_names[] =
+    {
+        "ReflectionProbes", // UB_REFLECTION_PROBES
+        "GLTFJoints", // UB_GLTF_JOINTS
+    };
+
+    llassert(LL_ARRAY_SIZE(ubo_names) == NUM_UNIFORM_BLOCKS);
+
+    for (U32 i = 0; i < NUM_UNIFORM_BLOCKS; ++i)
+    {
+        GLuint UBOBlockIndex = glGetUniformBlockIndex(mProgramObject, ubo_names[i]);
         if (UBOBlockIndex != GL_INVALID_INDEX)
         {
-            //Set this index to a binding index
-            glUniformBlockBinding(mProgramObject, UBOBlockIndex, BLOCKBINDING);
+            glUniformBlockBinding(mProgramObject, UBOBlockIndex, i);
         }
     }
+
     unbind();

     LL_DEBUGS("ShaderUniform") << "Total Uniform Size: " << mTotalUniformSize << LL_ENDL;
@@ -1049,9 +1057,10 @@ void LLGLSLShader::bind()
     }
 }

-void LLGLSLShader::bind(LLGLSLShader::GLTFVariant variant)
+void LLGLSLShader::bind(U32 variant)
 {
     llassert(mGLTFVariants.size() == LLGLSLShader::NUM_GLTF_VARIANTS);
+    llassert(variant < LLGLSLShader::NUM_GLTF_VARIANTS);
     mGLTFVariants[variant].bind();
 }
diff --git a/indra/llrender/llglslshader.h b/indra/llrender/llglslshader.h
index fa01d212e1..8ebea2deca 100644
--- a/indra/llrender/llglslshader.h
+++ b/indra/llrender/llglslshader.h
@@ -146,6 +146,14 @@ public:
         SG_COUNT
     } eGroup;

+    enum UniformBlock : GLuint
+    {
+        UB_REFLECTION_PROBES,
+        UB_GLTF_JOINTS,
+        NUM_UNIFORM_BLOCKS
+    };
+
+
     static std::set<LLGLSLShader*> sInstances;

     static bool sProfileEnabled;
@@ -320,20 +328,24 @@ public:
     LLGLSLShader* mRiggedVariant = nullptr;

     // variants for use by GLTF renderer
-    // "this" is considered to be OPAQUE
-    enum GLTFVariant
+    // bit 0 = alpha mode blend (1) or opaque (0)
+    // bit 1 = rigged (1) or static (0)
+    struct GLTFVariant
     {
-        STATIC_OPAQUE,
-        STATIC_BLEND,
-        RIGGED_OPAQUE,
-        RIGGED_BLEND,
-        NUM_GLTF_VARIANTS
+        constexpr static U32 RIGGED = 2;
+        constexpr static U32 ALPHA = 1;
+        constexpr static U32 OPAQUE_STATIC = 0;
+        constexpr static U32 ALPHA_STATIC = 1;
+        constexpr static U32 OPAQUE_RIGGED = 2;
+        constexpr static U32 ALPHA_RIGGED = 3;
     };

+    constexpr static U32 NUM_GLTF_VARIANTS = 4;
+
     std::vector<LLGLSLShader> mGLTFVariants;

     //helper to bind GLTF variant
-    void bind(GLTFVariant variant);
+    void bind(U32 variant);

     // hacky flag used for optimization in LLDrawPoolAlpha
     bool mCanBindFast = false;
diff --git a/indra/llrender/llshadermgr.cpp b/indra/llrender/llshadermgr.cpp
index 694bcbeeb9..4e8adb2fb3 100644
--- a/indra/llrender/llshadermgr.cpp
+++ b/indra/llrender/llshadermgr.cpp
@@ -1225,6 +1225,9 @@ void LLShaderMgr::initAttribsAndUniforms()
     mReservedUniforms.push_back("diffuseMap");
     mReservedUniforms.push_back("altDiffuseMap");
     mReservedUniforms.push_back("specularMap");
+    mReservedUniforms.push_back("metallicRoughnessMap");
+    mReservedUniforms.push_back("normalMap");
+    mReservedUniforms.push_back("occlusionMap");
     mReservedUniforms.push_back("emissiveMap");
     mReservedUniforms.push_back("bumpMap");
     mReservedUniforms.push_back("bumpMap2");
@@ -1348,7 +1351,6 @@ void LLShaderMgr::initAttribsAndUniforms()
     llassert(mReservedUniforms.size() == LLShaderMgr::DEFERRED_SHADOW5+1);

-    mReservedUniforms.push_back("normalMap");
     mReservedUniforms.push_back("positionMap");
     mReservedUniforms.push_back("diffuseRect");
     mReservedUniforms.push_back("specularRect");
diff --git a/indra/llrender/llshadermgr.h b/indra/llrender/llshadermgr.h
index c3e5a2aafd..03803c0e96 100644
--- a/indra/llrender/llshadermgr.h
+++ b/indra/llrender/llshadermgr.h
@@ -93,6 +93,9 @@ public:
         DIFFUSE_MAP,                        //  "diffuseMap"
         ALTERNATE_DIFFUSE_MAP,              //  "altDiffuseMap"
         SPECULAR_MAP,                       //  "specularMap"
+        METALLIC_ROUGHNESS_MAP,             //  "metallicRoughnessMap"
+        NORMAL_MAP,                         //  "normalMap"
+        OCCLUSION_MAP,                      //  "occlusionMap"
         EMISSIVE_MAP,                       //  "emissiveMap"
         BUMP_MAP,                           //  "bumpMap"
         BUMP_MAP2,                          //  "bumpMap2"
@@ -202,7 +205,6 @@ public:
         DEFERRED_SHADOW3,                   //  "shadowMap3"
         DEFERRED_SHADOW4,                   //  "shadowMap4"
         DEFERRED_SHADOW5,                   //  "shadowMap5"
-        DEFERRED_NORMAL,                    //  "normalMap"
         DEFERRED_POSITION,                  //  "positionMap"
         DEFERRED_DIFFUSE,                   //  "diffuseRect"
         DEFERRED_SPECULAR,                  //  "specularRect"
diff --git a/indra/llrender/llvertexbuffer.cpp b/indra/llrender/llvertexbuffer.cpp
index 7caf20f40b..8cb124d406 100644
--- a/indra/llrender/llvertexbuffer.cpp
+++ b/indra/llrender/llvertexbuffer.cpp
@@ -687,6 +687,7 @@ bool LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_offset
     }

     {
+#if 0  // not a reliable test for VBOs that are not backed by a CPU buffer
         U16* idx = (U16*) mMappedIndexData+indices_offset;
         for (U32 i = 0; i < count; ++i)
         {
@@ -724,6 +725,7 @@ bool LLVertexBuffer::validateRange(U32 start, U32 end, U32 count, U32 indices_offset
                 }
             }
         }
+#endif
     }

     return true;
@@ -1036,8 +1038,7 @@ bool LLVertexBuffer::updateNumIndices(U32 nindices)

 bool LLVertexBuffer::allocateBuffer(U32 nverts, U32 nindices)
 {
-    if (nverts < 0 || nindices < 0 ||
-        nverts > 65536)
+    if (nverts < 0 || nindices < 0)
     {
         LL_ERRS() << "Bad vertex buffer allocation: " << nverts << " : " << nindices << LL_ENDL;
     }
```
