Diffstat (limited to 'indra/llrender/llgl.cpp')
-rw-r--r--    indra/llrender/llgl.cpp    131
1 files changed, 77 insertions, 54 deletions
diff --git a/indra/llrender/llgl.cpp b/indra/llrender/llgl.cpp
index 1b5566a3f7..1bc5e79835 100644
--- a/indra/llrender/llgl.cpp
+++ b/indra/llrender/llgl.cpp
@@ -50,6 +50,10 @@
 #include "llglheaders.h"
 #include "llglslshader.h"
+#include "glm/glm.hpp"
+#include <glm/gtc/matrix_access.hpp>
+#include "glm/gtc/type_ptr.hpp"
+
 #if LL_WINDOWS
 #include "lldxhardware.h"
 #endif
@@ -59,12 +63,12 @@
 #endif
-BOOL gDebugSession = FALSE;
-BOOL gDebugGLSession = FALSE;
-BOOL gClothRipple = FALSE;
-BOOL gHeadlessClient = FALSE;
-BOOL gNonInteractive = FALSE;
-BOOL gGLActive = FALSE;
+bool gDebugSession = false;
+bool gDebugGLSession = false;
+bool gClothRipple = false;
+bool gHeadlessClient = false;
+bool gNonInteractive = false;
+bool gGLActive = false;
 static const std::string HEADLESS_VENDOR_STRING("Linden Lab");
 static const std::string HEADLESS_RENDERER_STRING("Headless");
@@ -983,21 +987,21 @@ PFNGLPOLYGONOFFSETCLAMPPROC              glPolygonOffsetClamp = nullptr;
 LLGLManager gGLManager;
 LLGLManager::LLGLManager() :
-    mInited(FALSE),
-    mIsDisabled(FALSE),
+    mInited(false),
+    mIsDisabled(false),
     mMaxSamples(0),
     mNumTextureImageUnits(1),
     mMaxSampleMaskWords(0),
     mMaxColorTextureSamples(0),
     mMaxDepthTextureSamples(0),
     mMaxIntegerSamples(0),
-    mIsAMD(FALSE),
-    mIsNVIDIA(FALSE),
-    mIsIntel(FALSE),
+    mIsAMD(false),
+    mIsNVIDIA(false),
+    mIsIntel(false),
 #if LL_DARWIN
-    mIsMobileGF(FALSE),
+    mIsMobileGF(false),
 #endif
-    mHasRequirements(TRUE),
+    mHasRequirements(true),
     mDriverVersionMajor(1),
     mDriverVersionMinor(0),
     mDriverVersionRelease(0),
@@ -1038,6 +1042,7 @@ void LLGLManager::initWGL()
         GLH_EXT_NAME(wglGetGPUIDsAMD) = (PFNWGLGETGPUIDSAMDPROC)GLH_EXT_GET_PROC_ADDRESS("wglGetGPUIDsAMD");
         GLH_EXT_NAME(wglGetGPUInfoAMD) = (PFNWGLGETGPUINFOAMDPROC)GLH_EXT_GET_PROC_ADDRESS("wglGetGPUInfoAMD");
     }
+    mHasNVXGpuMemoryInfo = ExtensionExists("GL_NVX_gpu_memory_info", gGLHExts.mSysExts);
     if (ExtensionExists("WGL_EXT_swap_control", gGLHExts.mSysExts))
     {
@@ -1147,12 +1152,12 @@ bool LLGLManager::initGL()
     {
         mGLVendorShort = "AMD";
         // *TODO: Fix this?
-        mIsAMD = TRUE;
+        mIsAMD = true;
     }
     else if (mGLVendor.find("NVIDIA ") != std::string::npos)
     {
         mGLVendorShort = "NVIDIA";
-        mIsNVIDIA = TRUE;
+        mIsNVIDIA = true;
     }
     else if (mGLVendor.find("INTEL") != std::string::npos
 #if LL_LINUX
@@ -1163,7 +1168,12 @@ bool LLGLManager::initGL()
          )
     {
         mGLVendorShort = "INTEL";
-        mIsIntel = TRUE;
+        mIsIntel = true;
+    }
+    else if (mGLVendor.find("APPLE") != std::string::npos)
+    {
+        mGLVendorShort = "APPLE";
+        mIsApple = true;
     }
     else
     {
@@ -1173,7 +1183,7 @@ bool LLGLManager::initGL()
     // This is called here because it depends on the setting of mIsGF2or4MX, and sets up mHasMultitexture.
     initExtensions();
-    S32 old_vram = mVRAM;
+    U32 old_vram = mVRAM;
     mVRAM = 0;
 #if LL_WINDOWS
@@ -1205,6 +1215,17 @@ bool LLGLManager::initGL()
             LL_WARNS("RenderInit") << "VRAM Detected (AMDAssociations):" << mVRAM << LL_ENDL;
         }
     }
+    else if (mHasNVXGpuMemoryInfo)
+    {
+        GLint mem_kb = 0;
+        glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &mem_kb);
+        mVRAM = mem_kb / 1024;
+
+        if (mVRAM != 0)
+        {
+            LL_WARNS("RenderInit") << "VRAM Detected (NVXGpuMemoryInfo):" << mVRAM << LL_ENDL;
+        }
+    }
 #endif
 #if LL_WINDOWS
@@ -1215,7 +1236,7 @@ bool LLGLManager::initGL()
         // Function will check all GPUs WMI knows of and will pick up the one with most
         // memory. We need to check all GPUs because system can switch active GPU to
         // weaker one, to preserve power when not under load.
-        S32 mem = LLDXHardware::getMBVideoMemoryViaWMI();
+        U32 mem = LLDXHardware::getMBVideoMemoryViaWMI();
         if (mem != 0)
         {
             mVRAM = mem;
@@ -1238,6 +1259,12 @@ bool LLGLManager::initGL()
     glGetIntegerv(GL_MAX_INTEGER_SAMPLES, &mMaxIntegerSamples);
     glGetIntegerv(GL_MAX_SAMPLE_MASK_WORDS, &mMaxSampleMaskWords);
     glGetIntegerv(GL_MAX_SAMPLES, &mMaxSamples);
+    glGetIntegerv(GL_MAX_VARYING_VECTORS, &mMaxVaryingVectors);
+    glGetIntegerv(GL_MAX_UNIFORM_BLOCK_SIZE, &mMaxUniformBlockSize);
+
+    // sanity clamp max uniform block size to 64k just in case
+    // there's some implementation that reports a crazy value
+    mMaxUniformBlockSize = llmin(mMaxUniformBlockSize, 65536);
     if (mGLVersion >= 4.59f)
     {
@@ -1345,7 +1372,7 @@ void LLGLManager::asLLSD(LLSD& info)
     info["gpu_version"] = mDriverVersionVendorString;
     info["opengl_version"] = mGLVersionString;
-    info["vram"] = mVRAM;
+    info["vram"] = LLSD::Integer(mVRAM);
     // OpenGL limits
     info["max_samples"] = mMaxSamples;
@@ -1372,12 +1399,12 @@ void LLGLManager::shutdownGL()
     {
         glFinish();
         stop_glerror();
-        mInited = FALSE;
+        mInited = false;
     }
 }
 // these are used to turn software blending on. They appear in the Debug/Avatar menu
-// presence of vertex skinning/blending or vertex programs will set these to FALSE by default.
+// presence of vertex skinning/blending or vertex programs will set these to false by default.
 void LLGLManager::initExtensions()
 {
@@ -1409,7 +1436,7 @@ void LLGLManager::initExtensions()
     glGetIntegerv(GL_MAX_ELEMENTS_INDICES, (GLint*) &mGLMaxIndexRange);
     glGetIntegerv(GL_MAX_TEXTURE_SIZE, (GLint*) &mGLMaxTextureSize);
-    mInited = TRUE;
+    mInited = true;
 #if (LL_WINDOWS || LL_LINUX) && !LL_MESA_HEADLESS
     LL_DEBUGS("RenderInit") << "GL Probe: Getting symbols" << LL_ENDL;
@@ -2297,10 +2324,10 @@ void do_assert_glerror()
     //  Create or update texture to be used with this data
     GLenum error;
     error = glGetError();
-    BOOL quit = FALSE;
+    bool quit = false;
     if (LL_UNLIKELY(error))
     {
-        quit = TRUE;
+        quit = true;
         GLubyte const * gl_error_msg = gluErrorString(error);
         if (NULL != gl_error_msg)
         {
@@ -2420,7 +2447,7 @@ void LLGLState::dumpStates()
     for (boost::unordered_map<LLGLenum, LLGLboolean>::iterator iter = sStateMap.begin();
          iter != sStateMap.end(); ++iter)
     {
-        LL_INFOS("RenderState") << llformat(" 0x%04x : %s",(S32)iter->first,iter->second?"TRUE":"FALSE") << LL_ENDL;
+        LL_INFOS("RenderState") << llformat(" 0x%04x : %s",(S32)iter->first,iter->second?"true":"false") << LL_ENDL;
     }
 }
@@ -2463,7 +2490,7 @@ void LLGLState::checkStates(GLboolean writeAlpha)
 ///////////////////////////////////////////////////////////////////////
 LLGLState::LLGLState(LLGLenum state, S32 enabled) :
-    mState(state), mWasEnabled(FALSE), mIsEnabled(FALSE)
+    mState(state), mWasEnabled(false), mIsEnabled(false)
 {
     LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE;
@@ -2482,15 +2509,15 @@ void LLGLState::setEnabled(S32 enabled)
     }
     if (enabled == CURRENT_STATE)
     {
-        enabled = sStateMap[mState] == GL_TRUE ? TRUE : FALSE;
+        enabled = sStateMap[mState] == GL_TRUE ? ENABLED_STATE : DISABLED_STATE;
     }
-    else if (enabled == TRUE && sStateMap[mState] != GL_TRUE)
+    else if (enabled == ENABLED_STATE && sStateMap[mState] != GL_TRUE)
     {
         gGL.flush();
         glEnable(mState);
         sStateMap[mState] = GL_TRUE;
     }
-    else if (enabled == FALSE && sStateMap[mState] != GL_FALSE)
+    else if (enabled == DISABLED_STATE && sStateMap[mState] != GL_FALSE)
     {
         gGL.flush();
         glDisable(mState);
@@ -2677,7 +2704,7 @@ void parse_glsl_version(S32& major, S32& minor)
     LLStringUtil::convertToS32(minor_str, minor);
 }
-LLGLUserClipPlane::LLGLUserClipPlane(const LLPlane& p, const glh::matrix4f& modelview, const glh::matrix4f& projection, bool apply)
+LLGLUserClipPlane::LLGLUserClipPlane(const LLPlane& p, const glm::mat4& modelview, const glm::mat4& projection, bool apply)
 {
     mApply = apply;
@@ -2704,13 +2731,12 @@ void LLGLUserClipPlane::disable()
 void LLGLUserClipPlane::setPlane(F32 a, F32 b, F32 c, F32 d)
 {
-    glh::matrix4f& P = mProjection;
-    glh::matrix4f& M = mModelview;
+    const glm::mat4& P = mProjection;
+    const glm::mat4& M = mModelview;
-    glh::matrix4f invtrans_MVP = (P * M).inverse().transpose();
-    glh::vec4f oplane(a,b,c,d);
-    glh::vec4f cplane;
-    invtrans_MVP.mult_matrix_vec(oplane, cplane);
+    glm::mat4 invtrans_MVP = glm::transpose(glm::inverse(P*M));
+    glm::vec4 oplane(a,b,c,d);
+    glm::vec4 cplane = invtrans_MVP * oplane;
     cplane /= fabs(cplane[2]); // normalize such that depth is not scaled
     cplane[3] -= 1;
@@ -2718,13 +2744,13 @@ void LLGLUserClipPlane::setPlane(F32 a, F32 b, F32 c, F32 d)
     if(cplane[2] < 0)
         cplane *= -1;
-    glh::matrix4f suffix;
-    suffix.set_row(2, cplane);
-    glh::matrix4f newP = suffix * P;
+    glm::mat4 suffix;
+    suffix = glm::row(suffix, 2, cplane);
+    glm::mat4 newP = suffix * P;
     gGL.matrixMode(LLRender::MM_PROJECTION);
     gGL.pushMatrix();
-    gGL.loadMatrix(newP.m);
-    gGLObliqueProjectionInverse = LLMatrix4(newP.inverse().transpose().m);
+    gGL.loadMatrix(glm::value_ptr(newP));
+    gGLObliqueProjectionInverse = LLMatrix4(glm::value_ptr(glm::transpose(glm::inverse(newP))));
     gGL.matrixMode(LLRender::MM_MODELVIEW);
 }
@@ -2737,14 +2763,14 @@ LLGLDepthTest::LLGLDepthTest(GLboolean depth_enabled, GLboolean write_enabled, G
 : mPrevDepthEnabled(sDepthEnabled), mPrevDepthFunc(sDepthFunc), mPrevWriteEnabled(sWriteEnabled)
 {
     stop_glerror();
-
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE;
     checkState();
     if (!depth_enabled)
     { // always disable depth writes if depth testing is disabled
       // GL spec defines this as a requirement, but some implementations allow depth writes with testing disabled
       // The proper way to write to depth buffer with testing disabled is to enable testing and use a depth_func of GL_ALWAYS
-        write_enabled = FALSE;
+        write_enabled = GL_FALSE;
     }
     if (depth_enabled != sDepthEnabled)
@@ -2770,6 +2796,7 @@ LLGLDepthTest::LLGLDepthTest(GLboolean depth_enabled, GLboolean write_enabled, G
 LLGLDepthTest::~LLGLDepthTest()
 {
+    LL_PROFILE_ZONE_SCOPED_CATEGORY_PIPELINE;
     checkState();
     if (sDepthEnabled != mPrevDepthEnabled )
     {
@@ -2797,7 +2824,7 @@ void LLGLDepthTest::checkState()
     if (gDebugGL)
     {
         GLint func = 0;
-        GLboolean mask = FALSE;
+        GLboolean mask = GL_FALSE;
         glGetIntegerv(GL_DEPTH_FUNC, &func);
         glGetBooleanv(GL_DEPTH_WRITEMASK, &mask);
@@ -2820,31 +2847,27 @@ void LLGLDepthTest::checkState()
 LLGLSquashToFarClip::LLGLSquashToFarClip()
 {
-    glh::matrix4f proj = get_current_projection();
+    glm::mat4 proj = get_current_projection();
     setProjectionMatrix(proj, 0);
 }
-LLGLSquashToFarClip::LLGLSquashToFarClip(glh::matrix4f& P, U32 layer)
+LLGLSquashToFarClip::LLGLSquashToFarClip(const glm::mat4& P, U32 layer)
 {
     setProjectionMatrix(P, layer);
 }
-
-void LLGLSquashToFarClip::setProjectionMatrix(glh::matrix4f& projection, U32 layer)
+void LLGLSquashToFarClip::setProjectionMatrix(glm::mat4 projection, U32 layer)
 {
-
     F32 depth = 0.99999f - 0.0001f * layer;
-    for (U32 i = 0; i < 4; i++)
-    {
-        projection.element(2, i) = projection.element(3, i) * depth;
-    }
+    glm::vec4 P_row_3 = glm::row(projection, 3) * depth;
+    projection = glm::row(projection, 2, P_row_3);
     LLRender::eMatrixMode last_matrix_mode = gGL.getMatrixMode();
     gGL.matrixMode(LLRender::MM_PROJECTION);
     gGL.pushMatrix();
-    gGL.loadMatrix(projection.m);
+    gGL.loadMatrix(glm::value_ptr(projection));
     gGL.matrixMode(last_matrix_mode);
 }
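
The NVX fallback added to initGL() above relies on GL_NVX_gpu_memory_info, which reports memory sizes in kilobytes through plain glGetIntegerv queries; the diff divides by 1024 to keep mVRAM in megabytes. A minimal standalone sketch of that query (the helper name query_dedicated_vram_mb and the bare <GL/gl.h> include are illustrative assumptions, not viewer code):

    #include <GL/gl.h>  // assumes a GL header/loader that declares glGetIntegerv

    // Token value from the GL_NVX_gpu_memory_info spec, in case the header lacks it.
    #ifndef GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX
    #define GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX 0x9047
    #endif

    // Returns dedicated VRAM in MB, or 0 if the driver reports nothing.
    // Only meaningful after the extension string has been checked, as the diff does.
    static GLint query_dedicated_vram_mb()
    {
        GLint mem_kb = 0;  // the extension reports kilobytes
        glGetIntegerv(GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX, &mem_kb);
        return mem_kb / 1024;  // KB -> MB
    }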
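
The GLM rewrite of LLGLUserClipPlane::setPlane() above is the usual oblique near-plane construction: planes transform by the inverse-transpose of the matrix that transforms points, so transpose(inverse(P*M)) carries the user plane into clip space, and substituting it into row 2 of the projection makes the near plane coincide with that plane. A self-contained sketch of the same math with GLM (the free-function name is illustrative; the viewer keeps this inside the class and also stores gGLObliqueProjectionInverse):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_access.hpp>  // glm::row
    #include <cmath>

    // P = projection, M = modelview, plane = (a, b, c, d) in the space M maps from.
    // Returns a projection whose near plane has been replaced by the user plane.
    glm::mat4 build_oblique_projection(const glm::mat4& P, const glm::mat4& M,
                                       const glm::vec4& plane)
    {
        // Carry the plane into clip space via the inverse-transpose of the MVP.
        glm::vec4 cplane = glm::transpose(glm::inverse(P * M)) * plane;

        cplane /= std::fabs(cplane.z);  // normalize so depth is not rescaled
        cplane.w -= 1.0f;
        if (cplane.z < 0.0f)
            cplane *= -1.0f;

        // glm::row() returns a copy with the requested row replaced; it does not
        // modify its argument (unlike glh's in-place set_row), hence the reassignment.
        glm::mat4 suffix(1.0f);                // start from identity
        suffix = glm::row(suffix, 2, cplane);  // write the clip-space plane into row 2
        return suffix * P;
    }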
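
The same value-semantics point shows up in LLGLSquashToFarClip::setProjectionMatrix(): glm::row(m, i, v) hands back a modified copy, so the result must be assigned back to the matrix. With column vectors, row 3 of a projection matrix produces clip-space w and row 2 produces clip-space z, so copying the w row, scaled by a value just under 1, into the z row pins everything just inside the far plane. A short sketch under the same assumptions (the free-function form and name are illustrative):

    #include <glm/glm.hpp>
    #include <glm/gtc/matrix_access.hpp>  // glm::row

    // Returns a projection that forces clip z ~= w * depth, i.e. geometry is
    // squashed onto the far clip plane; 'layer' nudges successive layers closer.
    glm::mat4 squash_to_far_clip(glm::mat4 projection, unsigned layer = 0)
    {
        float depth = 0.99999f - 0.0001f * static_cast<float>(layer);

        glm::vec4 w_row = glm::row(projection, 3) * depth;  // copy of the w-producing row
        return glm::row(projection, 2, w_row);              // new matrix with its z row replaced
    }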
