Diffstat (limited to 'indra'): 22 files changed, 358 insertions, 171 deletions
diff --git a/indra/cmake/LLAddBuildTest.cmake b/indra/cmake/LLAddBuildTest.cmake index 543075db5b..21a0c8a9ca 100755 --- a/indra/cmake/LLAddBuildTest.cmake +++ b/indra/cmake/LLAddBuildTest.cmake @@ -201,19 +201,13 @@ FUNCTION(LL_ADD_INTEGRATION_TEST endif(TEST_DEBUG) ADD_EXECUTABLE(INTEGRATION_TEST_${testname} ${source_files}) SET_TARGET_PROPERTIES(INTEGRATION_TEST_${testname} PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${EXE_STAGING_DIR}") - if (WINDOWS) - set_target_properties(INTEGRATION_TEST_${testname} - PROPERTIES - LINK_FLAGS "/debug /NODEFAULTLIB:LIBCMT /SUBSYSTEM:WINDOWS /INCLUDE:__tcmalloc" - LINK_FLAGS_DEBUG "/NODEFAULTLIB:\"LIBCMT;LIBCMTD;MSVCRT\" /INCREMENTAL:NO" - LINK_FLAGS_RELEASE "" - ) - endif(WINDOWS) if(STANDALONE) SET_TARGET_PROPERTIES(INTEGRATION_TEST_${testname} PROPERTIES COMPILE_FLAGS -I"${TUT_INCLUDE_DIR}") endif(STANDALONE) + # The following was copied to llcorehttp/CMakeLists.txt's texture_load target. + # Any changes made here should be replicated there. if (WINDOWS) SET_TARGET_PROPERTIES(INTEGRATION_TEST_${testname} PROPERTIES diff --git a/indra/llcorehttp/CMakeLists.txt b/indra/llcorehttp/CMakeLists.txt index 82fb3f0d4e..9adfd0ed62 100644 --- a/indra/llcorehttp/CMakeLists.txt +++ b/indra/llcorehttp/CMakeLists.txt @@ -174,7 +174,7 @@ if (LL_TESTS) # The following come from LLAddBuildTest.cmake's INTEGRATION_TEST_xxxx target. set_target_properties(http_texture_load PROPERTIES - LINK_FLAGS "/debug /NODEFAULTLIB:LIBCMT /SUBSYSTEM:WINDOWS /INCLUDE:__tcmalloc" + LINK_FLAGS "/debug /NODEFAULTLIB:LIBCMT /SUBSYSTEM:WINDOWS ${TCMALLOC_LINK_FLAGS}" LINK_FLAGS_DEBUG "/NODEFAULTLIB:\"LIBCMT;LIBCMTD;MSVCRT\" /INCREMENTAL:NO" LINK_FLAGS_RELEASE "" ) diff --git a/indra/llcorehttp/_httpinternal.h b/indra/llcorehttp/_httpinternal.h index 465e2036b3..14f744a9f1 100644 --- a/indra/llcorehttp/_httpinternal.h +++ b/indra/llcorehttp/_httpinternal.h @@ -111,7 +111,15 @@ const int HTTP_TRACE_MIN = HTTP_TRACE_OFF; const int HTTP_TRACE_MAX = HTTP_TRACE_CURL_BODIES; // Request retry limits -const int HTTP_RETRY_COUNT_DEFAULT = 5; +// +// At a minimum, retries need to extend past any throttling +// window we're expecting from central services. In the case +// of Linden services running through the caps routers, there's +// a five-second or so window for throttling with some spillover. +// We want to span a few windows to allow transport to slow +// after onset of the throttles and then recover without a final +// failure. Other systems may need other constants. +const int HTTP_RETRY_COUNT_DEFAULT = 8; const int HTTP_RETRY_COUNT_MIN = 0; const int HTTP_RETRY_COUNT_MAX = 100; diff --git a/indra/llcorehttp/examples/http_texture_load.cpp b/indra/llcorehttp/examples/http_texture_load.cpp index 998dc9240b..40ad4f047d 100644 --- a/indra/llcorehttp/examples/http_texture_load.cpp +++ b/indra/llcorehttp/examples/http_texture_load.cpp @@ -153,6 +153,7 @@ public: // int main(int argc, char** argv) { + LLCore::HttpStatus status; bool do_random(false); bool do_verbose(false); @@ -215,6 +216,9 @@ int main(int argc, char** argv) // Initialization init_curl(); LLCore::HttpRequest::createService(); + LLCore::HttpRequest::setPolicyClassOption(LLCore::HttpRequest::DEFAULT_POLICY_ID, + LLCore::HttpRequest::CP_CONNECTION_LIMIT, + concurrency_limit); LLCore::HttpRequest::startThread(); // Get service point @@ -228,7 +232,7 @@ int main(int argc, char** argv) ws.loadTextureUuids(uuids); ws.mRandomRange = do_random; ws.mVerbose = do_verbose; - ws.mMaxConcurrency = concurrency_limit; + ws.mMaxConcurrency = 100; if (! 
ws.mTextures.size()) { diff --git a/indra/llrender/llrendertarget.cpp b/indra/llrender/llrendertarget.cpp index e8eec22b61..5fb4fc8e52 100644 --- a/indra/llrender/llrendertarget.cpp +++ b/indra/llrender/llrendertarget.cpp @@ -51,11 +51,13 @@ void check_framebuffer_status() } bool LLRenderTarget::sUseFBO = false; +U32 LLRenderTarget::sCurFBO = 0; LLRenderTarget::LLRenderTarget() : mResX(0), mResY(0), mFBO(0), + mPreviousFBO(0), mDepth(0), mStencil(0), mUseDepth(false), @@ -107,6 +109,9 @@ void LLRenderTarget::resize(U32 resx, U32 resy, U32 color_fmt) bool LLRenderTarget::allocate(U32 resx, U32 resy, U32 color_fmt, bool depth, bool stencil, LLTexUnit::eTextureType usage, bool use_fbo, S32 samples) { + resx = llmin(resx, (U32) 4096); + resy = llmin(resy, (U32) 4096); + stop_glerror(); release(); stop_glerror(); @@ -146,7 +151,7 @@ bool LLRenderTarget::allocate(U32 resx, U32 resy, U32 color_fmt, bool depth, boo glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, LLTexUnit::getInternalType(mUsage), mDepth, 0); stop_glerror(); } - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, sCurFBO); } stop_glerror(); @@ -233,7 +238,7 @@ bool LLRenderTarget::addColorAttachment(U32 color_fmt) check_framebuffer_status(); - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, sCurFBO); } mTex.push_back(tex); @@ -322,7 +327,7 @@ void LLRenderTarget::shareDepthBuffer(LLRenderTarget& target) check_framebuffer_status(); - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, sCurFBO); target.mUseDepth = true; } @@ -385,9 +390,13 @@ void LLRenderTarget::bindTarget() { if (mFBO) { + mPreviousFBO = sCurFBO; + stop_glerror(); glBindFramebuffer(GL_FRAMEBUFFER, mFBO); + sCurFBO = mFBO; + stop_glerror(); if (gGLManager.mHasDrawBuffers) { //setup multiple render targets @@ -413,16 +422,6 @@ void LLRenderTarget::bindTarget() sBoundTarget = this; } -// static -void LLRenderTarget::unbindTarget() -{ - if (gGLManager.mHasFramebufferObject) - { - glBindFramebuffer(GL_FRAMEBUFFER, 0); - } - sBoundTarget = NULL; -} - void LLRenderTarget::clear(U32 mask_in) { U32 mask = GL_COLOR_BUFFER_BIT; @@ -488,7 +487,8 @@ void LLRenderTarget::flush(bool fetch_depth) else { stop_glerror(); - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, mPreviousFBO); + sCurFBO = mPreviousFBO; stop_glerror(); } } @@ -518,7 +518,7 @@ void LLRenderTarget::copyContents(LLRenderTarget& source, S32 srcX0, S32 srcY0, stop_glerror(); glCopyTexSubImage2D(LLTexUnit::getInternalType(mUsage), 0, srcX0, srcY0, dstX0, dstY0, dstX1, dstY1); stop_glerror(); - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, sCurFBO); stop_glerror(); } else @@ -535,7 +535,7 @@ void LLRenderTarget::copyContents(LLRenderTarget& source, S32 srcX0, S32 srcY0, stop_glerror(); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0); stop_glerror(); - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, sCurFBO); stop_glerror(); } } @@ -561,7 +561,7 @@ void LLRenderTarget::copyContentsToFramebuffer(LLRenderTarget& source, S32 srcX0 stop_glerror(); glBlitFramebuffer(srcX0, srcY0, srcX1, srcY1, dstX0, dstY0, dstX1, dstY1, mask, filter); stop_glerror(); - glBindFramebuffer(GL_FRAMEBUFFER, 0); + glBindFramebuffer(GL_FRAMEBUFFER, sCurFBO); stop_glerror(); } } diff --git a/indra/llrender/llrendertarget.h b/indra/llrender/llrendertarget.h index b9ef666206..765a727b5b 100644 --- a/indra/llrender/llrendertarget.h +++ b/indra/llrender/llrendertarget.h @@ -62,6 +62,7 @@ public: 
//whether or not to use FBO implementation static bool sUseFBO; static U32 sBytesAllocated; + static U32 sCurFBO; LLRenderTarget(); ~LLRenderTarget(); @@ -95,9 +96,6 @@ public: //applies appropriate viewport void bindTarget(); - //unbind target for rendering - static void unbindTarget(); - //clear render targer, clears depth buffer if present, //uses scissor rect if in copy-to-texture mode void clear(U32 mask = 0xFFFFFFFF); @@ -147,6 +145,7 @@ protected: std::vector<U32> mTex; std::vector<U32> mInternalFormat; U32 mFBO; + U32 mPreviousFBO; U32 mDepth; bool mStencil; bool mUseDepth; diff --git a/indra/newview/app_settings/settings.xml b/indra/newview/app_settings/settings.xml index 4a69e5b356..c817579e9e 100755 --- a/indra/newview/app_settings/settings.xml +++ b/indra/newview/app_settings/settings.xml @@ -14158,5 +14158,17 @@ <real>1.0</real> </array> </map> + + <key>SimulateFBOFailure</key> + <map> + <key>Comment</key> + <string>[DEBUG] Make allocateScreenBuffer return false. Used to test error handling.</string> + <key>Persist</key> + <integer>0</integer> + <key>Type</key> + <string>Boolean</string> + <key>Value</key> + <integer>0</integer> + </map> </map> </llsd> diff --git a/indra/newview/app_settings/shaders/class1/deferred/fxaaF.glsl b/indra/newview/app_settings/shaders/class1/deferred/fxaaF.glsl index e02a7b405b..2cef8f2a5d 100644 --- a/indra/newview/app_settings/shaders/class1/deferred/fxaaF.glsl +++ b/indra/newview/app_settings/shaders/class1/deferred/fxaaF.glsl @@ -2093,7 +2093,6 @@ uniform sampler2D diffuseMap; uniform vec2 rcp_screen_res; uniform vec4 rcp_frame_opt; uniform vec4 rcp_frame_opt2; -uniform vec2 screen_res; VARYING vec2 vary_fragcoord; VARYING vec2 vary_tc; diff --git a/indra/newview/gpu_table.txt b/indra/newview/gpu_table.txt index 5e8189caa5..4c39014c8b 100644 --- a/indra/newview/gpu_table.txt +++ b/indra/newview/gpu_table.txt @@ -70,11 +70,11 @@ ATI ASUS EAH58xx .*ATI.*ASUS.*EAH58.* 5 1 1 4.1 ATI ASUS EAH62xx .*ATI.*ASUS.*EAH62.* 2 1 0 0 ATI ASUS EAH63xx .*ATI.*ASUS.*EAH63.* 2 1 0 0 ATI ASUS EAH64xx .*ATI.*ASUS.*EAH64.* 2 1 0 0 -ATI ASUS EAH65xx .*ATI.*ASUS.*EAH65.* 2 1 0 0 -ATI ASUS EAH66xx .*ATI.*ASUS.*EAH66.* 3 1 0 0 +ATI ASUS EAH65xx .*ATI.*ASUS.*EAH65.* 2 1 0 4.1 +ATI ASUS EAH66xx .*ATI.*ASUS.*EAH66.* 3 1 0 4.1 ATI ASUS EAH67xx .*ATI.*ASUS.*EAH67.* 3 1 0 0 -ATI ASUS EAH68xx .*ATI.*ASUS.*EAH68.* 5 1 0 0 -ATI ASUS EAH69xx .*ATI.*ASUS.*EAH69.* 5 1 0 0 +ATI ASUS EAH68xx .*ATI.*ASUS.*EAH68.* 5 1 0 4 +ATI ASUS EAH69xx .*ATI.*ASUS.*EAH69.* 5 1 0 4.1 ATI ASUS Radeon X1xxx .*ATI.*ASUS.*X1.* 2 1 1 2.1 ATI Radeon X7xx .*ATI.*ASUS.*X7.* 1 1 0 0 ATI Radeon X19xx .*ATI.*(Radeon|Diamond) X19.* ?.* 2 1 1 2.1 @@ -108,13 +108,22 @@ ATI Radeon HD 65xx .*ATI.*AMD Radeon.* HD [67]5..[MG] 2 1 1 4.2 ATI Radeon HD 66xx .*ATI.*AMD Radeon.* HD [67]6..[MG] 3 1 1 4.2 ATI Radeon HD 7100 .*ATI.*AMD Radeon.* HD 71.* 2 1 0 0 ATI Radeon HD 7200 .*ATI.*AMD Radeon.* HD 72.* 2 1 0 0 -ATI Radeon HD 7300 .*ATI.*AMD Radeon.* HD 73.* 2 1 0 0 -ATI Radeon HD 7400 .*ATI.*AMD Radeon.* HD 74.* 2 1 0 0 +ATI Radeon HD 7300 .*ATI.*AMD Radeon.* HD 73.* 2 1 0 4.2 +ATI Radeon HD 7400 .*ATI.*AMD Radeon.* HD 74.* 2 1 0 4.2 ATI Radeon HD 7500 .*ATI.*AMD Radeon.* HD 75.* 3 1 1 4.2 -ATI Radeon HD 7600 .*ATI.*AMD Radeon.* HD 76.* 3 1 0 0 +ATI Radeon HD 7600 .*ATI.*AMD Radeon.* HD 76.* 3 1 0 4.2 ATI Radeon HD 7700 .*ATI.*AMD Radeon.* HD 77.* 4 1 1 4.2 ATI Radeon HD 7800 .*ATI.*AMD Radeon.* HD 78.* 5 1 1 4.2 ATI Radeon HD 7900 .*ATI.*AMD Radeon.* HD 79.* 5 1 1 4.2 +ATI ASUS HD7100 .*ATI.*ASUS.* HD71.* 2 1 0 0 +ATI 
ASUS HD7200 .*ATI.*ASUS.* HD72.* 2 1 0 0 +ATI ASUS HD7300 .*ATI.*ASUS.* HD73.* 2 1 0 0 +ATI ASUS HD7400 .*ATI.*ASUS.* HD74.* 2 1 0 0 +ATI ASUS HD7500 .*ATI.*ASUS.* HD75.* 3 1 1 4.2 +ATI ASUS HD7600 .*ATI.*ASUS.* HD76.* 3 1 0 0 +ATI ASUS HD7700 .*ATI.*ASUS.* HD77.* 4 1 1 4.2 +ATI ASUS HD7800 .*ATI.*ASUS.* HD78.* 5 1 1 4.2 +ATI ASUS HD7900 .*ATI.*ASUS.* HD79.* 5 1 1 4.2 ATI Mobility Radeon 4100 .*ATI.*Mobility.*41.. 1 1 1 3.3 ATI Mobility Radeon 7xxx .*ATI.*Mobility.*Radeon 7.* 0 1 1 1.3 ATI Mobility Radeon 8xxx .*ATI.*Mobility.*Radeon 8.* 0 1 0 0 @@ -167,6 +176,7 @@ ATI Radeon HD 3400 .*ATI.*Radeon HD *34.. 1 1 1 4 ATI Radeon HD 3500 .*ATI.*Radeon HD *35.. 2 1 0 0 ATI Radeon HD 3600 .*ATI.*Radeon HD *36.. 3 1 1 3.3 ATI Radeon HD 3700 .*ATI.*Radeon HD *37.. 3 1 0 0 +ATI HD3700 .*ATI.* HD37.. 3 1 0 3.3 ATI Radeon HD 3800 .*ATI.*Radeon HD *38.. 3 1 1 4 ATI Radeon HD 4100 .*ATI.*Radeon HD *41.. 1 1 0 0 ATI Radeon HD 4200 .*ATI.*Radeon HD *42.. 1 1 1 4 @@ -176,8 +186,10 @@ ATI Radeon HD 4500 .*ATI.*Radeon HD *45.. 2 1 1 3.3 ATI Radeon HD 4600 .*ATI.*Radeon HD *46.. 3 1 1 4 ATI Radeon HD 4700 .*ATI.*Radeon HD *47.. 3 1 1 3.3 ATI Radeon HD 4800 .*ATI.*Radeon HD *48.. 3 1 1 4 +ATI ASUS EAH5400 .*ATI.*ASUS EAH54.. 3 1 1 4.2 ATI Radeon HD 5400 .*ATI.*Radeon HD *54.. 3 1 1 4.2 ATI Radeon HD 5500 .*ATI.*Radeon HD *55.. 3 1 1 4.2 +ATI ASUS EAH5500 .*ATI.*ASUS EAH55.. 3 1 1 4.2 ATI Radeon HD 5600 .*ATI.*Radeon HD *56.. 3 1 1 4.2 ATI Radeon HD 5700 .*ATI.*Radeon HD *57.. 3 1 1 4.2 ATI Radeon HD 5800 .*ATI.*Radeon HD *58.. 4 1 1 4.2 @@ -270,7 +282,7 @@ ATI FirePro 5000 .*ATI.*FirePro V5.* 3 1 0 0 ATI FirePro 7000 .*ATI.*FirePro V7.* 3 1 0 0 ATI FirePro M .*ATI.*FirePro M.* 3 1 1 4.2 ATI R300 (9700) .*R300.* 0 1 1 2.1 -ATI Radeon .*ATI.*(Diamond|Radeon).* 0 1 0 0 +ATI Radeon .*ATI.*(Diamond|Radeon).* 0 1 0 4.2 Intel X3100 .*Intel.*X3100.* 1 1 1 2.1 Intel GMA 3600 .*Intel.* 3600.* 0 1 1 3 Intel 830M .*Intel.*830M 0 0 0 0 @@ -293,9 +305,9 @@ Intel Brookdale .*Intel.*Brookdale.* 0 0 1 1.3 Intel Cantiga .*Intel.*Cantiga.* 0 0 1 2 Intel Eaglelake .*Intel.*Eaglelake.* 1 1 1 2 Intel Graphics Media HD .*Intel.*Graphics Media.*HD.* 1 1 1 2.1 -Intel HD Graphics 2000 .*Intel.*HD Graphics 2.* 2 1 0 0 +Intel HD Graphics 2000 .*Intel.*HD Graphics 2.* 2 1 0 4 Intel HD Graphics 3000 .*Intel.*HD Graphics 3.* 3 1 1 3.1 -Intel HD Graphics 4000 .*Intel.*HD Graphics 4.* 3 1 1 3.3 +Intel HD Graphics 4000 .*Intel.*HD Graphics 4.* 3 1 1 4 Intel HD2000 .*Intel.*HD2000.* 2 1 0 0 Intel HD3000 .*Intel.*HD3000.* 3 1 0 0 Intel HD Graphics .*Intel.*HD Graphics.* 2 1 1 4 @@ -341,8 +353,8 @@ NVIDIA GT 325M .*NVIDIA .*GT *32*M.* 3 1 1 3.3 NVIDIA GT 330M .*NVIDIA .*GT *33*M.* 3 1 1 3.3 NVIDIA GT 340M .*NVIDIA .*GT *34*M.* 4 1 1 3.3 NVIDIA GTS 350M .*NVIDIA .*GTS *35*M.* 4 1 1 3.3 -NVIDIA GTS 360M .*NVIDIA .*GTS *36*M.* 5 1 1 3.3 -NVIDIA 405M .*NVIDIA .* 40*M.* 2 1 0 0 +NVIDIA GTS 360M .*NVIDIA .*GTS *360M.* 5 1 1 3.3 +NVIDIA 405M .*NVIDIA .* 40*M.* 2 1 0 4.2 NVIDIA 410M .*NVIDIA .* 41*M.* 3 1 0 0 NVIDIA GT 415M .*NVIDIA .*GT *41*M.* 3 1 1 4.2 NVIDIA GT 420M .*NVIDIA .*GT *42*M.* 3 1 1 4.2 @@ -369,46 +381,51 @@ NVIDIA GTX 670M .*NVIDIA .*GTX *67*M.* 5 1 1 4.2 NVIDIA GTX 680M .*NVIDIA .*GTX *68*M.* 5 1 0 0 NVIDIA GTX 690M .*NVIDIA .*GTX *69*M.* 5 1 0 0 NVIDIA G100 .*NVIDIA .*G10.* 3 1 1 4.2 -NVIDIA GT 120 .*NVIDIA .*GT *12.* 2 1 0 0 -NVIDIA GT 130 .*NVIDIA .*GT *13.* 2 1 0 0 +NVIDIA GT 120 .*NVIDIA .*GT *12.* 2 1 0 3 +NVIDIA GT 130 .*NVIDIA .*GT *13.* 2 1 0 3.3 NVIDIA GTS 150 .*NVIDIA .*GTS *15.* 2 1 0 0 -NVIDIA 205 .*NVIDIA .*GeForce 205.* 2 
1 1 3.3 +NVIDIA 200 .*NVIDIA .*GeForce 20.* 2 1 1 3.3 +NVIDIA G200 .*NVIDIA .*GeForce G20.* 2 1 1 3.3 +NVIDIA G210 .*NVIDIA .*GeForce G210.* 3 1 1 3.3 NVIDIA 210 .*NVIDIA .*GeForce 210.* 3 1 1 3.3 NVIDIA GT 220 .*NVIDIA .*GT *22.* 2 1 1 3.3 +NVIDIA GT 230 .*NVIDIA .*GT *23.* 2 1 1 3.3 +NVIDIA GT 240 .*NVIDIA .*GT *24.* 4 1 1 3.3 NVIDIA GTS 240 .*NVIDIA .*GTS *24.* 4 1 1 3.3 NVIDIA GTS 250 .*NVIDIA .*GTS *25.* 4 1 1 3.3 NVIDIA GTX 260 .*NVIDIA .*GTX *26.* 4 1 1 3.3 -NVIDIA GTX 270 .*NVIDIA .*GTX *27.* 4 1 0 0 +NVIDIA GTX 270 .*NVIDIA .*GTX *27.* 4 1 0 3.3 NVIDIA GTX 280 .*NVIDIA .*GTX *28.* 4 1 1 3.3 -NVIDIA GTX 290 .*NVIDIA .*GTX *29.* 5 1 0 0 +NVIDIA GTX 290 .*NVIDIA .*GTX *29.* 5 1 0 3.3 NVIDIA 310 .*NVIDIA .*GeForce 310.* 3 1 1 3.3 NVIDIA 315 .*NVIDIA .*GeForce 315.* 3 1 1 3.3 -NVIDIA GT 320 .*NVIDIA .*GT *32.* 3 1 0 0 -NVIDIA GT 330 .*NVIDIA .*GT *33.* 3 1 0 0 +NVIDIA GT 320 .*NVIDIA .*GT *32.* 3 1 0 3.3 +NVIDIA GT 330 .*NVIDIA .*GT *33.* 3 1 0 3.3 NVIDIA GT 340 .*NVIDIA .*GT *34.* 3 1 0 0 -NVIDIA 405 .*NVIDIA .* 405.* 3 1 0 0 +NVIDIA 405 .*NVIDIA .* 405.* 3 1 0 3.3 NVIDIA GT 420 .*NVIDIA .*GT *42.* 3 1 1 4.2 -NVIDIA GT 430 .*NVIDIA .*GT *43.* 3 1 1 4.1 -NVIDIA GT 440 .*NVIDIA .*GT *44.* 4 1 0 0 +NVIDIA GT 430 .*NVIDIA .*GT *43.* 3 1 1 4.2 +NVIDIA GT 440 .*NVIDIA .*GT *44.* 4 1 0 4.2 NVIDIA GTS 450 .*NVIDIA .*GTS *45.* 4 1 1 4.2 -NVIDIA GTX 460 .*NVIDIA .*GTX *46.* 5 1 1 4.2 +NVIDIA GTX 460 .*NVIDIA .*GTX *46.* 5 1 1 4.3 NVIDIA GTX 470 .*NVIDIA .*GTX *47.* 5 1 1 4.2 NVIDIA GTX 480 .*NVIDIA .*GTX *48.* 5 1 1 4.2 NVIDIA 510 .*NVIDIA .* 510.* 3 1 0 0 NVIDIA GT 520 .*NVIDIA .*GT *52.* 3 1 1 4.2 NVIDIA GT 530 .*NVIDIA .*GT *53.* 3 1 1 4.2 NVIDIA GT 540 .*NVIDIA .*GT *54.* 3 1 1 4.2 -NVIDIA GTX 550 .*NVIDIA .*GTX *55.* 5 1 1 4.2 +NVIDIA GTX 550 .*NVIDIA .*GTX *55.* 5 1 1 4.3 NVIDIA GTX 560 .*NVIDIA .*GTX *56.* 5 1 1 4.2 NVIDIA GTX 570 .*NVIDIA .*GTX *57.* 5 1 1 4.2 -NVIDIA GTX 580 .*NVIDIA .*GTX *58.* 5 1 1 4.2 +NVIDIA GTX 580 .*NVIDIA .*GTX *58.* 5 1 1 4.3 NVIDIA GTX 590 .*NVIDIA .*GTX *59.* 5 1 1 4.2 NVIDIA GT 610 .*NVIDIA .*GT *61.* 3 1 1 4.2 -NVIDIA GT 620 .*NVIDIA .*GT *62.* 3 1 0 0 -NVIDIA GT 630 .*NVIDIA .*GT *63.* 3 1 0 0 -NVIDIA GT 640 .*NVIDIA .*GT *64.* 3 1 0 0 +NVIDIA GT 620 .*NVIDIA .*GT *62.* 3 1 0 4.2 +NVIDIA GT 630 .*NVIDIA .*GT *63.* 3 1 0 4.2 +NVIDIA GT 640 .*NVIDIA .*GT *64.* 3 1 0 4.3 NVIDIA GT 650 .*NVIDIA .*GT *65.* 3 1 1 4.2 -NVIDIA GTX 660 .*NVIDIA .*GTX *66.* 5 1 0 0 +NVIDIA GTX 650 .*NVIDIA .*GTX *65.* 3 1 1 4.2 +NVIDIA GTX 660 .*NVIDIA .*GTX *66.* 5 1 0 4.3 NVIDIA GTX 670 .*NVIDIA .*GTX *67.* 5 1 1 4.2 NVIDIA GTX 680 .*NVIDIA .*GTX *68.* 5 1 1 4.2 NVIDIA GTX 690 .*NVIDIA .*GTX *69.* 5 1 1 4.2 @@ -442,8 +459,8 @@ NVIDIA GeForce 7600 .*NVIDIA .*GeForce 76.* 2 1 1 2.1 NVIDIA GeForce 7800 .*NVIDIA .*GeForce 78.* 2 1 1 2.1 NVIDIA GeForce 7900 .*NVIDIA .*GeForce 79.* 3 1 1 2.1 NVIDIA GeForce 8100 .*NVIDIA .*GeForce 81.* 1 1 0 0 -NVIDIA GeForce 8200M .*NVIDIA .*GeForce 8200M.* 1 1 0 0 -NVIDIA GeForce 8200 .*NVIDIA .*GeForce 82.* 1 1 0 0 +NVIDIA GeForce 8200M .*NVIDIA .*GeForce 8200M.* 1 1 0 3.3 +NVIDIA GeForce 8200 .*NVIDIA .*GeForce 82.* 1 1 0 2.1 NVIDIA GeForce 8300 .*NVIDIA .*GeForce 83.* 3 1 1 3.3 NVIDIA GeForce 8400M .*NVIDIA .*GeForce 8400M.* 1 1 1 3.3 NVIDIA GeForce 8400 .*NVIDIA .*GeForce 84.* 2 1 1 3.3 @@ -455,9 +472,9 @@ NVIDIA GeForce 8700 .*NVIDIA .*GeForce 87.* 3 1 0 0 NVIDIA GeForce 8800M .*NVIDIA .*GeForce 8800M.* 2 1 1 3.3 NVIDIA GeForce 8800 .*NVIDIA .*GeForce 88.* 3 1 1 3.3 NVIDIA GeForce 9100M .*NVIDIA .*GeForce 9100M.* 0 1 0 0 -NVIDIA GeForce 9100 
.*NVIDIA .*GeForce 91.* 0 1 0 0 -NVIDIA GeForce 9200M .*NVIDIA .*GeForce 9200M.* 1 1 0 0 -NVIDIA GeForce 9200 .*NVIDIA .*GeForce 92.* 1 1 0 0 +NVIDIA GeForce 9100 .*NVIDIA .*GeForce 91.* 0 1 0 3.3 +NVIDIA GeForce 9200M .*NVIDIA .*GeForce 9200M.* 1 1 0 3.1 +NVIDIA GeForce 9200 .*NVIDIA .*GeForce 92.* 1 1 0 3.3 NVIDIA GeForce 9300M .*NVIDIA .*GeForce 9300M.* 1 1 1 3.3 NVIDIA GeForce 9300 .*NVIDIA .*GeForce 93.* 1 1 1 3.3 NVIDIA GeForce 9400M .*NVIDIA .*GeForce 9400M.* 2 1 1 3.3 @@ -470,7 +487,7 @@ NVIDIA GeForce 9700M .*NVIDIA .*GeForce 9700M.* 0 1 1 3.3 NVIDIA GeForce 9800M .*NVIDIA .*GeForce 9800M.* 2 1 1 3.3 NVIDIA GeForce 9800 .*NVIDIA .*GeForce 98.* 3 1 1 3.3 NVIDIA GeForce FX 5100 .*NVIDIA .*GeForce FX 51.* 0 1 0 0 -NVIDIA GeForce FX 5200 .*NVIDIA .*GeForce FX 52.* 0 1 0 0 +NVIDIA GeForce FX 5200 .*NVIDIA .*GeForce FX 52.* 0 1 0 2.1 NVIDIA GeForce FX 5300 .*NVIDIA .*GeForce FX 53.* 0 1 0 0 NVIDIA GeForce FX 5500 .*NVIDIA .*GeForce FX 55.* 0 1 1 2.1 NVIDIA GeForce FX 5600 .*NVIDIA .*GeForce FX 56.* 1 1 1 2.1 @@ -505,7 +522,7 @@ NVIDIA D9M .*NVIDIA .*D9M.* 1 1 0 0 NVIDIA G94 .*NVIDIA .*G94.* 3 1 0 0 NVIDIA GeForce Go 6 .*GeForce Go 6.* 1 1 0 0 NVIDIA ION 2 .*NVIDIA .*ION 2.* 2 1 0 0 -NVIDIA ION .*NVIDIA Corporation.*ION.* 2 1 1 0 +NVIDIA ION .*NVIDIA Corporation.*ION.* 2 1 1 3.3 NVIDIA NB8M .*NVIDIA .*NB8M.* 1 1 0 0 NVIDIA NB8P .*NVIDIA .*NB8P.* 2 1 0 0 NVIDIA NB9E .*NVIDIA .*NB9E.* 3 1 0 0 @@ -513,7 +530,7 @@ NVIDIA NB9M .*NVIDIA .*NB9M.* 1 1 0 0 NVIDIA NB9P .*NVIDIA .*NB9P.* 2 1 0 0 NVIDIA N10 .*NVIDIA .*N10.* 1 1 0 0 NVIDIA GeForce PCX .*GeForce PCX.* 0 1 0 0 -NVIDIA Generic .*NVIDIA .*Unknown.* 0 0 0 0 +NVIDIA Generic .*NVIDIA .*Unknown.* 0 0 0 3 NVIDIA NV17 .*NVIDIA .*NV17.* 0 1 0 0 NVIDIA NV34 .*NVIDIA .*NV34.* 0 1 0 0 NVIDIA NV35 .*NVIDIA .*NV35.* 0 1 0 0 @@ -521,7 +538,7 @@ NVIDIA NV36 .*NVIDIA .*NV36.* 1 1 0 0 NVIDIA NV41 .*NVIDIA .*NV41.* 1 1 0 0 NVIDIA NV43 .*NVIDIA .*NV43.* 1 1 0 0 NVIDIA NV44 .*NVIDIA .*NV44.* 1 1 0 0 -NVIDIA nForce .*NVIDIA .*nForce.* 0 0 0 0 +NVIDIA nForce .*NVIDIA .*nForce.* 0 0 0 3.3 NVIDIA MCP51 .*NVIDIA .*MCP51.* 1 1 0 0 NVIDIA MCP61 .*NVIDIA .*MCP61.* 1 1 0 0 NVIDIA MCP67 .*NVIDIA .*MCP67.* 1 1 0 0 @@ -532,40 +549,40 @@ NVIDIA MCP78 .*NVIDIA .*MCP78.* 1 1 0 0 NVIDIA MCP79 .*NVIDIA .*MCP79.* 1 1 0 0 NVIDIA MCP7A .*NVIDIA .*MCP7A.* 1 1 0 0 NVIDIA Quadro2 .*Quadro2.* 0 1 0 0 -NVIDIA Quadro 1000M .*Quadro.*1000M.* 2 1 0 0 -NVIDIA Quadro 2000 M/D .*Quadro.*2000.* 3 1 0 0 +NVIDIA Quadro 1000M .*Quadro.*1000M.* 2 1 0 4.2 +NVIDIA Quadro 2000 M/D .*Quadro.*2000.* 3 1 0 4.2 NVIDIA Quadro 3000M .*Quadro.*3000M.* 3 1 0 0 NVIDIA Quadro 4000M .*Quadro.*4000M.* 3 1 0 0 -NVIDIA Quadro 4000 .*Quadro *4000.* 3 1 0 0 +NVIDIA Quadro 4000 .*Quadro *4000.* 3 1 0 4.2 NVIDIA Quadro 50x0 M .*Quadro.*50.0.* 3 1 0 0 NVIDIA Quadro 6000 .*Quadro.*6000.* 3 1 0 0 -NVIDIA Quadro 400 .*Quadro.*400.* 2 1 0 0 -NVIDIA Quadro 600 .*Quadro.*600.* 2 1 0 0 +NVIDIA Quadro 400 .*Quadro.*400.* 2 1 0 3.3 +NVIDIA Quadro 600 .*Quadro.*600.* 2 1 0 3.3 NVIDIA Quadro4 .*Quadro4.* 0 1 0 0 NVIDIA Quadro DCC .*Quadro DCC.* 0 1 0 0 NVIDIA Quadro CX .*Quadro.*CX.* 3 1 0 0 NVIDIA Quadro FX 770M .*Quadro.*FX *770M.* 2 1 0 0 -NVIDIA Quadro FX 1500M .*Quadro.*FX *1500M.* 1 1 0 0 +NVIDIA Quadro FX 1500M .*Quadro.*FX *1500M.* 1 1 0 2.1 NVIDIA Quadro FX 1600M .*Quadro.*FX *1600M.* 2 1 0 0 NVIDIA Quadro FX 2500M .*Quadro.*FX *2500M.* 2 1 0 0 NVIDIA Quadro FX 2700M .*Quadro.*FX *2700M.* 3 1 0 0 -NVIDIA Quadro FX 2800M .*Quadro.*FX *2800M.* 3 1 0 0 -NVIDIA Quadro FX 3500 .*Quadro.*FX *3500.* 2 1 0 0 +NVIDIA Quadro 
FX 2800M .*Quadro.*FX *2800M.* 3 1 0 3.3 +NVIDIA Quadro FX 3500 .*Quadro.*FX *3500.* 2 1 0 2.1 NVIDIA Quadro FX 3600 .*Quadro.*FX *3600.* 3 1 0 0 -NVIDIA Quadro FX 3700 .*Quadro.*FX *3700.* 3 1 0 0 -NVIDIA Quadro FX 3800 .*Quadro.*FX *3800.* 3 1 0 0 +NVIDIA Quadro FX 3700 .*Quadro.*FX *3700.* 3 1 0 3.3 +NVIDIA Quadro FX 3800 .*Quadro.*FX *3800.* 3 1 0 3.2 NVIDIA Quadro FX 4500 .*Quadro.*FX *45.* 3 1 0 0 -NVIDIA Quadro FX 880M .*Quadro.*FX *880M.* 3 1 0 0 +NVIDIA Quadro FX 880M .*Quadro.*FX *880M.* 3 1 0 3.3 NVIDIA Quadro FX 4800 .*NVIDIA .*Quadro *FX *4800.* 3 1 0 0 -NVIDIA Quadro FX .*Quadro FX.* 1 1 0 0 -NVIDIA Quadro NVS 1xxM .*Quadro NVS *1.[05]M.* 0 1 1 2.1 +NVIDIA Quadro FX .*Quadro FX.* 1 1 0 3.3 +NVIDIA Quadro NVS 1xxM .*Quadro NVS *1.[05]M.* 0 1 1 3.3 NVIDIA Quadro NVS 300M .*NVIDIA .*NVS *300M.* 2 1 0 0 NVIDIA Quadro NVS 320M .*NVIDIA .*NVS *320M.* 2 1 0 0 NVIDIA Quadro NVS 2100M .*NVIDIA .*NVS *2100M.* 2 1 0 0 NVIDIA Quadro NVS 3100M .*NVIDIA .*NVS *3100M.* 2 1 0 0 -NVIDIA Quadro NVS 4200M .*NVIDIA .*NVS *4200M.* 2 1 0 0 +NVIDIA Quadro NVS 4200M .*NVIDIA .*NVS *4200M.* 2 1 0 4.1 NVIDIA Quadro NVS 5100M .*NVIDIA .*NVS *5100M.* 2 1 0 0 -NVIDIA Quadro NVS .*NVIDIA .*NVS 0 1 0 0 +NVIDIA Quadro NVS .*NVIDIA .*NVS 0 1 0 3.2 NVIDIA Corporation N12P .*NVIDIA .*N12P.* 1 1 1 4.1 NVIDIA Corporation N11M .*NVIDIA .*N11M.* 2 1 0 0 NVIDIA RIVA TNT .*RIVA TNT.* 0 0 0 0 @@ -579,5 +596,3 @@ Apple Generic Apple.*Generic.* 0 0 0 0 Apple Software Renderer Apple.*Software Renderer.* 0 0 0 0 Humper Humper.* 0 1 1 2.1 PowerVR SGX545 .*PowerVR SGX.* 1 1 1 3 - - diff --git a/indra/newview/llfeaturemanager.cpp b/indra/newview/llfeaturemanager.cpp index 6f11d4d4ca..24a27c5146 100644 --- a/indra/newview/llfeaturemanager.cpp +++ b/indra/newview/llfeaturemanager.cpp @@ -419,7 +419,7 @@ void LLFeatureManager::parseGPUTable(std::string filename) // setup the tokenizer std::string buf(buffer); - std::string cls, label, expr, supported; + std::string cls, label, expr, supported, stats_based, expected_gl_version; boost_tokenizer tokens(buf, boost::char_separator<char>("\t\n")); boost_tokenizer::iterator token_iter = tokens.begin(); @@ -440,6 +440,14 @@ void LLFeatureManager::parseGPUTable(std::string filename) { supported = *token_iter++; } + if (token_iter != tokens.end()) + { + stats_based = *token_iter++; + } + if (token_iter != tokens.end()) + { + expected_gl_version = *token_iter++; + } if (label.empty() || expr.empty() || cls.empty() || supported.empty()) { @@ -450,7 +458,9 @@ void LLFeatureManager::parseGPUTable(std::string filename) json << "{'label' : '" << label << "',\n" << "'regexp' : '" << expr << "',\n" << "'class' : '" << cls << "',\n" << - "'supported' : '" << supported << "'\n},\n"; + "'supported' : '" << supported << "',\n" << + "'stats_based' : " << stats_based << ",\n" << + "'gl_version' : " << expected_gl_version << "\n},\n"; #endif for (U32 i = 0; i < expr.length(); i++) /*Flawfinder: ignore*/ @@ -720,7 +730,9 @@ void LLFeatureManager::setGraphicsLevel(S32 level, bool skipFeatures) maskFeatures("High"); maskFeatures("Class5"); break; - + case 6: + maskFeatures("Ultra"); + break; default: maskFeatures("Low"); maskFeatures("Class0"); diff --git a/indra/newview/llfloaterpreference.cpp b/indra/newview/llfloaterpreference.cpp index 5752f839ce..542e96cf16 100755 --- a/indra/newview/llfloaterpreference.cpp +++ b/indra/newview/llfloaterpreference.cpp @@ -750,7 +750,10 @@ void LLFloaterPreference::onClose(bool app_quitting) { gSavedSettings.setS32("LastPrefTab", getChild<LLTabContainer>("pref 
core")->getCurrentPanelIndex()); LLPanelLogin::setAlwaysRefresh(false); - cancel(); + if (!app_quitting) + { + cancel(); + } } void LLFloaterPreference::onOpenHardwareSettings() diff --git a/indra/newview/llgroupmgr.cpp b/indra/newview/llgroupmgr.cpp index 6916cf813a..81eb1d397e 100644 --- a/indra/newview/llgroupmgr.cpp +++ b/indra/newview/llgroupmgr.cpp @@ -238,6 +238,7 @@ LLGroupMgrGroupData::LLGroupMgrGroupData(const LLUUID& id) : mPendingRoleMemberRequest(FALSE), mAccessTime(0.0f) { + mMemberVersion.generate(); } void LLGroupMgrGroupData::setAccessed() @@ -318,14 +319,14 @@ void LLGroupMgrGroupData::setRoleData(const LLUUID& role_id, LLRoleData role_dat role_data.mChangeType = RC_UPDATE_DATA; } else - { + { role_data.mChangeType = RC_UPDATE_POWERS; } mRoleChanges[role_id] = role_data; } else - { + { llwarns << "Change being made to non-existant role " << role_id << llendl; } } @@ -424,6 +425,7 @@ void LLGroupMgrGroupData::removeMemberData() } mMembers.clear(); mMemberDataComplete = FALSE; + mMemberVersion.generate(); } void LLGroupMgrGroupData::removeRoleData() @@ -945,6 +947,8 @@ void LLGroupMgr::processGroupMembersReply(LLMessageSystem* msg, void** data) } } + group_datap->mMemberVersion.generate(); + if (group_datap->mMembers.size() == (U32)group_datap->mMemberCount) { group_datap->mMemberDataComplete = TRUE; @@ -1771,8 +1775,6 @@ void LLGroupMgr::sendGroupMemberEjects(const LLUUID& group_id, bool start_message = true; LLMessageSystem* msg = gMessageSystem; - - LLGroupMgrGroupData* group_datap = LLGroupMgr::getInstance()->getGroupData(group_id); if (!group_datap) return; @@ -1833,6 +1835,8 @@ void LLGroupMgr::sendGroupMemberEjects(const LLUUID& group_id, { gAgent.sendReliableMessage(); } + + group_datap->mMemberVersion.generate(); } @@ -1990,6 +1994,8 @@ void LLGroupMgr::processCapGroupMembersRequest(const LLSD& content) group_datap->mMembers[member_id] = data; } + group_datap->mMemberVersion.generate(); + // Technically, we have this data, but to prevent completely overhauling // this entire system (it would be nice, but I don't have the time), // I'm going to be dumb and just call services I most likely don't need diff --git a/indra/newview/llgroupmgr.h b/indra/newview/llgroupmgr.h index 62b2978f21..d8c1ab7ef5 100644 --- a/indra/newview/llgroupmgr.h +++ b/indra/newview/llgroupmgr.h @@ -236,6 +236,8 @@ public: F32 getAccessTime() const { return mAccessTime; } void setAccessed(); + const LLUUID& getMemberVersion() const { return mMemberVersion; } + public: typedef std::map<LLUUID,LLGroupMemberData*> member_list_t; typedef std::map<LLUUID,LLGroupRoleData*> role_list_t; @@ -284,6 +286,9 @@ private: BOOL mPendingRoleMemberRequest; F32 mAccessTime; + + // Generate a new ID every time mMembers + LLUUID mMemberVersion; }; struct LLRoleAction diff --git a/indra/newview/llpanelgroupgeneral.cpp b/indra/newview/llpanelgroupgeneral.cpp index 51b4d2ea65..993ffb7825 100644 --- a/indra/newview/llpanelgroupgeneral.cpp +++ b/indra/newview/llpanelgroupgeneral.cpp @@ -670,7 +670,6 @@ void LLPanelGroupGeneral::update(LLGroupChange gc) { mMemberProgress = gdatap->mMembers.begin(); mPendingMemberUpdate = TRUE; - mUdpateSessionID.generate(); sSDTime = 0.0f; sElementTime = 0.0f; @@ -730,7 +729,7 @@ void LLPanelGroupGeneral::updateMembers() // If name is not cached, onNameCache() should be called when it is cached and add this member to list. 
LLAvatarNameCache::get(mMemberProgress->first, boost::bind(&LLPanelGroupGeneral::onNameCache, - this, mUdpateSessionID, member, _1, _2)); + this, gdatap->getMemberVersion(), member, _2)); } } @@ -768,11 +767,15 @@ void LLPanelGroupGeneral::addMember(LLGroupMemberData* member) } } -void LLPanelGroupGeneral::onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLUUID& id, const LLAvatarName& av_name) +void LLPanelGroupGeneral::onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLAvatarName& av_name) { - if (!member - || update_id != mUdpateSessionID) + LLGroupMgrGroupData* gdatap = LLGroupMgr::getInstance()->getGroupData(mGroupID); + + if (!gdatap + || !gdatap->isMemberDataComplete() + || gdatap->getMemberVersion() != update_id) { + // Stale data return; } diff --git a/indra/newview/llpanelgroupgeneral.h b/indra/newview/llpanelgroupgeneral.h index b179f78c56..1b4e8e2645 100644 --- a/indra/newview/llpanelgroupgeneral.h +++ b/indra/newview/llpanelgroupgeneral.h @@ -63,7 +63,7 @@ public: virtual void setupCtrls (LLPanel* parent); - void onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLUUID& id, const LLAvatarName& av_name); + void onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLAvatarName& av_name); private: void reset(); @@ -90,7 +90,6 @@ private: BOOL mChanged; BOOL mFirstUse; std::string mIncompleteMemberDataStr; - LLUUID mUdpateSessionID; // Group information (include any updates in updateChanged) LLLineEditor *mGroupNameEditor; diff --git a/indra/newview/llpanelgrouproles.cpp b/indra/newview/llpanelgrouproles.cpp index 5720168f81..ff106882f4 100644 --- a/indra/newview/llpanelgrouproles.cpp +++ b/indra/newview/llpanelgrouproles.cpp @@ -745,7 +745,6 @@ LLPanelGroupMembersSubTab::LLPanelGroupMembersSubTab() mHasMatch(FALSE), mNumOwnerAdditions(0) { - mUdpateSessionID = LLUUID::null; } LLPanelGroupMembersSubTab::~LLPanelGroupMembersSubTab() @@ -1427,13 +1426,20 @@ U64 LLPanelGroupMembersSubTab::getAgentPowersBasedOnRoleChanges(const LLUUID& ag return GP_NO_POWERS; } - LLGroupMemberData* member_data = gdatap->mMembers[agent_id]; - if ( !member_data ) + LLGroupMgrGroupData::member_list_t::iterator iter = gdatap->mMembers.find(agent_id); + if ( iter == gdatap->mMembers.end() ) { llwarns << "LLPanelGroupMembersSubTab::getAgentPowersBasedOnRoleChanges() -- No member data for member with UUID " << agent_id << llendl; return GP_NO_POWERS; } + LLGroupMemberData* member_data = (*iter).second; + if (!member_data) + { + llwarns << "LLPanelGroupMembersSubTab::getAgentPowersBasedOnRoleChanges() -- Null member data for member with UUID " << agent_id << llendl; + return GP_NO_POWERS; + } + //see if there are unsaved role changes for this agent role_change_data_map_t* role_change_datap = NULL; member_role_changes_map_t::iterator member = mMemberRoleChangeData.find(agent_id); @@ -1548,10 +1554,6 @@ void LLPanelGroupMembersSubTab::update(LLGroupChange gc) mMemberProgress = gdatap->mMembers.begin(); mPendingMemberUpdate = TRUE; mHasMatch = FALSE; - // Generate unique ID for current updateMembers()- see onNameCache for details. - // Using unique UUID is perhaps an overkill but this way we are perfectly safe - // from coincidences. 
- mUdpateSessionID.generate(); } else { @@ -1579,14 +1581,14 @@ void LLPanelGroupMembersSubTab::update(LLGroupChange gc) } } -void LLPanelGroupMembersSubTab::addMemberToList(LLUUID id, LLGroupMemberData* data) +void LLPanelGroupMembersSubTab::addMemberToList(LLGroupMemberData* data) { if (!data) return; LLUIString donated = getString("donation_area"); donated.setArg("[AREA]", llformat("%d", data->getContribution())); LLNameListCtrl::NameItem item_params; - item_params.value = id; + item_params.value = data->getID(); item_params.columns.add().column("name").font.name("SANSSERIF_SMALL").style("NORMAL"); @@ -1600,17 +1602,12 @@ void LLPanelGroupMembersSubTab::addMemberToList(LLUUID id, LLGroupMemberData* da mHasMatch = TRUE; } -void LLPanelGroupMembersSubTab::onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLUUID& id, const LLAvatarName& av_name) +void LLPanelGroupMembersSubTab::onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLAvatarName& av_name) { - // Update ID is used to determine whether member whose id is passed - // into onNameCache() was passed after current or previous user-initiated update. - // This is needed to avoid probable duplication of members in list after changing filter - // or adding of members of another group if gets for their names were called on - // previous update. If this id is from get() called from older update, - // we do nothing. - if (mUdpateSessionID != update_id) return; - - if (!member) + LLGroupMgrGroupData* gdatap = LLGroupMgr::getInstance()->getGroupData(mGroupID); + if (!gdatap + || gdatap->getMemberVersion() != update_id + || !member) { return; } @@ -1618,7 +1615,7 @@ void LLPanelGroupMembersSubTab::onNameCache(const LLUUID& update_id, LLGroupMemb // trying to avoid unnecessary hash lookups if (matchesSearchFilter(av_name.getLegacyName())) { - addMemberToList(id, member); + addMemberToList(member); if(!mMembersList->getEnabled()) { mMembersList->setEnabled(TRUE); @@ -1672,14 +1669,14 @@ void LLPanelGroupMembersSubTab::updateMembers() { if (matchesSearchFilter(av_name.getLegacyName())) { - addMemberToList(mMemberProgress->first, mMemberProgress->second); + addMemberToList(mMemberProgress->second); } } else { // If name is not cached, onNameCache() should be called when it is cached and add this member to list. 
LLAvatarNameCache::get(mMemberProgress->first, boost::bind(&LLPanelGroupMembersSubTab::onNameCache, - this, mUdpateSessionID, mMemberProgress->second, _1, _2)); + this, gdatap->getMemberVersion(), mMemberProgress->second, _2)); } } diff --git a/indra/newview/llpanelgrouproles.h b/indra/newview/llpanelgrouproles.h index 8b454e020a..bead8bd85b 100644 --- a/indra/newview/llpanelgrouproles.h +++ b/indra/newview/llpanelgrouproles.h @@ -187,8 +187,8 @@ public: virtual void setGroupID(const LLUUID& id); - void addMemberToList(LLUUID id, LLGroupMemberData* data); - void onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLUUID& id, const LLAvatarName& av_name); + void addMemberToList(LLGroupMemberData* data); + void onNameCache(const LLUUID& update_id, LLGroupMemberData* member, const LLAvatarName& av_name); protected: typedef std::map<LLUUID, LLRoleMemberChangeType> role_change_data_map_t; @@ -210,9 +210,6 @@ protected: BOOL mPendingMemberUpdate; BOOL mHasMatch; - // This id is generated after each user initiated member list update(opening Roles or changing filter) - LLUUID mUdpateSessionID; - member_role_changes_map_t mMemberRoleChangeData; U32 mNumOwnerAdditions; diff --git a/indra/newview/lltexturecache.cpp b/indra/newview/lltexturecache.cpp index a61e2d5c86..305f6fca0f 100644 --- a/indra/newview/lltexturecache.cpp +++ b/indra/newview/lltexturecache.cpp @@ -1861,7 +1861,12 @@ LLPointer<LLImageRaw> LLTextureCache::readFromFastCache(const LLUUID& id, S32& d mFastCachep->seek(APR_SET, offset); - llassert_always(mFastCachep->read(head, TEXTURE_FAST_CACHE_ENTRY_OVERHEAD) == TEXTURE_FAST_CACHE_ENTRY_OVERHEAD); + if(mFastCachep->read(head, TEXTURE_FAST_CACHE_ENTRY_OVERHEAD) != TEXTURE_FAST_CACHE_ENTRY_OVERHEAD) + { + //cache corrupted or under thread race condition + closeFastCache(); + return NULL; + } S32 image_size = head[0] * head[1] * head[2]; if(!image_size) //invalid @@ -1872,7 +1877,13 @@ LLPointer<LLImageRaw> LLTextureCache::readFromFastCache(const LLUUID& id, S32& d discardlevel = head[3]; data = (U8*)ALLOCATE_MEM(LLImageBase::getPrivatePool(), image_size); - llassert_always(mFastCachep->read(data, image_size) == image_size); + if(mFastCachep->read(data, image_size) != image_size) + { + FREE_MEM(LLImageBase::getPrivatePool(), data); + closeFastCache(); + return NULL; + } + closeFastCache(); } LLPointer<LLImageRaw> raw = new LLImageRaw(data, head[0], head[1], head[2], true); @@ -1926,7 +1937,11 @@ bool LLTextureCache::writeToFastCache(S32 id, LLPointer<LLImageRaw> raw, S32 dis openFastCache(); mFastCachep->seek(APR_SET, offset); - llassert_always(mFastCachep->write(mFastCachePadBuffer, TEXTURE_FAST_CACHE_ENTRY_SIZE) == TEXTURE_FAST_CACHE_ENTRY_SIZE); + + //no need to do this assertion check. When it fails, let it fail quietly. + //this failure could happen because other viewer removes the fast cache file when clearing cache. 
+ //--> llassert_always(mFastCachep->write(mFastCachePadBuffer, TEXTURE_FAST_CACHE_ENTRY_SIZE) == TEXTURE_FAST_CACHE_ENTRY_SIZE); + mFastCachep->write(mFastCachePadBuffer, TEXTURE_FAST_CACHE_ENTRY_SIZE); closeFastCache(true); } diff --git a/indra/newview/llviewerwindow.cpp b/indra/newview/llviewerwindow.cpp index c072a82a4b..010bb0f398 100755 --- a/indra/newview/llviewerwindow.cpp +++ b/indra/newview/llviewerwindow.cpp @@ -4199,14 +4199,48 @@ BOOL LLViewerWindow::rawSnapshot(LLImageRaw *raw, S32 image_width, S32 image_hei image_height = llmin(image_height, window_height); } + S32 original_width = 0; + S32 original_height = 0; + bool reset_deferred = false; + + LLRenderTarget scratch_space; + F32 scale_factor = 1.0f ; if (!keep_window_aspect || (image_width > window_width) || (image_height > window_height)) { - // if image cropping or need to enlarge the scene, compute a scale_factor - F32 ratio = llmin( (F32)window_width / image_width , (F32)window_height / image_height) ; - snapshot_width = (S32)(ratio * image_width) ; - snapshot_height = (S32)(ratio * image_height) ; - scale_factor = llmax(1.0f, 1.0f / ratio) ; + if ((image_width > window_width || image_height > window_height) && LLPipeline::sRenderDeferred && !show_ui) + { + if (scratch_space.allocate(image_width, image_height, GL_RGBA, true, true)) + { + original_width = gPipeline.mDeferredScreen.getWidth(); + original_height = gPipeline.mDeferredScreen.getHeight(); + + if (gPipeline.allocateScreenBuffer(image_width, image_height)) + { + window_width = image_width; + window_height = image_height; + snapshot_width = image_width; + snapshot_height = image_height; + reset_deferred = true; + mWorldViewRectRaw.set(0, image_height, image_width, 0); + scratch_space.bindTarget(); + } + else + { + scratch_space.release(); + gPipeline.allocateScreenBuffer(original_width, original_height); + } + } + } + + if (!reset_deferred) + { + // if image cropping or need to enlarge the scene, compute a scale_factor + F32 ratio = llmin( (F32)window_width / image_width , (F32)window_height / image_height) ; + snapshot_width = (S32)(ratio * image_width) ; + snapshot_height = (S32)(ratio * image_height) ; + scale_factor = llmax(1.0f, 1.0f / ratio) ; + } } if (show_ui && scale_factor > 1.f) @@ -4395,11 +4429,20 @@ BOOL LLViewerWindow::rawSnapshot(LLImageRaw *raw, S32 image_width, S32 image_hei gPipeline.resetDrawOrders(); } + if (reset_deferred) + { + mWorldViewRectRaw = window_rect; + scratch_space.flush(); + scratch_space.release(); + gPipeline.allocateScreenBuffer(original_width, original_height); + + } + if (high_res) { send_agent_resume(); } - + return ret; } diff --git a/indra/newview/pipeline.cpp b/indra/newview/pipeline.cpp index b24e94256f..bf3d43ced9 100644 --- a/indra/newview/pipeline.cpp +++ b/indra/newview/pipeline.cpp @@ -792,18 +792,57 @@ void LLPipeline::allocatePhysicsBuffer() } } -void LLPipeline::allocateScreenBuffer(U32 resX, U32 resY) +bool LLPipeline::allocateScreenBuffer(U32 resX, U32 resY) { refreshCachedSettings(); - U32 samples = RenderFSAASamples; + + bool save_settings = sRenderDeferred; + if (save_settings) + { + // Set this flag in case we crash while resizing window or allocating space for deferred rendering targets + gSavedSettings.setBOOL("RenderInitError", TRUE); + gSavedSettings.saveToFile( gSavedSettings.getString("ClientSettingsFile"), TRUE ); + } + + eFBOStatus ret = doAllocateScreenBuffer(resX, resY); + + if (save_settings) + { + // don't disable shaders on next session + gSavedSettings.setBOOL("RenderInitError", FALSE); + 
gSavedSettings.saveToFile( gSavedSettings.getString("ClientSettingsFile"), TRUE ); + } + + if (ret == FBO_FAILURE) + { //FAILSAFE: screen buffer allocation failed, disable deferred rendering if it's enabled + //NOTE: if the session closes successfully after this call, deferred rendering will be + // disabled on future sessions + if (LLPipeline::sRenderDeferred) + { + gSavedSettings.setBOOL("RenderDeferred", FALSE); + LLPipeline::refreshCachedSettings(); + } + } + + return ret == FBO_SUCCESS_FULLRES; +} - //try to allocate screen buffers at requested resolution and samples + +LLPipeline::eFBOStatus LLPipeline::doAllocateScreenBuffer(U32 resX, U32 resY) +{ + // try to allocate screen buffers at requested resolution and samples // - on failure, shrink number of samples and try again // - if not multisampled, shrink resolution and try again (favor X resolution over Y) // Make sure to call "releaseScreenBuffers" after each failure to cleanup the partially loaded state + U32 samples = RenderFSAASamples; + + eFBOStatus ret = FBO_SUCCESS_FULLRES; if (!allocateScreenBuffer(resX, resY, samples)) { + //failed to allocate at requested specification, return false + ret = FBO_FAILURE; + releaseScreenBuffers(); //reduce number of samples while (samples > 0) @@ -811,7 +850,7 @@ void LLPipeline::allocateScreenBuffer(U32 resX, U32 resY) samples /= 2; if (allocateScreenBuffer(resX, resY, samples)) { //success - return; + return FBO_SUCCESS_LOWRES; } releaseScreenBuffers(); } @@ -824,22 +863,23 @@ void LLPipeline::allocateScreenBuffer(U32 resX, U32 resY) resY /= 2; if (allocateScreenBuffer(resX, resY, samples)) { - return; + return FBO_SUCCESS_LOWRES; } releaseScreenBuffers(); resX /= 2; if (allocateScreenBuffer(resX, resY, samples)) { - return; + return FBO_SUCCESS_LOWRES; } releaseScreenBuffers(); } llwarns << "Unable to allocate screen buffer at any resolution!" 
<< llendl; } -} + return ret; +} bool LLPipeline::allocateScreenBuffer(U32 resX, U32 resY, U32 samples) { @@ -867,10 +907,6 @@ bool LLPipeline::allocateScreenBuffer(U32 resX, U32 resY, U32 samples) if (LLPipeline::sRenderDeferred) { - // Set this flag in case we crash while resizing window or allocating space for deferred rendering targets - gSavedSettings.setBOOL("RenderInitError", TRUE); - gSavedSettings.saveToFile( gSavedSettings.getString("ClientSettingsFile"), TRUE ); - S32 shadow_detail = RenderShadowDetail; BOOL ssao = RenderDeferredSSAO; @@ -882,7 +918,7 @@ bool LLPipeline::allocateScreenBuffer(U32 resX, U32 resY, U32 samples) if (!mScreen.allocate(resX, resY, GL_RGBA, FALSE, FALSE, LLTexUnit::TT_RECT_TEXTURE, FALSE, samples)) return false; if (samples > 0) { - if (!mFXAABuffer.allocate(nhpo2(resX), nhpo2(resY), GL_RGBA, FALSE, FALSE, LLTexUnit::TT_TEXTURE, FALSE, samples)) return false; + if (!mFXAABuffer.allocate(resX, resY, GL_RGBA, FALSE, FALSE, LLTexUnit::TT_TEXTURE, FALSE, samples)) return false; } else { @@ -916,7 +952,7 @@ bool LLPipeline::allocateScreenBuffer(U32 resX, U32 resY, U32 samples) } } - U32 width = nhpo2(U32(resX*scale))/2; + U32 width = (U32) (resX*scale); U32 height = width; if (shadow_detail > 1) @@ -935,9 +971,11 @@ bool LLPipeline::allocateScreenBuffer(U32 resX, U32 resY, U32 samples) } } - // don't disable shaders on next session - gSavedSettings.setBOOL("RenderInitError", FALSE); - gSavedSettings.saveToFile( gSavedSettings.getString("ClientSettingsFile"), TRUE ); + //HACK make screenbuffer allocations start failing after 30 seconds + if (gSavedSettings.getBOOL("SimulateFBOFailure")) + { + return false; + } } else { @@ -7145,11 +7183,11 @@ void LLPipeline::renderBloom(BOOL for_snapshot, F32 zoom_factor, int subfield) gGlowProgram.unbind(); - if (LLRenderTarget::sUseFBO) + /*if (LLRenderTarget::sUseFBO) { LLFastTimer ftm(FTM_RENDER_BLOOM_FBO); glBindFramebuffer(GL_FRAMEBUFFER, 0); - } + }*/ gGLViewport[0] = gViewerWindow->getWorldViewRectRaw().mLeft; gGLViewport[1] = gViewerWindow->getWorldViewRectRaw().mBottom; @@ -8025,10 +8063,6 @@ void LLPipeline::renderDeferredLighting() gGL.popMatrix(); stop_glerror(); - //copy depth and stencil from deferred screen - //mScreen.copyContents(mDeferredScreen, 0, 0, mDeferredScreen.getWidth(), mDeferredScreen.getHeight(), - // 0, 0, mScreen.getWidth(), mScreen.getHeight(), GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT, GL_NEAREST); - mScreen.bindTarget(); // clear color buffer here - zeroing alpha (glow) is important or it will accumulate against sky glClearColor(0,0,0,0); @@ -8800,8 +8834,6 @@ void LLPipeline::generateWaterReflection(LLCamera& camera_in) } last_update = LLDrawPoolWater::sNeedsReflectionUpdate && LLDrawPoolWater::sNeedsDistortionUpdate; - LLRenderTarget::unbindTarget(); - LLPipeline::sReflectionRender = FALSE; if (!LLRenderTarget::sUseFBO) diff --git a/indra/newview/pipeline.h b/indra/newview/pipeline.h index 5e69d4a951..a8db93585e 100644 --- a/indra/newview/pipeline.h +++ b/indra/newview/pipeline.h @@ -119,8 +119,25 @@ public: void createGLBuffers(); void createLUTBuffers(); - void allocateScreenBuffer(U32 resX, U32 resY); + //allocate the largest screen buffer possible up to resX, resY + //returns true if full size buffer allocated, false if some other size is allocated + bool allocateScreenBuffer(U32 resX, U32 resY); + + typedef enum { + FBO_SUCCESS_FULLRES = 0, + FBO_SUCCESS_LOWRES, + FBO_FAILURE + } eFBOStatus; + +private: + //implementation of above, wrapped for easy error handling + eFBOStatus 
doAllocateScreenBuffer(U32 resX, U32 resY); +public: + + //attempt to allocate screen buffers at resX, resY + //returns true if allocation successful, false otherwise bool allocateScreenBuffer(U32 resX, U32 resY, U32 samples); + void allocatePhysicsBuffer(); void resetVertexBuffers(LLDrawable* drawable); diff --git a/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml b/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml index f7666bdc4c..849f3ef73d 100644 --- a/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml +++ b/indra/newview/skins/default/xui/en/panel_preferences_graphics1.xml @@ -55,30 +55,57 @@ name="LowGraphicsDivet" top_delta="-2" width="2" /> + <icon + color="0.12 0.12 0.12 1" + height="14" + image_name="Rounded_Square" + layout="topleft" + left_pad="41" + name="LowMidraphicsDivet" + top_delta="-2" + width="2" /> <icon color="0.12 0.12 0.12 1" height="14" image_name="Rounded_Square" layout="topleft" - left_pad="83" + left_pad="41" name="MidGraphicsDivet" top_delta="0" width="2" /> + <icon + color="0.12 0.12 0.12 1" + height="14" + image_name="Rounded_Square" + layout="topleft" + left_pad="41" + name="MidHighGraphicsDivet" + top_delta="0" + width="2" /> <icon color="0.12 0.12 0.12 1" height="14" image_name="Rounded_Square" layout="topleft" - left_pad="85" + left_pad="41" name="HighGraphicsDivet" top_delta="0" width="2" /> + <icon + color="0.12 0.12 0.12 1" + height="14" + image_name="Rounded_Square" + layout="topleft" + left_pad="41" + name="HighUltraGraphicsDivet" + top_delta="0" + width="2" /> <icon color="0.12 0.12 0.12 1" height="14" image_name="Rounded_Square" layout="topleft" - left_pad="83" + left_pad="41" name="UltraGraphicsDivet" top_delta="0" width="2" /> @@ -91,7 +118,7 @@ initial_value="0" layout="topleft" left="120" - max_val="3" + max_val="6" name="QualityPerformanceSelection" show_text="false" top_delta="-2" @@ -120,12 +147,12 @@ height="12" layout="topleft" left_delta="87" - name="ShadersPrefText2" + name="ShadersPrefText3" top_delta="0" width="80"> Mid </text> - <text + <text type="string" length="1" follows="left|top" @@ -136,8 +163,8 @@ name="ShadersPrefText3" top_delta="0" width="80"> - High - </text> + High + </text> <text type="string" length="1" |