Diffstat (limited to 'indra/llcommon')
-rwxr-xr-x  indra/llcommon/CMakeLists.txt        |   2
-rw-r--r--  indra/llcommon/llalignedarray.h      | 139
-rwxr-xr-x  indra/llcommon/llapr.h               |  15
-rwxr-xr-x  indra/llcommon/llcommon.cpp          |   2
-rwxr-xr-x  indra/llcommon/llcriticaldamp.cpp    |   1
-rwxr-xr-x  indra/llcommon/llerror.h             |   6
-rwxr-xr-x  indra/llcommon/lleventapi.h          |   1
-rwxr-xr-x  indra/llcommon/lleventtimer.h        |   1
-rwxr-xr-x  indra/llcommon/llinitparam.h         |   2
-rwxr-xr-x  indra/llcommon/llinstancetracker.h   |  12
-rwxr-xr-x  indra/llcommon/llleap.h              |   1
-rwxr-xr-x  indra/llcommon/llmemory.h            | 129
-rwxr-xr-x  indra/llcommon/llrefcount.h          |  36
-rw-r--r--  indra/llcommon/llstaticstringtable.h |  82
-rwxr-xr-x  indra/llcommon/llstringtable.h       |   8
-rwxr-xr-x  indra/llcommon/llthread.cpp          |  11
-rwxr-xr-x  indra/llcommon/llthread.h            |   2
17 files changed, 390 insertions, 60 deletions
diff --git a/indra/llcommon/CMakeLists.txt b/indra/llcommon/CMakeLists.txt
index e2e2cb436d..0c7575c9a5 100755
--- a/indra/llcommon/CMakeLists.txt
+++ b/indra/llcommon/CMakeLists.txt
@@ -114,6 +114,7 @@ set(llcommon_HEADER_FILES
     fix_macros.h
     indra_constants.h
     linden_common.h
+    llalignedarray.h
     llallocator.h
     llallocator_heap_profile.h
     llapp.h
@@ -199,6 +200,7 @@ set(llcommon_HEADER_FILES
     llstrider.h
     llstring.h
     llstringtable.h
+    llstaticstringtable.h
     llsys.h
     llthread.h
     llthreadlocalstorage.h
diff --git a/indra/llcommon/llalignedarray.h b/indra/llcommon/llalignedarray.h
new file mode 100644
index 0000000000..6f18bfe25c
--- /dev/null
+++ b/indra/llcommon/llalignedarray.h
@@ -0,0 +1,139 @@
+/**
+ * @file llalignedarray.h
+ * @brief A static array which obeys alignment restrictions and mimics std::vector accessors.
+ *
+ * $LicenseInfo:firstyear=2002&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2010, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_LLALIGNEDARRAY_H
+#define LL_LLALIGNEDARRAY_H
+
+#include "llmemory.h"
+
+template <class T, U32 alignment>
+class LLAlignedArray
+{
+public:
+    T* mArray;
+    U32 mElementCount;
+    U32 mCapacity;
+
+    LLAlignedArray();
+    ~LLAlignedArray();
+
+    void push_back(const T& elem);
+    U32 size() const { return mElementCount; }
+    void resize(U32 size);
+    T* append(S32 N);
+    T& operator[](int idx);
+    const T& operator[](int idx) const;
+};
+
+template <class T, U32 alignment>
+LLAlignedArray<T, alignment>::LLAlignedArray()
+{
+    llassert(alignment >= 16);
+    mArray = NULL;
+    mElementCount = 0;
+    mCapacity = 0;
+}
+
+template <class T, U32 alignment>
+LLAlignedArray<T, alignment>::~LLAlignedArray()
+{
+    ll_aligned_free(alignment, mArray);
+    mArray = NULL;
+    mElementCount = 0;
+    mCapacity = 0;
+}
+
+template <class T, U32 alignment>
+void LLAlignedArray<T, alignment>::push_back(const T& elem)
+{
+    T* old_buf = NULL;
+    if (mCapacity <= mElementCount)
+    {
+        mCapacity++;
+        mCapacity *= 2;
+        T* new_buf = (T*) ll_aligned_malloc(alignment, mCapacity*sizeof(T));
+        if (mArray)
+        {
+            ll_memcpy_nonaliased_aligned_16((char*)new_buf, (char*)mArray, sizeof(T)*mElementCount);
+        }
+        old_buf = mArray;
+        mArray = new_buf;
+    }
+
+    mArray[mElementCount++] = elem;
+
+    //delete old array here to prevent error on a.push_back(a[0])
+    ll_aligned_free(alignment, old_buf);
+}
+
+template <class T, U32 alignment>
+void LLAlignedArray<T, alignment>::resize(U32 size)
+{
+    if (mCapacity < size)
+    {
+        mCapacity = size+mCapacity*2;
+        T* new_buf = mCapacity > 0 ? (T*) ll_aligned_malloc(alignment, mCapacity*sizeof(T)) : NULL;
+        if (mArray)
+        {
+            ll_memcpy_nonaliased_aligned_16((char*) new_buf, (char*) mArray, sizeof(T)*mElementCount);
+            ll_aligned_free(alignment, mArray);
+        }
+
+        /*for (U32 i = mElementCount; i < mCapacity; ++i)
+        {
+            new(new_buf+i) T();
+        }*/
+        mArray = new_buf;
+    }
+
+    mElementCount = size;
+}
+
+
+template <class T, U32 alignment>
+T& LLAlignedArray<T, alignment>::operator[](int idx)
+{
+    llassert(idx < mElementCount);
+    return mArray[idx];
+}
+
+template <class T, U32 alignment>
+const T& LLAlignedArray<T, alignment>::operator[](int idx) const
+{
+    llassert(idx < mElementCount);
+    return mArray[idx];
+}
+
+template <class T, U32 alignment>
+T* LLAlignedArray<T, alignment>::append(S32 N)
+{
+    U32 sz = size();
+    resize(sz+N);
+    return &((*this)[sz]);
+}
+
+#endif
+
diff --git a/indra/llcommon/llapr.h b/indra/llcommon/llapr.h
index e9b13c5919..b1b0fc4718 100755
--- a/indra/llcommon/llapr.h
+++ b/indra/llcommon/llapr.h
@@ -172,18 +172,21 @@ public:
     LLAtomic32<Type>(Type x) {apr_atomic_set32(&mData, apr_uint32_t(x)); };
     ~LLAtomic32<Type>() {};
 
-    operator const Type() { return get(); }
+    operator const Type() { apr_uint32_t data = apr_atomic_read32(&mData); return Type(data); }
+
+    Type CurrentValue() const { apr_uint32_t data = apr_atomic_read32(const_cast< volatile apr_uint32_t* >(&mData)); return Type(data); }
+
     Type operator =(const Type& x) { apr_atomic_set32(&mData, apr_uint32_t(x)); return Type(mData); }
     void operator -=(Type x) { apr_atomic_sub32(&mData, apr_uint32_t(x)); }
     void operator +=(Type x) { apr_atomic_add32(&mData, apr_uint32_t(x)); }
     Type operator ++(int) { return apr_atomic_inc32(&mData); } // Type++
-    Type operator ++() { apr_atomic_inc32(&mData); return get(); } // ++Type
-    Type operator --(int) { const Type result(get()); apr_atomic_dec32(&mData); return result; } // Type--
-    Type operator --() { return apr_atomic_dec32(&mData); } // approximately --Type (0 if final is 0, non-zero otherwise)
+    Type operator --(int) { return apr_atomic_dec32(&mData); } // approximately --Type (0 if final is 0, non-zero otherwise)
+
+    Type operator ++() { return apr_atomic_inc32(&mData); } // Type++
+    Type operator --() { return apr_atomic_dec32(&mData); } // approximately --Type (0 if final is 0, non-zero otherwise)
 
 private:
-    const Type get() { apr_uint32_t data = apr_atomic_read32(&mData); return Type(data); }
-    apr_uint32_t mData;
+    volatile apr_uint32_t mData;
 };
 
 typedef LLAtomic32<U32> LLAtomicU32;
diff --git a/indra/llcommon/llcommon.cpp b/indra/llcommon/llcommon.cpp
index 96ec0cdefe..19642b0982 100755
--- a/indra/llcommon/llcommon.cpp
+++ b/indra/llcommon/llcommon.cpp
@@ -48,7 +48,7 @@ void LLCommon::initClass()
     }
     LLTimer::initClass();
     LLThreadSafeRefCount::initThreadSafeRefCount();
-
+    assert_main_thread(); // Make sure we record the main thread
     if (!sMasterThreadRecorder)
     {
         sMasterThreadRecorder = new LLTrace::ThreadRecorder();
diff --git a/indra/llcommon/llcriticaldamp.cpp b/indra/llcommon/llcriticaldamp.cpp
index 9f4cb09000..54be855f67 100755
--- a/indra/llcommon/llcriticaldamp.cpp
+++ b/indra/llcommon/llcriticaldamp.cpp
@@ -118,3 +118,4 @@ F32 LLSmoothInterpolation::calcInterpolant(F32 time_constant)
 {
     return llclamp(1.f - powf(2.f, -sTimeDelta / time_constant), 0.f, 1.f);
 }
+
diff --git a/indra/llcommon/llerror.h b/indra/llcommon/llerror.h
index b8ad509d88..046cd69543 100755
--- a/indra/llcommon/llerror.h
+++ b/indra/llcommon/llerror.h
@@ -304,7 +304,7 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
 
 // this macro uses a one-shot do statement to avoid parsing errors when writing control flow statements
 // without braces:
-// if (condition) LL_INFOS() << "True" << LLENDL; else LLINFOS() << "False" << LLENDL
+// if (condition) LL_INFOS() << "True" << LLENDL; else LL_INFOS()() << "False" << LLENDL
 
 #define lllog(level, once, ...) \
     do { \
@@ -358,8 +358,8 @@ typedef LLError::NoClassInfo _LL_CLASS_TO_LOG;
 
 // DEPRECATED: Use the new macros that allow tags and *look* like macros.
 #define lldebugs LL_COMPILE_TIME_MESSAGE("Warning: lldebugs deprecated, use LL_DEBUGS() instead") LL_DEBUGS()
-#define llinfos LL_COMPILE_TIME_MESSAGE("Warning: llinfos deprecated, use LL_INFOS() instead") LL_INFOS()
-#define llwarns LL_COMPILE_TIME_MESSAGE("Warning: llwarns deprecated, use LL_WARNS() instead") LL_WARNS()
+#define llinfos LL_COMPILE_TIME_MESSAGE("Warning: LL_INFOS() deprecated, use LL_INFOS() instead") LL_INFOS()
+#define llwarns LL_COMPILE_TIME_MESSAGE("Warning: LL_WARNS() deprecated, use LL_WARNS() instead") LL_WARNS()
 #define llerrs LL_COMPILE_TIME_MESSAGE("Warning: llerrs deprecated, use LL_ERRS() instead") LL_ERRS()
 #define llcont LL_COMPILE_TIME_MESSAGE("Warning: llcont deprecated, use LL_CONT instead") LL_CONT
 #define llendl LL_COMPILE_TIME_MESSAGE("Warning: llendl deprecated, use LL_ENDL instead") LL_ENDL
diff --git a/indra/llcommon/lleventapi.h b/indra/llcommon/lleventapi.h
index 1a37d780b6..5991fe8fd5 100755
--- a/indra/llcommon/lleventapi.h
+++ b/indra/llcommon/lleventapi.h
@@ -47,6 +47,7 @@ class LL_COMMON_API LLEventAPI: public LLDispatchListener,
     typedef LLInstanceTracker<LLEventAPI, std::string> ibase;
 
 public:
+
     /**
      * @param name LLEventPump name on which this LLEventAPI will listen. This
      * also serves as the LLInstanceTracker instance key.
diff --git a/indra/llcommon/lleventtimer.h b/indra/llcommon/lleventtimer.h
index 7f42623d01..dc918121e1 100755
--- a/indra/llcommon/lleventtimer.h
+++ b/indra/llcommon/lleventtimer.h
@@ -36,6 +36,7 @@ class LL_COMMON_API LLEventTimer : public LLInstanceTracker<LLEventTimer>
 {
 public:
+
     LLEventTimer(F32 period); // period is the amount of time between each call to tick() in seconds
     LLEventTimer(const LLDate& time);
     virtual ~LLEventTimer();
 
diff --git a/indra/llcommon/llinitparam.h b/indra/llcommon/llinitparam.h
index 812071efdd..880cd0a370 100755
--- a/indra/llcommon/llinitparam.h
+++ b/indra/llcommon/llinitparam.h
@@ -2068,7 +2068,7 @@ namespace LLInitParam
     class Mandatory : public TypedParam<T, NAME_VALUE_LOOKUP, false>
     {
         typedef TypedParam<T, NAME_VALUE_LOOKUP, false> super_t;
-        typedef Mandatory<T, NAME_VALUE_LOOKUP> self_t;
+        typedef Mandatory<T, NAME_VALUE_LOOKUP> self_t;
         typedef typename super_t::value_t value_t;
         typedef typename super_t::default_value_t default_value_t;
 
diff --git a/indra/llcommon/llinstancetracker.h b/indra/llcommon/llinstancetracker.h
index 349ad530af..f2b982c4c3 100755
--- a/indra/llcommon/llinstancetracker.h
+++ b/indra/llcommon/llinstancetracker.h
@@ -60,6 +60,8 @@ protected:
     };
 };
 
+LL_COMMON_API void assert_main_thread();
+
 /// This mix-in class adds support for tracking all instances of the specified class parameter T
 /// The (optional) key associates a value of type KEY with a given instance of T, for quick lookup
 /// If KEY is not provided, then instances are stored in a simple set
@@ -68,7 +70,7 @@ protected:
 template<typename T, typename KEY = void>
 class LLInstanceTracker : public LLInstanceTrackerBase
 {
-    typedef LLInstanceTracker<T, KEY> MyT;
+    typedef LLInstanceTracker<T, KEY> self_t;
     typedef typename std::map<KEY, T*> InstanceMap;
     struct StaticData: public StaticBase
     {
@@ -210,7 +212,11 @@ private:
     }
     void remove_()
     {
-        getMap_().erase(mInstanceKey);
+        typename InstanceMap::iterator iter = getMap_().find(mInstanceKey);
+        if (iter != getMap_().end())
+        {
+            getMap_().erase(iter);
+        }
     }
 
 private:
@@ -222,7 +228,7 @@ private:
 template<typename T>
 class LLInstanceTracker<T, void> : public LLInstanceTrackerBase
 {
-    typedef LLInstanceTracker<T, void> MyT;
+    typedef LLInstanceTracker<T, void> self_t;
     typedef typename std::set<T*> InstanceSet;
     struct StaticData: public StaticBase
     {
diff --git a/indra/llcommon/llleap.h b/indra/llcommon/llleap.h
index 1a1ad23d39..e33f25e530 100755
--- a/indra/llcommon/llleap.h
+++ b/indra/llcommon/llleap.h
@@ -32,6 +32,7 @@ class LL_COMMON_API LLLeap: public LLInstanceTracker<LLLeap>
 {
 public:
+
     /**
      * Pass a brief string description, mostly for logging purposes. The desc
     * need not be unique, but obviously the clearer we can make it, the
diff --git a/indra/llcommon/llmemory.h b/indra/llcommon/llmemory.h
index 8daea255f9..5ed4ea7d9e 100755
--- a/indra/llcommon/llmemory.h
+++ b/indra/llcommon/llmemory.h
@@ -40,6 +40,7 @@ class LLMutex ;
 #define LL_CHECK_MEMORY
 #endif
 
+
 #if LL_WINDOWS
 #define LL_ALIGN_OF __alignof
 #else
@@ -54,19 +55,70 @@ class LLMutex ;
 #define LL_DEFAULT_HEAP_ALIGN 8
 #endif
 
+
+LL_COMMON_API void ll_assert_aligned_func(uintptr_t ptr,U32 alignment);
+
+#ifdef SHOW_ASSERT
+#define ll_assert_aligned(ptr,alignment) ll_assert_aligned_func(reinterpret_cast<uintptr_t>(ptr),((U32)alignment))
+#else
+#define ll_assert_aligned(ptr,alignment)
+#endif
+
+#include <xmmintrin.h>
+
+template <typename T> T* LL_NEXT_ALIGNED_ADDRESS(T* address)
+{
+    return reinterpret_cast<T*>(
+        (reinterpret_cast<uintptr_t>(address) + 0xF) & ~0xF);
+}
+
+template <typename T> T* LL_NEXT_ALIGNED_ADDRESS_64(T* address)
+{
+    return reinterpret_cast<T*>(
+        (reinterpret_cast<uintptr_t>(address) + 0x3F) & ~0x3F);
+}
+
+#if LL_LINUX || LL_DARWIN
+
+#define LL_ALIGN_PREFIX(x)
+#define LL_ALIGN_POSTFIX(x) __attribute__((aligned(x)))
+
+#elif LL_WINDOWS
+
+#define LL_ALIGN_PREFIX(x) __declspec(align(x))
+#define LL_ALIGN_POSTFIX(x)
+
+#else
+#error "LL_ALIGN_PREFIX and LL_ALIGN_POSTFIX undefined"
+#endif
+
+#define LL_ALIGN_16(var) LL_ALIGN_PREFIX(16) var LL_ALIGN_POSTFIX(16)
+
+
 inline void* ll_aligned_malloc_fallback( size_t size, int align )
 {
+#if defined(LL_WINDOWS)
+    return _aligned_malloc(size, align);
+#else
     void* mem = malloc( size + (align - 1) + sizeof(void*) );
     char* aligned = ((char*)mem) + sizeof(void*);
     aligned += align - ((uintptr_t)aligned & (align - 1));
 
     ((void**)aligned)[-1] = mem;
     return aligned;
+#endif
 }
 
 inline void ll_aligned_free_fallback( void* ptr )
 {
-    free( ((void**)ptr)[-1] );
+#if defined(LL_WINDOWS)
+    _aligned_free(ptr);
+#else
+    if (ptr)
+    {
+        free( ((void**)ptr)[-1] );
+    }
+#endif
 }
 
 #if !LL_USE_TCMALLOC
@@ -192,7 +244,76 @@ LL_FORCE_INLINE void ll_aligned_free(size_t alignment, void* ptr)
     }
 }
 
+// Copy words 16-byte blocks from src to dst. Source and destination MUST NOT OVERLAP.
+// Source and dest must be 16-byte aligned and size must be multiple of 16.
+//
+inline void ll_memcpy_nonaliased_aligned_16(char* __restrict dst, const char* __restrict src, size_t bytes)
+{
+    assert(src != NULL);
+    assert(dst != NULL);
+    assert(bytes > 0);
+    assert((bytes % sizeof(F32))== 0);
+    ll_assert_aligned(src,16);
+    ll_assert_aligned(dst,16);
+    assert((src < dst) ? ((src + bytes) < dst) : ((dst + bytes) < src));
+    assert(bytes%16==0);
+
+    char* end = dst + bytes;
+
+    if (bytes > 64)
+    {
+
+        // Find start of 64b aligned area within block
+        //
+        void* begin_64 = LL_NEXT_ALIGNED_ADDRESS_64(dst);
+
+        //at least 64 bytes before the end of the destination, switch to 16 byte copies
+        void* end_64 = end-64;
+
+        // Prefetch the head of the 64b area now
+        //
+        _mm_prefetch((char*)begin_64, _MM_HINT_NTA);
+        _mm_prefetch((char*)begin_64 + 64, _MM_HINT_NTA);
+        _mm_prefetch((char*)begin_64 + 128, _MM_HINT_NTA);
+        _mm_prefetch((char*)begin_64 + 192, _MM_HINT_NTA);
+
+        // Copy 16b chunks until we're 64b aligned
+        //
+        while (dst < begin_64)
+        {
+
+            _mm_store_ps((F32*)dst, _mm_load_ps((F32*)src));
+            dst += 16;
+            src += 16;
+        }
+
+        // Copy 64b chunks up to your tail
+        //
+        // might be good to shmoo the 512b prefetch offset
+        // (characterize performance for various values)
+        //
+        while (dst < end_64)
+        {
+            _mm_prefetch((char*)src + 512, _MM_HINT_NTA);
+            _mm_prefetch((char*)dst + 512, _MM_HINT_NTA);
+            _mm_store_ps((F32*)dst, _mm_load_ps((F32*)src));
+            _mm_store_ps((F32*)(dst + 16), _mm_load_ps((F32*)(src + 16)));
+            _mm_store_ps((F32*)(dst + 32), _mm_load_ps((F32*)(src + 32)));
+            _mm_store_ps((F32*)(dst + 48), _mm_load_ps((F32*)(src + 48)));
+            dst += 64;
+            src += 64;
+        }
+    }
+    // Copy remainder 16b tail chunks (or ALL 16b chunks for sub-64b copies)
+    //
+    while (dst < end)
+    {
+        _mm_store_ps((F32*)dst, _mm_load_ps((F32*)src));
+        dst += 16;
+        src += 16;
+    }
+}
 
 
 #ifndef __DEBUG_PRIVATE_MEM__
 #define __DEBUG_PRIVATE_MEM__ 0
@@ -557,13 +678,7 @@ void LLPrivateMemoryPoolTester::operator delete[](void* addr)
 
 // LLSingleton moved to llsingleton.h
 
-LL_COMMON_API void ll_assert_aligned_func(uintptr_t ptr,U32 alignment);
 
-#ifdef SHOW_ASSERT
-#define ll_assert_aligned(ptr,alignment) ll_assert_aligned_func(reinterpret_cast<uintptr_t>(ptr),((U32)alignment))
-#else
-#define ll_assert_aligned(ptr,alignment)
-#endif
 
 
 #endif
diff --git a/indra/llcommon/llrefcount.h b/indra/llcommon/llrefcount.h
index 3d59e48f74..72011d04a0 100755
--- a/indra/llcommon/llrefcount.h
+++ b/indra/llcommon/llrefcount.h
@@ -29,6 +29,7 @@
 #include <boost/noncopyable.hpp>
 #include <boost/intrusive_ptr.hpp>
 #include "llmutex.h"
+#include "llapr.h"
 
 #define LL_REF_COUNT_DEBUG 0
 #if LL_REF_COUNT_DEBUG
@@ -110,45 +111,36 @@ public:
     LLThreadSafeRefCount(const LLThreadSafeRefCount&);
     LLThreadSafeRefCount& operator=(const LLThreadSafeRefCount& ref)
     {
-        if (sMutex)
-        {
-            sMutex->lock();
-        }
         mRef = 0;
-        if (sMutex)
-        {
-            sMutex->unlock();
-        }
         return *this;
     }
 
     void ref()
     {
-        if (sMutex) sMutex->lock();
         mRef++;
-        if (sMutex) sMutex->unlock();
     }
 
-    S32 unref()
+    void unref()
     {
         llassert(mRef >= 1);
-        if (sMutex) sMutex->lock();
-        S32 res = --mRef;
-        if (sMutex) sMutex->unlock();
-        if (0 == res)
-        {
-            delete this;
-            return 0;
+        if ((--mRef) == 0) // See note in llapr.h on atomic decrement operator return value.
+        {
+            // If we hit zero, the caller should be the only smart pointer owning the object and we can delete it.
+            // It is technically possible for a vanilla pointer to mess this up, or another thread to
+            // jump in, find this object, create another smart pointer and end up dangling, but if
+            // the code is that bad and not thread-safe, it's trouble already.
+            delete this;
         }
-        return res;
-    }
+    }
+
     S32 getNumRefs() const
     {
-        return mRef;
+        const S32 currentVal = mRef.CurrentValue();
+        return currentVal;
     }
 
 private:
-    S32 mRef;
+    LLAtomic32< S32 > mRef;
 };
 
 /**
diff --git a/indra/llcommon/llstaticstringtable.h b/indra/llcommon/llstaticstringtable.h
new file mode 100644
index 0000000000..d7e0e8a08d
--- /dev/null
+++ b/indra/llcommon/llstaticstringtable.h
@@ -0,0 +1,82 @@
+/**
+ * @file llstringtable.h
+ * @brief The LLStringTable class provides a _fast_ method for finding
+ * unique copies of strings.
+ *
+ * $LicenseInfo:firstyear=2001&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2010, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#ifndef LL_STATIC_STRING_TABLE_H
+#define LL_STATIC_STRING_TABLE_H
+
+#include "lldefs.h"
+#include <boost/unordered_map.hpp>
+#include "llstl.h"
+
+class LLStaticHashedString
+{
+public:
+
+    LLStaticHashedString(const std::string& s)
+    {
+        string_hash = makehash(s);
+        string = s;
+    }
+
+    const std::string& String() const { return string; }
+    size_t Hash() const { return string_hash; }
+
+    bool operator==(const LLStaticHashedString& b) const { return Hash() == b.Hash(); }
+
+protected:
+
+    size_t makehash(const std::string& s)
+    {
+        size_t len = s.size();
+        const char* c = s.c_str();
+        size_t hashval = 0;
+        for (size_t i=0; i<len; i++)
+        {
+            hashval = ((hashval<<5) + hashval) + *c++;
+        }
+        return hashval;
+    }
+
+    std::string string;
+    size_t string_hash;
+};
+
+struct LLStaticStringHasher
+{
+    enum { bucket_size = 8 };
+    size_t operator()(const LLStaticHashedString& key_value) const { return key_value.Hash(); }
+    bool operator()(const LLStaticHashedString& left, const LLStaticHashedString& right) const { return left.Hash() < right.Hash(); }
+};
+
+template< typename MappedObject >
+class LL_COMMON_API LLStaticStringTable
+    : public boost::unordered_map< LLStaticHashedString, MappedObject, LLStaticStringHasher >
+{
+};
+
+#endif
+
diff --git a/indra/llcommon/llstringtable.h b/indra/llcommon/llstringtable.h
index 59d7372ed4..ff09e71677 100755
--- a/indra/llcommon/llstringtable.h
+++ b/indra/llcommon/llstringtable.h
@@ -42,14 +42,6 @@
 //# define STRING_TABLE_HASH_MAP 1
 #endif
 
-#if STRING_TABLE_HASH_MAP
-#   if LL_WINDOWS
-#       include <hash_map>
-#   else
-#       include <ext/hash_map>
-#   endif
-#endif
-
 const U32 MAX_STRINGS_LENGTH = 256;
 
 class LL_COMMON_API LLStringTableEntry
diff --git a/indra/llcommon/llthread.cpp b/indra/llcommon/llthread.cpp
index bcae57fe22..3f2127762e 100755
--- a/indra/llcommon/llthread.cpp
+++ b/indra/llcommon/llthread.cpp
@@ -108,7 +108,8 @@ LL_COMMON_API void assert_main_thread()
     static U32 s_thread_id = LLThread::currentID();
     if (LLThread::currentID() != s_thread_id)
     {
-        LL_ERRS() << "Illegal execution outside main thread." << LL_ENDL;
+        LL_WARNS() << "Illegal execution from thread id " << (S32) LLThread::currentID()
+            << " outside main thread " << (S32) s_thread_id << llendl;
     }
 }
 
@@ -401,15 +402,7 @@ LLThreadSafeRefCount::LLThreadSafeRefCount() :
 
 LLThreadSafeRefCount::LLThreadSafeRefCount(const LLThreadSafeRefCount& src)
 {
-    if (sMutex)
-    {
-        sMutex->lock();
-    }
     mRef = 0;
-    if (sMutex)
-    {
-        sMutex->unlock();
-    }
 }
 
 LLThreadSafeRefCount::~LLThreadSafeRefCount()
diff --git a/indra/llcommon/llthread.h b/indra/llcommon/llthread.h
index d7abdc4970..ba64d20936 100755
--- a/indra/llcommon/llthread.h
+++ b/indra/llcommon/llthread.h
@@ -161,4 +161,6 @@ public:
 
 //============================================================================
 
+extern LL_COMMON_API void assert_main_thread();
+
 #endif // LL_LLTHREAD_H
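
The new LLAlignedArray container above mimics a small subset of std::vector on top of ll_aligned_malloc/ll_aligned_free. A minimal usage sketch follows; it is not part of the commit, and the function and variable names are illustrative only.

// Hypothetical usage sketch for LLAlignedArray (not part of the diff above).
// The alignment template parameter must be at least 16, per the llassert in the constructor.
#include "llalignedarray.h"

void example_aligned_array()
{
    LLAlignedArray<F32, 16> data;

    data.push_back(1.f);            // grows the buffer geometrically and copies the
    data.push_back(2.f);            // old contents with ll_memcpy_nonaliased_aligned_16

    F32* tail = data.append(4);     // reserves 4 more elements and returns a pointer to them
    for (U32 i = 0; i < 4; ++i)
    {
        tail[i] = 0.f;              // note: resize() does not construct elements, so POD types are safest
    }

    F32 first = data[0];            // operator[] llasserts idx < mElementCount
    (void) first;
}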
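llmemory.h now exposes ll_assert_aligned, the LL_ALIGN_* macros, Windows-aware fallback allocators, and the SSE copy routine ll_memcpy_nonaliased_aligned_16. Below is a sketch of calling the copy helper directly, assuming both buffers come from ll_aligned_malloc; it is not from the diff, and the preconditions (16-byte alignment, non-overlapping ranges, byte count a multiple of 16) mirror the asserts in the function.

// Hypothetical sketch: copying between two 16-byte-aligned buffers.
#include <cstring>
#include "llmemory.h"

void example_aligned_copy()
{
    const size_t bytes = 1024;                          // must be a multiple of 16

    char* src = (char*) ll_aligned_malloc(16, bytes);
    char* dst = (char*) ll_aligned_malloc(16, bytes);

    memset(src, 0xAB, bytes);                           // give the source some contents

    // Buffers must not overlap; both pointers must be 16-byte aligned.
    ll_memcpy_nonaliased_aligned_16(dst, src, bytes);

    ll_aligned_free(16, dst);
    ll_aligned_free(16, src);
}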
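The LL_ALIGN_PREFIX/LL_ALIGN_POSTFIX macros added to llmemory.h paper over the compiler-specific alignment syntax (__declspec(align(n)) on MSVC, __attribute__((aligned(n))) on gcc/clang), and LL_ALIGN_16 wraps the common 16-byte case. A sketch of declaring aligned data with them; the struct and variable names are hypothetical.

// Hypothetical sketch of the new alignment macros (not part of the diff).
#include "llmemory.h"

struct ExampleAlignedBlock
{
    LL_ALIGN_16(F32 mMatrix[16]);   // expands to the compiler-specific 16-byte alignment syntax
    U32 mFlags;
};

// A free-standing variable works the same way, here with 64-byte alignment:
LL_ALIGN_PREFIX(64) F32 gExampleScratch[256] LL_ALIGN_POSTFIX(64);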
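The llapr.h and llrefcount.h changes together replace the mutex around LLThreadSafeRefCount::mRef with an LLAtomic32<S32>. The pattern relies on the decrement operators returning zero only for the call that takes the count to zero (see the comment in llapr.h), so exactly one thread performs the delete. A simplified sketch of the same pattern follows, assuming only the LLAtomic32 wrapper from llapr.h; it is not the viewer class itself.

// Hypothetical sketch of lock-free reference counting with LLAtomic32 (not from the diff).
#include "llapr.h"

class ExampleRefCounted
{
public:
    ExampleRefCounted() : mRef(0) {}
    virtual ~ExampleRefCounted() {}

    void ref()
    {
        mRef++;                     // apr_atomic_inc32 under the hood
    }

    void unref()
    {
        // apr_atomic_dec32 returns 0 only when the count reaches 0, so only
        // one caller can observe the transition and delete the object.
        if (--mRef == 0)
        {
            delete this;
        }
    }

private:
    LLAtomic32<S32> mRef;
};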
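llstaticstringtable.h introduces LLStaticHashedString, which hashes its string once at construction, and LLStaticStringTable, a boost::unordered_map keyed by those pre-hashed strings. A small sketch, assuming S32 as the mapped type; the key name is made up and the table is not tied to any particular subsystem by this diff.

// Hypothetical sketch of the new static string table (not from the diff).
#include "llstaticstringtable.h"

void example_static_string_table()
{
    LLStaticStringTable<S32> table;

    // The hash is computed once here; later lookups reuse the stored value
    // instead of rehashing the character data.
    static const LLStaticHashedString sExampleKey("example_key");

    table[sExampleKey] = 42;

    S32 value = table[sExampleKey];
    (void) value;
}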