From 3e80fa3dbc943de9b784fedc202ba38cf238f46d Mon Sep 17 00:00:00 2001 From: David Parks Date: Mon, 2 Nov 2009 19:55:37 +0000 Subject: Sync up with render-pipeline-7 ignore-dead-branch --- indra/llmath/llvolume.cpp | 350 ++++++++++++++++++++++++++++++++++++++++++- indra/llmath/llvolume.h | 22 ++- indra/llmath/llvolumemgr.cpp | 2 +- indra/llmath/m4math.cpp | 11 ++ indra/llmath/m4math.h | 1 + indra/llmath/v2math.cpp | 15 ++ indra/llmath/v2math.h | 3 + 7 files changed, 392 insertions(+), 12 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index b8ef92f9a9..afa82ed399 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -46,6 +46,9 @@ #include "lldarray.h" #include "llvolume.h" #include "llstl.h" +#include "llsdserialize.h" +#include "zlib/zlib.h" + #define DEBUG_SILHOUETTE_BINORMALS 0 #define DEBUG_SILHOUETTE_NORMALS 0 // TomY: Use this to display normals using the silhouette @@ -1688,7 +1691,7 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; generate(); - if (mParams.getSculptID().isNull()) + if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) { createVolumeFaces(); } @@ -1839,6 +1842,295 @@ BOOL LLVolume::generate() return FALSE; } +bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const +{ + const U8* l = (const U8*) this; + const U8* r = (const U8*) &rhs; + + for (U32 i = 0; i < sizeof(VertexData); ++i) + { + if (l[i] != r[i]) + { + return r[i] < l[i]; + } + } + + return false; +} + +bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const +{ + const U8* l = (const U8*) this; + const U8* r = (const U8*) &rhs; + + for (U32 i = 0; i < sizeof(VertexData); ++i) + { + if (l[i] != r[i]) + { + return false; + } + } + + return true; +} + + +BOOL LLVolume::createVolumeFacesFromFile(const std::string& file_name) +{ + std::ifstream is; + + is.open(file_name.c_str(), std::ifstream::in | std::ifstream::binary); + + BOOL success = createVolumeFacesFromStream(is); + + is.close(); + + return success; +} + +BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) +{ + mSculptLevel = -1; // default is an error occured + + LLSD header; + { + if (!LLSDSerialize::deserialize(header, is, 1024*1024*1024)) + { + llwarns << "not a valid mesh asset!" << llendl; + return FALSE; + } + } + + std::string nm[] = + { + "impostor", + "low_lod", + "medium_lod", + "high_lod" + }; + + S32 lod = llclamp((S32) mDetail, 0, 3); + + while (lod < 4 && header[nm[lod]]["offset"].asInteger() == -1) + { + ++lod; + } + + if (lod >= 4) + { + llwarns << "Couldn't load model for given lod" << llendl; + return FALSE; + } + + is.seekg(header[nm[lod]]["offset"].asInteger(), std::ios_base::cur); + + + U8* result = NULL; + U32 cur_size = 0; + + { + //input stream is now pointing at a zlib compressed block of LLSD + //decompress block + z_stream strm; + + const U32 CHUNK = 65536; + + S32 size = header[nm[lod]]["size"].asInteger(); + U8 *in = new U8[size]; + is.read((char*) in, size); + + U8 out[CHUNK]; + + strm.zalloc = Z_NULL; + strm.zfree = Z_NULL; + strm.opaque = Z_NULL; + strm.avail_in = size; + strm.next_in = in; + + S32 ret = inflateInit(&strm); + + if (ret != Z_OK) + { + llerrs << "WTF?" 
<< llendl; + } + + do + { + strm.avail_out = CHUNK; + strm.next_out = out; + ret = inflate(&strm, Z_NO_FLUSH); + if (ret == Z_STREAM_ERROR) + { + inflateEnd(&strm); + free(result); + delete [] in; + return FALSE; + } + + switch (ret) + { + case Z_NEED_DICT: + ret = Z_DATA_ERROR; + case Z_DATA_ERROR: + case Z_MEM_ERROR: + inflateEnd(&strm); + free(result); + delete [] in; + return FALSE; + break; + } + + U32 have = CHUNK-strm.avail_out; + + result = (U8*) realloc(result, cur_size + have); + memcpy(result+cur_size, out, have); + cur_size += have; + + } while (strm.avail_out == 0); + + inflateEnd(&strm); + delete [] in; + + if (ret != Z_STREAM_END) + { + free(result); + return FALSE; + } + } + + //result now points to the decompressed LLSD block + + LLSD mdl; + + { + std::string res_str((char*) result, cur_size); + std::istringstream istr(res_str); + + if (!LLSDSerialize::deserialize(mdl, istr, cur_size)) + { + llwarns << "not a valid mesh asset!" << llendl; + return FALSE; + } + } + + + free(result); + + + { + U32 face_count = mdl.size(); + + mVolumeFaces.resize(face_count); + + for (U32 i = 0; i < face_count; ++i) + { + LLSD::Binary pos = mdl[i]["Position"]; + LLSD::Binary norm = mdl[i]["Normal"]; + LLSD::Binary tc = mdl[i]["TexCoord0"]; + LLSD::Binary idx = mdl[i]["TriangleList"]; + + LLVolumeFace& face = mVolumeFaces[i]; + + face.mHasBinormals = FALSE; + + //copy out indices + face.mIndices.resize(idx.size()/2); + if (idx.empty()) + { //why is there an empty index list? + continue; + } + + U16* indices = (U16*) &(idx[0]); + for (U32 j = 0; j < idx.size()/2; ++j) + { + face.mIndices[j] = indices[j]; + } + + //copy out vertices + U32 num_verts = pos.size()/(3*2); + face.mVertices.resize(num_verts); + + LLVector3 min_pos; + LLVector3 max_pos; + LLVector2 min_tc; + LLVector2 max_tc; + + min_pos.setValue(mdl[i]["PositionDomain"]["Min"]); + max_pos.setValue(mdl[i]["PositionDomain"]["Max"]); + min_tc.setValue(mdl[i]["TexCoord0Domain"]["Min"]); + max_tc.setValue(mdl[i]["TexCoord0Domain"]["Max"]); + + F32 scale = llclamp((F32) mdl[i]["Scale"].asReal(), 1.f, 10.f); + + LLVector3 pos_range = max_pos - min_pos; + LLVector2 tc_range = max_tc - min_tc; + + LLVector3& min = face.mExtents[0]; + LLVector3& max = face.mExtents[1]; + + min = max = LLVector3(0,0,0); + + for (U32 j = 0; j < num_verts; ++j) + { + U16* v = (U16*) &(pos[j*3*2]); + + face.mVertices[j].mPosition.setVec( + (F32) v[0] / 65535.f * pos_range.mV[0] + min_pos.mV[0], + (F32) v[1] / 65535.f * pos_range.mV[1] + min_pos.mV[1], + (F32) v[2] / 65535.f * pos_range.mV[2] + min_pos.mV[2]); + + face.mVertices[j].mPosition *= scale; + + if (j == 0) + { + min = max = face.mVertices[j].mPosition; + } + else + { + update_min_max(min,max,face.mVertices[j].mPosition); + } + + U16* n = (U16*) &(norm[j*3*2]); + + face.mVertices[j].mNormal.setVec( + (F32) n[0] / 65535.f * 2.f - 1.f, + (F32) n[1] / 65535.f * 2.f - 1.f, + (F32) n[2] / 65535.f * 2.f - 1.f); + + U16* t = (U16*) &(tc[j*2*2]); + + face.mVertices[j].mTexCoord.setVec( + (F32) t[0] / 65535.f * tc_range.mV[0] + min_tc.mV[0], + (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]); + } + + } + } + + mSculptLevel = 0; // success! 
+ return TRUE; +} + +void LLVolume::copyVolumeFaces(LLVolume* volume) +{ + mVolumeFaces = volume->mVolumeFaces; + mSculptLevel = 0; +} + +S32 const LL_SCULPT_MESH_MAX_FACES = 8; + +S32 LLVolume::getNumFaces() const +{ + U8 sculpt_type = (mParams.getSculptType() & LL_SCULPT_TYPE_MASK); + + if (sculpt_type == LL_SCULPT_TYPE_MESH) + { + return LL_SCULPT_MESH_MAX_FACES; + } + + return (S32)mProfilep->mFaces.size(); +} + void LLVolume::createVolumeFaces() { @@ -1864,6 +2156,11 @@ void LLVolume::createVolumeFaces() LLProfile::Face& face = mProfilep->mFaces[i]; vf.mBeginS = face.mIndex; vf.mNumS = face.mCount; + if (vf.mNumS < 0) + { + llerrs << "Volume face corruption detected." << llendl; + } + vf.mBeginT = 0; vf.mNumT= getPath().mPath.size(); vf.mID = i; @@ -1907,6 +2204,10 @@ void LLVolume::createVolumeFaces() if (face.mFlat && vf.mNumS > 2) { //flat inner faces have to copy vert normals vf.mNumS = vf.mNumS*2; + if (vf.mNumS < 0) + { + llerrs << "Volume face corruption detected." << llendl; + } } } else @@ -2309,7 +2610,6 @@ bool LLVolumeParams::operator<(const LLVolumeParams ¶ms) const return mSculptID < params.mSculptID; } - return mSculptType < params.mSculptType; @@ -3379,22 +3679,29 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, S32 face_mask) { LLMemType m1(LLMemType::MTYPE_VOLUME); - + vertices.clear(); normals.clear(); segments.clear(); + if (mParams.getSculptType() == LL_SCULPT_TYPE_MESH) + { + return; + } + S32 cur_index = 0; //for each face for (face_list_t::iterator iter = mVolumeFaces.begin(); iter != mVolumeFaces.end(); ++iter) { - const LLVolumeFace& face = *iter; + LLVolumeFace& face = *iter; - if (!(face_mask & (0x1 << cur_index++))) + if (!(face_mask & (0x1 << cur_index++)) || + face.mIndices.empty() || face.mEdge.empty()) { continue; } + if (face.mTypeMask & (LLVolumeFace::CAP_MASK)) { } @@ -3594,6 +3901,8 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, F32 closest_t = 2.f; // must be larger than 1 + end_face = llmin(end_face, getNumVolumeFaces()-1); + for (S32 i = start_face; i <= end_face; i++) { const LLVolumeFace &face = getVolumeFace((U32)i); @@ -4103,11 +4412,28 @@ BOOL LLVolumeParams::exportLegacyStream(std::ostream& output_stream) const return TRUE; } +LLSD LLVolumeParams::sculptAsLLSD() const +{ + LLSD sd = LLSD(); + sd["id"] = getSculptID(); + sd["type"] = getSculptType(); + + return sd; +} + +bool LLVolumeParams::sculptFromLLSD(LLSD& sd) +{ + setSculptID(sd["id"].asUUID(), (U8)sd["type"].asInteger()); + return true; +} + LLSD LLVolumeParams::asLLSD() const { LLSD sd = LLSD(); sd["path"] = mPathParams; sd["profile"] = mProfileParams; + sd["sculpt"] = sculptAsLLSD(); + return sd; } @@ -4115,6 +4441,8 @@ bool LLVolumeParams::fromLLSD(LLSD& sd) { mPathParams.fromLLSD(sd["path"]); mProfileParams.fromLLSD(sd["profile"]); + sculptFromLLSD(sd["sculpt"]); + return true; } @@ -4157,6 +4485,12 @@ const F32 MIN_CONCAVE_PATH_WEDGE = 0.111111f; // 1/9 unity // for collison purposes BOOL LLVolumeParams::isConvex() const { + if (!getSculptID().isNull()) + { + // can't determine, be safe and say no: + return FALSE; + } + F32 path_length = mPathParams.getEnd() - mPathParams.getBegin(); F32 hollow = mProfileParams.getHollow(); @@ -5011,7 +5345,11 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { mIndices.resize(num_indices); - mEdge.resize(num_indices); + + if (volume->getParams().getSculptType() != LL_SCULPT_TYPE_MESH) + { + mEdge.resize(num_indices); + } } else { diff --git 
a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 871b334452..9f595ccbc4 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -186,6 +186,9 @@ const U8 LL_SCULPT_TYPE_CYLINDER = 4; const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | LL_SCULPT_TYPE_CYLINDER; +// need to change this (these) names +const U8 LL_SCULPT_TYPE_MESH = 5; + const U8 LL_SCULPT_FLAG_INVERT = 64; const U8 LL_SCULPT_FLAG_MIRROR = 128; @@ -575,6 +578,9 @@ public: BOOL importLegacyStream(std::istream& input_stream); BOOL exportLegacyStream(std::ostream& output_stream) const; + LLSD sculptAsLLSD() const; + bool sculptFromLLSD(LLSD& sd); + LLSD asLLSD() const; operator LLSD() const { return asLLSD(); } bool fromLLSD(LLSD& sd); @@ -634,7 +640,6 @@ public: const F32& getSkew() const { return mPathParams.getSkew(); } const LLUUID& getSculptID() const { return mSculptID; } const U8& getSculptType() const { return mSculptType; } - BOOL isConvex() const; // 'begin' and 'end' should be in range [0, 1] (they will be clamped) @@ -798,7 +803,7 @@ public: BOOL create(LLVolume* volume, BOOL partial_build = FALSE); void createBinormals(); - + class VertexData { public: @@ -806,6 +811,9 @@ public: LLVector3 mNormal; LLVector3 mBinormal; LLVector2 mTexCoord; + + bool operator<(const VertexData& rhs) const; + bool operator==(const VertexData& rhs) const; }; enum @@ -851,8 +859,7 @@ class LLVolume : public LLRefCount { friend class LLVolumeLODGroup; -private: - LLVolume(const LLVolume&); // Don't implement +protected: ~LLVolume(); // use unref public: @@ -874,7 +881,7 @@ public: U8 getProfileType() const { return mParams.getProfileParams().getCurveType(); } U8 getPathType() const { return mParams.getPathParams().getCurveType(); } - S32 getNumFaces() const { return (S32)mProfilep->mFaces.size(); } + S32 getNumFaces() const; S32 getNumVolumeFaces() const { return mVolumeFaces.size(); } F32 getDetail() const { return mDetail; } const LLVolumeParams& getParams() const { return mParams; } @@ -946,6 +953,8 @@ public: LLVector3 mLODScaleBias; // vector for biasing LOD based on scale void sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level); + void copyVolumeFaces(LLVolume* volume); + private: void sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type); F32 sculptGetSurfaceArea(); @@ -956,6 +965,9 @@ private: protected: BOOL generate(); void createVolumeFaces(); +public: + virtual BOOL createVolumeFacesFromFile(const std::string& file_name); + virtual BOOL createVolumeFacesFromStream(std::istream& is); protected: BOOL mUnique; diff --git a/indra/llmath/llvolumemgr.cpp b/indra/llmath/llvolumemgr.cpp index 53641fceab..61c5a0adc9 100644 --- a/indra/llmath/llvolumemgr.cpp +++ b/indra/llmath/llvolumemgr.cpp @@ -320,7 +320,7 @@ BOOL LLVolumeLODGroup::derefLOD(LLVolume *volumep) { llassert_always(mLODRefs[i] > 0); mLODRefs[i]--; -#if 1 // SJB: Possible opt: keep other lods around +#if 0 // SJB: Possible opt: keep other lods around if (!mLODRefs[i]) { mVolumeLODs[i] = NULL; diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp index d8e7b4aaf9..7c7f60154d 100644 --- a/indra/llmath/m4math.cpp +++ b/indra/llmath/m4math.cpp @@ -428,6 +428,17 @@ const LLMatrix4& LLMatrix4::initRotTrans(const LLQuaternion &q, const LLVector return (*this); } +const LLMatrix4& LLMatrix4::initScale(const LLVector3 &scale) +{ + setIdentity(); + + mMatrix[VX][VX] = scale.mV[VX]; 
+ mMatrix[VY][VY] = scale.mV[VY]; + mMatrix[VZ][VZ] = scale.mV[VZ]; + + return (*this); +} + const LLMatrix4& LLMatrix4::initAll(const LLVector3 &scale, const LLQuaternion &q, const LLVector3 &pos) { F32 sx, sy, sz; diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h index e74b7afe9b..de981b7646 100644 --- a/indra/llmath/m4math.h +++ b/indra/llmath/m4math.h @@ -159,6 +159,7 @@ public: const LLMatrix4& initRotTrans(const F32 roll, const F32 pitch, const F32 yaw, const LLVector4 &pos); // Rotation from Euler + translation const LLMatrix4& initRotTrans(const LLQuaternion &q, const LLVector4 &pos); // Set with Quaternion and position + const LLMatrix4& initScale(const LLVector3 &scale); // Set all const LLMatrix4& initAll(const LLVector3 &scale, const LLQuaternion &q, const LLVector3 &pos); diff --git a/indra/llmath/v2math.cpp b/indra/llmath/v2math.cpp index 555e1f92bb..220336e0c2 100644 --- a/indra/llmath/v2math.cpp +++ b/indra/llmath/v2math.cpp @@ -115,3 +115,18 @@ LLVector2 lerp(const LLVector2 &a, const LLVector2 &b, F32 u) a.mV[VX] + (b.mV[VX] - a.mV[VX]) * u, a.mV[VY] + (b.mV[VY] - a.mV[VY]) * u ); } + +LLSD LLVector2::getValue() const +{ + LLSD ret; + ret[0] = mV[0]; + ret[1] = mV[1]; + return ret; +} + +void LLVector2::setValue(LLSD& sd) +{ + mV[0] = (F32) sd[0].asReal(); + mV[1] = (F32) sd[1].asReal(); +} + diff --git a/indra/llmath/v2math.h b/indra/llmath/v2math.h index 9fef8851cc..f9f1c024f2 100644 --- a/indra/llmath/v2math.h +++ b/indra/llmath/v2math.h @@ -66,6 +66,9 @@ class LLVector2 void set(const LLVector2 &vec); // Sets LLVector2 to vec void set(const F32 *vec); // Sets LLVector2 to vec + LLSD getValue() const; + void setValue(LLSD& sd); + void setVec(F32 x, F32 y); // deprecated void setVec(const LLVector2 &vec); // deprecated void setVec(const F32 *vec); // deprecated -- cgit v1.2.3 From 88292104d9a2332e6169f2add8f0b590bb22dbff Mon Sep 17 00:00:00 2001 From: David Parks Date: Wed, 4 Nov 2009 14:19:05 +0000 Subject: Fix for crash when loading some meshes. Added button to auto-fill LODs. --- indra/llmath/llvolume.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index afa82ed399..ddd1b4b3db 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1690,9 +1690,11 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; - generate(); + mLODScaleBias.setVec(1,1,1); + if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) { + generate(); createVolumeFaces(); } } -- cgit v1.2.3 From 10069e0e13e3214ba9320fdce915440b2e12f938 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 5 Nov 2009 19:58:10 -0600 Subject: Fix for prims all being 0 lod. Fix for dangling prim references. 
--- indra/llmath/llvolume.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index ddd1b4b3db..33a8d33ce1 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1692,9 +1692,10 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mLODScaleBias.setVec(1,1,1); + generate(); + if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) { - generate(); createVolumeFaces(); } } -- cgit v1.2.3 From 4e420a36c67e611cd7d85652b43d9cd65315e563 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sat, 7 Nov 2009 08:22:39 -0600 Subject: Fix for missing LOD spam. --- indra/llmath/llvolume.cpp | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 33a8d33ce1..c8ef911cc1 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1676,7 +1676,8 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mFaceMask = 0x0; mDetail = detail; mSculptLevel = -2; - + mLODScaleBias.setVec(1,1,1); + // set defaults if (mParams.getPathParams().getCurveType() == LL_PCODE_PATH_FLEXIBLE) { @@ -1690,8 +1691,6 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; - mLODScaleBias.setVec(1,1,1); - generate(); if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) @@ -1899,7 +1898,7 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) { if (!LLSDSerialize::deserialize(header, is, 1024*1024*1024)) { - llwarns << "not a valid mesh asset!" << llendl; + llwarns << "Mesh header parse error. Not a valid mesh asset!" << llendl; return FALSE; } } @@ -1921,8 +1920,18 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) if (lod >= 4) { - llwarns << "Couldn't load model for given lod" << llendl; - return FALSE; + lod = llclamp((S32) mDetail, 0, 3); + + while (lod >= 0 && header[nm[lod]]["offset"].asInteger() == -1) + { + --lod; + } + + if (lod < 0) + { + llwarns << "Mesh header missing LOD offsets. Not a valid mesh asset!" << llendl; + return FALSE; + } } is.seekg(header[nm[lod]]["offset"].asInteger(), std::ios_base::cur); -- cgit v1.2.3 From c02702f3871979cb7745b49aa502ac3c71f77681 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 13 Nov 2009 17:01:56 -0600 Subject: CTS-7 Add hard edge threshold capability to normal generation. 
--- indra/llmath/llvolume.cpp | 12 ++++++++++++ indra/llmath/llvolume.h | 1 + 2 files changed, 13 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 6286d1bcea..f252b2a232 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1876,6 +1876,18 @@ bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)co return true; } +bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const +{ + bool retval = false; + if (rhs.mPosition == mPosition && rhs.mTexCoord == mTexCoord) + { + F32 cur_angle = rhs.mNormal*mNormal; + + retval = cur_angle > angle_cutoff; + } + + return retval; +} BOOL LLVolume::createVolumeFacesFromFile(const std::string& file_name) { diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 9f595ccbc4..d2727d8f21 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -814,6 +814,7 @@ public: bool operator<(const VertexData& rhs) const; bool operator==(const VertexData& rhs) const; + bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; }; enum -- cgit v1.2.3 From 81bfdcbfae4f203e60f00794966383b01475995b Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 18 Nov 2009 18:10:48 -0600 Subject: Tetrahedron displays in place of unloaded mesh. Still has some LOD issues. --- indra/llmath/llvolume.cpp | 96 ++++++++++++++++++++++++++++++++++++++++++++++- indra/llmath/llvolume.h | 1 + 2 files changed, 95 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index f252b2a232..84da1b3c62 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1925,7 +1925,9 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) S32 lod = llclamp((S32) mDetail, 0, 3); - while (lod < 4 && header[nm[lod]]["offset"].asInteger() == -1) + while (lod < 4 && + (header[nm[lod]]["offset"].asInteger() == -1 || + header[nm[lod]]["size"].asInteger() == 0 )) { ++lod; } @@ -1934,7 +1936,9 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) { lod = llclamp((S32) mDetail, 0, 3); - while (lod >= 0 && header[nm[lod]]["offset"].asInteger() == -1) + while (lod >= 0 && + (header[nm[lod]]["offset"].asInteger() == -1 || + header[nm[lod]]["size"].asInteger() == 0) ) { --lod; } @@ -2135,6 +2139,94 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) return TRUE; } +void tetrahedron_set_normal(LLVolumeFace::VertexData* cv) +{ + LLVector3 nrm = (cv[1].mPosition-cv[0].mPosition)%(cv[2].mPosition-cv[0].mPosition); + + nrm.normVec(); + + cv[0].mNormal = nrm; + cv[1].mNormal = nrm; + cv[2].mNormal = nrm; +} + +void LLVolume::makeTetrahedron() +{ + mVolumeFaces.clear(); + + LLVolumeFace face; + + F32 x = 0.5f; + LLVector3 p[] = + { //unit tetrahedron corners + LLVector3(x,x,x), + LLVector3(-x,-x,x), + LLVector3(-x,x,-x), + LLVector3(x,-x,-x) + }; + + LLVolumeFace::VertexData cv[3]; + + //set texture coordinates + cv[0].mTexCoord = LLVector2(0,0); + cv[1].mTexCoord = LLVector2(1,0); + cv[2].mTexCoord = LLVector2(0.5f, 0.5f*F_SQRT3); + + + //side 1 + cv[0].mPosition = p[1]; + cv[1].mPosition = p[0]; + cv[2].mPosition = p[2]; + + tetrahedron_set_normal(cv); + + face.mVertices.push_back(cv[0]); + face.mVertices.push_back(cv[1]); + face.mVertices.push_back(cv[2]); + + //side 2 + cv[0].mPosition = p[3]; + cv[1].mPosition = p[0]; + cv[2].mPosition = p[1]; + + tetrahedron_set_normal(cv); + + face.mVertices.push_back(cv[0]); + 
face.mVertices.push_back(cv[1]); + face.mVertices.push_back(cv[2]); + + //side 3 + cv[0].mPosition = p[3]; + cv[1].mPosition = p[1]; + cv[2].mPosition = p[2]; + + tetrahedron_set_normal(cv); + + face.mVertices.push_back(cv[0]); + face.mVertices.push_back(cv[1]); + face.mVertices.push_back(cv[2]); + + //side 4 + cv[0].mPosition = p[2]; + cv[1].mPosition = p[0]; + cv[2].mPosition = p[3]; + + tetrahedron_set_normal(cv); + + face.mVertices.push_back(cv[0]); + face.mVertices.push_back(cv[1]); + face.mVertices.push_back(cv[2]); + + //set index buffer + for (U32 i = 0; i < 12; i++) + { + face.mIndices.push_back(i); + } + + mVolumeFaces.push_back(face); + mSculptLevel = 0; +} + void LLVolume::copyVolumeFaces(LLVolume* volume) { mVolumeFaces = volume->mVolumeFaces; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index d2727d8f21..bf2854ede9 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -969,6 +969,7 @@ protected: public: virtual BOOL createVolumeFacesFromFile(const std::string& file_name); virtual BOOL createVolumeFacesFromStream(std::istream& is); + virtual void makeTetrahedron(); protected: BOOL mUnique; -- cgit v1.2.3 From 62233f22469cdc66042fc7bbbbd367dbb7212fde Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 24 Nov 2009 07:38:04 -0600 Subject: Fix for copying of tetrahedrons in place of mesh LODs. Fix for bad tetrahedron bounding box. Bad fix for simultaneous loading of multiple LODs. --- indra/llmath/llvolume.cpp | 31 +++++++++++++++++++++++++++++-- indra/llmath/llvolume.h | 5 ++++- indra/llmath/llvolumemgr.cpp | 13 +++++++++++++ indra/llmath/llvolumemgr.h | 1 + 4 files changed, 47 insertions(+), 3 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 84da1b3c62..515b1061f9 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1676,6 +1676,7 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mFaceMask = 0x0; mDetail = detail; mSculptLevel = -2; + mIsTetrahedron = FALSE; mLODScaleBias.setVec(1,1,1); // set defaults @@ -1905,7 +1906,7 @@ BOOL LLVolume::createVolumeFacesFromFile(const std::string& file_name) BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) { mSculptLevel = -1; // default is an error occured - + LLSD header; { if (!LLSDSerialize::deserialize(header, is, 1024*1024*1024)) @@ -2048,6 +2049,11 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) { U32 face_count = mdl.size(); + if (face_count == 0) + { + llerrs << "WTF?" << llendl; + } + mVolumeFaces.resize(face_count); for (U32 i = 0; i < face_count; ++i) @@ -2063,8 +2069,9 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) //copy out indices face.mIndices.resize(idx.size()/2); - if (idx.empty()) + if (idx.empty() || face.mIndices.size() < 3) { //why is there an empty index list? + llerrs <<"WTF?" 
<< llendl; continue; } @@ -2150,6 +2157,11 @@ void tetrahedron_set_normal(LLVolumeFace::VertexData* cv) cv[2].mNormal = nrm; } +BOOL LLVolume::isTetrahedron() +{ + return mIsTetrahedron; +} + void LLVolume::makeTetrahedron() { mVolumeFaces.clear(); @@ -2165,6 +2177,9 @@ void LLVolume::makeTetrahedron() LLVector3(x,-x,-x) }; + face.mExtents[0].setVec(-x,-x,-x); + face.mExtents[1].setVec(x,x,x); + LLVolumeFace::VertexData cv[3]; //set texture coordinates @@ -2225,12 +2240,19 @@ void LLVolume::makeTetrahedron() mVolumeFaces.push_back(face); mSculptLevel = 0; + mIsTetrahedron = TRUE; } void LLVolume::copyVolumeFaces(LLVolume* volume) { + if (volume->isTetrahedron()) + { + llerrs << "WTF?" << llendl; + } + mVolumeFaces = volume->mVolumeFaces; mSculptLevel = 0; + mIsTetrahedron = FALSE; } S32 const LL_SCULPT_MESH_MAX_FACES = 8; @@ -2615,6 +2637,11 @@ void LLVolume::sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, LLMemType m1(LLMemType::MTYPE_VOLUME); U8 sculpt_type = mParams.getSculptType(); + if (sculpt_type & LL_SCULPT_TYPE_MASK == LL_SCULPT_TYPE_MESH) + { + llerrs << "WTF?" << llendl; + } + BOOL data_is_empty = FALSE; if (sculpt_width == 0 || sculpt_height == 0 || sculpt_components < 3 || sculpt_data == NULL) diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index bf2854ede9..8e57f2e280 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -904,7 +904,8 @@ public: BOOL isUnique() const { return mUnique; } S32 getSculptLevel() const { return mSculptLevel; } - + void setSculptLevel(S32 level) { mSculptLevel = level; } + S32 *getTriangleIndices(U32 &num_indices) const; // returns number of triangle indeces required for path/profile mesh @@ -970,11 +971,13 @@ public: virtual BOOL createVolumeFacesFromFile(const std::string& file_name); virtual BOOL createVolumeFacesFromStream(std::istream& is); virtual void makeTetrahedron(); + virtual BOOL isTetrahedron(); protected: BOOL mUnique; F32 mDetail; S32 mSculptLevel; + BOOL mIsTetrahedron; LLVolumeParams mParams; LLPath *mPathp; diff --git a/indra/llmath/llvolumemgr.cpp b/indra/llmath/llvolumemgr.cpp index 61c5a0adc9..419e0015ba 100644 --- a/indra/llmath/llvolumemgr.cpp +++ b/indra/llmath/llvolumemgr.cpp @@ -375,6 +375,19 @@ F32 LLVolumeLODGroup::getVolumeScaleFromDetail(const S32 detail) return mDetailScales[detail]; } +S32 LLVolumeLODGroup::getVolumeDetailFromScale(const F32 detail) +{ + for (S32 i = 1; i < 4; i++) + { + if (mDetailScales[i] > detail) + { + return i-1; + } + } + + return 3; +} + F32 LLVolumeLODGroup::dump() { F32 usage = 0.f; diff --git a/indra/llmath/llvolumemgr.h b/indra/llmath/llvolumemgr.h index a78ea76a1a..f5dc4cd748 100644 --- a/indra/llmath/llvolumemgr.h +++ b/indra/llmath/llvolumemgr.h @@ -59,6 +59,7 @@ public: static S32 getDetailFromTan(const F32 tan_angle); static void getDetailProximity(const F32 tan_angle, F32 &to_lower, F32& to_higher); static F32 getVolumeScaleFromDetail(const S32 detail); + static S32 getVolumeDetailFromScale(F32 scale); LLVolume* refLOD(const S32 detail); BOOL derefLOD(LLVolume *volumep); -- cgit v1.2.3 From 6d66910c6e2fbb25bf8b5c7b90e795f350342104 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 25 Nov 2009 11:35:41 -0600 Subject: Fix for spam on invalid mesh asset. Fix for index buffer overflow spam and crash in llvertexbuffer. 
--- indra/llmath/llvolume.cpp | 5 ----- 1 file changed, 5 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 515b1061f9..3e547aec6f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2245,11 +2245,6 @@ void LLVolume::makeTetrahedron() void LLVolume::copyVolumeFaces(LLVolume* volume) { - if (volume->isTetrahedron()) - { - llerrs << "WTF?" << llendl; - } - mVolumeFaces = volume->mVolumeFaces; mSculptLevel = 0; mIsTetrahedron = FALSE; -- cgit v1.2.3 From 062a2dd309ca5521d4045eb721496476f43d24dc Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 30 Nov 2009 15:32:10 -0600 Subject: Remove zero area triangles from meshes post-import. --- indra/llmath/llvolume.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 3e547aec6f..1d36da7f52 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1692,10 +1692,9 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; - generate(); - if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) { + generate(); createVolumeFaces(); } } -- cgit v1.2.3 From f039fa98efedc91965338ef53624279f99914205 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 30 Nov 2009 17:02:38 -0600 Subject: Fix for silly crash due to LLPrimitive having 0 texture entries. --- indra/llmath/llvolume.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 1d36da7f52..858bd9edea 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1692,9 +1692,10 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; + generate(); + if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) { - generate(); createVolumeFaces(); } } -- cgit v1.2.3 From bb2631180a85df343e6d816fc37d881af31d49fb Mon Sep 17 00:00:00 2001 From: "Karl Stiefvater (qarl)" Date: Tue, 1 Dec 2009 17:40:52 -0600 Subject: CTS-4 Only part of an uploaded mesh renders. --- indra/llmath/llvolume.cpp | 1 - indra/llmath/llvolume.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 858bd9edea..fb2de92e35 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2250,7 +2250,6 @@ void LLVolume::copyVolumeFaces(LLVolume* volume) mIsTetrahedron = FALSE; } -S32 const LL_SCULPT_MESH_MAX_FACES = 8; S32 LLVolume::getNumFaces() const { diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 8e57f2e280..59c60ccd92 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -192,6 +192,7 @@ const U8 LL_SCULPT_TYPE_MESH = 5; const U8 LL_SCULPT_FLAG_INVERT = 64; const U8 LL_SCULPT_FLAG_MIRROR = 128; +const S32 LL_SCULPT_MESH_MAX_FACES = 8; class LLProfileParams { -- cgit v1.2.3 From 695969c77066de5032bdc9caefecf9b32b076b2f Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 11 Dec 2009 14:47:11 -0600 Subject: HTTP Mesh fetch FTW.. 
still busted --- indra/llmath/llvolume.cpp | 7 +++++-- indra/llmath/llvolume.h | 2 ++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index fb2de92e35..44ff173502 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1952,8 +1952,12 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) } is.seekg(header[nm[lod]]["offset"].asInteger(), std::ios_base::cur); - + return unpackVolumeFaces(is, header[nm[lod]]["size"].asInteger()); +} + +BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) +{ U8* result = NULL; U32 cur_size = 0; @@ -1964,7 +1968,6 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) const U32 CHUNK = 65536; - S32 size = header[nm[lod]]["size"].asInteger(); U8 *in = new U8[size]; is.read((char*) in, size); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 59c60ccd92..9970b24a94 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -971,6 +971,8 @@ protected: public: virtual BOOL createVolumeFacesFromFile(const std::string& file_name); virtual BOOL createVolumeFacesFromStream(std::istream& is); + virtual BOOL unpackVolumeFaces(std::istream& is, S32 size); + virtual void makeTetrahedron(); virtual BOOL isTetrahedron(); -- cgit v1.2.3 From 081fa98a47d2b592ada0fbb049ff959ac2cd6294 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 15 Dec 2009 17:43:05 -0600 Subject: HTTP Mesh transfer relatively blocking-free. --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 44ff173502..844918432d 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2171,7 +2171,7 @@ void LLVolume::makeTetrahedron() LLVolumeFace face; - F32 x = 0.5f; + F32 x = 0.25f; LLVector3 p[] = { //unit tetrahedron corners LLVector3(x,x,x), -- cgit v1.2.3 From 512a5736dceb1cc6db286e5f5baad867ac7a5935 Mon Sep 17 00:00:00 2001 From: "Karl Stiefvater (qarl)" Date: Wed, 23 Dec 2009 14:40:48 -0600 Subject: LODs for scene upload --- indra/llmath/llvolume.cpp | 14 ++++++++++++++ indra/llmath/llvolume.h | 2 ++ 2 files changed, 16 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 844918432d..de32070da1 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -3807,6 +3807,20 @@ S32 LLVolume::getNumTriangleIndices() const return count; } + +S32 LLVolume::getNumTriangles() const +{ + U32 triangle_count = 0; + + for (S32 i = 0; i < getNumVolumeFaces(); ++i) + { + triangle_count += getVolumeFace(i).mIndices.size()/3; + } + + return triangle_count; +} + + //----------------------------------------------------------------------------- // generateSilhouetteVertices() //----------------------------------------------------------------------------- diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 9970b24a94..e3ab648fe3 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -912,6 +912,8 @@ public: // returns number of triangle indeces required for path/profile mesh S32 getNumTriangleIndices() const; + S32 getNumTriangles() const; + void generateSilhouetteVertices(std::vector &vertices, std::vector &normals, std::vector &segments, -- cgit v1.2.3 From d3e84ea77baa7d60fe225f7b440297e9a49318a6 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 20 Jan 2010 15:08:29 -0600 Subject: First stab at making 
model importer act more like scene importer. Upload button still doesn't work. --- indra/llmath/m4math.cpp | 17 +++++++++++++++++ indra/llmath/m4math.h | 1 + 2 files changed, 18 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp index 7c7f60154d..3700142982 100644 --- a/indra/llmath/m4math.cpp +++ b/indra/llmath/m4math.cpp @@ -785,6 +785,23 @@ bool operator!=(const LLMatrix4 &a, const LLMatrix4 &b) return FALSE; } +bool operator<(const LLMatrix4& a, const LLMatrix4 &b) +{ + U32 i, j; + for (i = 0; i < NUM_VALUES_IN_MAT4; i++) + { + for (j = 0; j < NUM_VALUES_IN_MAT4; j++) + { + if (a.mMatrix[i][j] != b.mMatrix[i][j]) + { + return a.mMatrix[i][j] < b.mMatrix[i][j]; + } + } + } + + return false; +} + const LLMatrix4& operator*=(LLMatrix4 &a, F32 k) { U32 i, j; diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h index de981b7646..6007b96bd9 100644 --- a/indra/llmath/m4math.h +++ b/indra/llmath/m4math.h @@ -237,6 +237,7 @@ public: friend bool operator==(const LLMatrix4 &a, const LLMatrix4 &b); // Return a == b friend bool operator!=(const LLMatrix4 &a, const LLMatrix4 &b); // Return a != b + friend bool operator<(const LLMatrix4 &a, const LLMatrix4& b); // Return a < b friend const LLMatrix4& operator+=(LLMatrix4 &a, const LLMatrix4 &b); // Return a + b friend const LLMatrix4& operator-=(LLMatrix4 &a, const LLMatrix4 &b); // Return a - b -- cgit v1.2.3 From 024c0ebe19588f8452bae7ea01756fd7b4b30540 Mon Sep 17 00:00:00 2001 From: "Karl Stiefvater (qarl)" Date: Fri, 29 Jan 2010 14:33:04 -0600 Subject: enable mirror and invert flags for meshes. --- indra/llmath/llvolume.cpp | 59 ++++++++++++++++++++++++++++++++++++++++++++--- indra/llmath/llvolume.h | 7 +++--- 2 files changed, 59 insertions(+), 7 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index de32070da1..0328c09c9a 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1694,7 +1694,7 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge generate(); - if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) + if (mParams.getSculptID().isNull() && mParams.getSculptType() == LL_SCULPT_TYPE_NONE) { createVolumeFaces(); } @@ -2142,6 +2142,59 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]); } + + // modifier flags? 
+ BOOL do_mirror = (mParams.getSculptType() & LL_SCULPT_FLAG_MIRROR); + BOOL do_invert = (mParams.getSculptType() &LL_SCULPT_FLAG_INVERT); + + + // translate to actions: + BOOL do_reflect_x = FALSE; + BOOL do_reverse_triangles = FALSE; + BOOL do_invert_normals = FALSE; + + if (do_mirror) + { + do_reflect_x = TRUE; + do_reverse_triangles = !do_reverse_triangles; + } + + if (do_invert) + { + do_invert_normals = TRUE; + do_reverse_triangles = !do_reverse_triangles; + } + + // now do the work + + if (do_reflect_x) + { + for (S32 i = 0; i < face.mVertices.size(); i++) + { + face.mVertices[i].mPosition.mV[VX] *= -1.0f; + face.mVertices[i].mNormal.mV[VX] *= -1.0f; + } + } + + if (do_invert_normals) + { + for (S32 i = 0; i < face.mVertices.size(); i++) + { + face.mVertices[i].mNormal *= -1.0f; + } + } + + if (do_reverse_triangles) + { + for (U32 j = 0; j < face.mIndices.size(); j += 3) + { + // swap the 2nd and 3rd index + S32 swap = face.mIndices[j+1]; + face.mIndices[j+1] = face.mIndices[j+2]; + face.mIndices[j+2] = swap; + } + } + } } @@ -3838,7 +3891,7 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, normals.clear(); segments.clear(); - if (mParams.getSculptType() == LL_SCULPT_TYPE_MESH) + if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH) { return; } @@ -5500,7 +5553,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { mIndices.resize(num_indices); - if (volume->getParams().getSculptType() != LL_SCULPT_TYPE_MESH) + if ((volume->getParams().getSculptType() & LL_SCULPT_TYPE_MASK) != LL_SCULPT_TYPE_MESH) { mEdge.resize(num_indices); } diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index e3ab648fe3..0bc64f4487 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -183,12 +183,11 @@ const U8 LL_SCULPT_TYPE_SPHERE = 1; const U8 LL_SCULPT_TYPE_TORUS = 2; const U8 LL_SCULPT_TYPE_PLANE = 3; const U8 LL_SCULPT_TYPE_CYLINDER = 4; - -const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | LL_SCULPT_TYPE_CYLINDER; - -// need to change this (these) names const U8 LL_SCULPT_TYPE_MESH = 5; +const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | + LL_SCULPT_TYPE_CYLINDER | LL_SCULPT_TYPE_MESH; + const U8 LL_SCULPT_FLAG_INVERT = 64; const U8 LL_SCULPT_FLAG_MIRROR = 128; -- cgit v1.2.3 From 1805369b58d98677c726bb0bcb52618bd66e8fdd Mon Sep 17 00:00:00 2001 From: "Karl Stiefvater (qarl)" Date: Wed, 3 Feb 2010 18:08:19 -0600 Subject: handle reflected geometry in scene import implement determinant for 4x4 matrices fix materials for collada --- indra/llmath/m4math.cpp | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp index 3700142982..5c112b52b2 100644 --- a/indra/llmath/m4math.cpp +++ b/indra/llmath/m4math.cpp @@ -221,8 +221,33 @@ const LLMatrix4& LLMatrix4::transpose() F32 LLMatrix4::determinant() const { - llerrs << "Not implemented!" 
<< llendl; - return 0.f; + F32 value = + mMatrix[0][3] * mMatrix[1][2] * mMatrix[2][1] * mMatrix[3][0] - + mMatrix[0][2] * mMatrix[1][3] * mMatrix[2][1] * mMatrix[3][0] - + mMatrix[0][3] * mMatrix[1][1] * mMatrix[2][2] * mMatrix[3][0] + + mMatrix[0][1] * mMatrix[1][3] * mMatrix[2][2] * mMatrix[3][0] + + mMatrix[0][2] * mMatrix[1][1] * mMatrix[2][3] * mMatrix[3][0] - + mMatrix[0][1] * mMatrix[1][2] * mMatrix[2][3] * mMatrix[3][0] - + mMatrix[0][3] * mMatrix[1][2] * mMatrix[2][0] * mMatrix[3][1] + + mMatrix[0][2] * mMatrix[1][3] * mMatrix[2][0] * mMatrix[3][1] + + mMatrix[0][3] * mMatrix[1][0] * mMatrix[2][2] * mMatrix[3][1] - + mMatrix[0][0] * mMatrix[1][3] * mMatrix[2][2] * mMatrix[3][1] - + mMatrix[0][2] * mMatrix[1][0] * mMatrix[2][3] * mMatrix[3][1] + + mMatrix[0][0] * mMatrix[1][2] * mMatrix[2][3] * mMatrix[3][1] + + mMatrix[0][3] * mMatrix[1][1] * mMatrix[2][0] * mMatrix[3][2] - + mMatrix[0][1] * mMatrix[1][3] * mMatrix[2][0] * mMatrix[3][2] - + mMatrix[0][3] * mMatrix[1][0] * mMatrix[2][1] * mMatrix[3][2] + + mMatrix[0][0] * mMatrix[1][3] * mMatrix[2][1] * mMatrix[3][2] + + mMatrix[0][1] * mMatrix[1][0] * mMatrix[2][3] * mMatrix[3][2] - + mMatrix[0][0] * mMatrix[1][1] * mMatrix[2][3] * mMatrix[3][2] - + mMatrix[0][2] * mMatrix[1][1] * mMatrix[2][0] * mMatrix[3][3] + + mMatrix[0][1] * mMatrix[1][2] * mMatrix[2][0] * mMatrix[3][3] + + mMatrix[0][2] * mMatrix[1][0] * mMatrix[2][1] * mMatrix[3][3] - + mMatrix[0][0] * mMatrix[1][2] * mMatrix[2][1] * mMatrix[3][3] - + mMatrix[0][1] * mMatrix[1][0] * mMatrix[2][2] * mMatrix[3][3] + + mMatrix[0][0] * mMatrix[1][1] * mMatrix[2][2] * mMatrix[3][3]; + + return value; } // Only works for pure orthonormal, homogeneous transform matrices. -- cgit v1.2.3 From 095a5e84408b47ef3c5610e111aefe51d77633ca Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sat, 6 Feb 2010 17:33:12 -0600 Subject: Draw prims using triangle strips instead of triangle lists. 
--- indra/llmath/llvolume.cpp | 201 ++++++++++++++++++++++++++++++++++++---------- indra/llmath/llvolume.h | 4 +- 2 files changed, 160 insertions(+), 45 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index df4c618ac1..cd7d7a12e3 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4520,15 +4520,65 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) if (!partial_build) { - int idxs[] = {0,1,(grid_size+1)+1,(grid_size+1)+1,(grid_size+1),0}; - for(int gx = 0;gx=0;i--)mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); - }else{ - for(int i=0;i<6;i++)mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); + mTriStrip.clear(); + S32 idxs[] = {0,1,(grid_size+1)+1,(grid_size+1)+1,(grid_size+1),0}; + for(S32 gx = 0;gx=0;i--) + { + mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); + } + + if (gy == 0) + { + mTriStrip.push_back((gx+1)*(grid_size+1)); + mTriStrip.push_back((gx+1)*(grid_size+1)); + mTriStrip.push_back(gx*(grid_size+1)); + } + + mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); + mTriStrip.push_back(gy+1+gx*(grid_size+1)); + + + if (gy == grid_size-1) + { + mTriStrip.push_back(gy+1+gx*(grid_size+1)); + } + } + else + { + for(S32 i=0;i<6;i++) + { + mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); + } + + if (gy == 0) + { + mTriStrip.push_back(gx*(grid_size+1)); + mTriStrip.push_back(gx*(grid_size+1)); + mTriStrip.push_back((gx+1)*(grid_size+1)); + } + + mTriStrip.push_back(gy+1+gx*(grid_size+1)); + mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); + + if (gy == grid_size-1) + { + mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); + } } } + + } + + if (mTriStrip.size()%2 == 1) + { + mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); } } @@ -4770,6 +4820,8 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) pt2--; } } + + makeTriStrip(); } else { @@ -4874,67 +4926,108 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) pt2--; } } + + makeTriStrip(); } } else { // Not hollow, generate the triangle fan. + U16 v1 = 2; + U16 v2 = 1; + if (mTypeMask & TOP_MASK) { - if (mTypeMask & OPEN_MASK) - { - // SOLID OPEN TOP - // Generate indices - // This is a tri-fan, so we reuse the same first point for all triangles. - for (S32 i = 0; i < (num_vertices - 2); i++) - { - mIndices[3*i] = num_vertices - 1; - mIndices[3*i+1] = i; - mIndices[3*i+2] = i + 1; - } - } - else - { - // SOLID CLOSED TOP - for (S32 i = 0; i < (num_vertices - 2); i++) - { - //MSMSM fix these caps but only for the un-cut case - mIndices[3*i] = num_vertices - 1; - mIndices[3*i+1] = i; - mIndices[3*i+2] = i + 1; - } - } + v1 = 1; + v2 = 2; + } + + for (S32 i = 0; i < (num_vertices - 2); i++) + { + mIndices[3*i] = num_vertices - 1; + mIndices[3*i+v1] = i; + mIndices[3*i+v2] = i + 1; + } + + //make tri strip + if (mTypeMask & OPEN_MASK) + { + makeTriStrip(); } else { - if (mTypeMask & OPEN_MASK) + S32 j = num_vertices-2; + if (mTypeMask & TOP_MASK) { - // SOLID OPEN BOTTOM - // Generate indices - // This is a tri-fan, so we reuse the same first point for all triangles. 
- for (S32 i = 0; i < (num_vertices - 2); i++) + mTriStrip.push_back(0); + for (S32 i = 1; i <= j; ++i) { - mIndices[3*i] = num_vertices - 1; - mIndices[3*i+1] = i + 1; - mIndices[3*i+2] = i; + mTriStrip.push_back(i); + if (i != j) + { + mTriStrip.push_back(j); + } + --j; } } else { - // SOLID CLOSED BOTTOM - for (S32 i = 0; i < (num_vertices - 2); i++) + mTriStrip.push_back(j); + for (S32 i = 1; i <= j; ++i) { - //MSMSM fix these caps but only for the un-cut case - mIndices[3*i] = num_vertices - 1; - mIndices[3*i+1] = i + 1; - mIndices[3*i+2] = i; + if (i != j) + { + mTriStrip.push_back(j); + } + mTriStrip.push_back(i); + --j; } } + + mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); + + if (mTriStrip.size()%2 == 1) + { + mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); + } } } + return TRUE; } +void LLVolumeFace::makeTriStrip() +{ + for (U32 i = 0; i < mIndices.size(); i+=3) + { + U16 i0 = mIndices[i]; + U16 i1 = mIndices[i+1]; + U16 i2 = mIndices[i+2]; + + if ((i/3)%2 == 1) + { + mTriStrip.push_back(i0); + mTriStrip.push_back(i0); + mTriStrip.push_back(i1); + mTriStrip.push_back(i2); + mTriStrip.push_back(i2); + } + else + { + mTriStrip.push_back(i2); + mTriStrip.push_back(i2); + mTriStrip.push_back(i1); + mTriStrip.push_back(i0); + mTriStrip.push_back(i0); + } + } + + if (mTriStrip.size()%2 == 1) + { + mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); + } +} + void LLVolumeFace::createBinormals() { LLMemType m1(LLMemType::MTYPE_VOLUME); @@ -5135,9 +5228,14 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { + mTriStrip.clear(); + // Now we generate the indices. for (t = 0; t < (mNumT-1); t++) { + //prepend terminating index to strip + mTriStrip.push_back(mNumS*t); + for (s = 0; s < (mNumS-1); s++) { mIndices[cur_index++] = s + mNumS*t; //bottom left @@ -5147,6 +5245,14 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mIndices[cur_index++] = s+1 + mNumS*t; //bottom right mIndices[cur_index++] = s+1 + mNumS*(t+1); //top right + if (s == 0) + { + mTriStrip.push_back(s+mNumS*t); + mTriStrip.push_back(s+mNumS*(t+1)); + } + mTriStrip.push_back(s+1+mNumS*t); + mTriStrip.push_back(s+1+mNumS*(t+1)); + mEdge[cur_edge++] = (mNumS-1)*2*t+s*2+1; //bottom left/top right neighbor face if (t < mNumT-2) { //top right/top left neighbor face mEdge[cur_edge++] = (mNumS-1)*2*(t+1)+s*2+1; @@ -5187,6 +5293,13 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mEdge[cur_edge++] = (mNumS-1)*2*t+s*2; //top right/bottom left neighbor face } + //append terminating vertex to strip + mTriStrip.push_back(mNumS-1+mNumS*(t+1)); + } + + if (mTriStrip.size()%2 == 1) + { + mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); } } diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 871b334452..d9f80f0e30 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -798,7 +798,8 @@ public: BOOL create(LLVolume* volume, BOOL partial_build = FALSE); void createBinormals(); - + void makeTriStrip(); + class VertexData { public: @@ -839,6 +840,7 @@ public: std::vector mVertices; std::vector mIndices; + std::vector mTriStrip; std::vector mEdge; private: -- cgit v1.2.3 From 42df75bafeab49b408f23d79feb4f2213d2560eb Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 8 Feb 2010 10:14:11 -0600 Subject: Enable FBO multisampling for OSX. Fix bad triangle in prim caps. 
--- indra/llmath/llvolume.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index cd7d7a12e3..ae5c9bc8cf 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4960,7 +4960,7 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) if (mTypeMask & TOP_MASK) { mTriStrip.push_back(0); - for (S32 i = 1; i <= j; ++i) + for (S32 i = 0; i <= j; ++i) { mTriStrip.push_back(i); if (i != j) @@ -4973,7 +4973,7 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) else { mTriStrip.push_back(j); - for (S32 i = 1; i <= j; ++i) + for (S32 i = 0; i <= j; ++i) { if (i != j) { -- cgit v1.2.3 From 2cb5b0b66ec9633d4c6563acf5ff9d0f7bc7cbf7 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 9 Feb 2010 12:26:09 -0600 Subject: consolidate button work in progress --- indra/llmath/llvolume.cpp | 19 +++++++++++++++++++ indra/llmath/llvolume.h | 2 ++ 2 files changed, 21 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index de32070da1..596c5fe231 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5468,6 +5468,25 @@ void LLVolumeFace::createBinormals() } } +void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& transform, LLMatrix4& norm_transform) +{ + for (U32 i = 0; i < face.mVertices.size(); ++i) + { + VertexData v = face.mVertices[i]; + v.mPosition *= mat; + v.mNormal *= norm_transform; + + + mVertices.push_back(v); + } + + U16 offset = mIndices.size(); + for (U32 i = 0; i < face.mIndices.size(); ++i) + { + mIndices.push_back(face.mIndices[i]+offset); + } +} + BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { LLMemType m1(LLMemType::MTYPE_VOLUME); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index e3ab648fe3..1bf6fac305 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -805,6 +805,8 @@ public: BOOL create(LLVolume* volume, BOOL partial_build = FALSE); void createBinormals(); + void appendFace(const LLVolumeFace& face); + class VertexData { public: -- cgit v1.2.3 From ffcbbf4aaabc652c2050ca6147a9388217cfcaa7 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 11 Feb 2010 18:00:00 -0600 Subject: Multi-threaded asset uploading with proper ordering first draft. 
--- indra/llmath/llvolume.cpp | 20 ++++++++++++++++---- indra/llmath/llvolume.h | 2 +- 2 files changed, 17 insertions(+), 5 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index d1716e1407..7e1517fba7 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5614,19 +5614,31 @@ void LLVolumeFace::createBinormals() } } -void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& transform, LLMatrix4& norm_transform) +void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat, LLMatrix4& norm_mat) { + U16 offset = mVertices.size(); + + for (U32 i = 0; i < face.mVertices.size(); ++i) { VertexData v = face.mVertices[i]; - v.mPosition *= mat; - v.mNormal *= norm_transform; + v.mPosition = v.mPosition*mat; + v.mNormal = v.mNormal * norm_mat; mVertices.push_back(v); + + if (offset == 0 && i == 0) + { + mExtents[0] = mExtents[1] = v.mPosition; + } + else + { + update_min_max(mExtents[0], mExtents[1], v.mPosition); + } } - U16 offset = mIndices.size(); + for (U32 i = 0; i < face.mIndices.size(); ++i) { mIndices.push_back(face.mIndices[i]+offset); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 1059c29566..36811785dc 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -805,7 +805,7 @@ public: void createBinormals(); void makeTriStrip(); - void appendFace(const LLVolumeFace& face); + void appendFace(const LLVolumeFace& face, LLMatrix4& transform, LLMatrix4& normal_tranform); class VertexData { -- cgit v1.2.3 From ee8036712847315141c78d37646d629796442d09 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 17 Feb 2010 18:08:00 -0600 Subject: 16-bit limit awareness when consolidating models. --- indra/llmath/llvolume.cpp | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 7c98536e72..33a00b80ca 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5619,6 +5619,10 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat, LLMatrix { U16 offset = mVertices.size(); + if (face.mVertices.size() + mVertices.size() > 65536) + { + llerrs << "Cannot append face -- 16-bit overflow will occur." << llendl; + } for (U32 i = 0; i < face.mVertices.size(); ++i) { -- cgit v1.2.3 From 237f1a1ba362aa2dfd9bb386e431713bdfcad2dc Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 19 Feb 2010 12:41:08 -0600 Subject: Consolidate now preserves materials. 
--- indra/llmath/v4color.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/v4color.h b/indra/llmath/v4color.h index d6fbdec61e..6b63b976b0 100644 --- a/indra/llmath/v4color.h +++ b/indra/llmath/v4color.h @@ -114,6 +114,7 @@ class LLColor4 const LLColor4& operator=(const LLColor3 &a); // Assigns vec3 to vec4 and returns vec4 + bool operator<(const LLColor4& rhs) const; friend std::ostream& operator<<(std::ostream& s, const LLColor4 &a); // Print a friend LLColor4 operator+(const LLColor4 &a, const LLColor4 &b); // Return vector a + b friend LLColor4 operator-(const LLColor4 &a, const LLColor4 &b); // Return vector a minus b @@ -595,6 +596,23 @@ inline LLColor4 lerp(const LLColor4 &a, const LLColor4 &b, F32 u) a.mV[VW] + (b.mV[VW] - a.mV[VW]) * u); } +inline bool LLColor4::operator<(const LLColor4& rhs) const +{ + if (mV[0] != rhs.mV[0]) + { + return mV[0] < rhs.mV[0]; + } + if (mV[1] != rhs.mV[1]) + { + return mV[1] < rhs.mV[1]; + } + if (mV[2] != rhs.mV[2]) + { + return mV[2] < rhs.mV[2]; + } + + return mV[3] < rhs.mV[3]; +} void LLColor4::clamp() { -- cgit v1.2.3 From 066f9de07ecfcf142103f646695e5be63a22a667 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 23 Feb 2010 16:57:06 -0600 Subject: Fix for normals getting squished on consolidation. Replaced some magic numbers with constants. Switched up throttling of mesh upload HTTP posts to prevent overloading one capability at a time. Added some feedback on upload progress via debug text. Made debug text move with side panel (keep debug text from rendering on top of side panel). --- indra/llmath/llvolume.cpp | 1 + 1 file changed, 1 insertion(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 33a00b80ca..704308f20f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5630,6 +5630,7 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat, LLMatrix v.mPosition = v.mPosition*mat; v.mNormal = v.mNormal * norm_mat; + v.mNormal.normalize(); mVertices.push_back(v); -- cgit v1.2.3 From 3c78771acee787e087bd2e2391397794d4d98f6d Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 24 Feb 2010 22:02:01 -0600 Subject: Removed scale from model importer. Removed support for scale entry in mesh assets. Fixed MeshMaxConcurrentRequests being ignored. Added mesh download queue debug text. --- indra/llmath/llvolume.cpp | 4 ---- 1 file changed, 4 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 704308f20f..904786079f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2098,8 +2098,6 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) min_tc.setValue(mdl[i]["TexCoord0Domain"]["Min"]); max_tc.setValue(mdl[i]["TexCoord0Domain"]["Max"]); - F32 scale = llclamp((F32) mdl[i]["Scale"].asReal(), 1.f, 10.f); - LLVector3 pos_range = max_pos - min_pos; LLVector2 tc_range = max_tc - min_tc; @@ -2117,8 +2115,6 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) (F32) v[1] / 65535.f * pos_range.mV[1] + min_pos.mV[1], (F32) v[2] / 65535.f * pos_range.mV[2] + min_pos.mV[2]); - face.mVertices[j].mPosition *= scale; - if (j == 0) { min = max = face.mVertices[j].mPosition; -- cgit v1.2.3 From b414b5067e3e47da7d9baf490d94534b4c65a8eb Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sun, 28 Feb 2010 16:40:30 -0600 Subject: Remove some dead code. 
Add LH transform to LLVector3 Add DebugShowUploadCost Make LOD generation on model preview less finnicky. Remove error level based LOD generation. Better framing of model before upload. Better error handling for model uploader. Remove [COST] argument from model upload menu item. Remove L$ check from model upload menu item being enabled. --- indra/llmath/m4math.cpp | 31 ------------------------------- indra/llmath/m4math.h | 5 +---- indra/llmath/v3math.cpp | 22 ++++++++++++++++++++++ indra/llmath/v3math.h | 3 +++ 4 files changed, 26 insertions(+), 35 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp index 5c112b52b2..ce5428f0e1 100644 --- a/indra/llmath/m4math.cpp +++ b/indra/llmath/m4math.cpp @@ -684,37 +684,6 @@ const LLMatrix4& LLMatrix4::initMatrix(const LLMatrix3 &mat, const LLVector4 & // LLMatrix4 Operators - -/* Not implemented to help enforce code consistency with the syntax of - row-major notation. This is a Good Thing. -LLVector4 operator*(const LLMatrix4 &a, const LLVector4 &b) -{ - // Operate "to the right" on column-vector b - LLVector4 vec; - vec.mV[VX] = a.mMatrix[VX][VX] * b.mV[VX] + - a.mMatrix[VY][VX] * b.mV[VY] + - a.mMatrix[VZ][VX] * b.mV[VZ] + - a.mMatrix[VW][VX] * b.mV[VW]; - - vec.mV[VY] = a.mMatrix[VX][VY] * b.mV[VX] + - a.mMatrix[VY][VY] * b.mV[VY] + - a.mMatrix[VZ][VY] * b.mV[VZ] + - a.mMatrix[VW][VY] * b.mV[VW]; - - vec.mV[VZ] = a.mMatrix[VX][VZ] * b.mV[VX] + - a.mMatrix[VY][VZ] * b.mV[VY] + - a.mMatrix[VZ][VZ] * b.mV[VZ] + - a.mMatrix[VW][VZ] * b.mV[VW]; - - vec.mV[VW] = a.mMatrix[VX][VW] * b.mV[VX] + - a.mMatrix[VY][VW] * b.mV[VY] + - a.mMatrix[VZ][VW] * b.mV[VZ] + - a.mMatrix[VW][VW] * b.mV[VW]; - return vec; -} -*/ - - LLVector4 operator*(const LLVector4 &a, const LLMatrix4 &b) { // Operate "to the left" on row-vector a diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h index 6007b96bd9..40599a0886 100644 --- a/indra/llmath/m4math.h +++ b/indra/llmath/m4math.h @@ -226,10 +226,7 @@ public: // Operators // -// Not implemented to enforce code that agrees with symbolic syntax -// friend LLVector4 operator*(const LLMatrix4 &a, const LLVector4 &b); // Apply rotation a to vector b - -// friend inline LLMatrix4 operator*(const LLMatrix4 &a, const LLMatrix4 &b); // Return a * b + // friend inline LLMatrix4 operator*(const LLMatrix4 &a, const LLMatrix4 &b); // Return a * b friend LLVector4 operator*(const LLVector4 &a, const LLMatrix4 &b); // Return transform of vector a by matrix b friend const LLVector3 operator*(const LLVector3 &a, const LLMatrix4 &b); // Return full transform of a by matrix b friend LLVector4 rotate_vector(const LLVector4 &a, const LLMatrix4 &b); // Rotates a but does not translate diff --git a/indra/llmath/v3math.cpp b/indra/llmath/v3math.cpp index 63683ed496..82aad6550b 100644 --- a/indra/llmath/v3math.cpp +++ b/indra/llmath/v3math.cpp @@ -197,6 +197,28 @@ const LLVector3& LLVector3::rotVec(const LLQuaternion &q) return *this; } +const LLVector3& LLVector3::transVec(const LLMatrix4& mat) +{ + setVec( + mV[VX] * mat.mMatrix[VX][VX] + + mV[VY] * mat.mMatrix[VX][VY] + + mV[VZ] * mat.mMatrix[VX][VZ] + + mat.mMatrix[VX][VW], + + mV[VX] * mat.mMatrix[VY][VX] + + mV[VY] * mat.mMatrix[VY][VY] + + mV[VZ] * mat.mMatrix[VY][VZ] + + mat.mMatrix[VY][VW], + + mV[VX] * mat.mMatrix[VZ][VX] + + mV[VY] * mat.mMatrix[VZ][VY] + + mV[VZ] * mat.mMatrix[VZ][VZ] + + mat.mMatrix[VZ][VW]); + + return *this; +} + + const LLVector3& LLVector3::rotVec(F32 angle, const LLVector3 &vec) { if ( !vec.isExactlyZero() && 
angle ) diff --git a/indra/llmath/v3math.h b/indra/llmath/v3math.h index 73738cffd2..76dd938887 100644 --- a/indra/llmath/v3math.h +++ b/indra/llmath/v3math.h @@ -36,10 +36,12 @@ #include "llerror.h" #include "llmath.h" + #include "llsd.h" class LLVector2; class LLVector4; class LLMatrix3; +class LLMatrix4; class LLVector3d; class LLQuaternion; @@ -115,6 +117,7 @@ class LLVector3 const LLVector3& rotVec(F32 angle, F32 x, F32 y, F32 z); // Rotates about x,y,z by angle radians const LLVector3& rotVec(const LLMatrix3 &mat); // Rotates by LLMatrix4 mat const LLVector3& rotVec(const LLQuaternion &q); // Rotates by LLQuaternion q + const LLVector3& transVec(const LLMatrix4& mat); // Transforms by LLMatrix4 mat (mat * v) const LLVector3& scaleVec(const LLVector3& vec); // scales per component by vec LLVector3 scaledVec(const LLVector3& vec) const; // get a copy of this vector scaled by vec -- cgit v1.2.3 From d60f5e937f2ed264f3e01eec7e32b9260e3d772f Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 9 Mar 2010 14:28:06 -0600 Subject: Tool tips for model preview. Rename "Impostor" to "Lowest" --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 904786079f..d85c56046f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1918,7 +1918,7 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) std::string nm[] = { - "impostor", + "lowest_lod", "low_lod", "medium_lod", "high_lod" -- cgit v1.2.3 From 4c0e2f79219913b57424bfe136b75a6a58fb8639 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 11 Mar 2010 12:02:37 -0600 Subject: "Generate Normals" is less busted now. --- indra/llmath/llvolume.cpp | 67 ++++++++++++++++++++++++++++++++++++++++++++--- indra/llmath/llvolume.h | 33 +++++++++++++++++++++++ 2 files changed, 97 insertions(+), 3 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index d85c56046f..9ea3912a88 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1882,9 +1882,15 @@ bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs bool retval = false; if (rhs.mPosition == mPosition && rhs.mTexCoord == mTexCoord) { - F32 cur_angle = rhs.mNormal*mNormal; - - retval = cur_angle > angle_cutoff; + if (angle_cutoff > 1.f) + { + retval = (mNormal == rhs.mNormal); + } + else + { + F32 cur_angle = rhs.mNormal*mNormal; + retval = cur_angle > angle_cutoff; + } } return retval; @@ -4953,6 +4959,61 @@ BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) } } +void LLVolumeFace::optimize(F32 angle_cutoff) +{ + LLVolumeFace new_face; + + VertexMapData::PointMap point_map; + + //remove redundant vertices + for (U32 i = 0; i < mIndices.size(); ++i) + { + U16 index = mIndices[i]; + + LLVolumeFace::VertexData cv = mVertices[index]; + + BOOL found = FALSE; + VertexMapData::PointMap::iterator point_iter = point_map.find(cv.mPosition); + if (point_iter != point_map.end()) + { //duplicate point might exist + for (U32 j = 0; j < point_iter->second.size(); ++j) + { + LLVolumeFace::VertexData& tv = (point_iter->second)[j]; + if (tv.compareNormal(cv, angle_cutoff)) + { + found = TRUE; + new_face.mIndices.push_back((point_iter->second)[j].mIndex); + break; + } + } + } + + if (!found) + { + new_face.mVertices.push_back(cv); + U16 index = (U16) new_face.mVertices.size()-1; + new_face.mIndices.push_back(index); + + VertexMapData d; + d.mPosition = 
cv.mPosition; + d.mTexCoord = cv.mTexCoord; + d.mNormal = cv.mNormal; + d.mIndex = index; + if (point_iter != point_map.end()) + { + point_iter->second.push_back(d); + } + else + { + point_map[d.mPosition].push_back(d); + } + } + } + + mVertices = new_face.mVertices; + mIndices = new_face.mIndices; +} + void LerpPlanarVertex(LLVolumeFace::VertexData& v0, LLVolumeFace::VertexData& v1, LLVolumeFace::VertexData& v2, diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 36811785dc..f1c1fdceba 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -820,6 +820,39 @@ public: bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; }; + class VertexMapData : public LLVolumeFace::VertexData + { + public: + U16 mIndex; + + bool operator==(const LLVolumeFace::VertexData& rhs) const + { + return mPosition == rhs.mPosition && + mTexCoord == rhs.mTexCoord && + mNormal == rhs.mNormal; + } + + struct ComparePosition + { + bool operator()(const LLVector3& a, const LLVector3& b) const + { + if (a.mV[0] != b.mV[0]) + { + return a.mV[0] < b.mV[0]; + } + if (a.mV[1] != b.mV[1]) + { + return a.mV[1] < b.mV[1]; + } + return a.mV[2] < b.mV[2]; + } + }; + + typedef std::map, VertexMapData::ComparePosition > PointMap; + }; + + void optimize(F32 angle_cutoff = 2.f); + enum { SINGLE_MASK = 0x0001, -- cgit v1.2.3 From 71d11af31083ced30da7b67a2a63e624c93b44a3 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sat, 13 Mar 2010 17:39:32 -0600 Subject: Mesh cache. Has a bug. --- indra/llmath/llvolume.cpp | 28 ++++++++++++++-------------- indra/llmath/llvolume.h | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 9ea3912a88..52a3fb2195 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1962,7 +1962,7 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) return unpackVolumeFaces(is, header[nm[lod]]["size"].asInteger()); } -BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) +bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) { U8* result = NULL; U32 cur_size = 0; @@ -2002,7 +2002,7 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) inflateEnd(&strm); free(result); delete [] in; - return FALSE; + return false; } switch (ret) @@ -2014,7 +2014,7 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) inflateEnd(&strm); free(result); delete [] in; - return FALSE; + return false; break; } @@ -2032,7 +2032,7 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) if (ret != Z_STREAM_END) { free(result); - return FALSE; + return false; } } @@ -2047,7 +2047,7 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) if (!LLSDSerialize::deserialize(mdl, istr, cur_size)) { llwarns << "not a valid mesh asset!" << llendl; - return FALSE; + return false; } } @@ -2074,7 +2074,7 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) LLVolumeFace& face = mVolumeFaces[i]; - face.mHasBinormals = FALSE; + face.mHasBinormals = false; //copy out indices face.mIndices.resize(idx.size()/2); @@ -2146,24 +2146,24 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) // modifier flags? 
- BOOL do_mirror = (mParams.getSculptType() & LL_SCULPT_FLAG_MIRROR); - BOOL do_invert = (mParams.getSculptType() &LL_SCULPT_FLAG_INVERT); + bool do_mirror = (mParams.getSculptType() & LL_SCULPT_FLAG_MIRROR); + bool do_invert = (mParams.getSculptType() &LL_SCULPT_FLAG_INVERT); // translate to actions: - BOOL do_reflect_x = FALSE; - BOOL do_reverse_triangles = FALSE; - BOOL do_invert_normals = FALSE; + bool do_reflect_x = false; + bool do_reverse_triangles = false; + bool do_invert_normals = false; if (do_mirror) { - do_reflect_x = TRUE; + do_reflect_x = true; do_reverse_triangles = !do_reverse_triangles; } if (do_invert) { - do_invert_normals = TRUE; + do_invert_normals = true; do_reverse_triangles = !do_reverse_triangles; } @@ -2201,7 +2201,7 @@ BOOL LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } mSculptLevel = 0; // success! - return TRUE; + return true; } void tetrahedron_set_normal(LLVolumeFace::VertexData* cv) diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index f1c1fdceba..60c1569e55 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -1009,7 +1009,7 @@ protected: public: virtual BOOL createVolumeFacesFromFile(const std::string& file_name); virtual BOOL createVolumeFacesFromStream(std::istream& is); - virtual BOOL unpackVolumeFaces(std::istream& is, S32 size); + virtual bool unpackVolumeFaces(std::istream& is, S32 size); virtual void makeTetrahedron(); virtual BOOL isTetrahedron(); -- cgit v1.2.3 From 807d835c2bfc5d794a74f9690d1fafbe55ff88cc Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 2 Apr 2010 14:43:05 -0500 Subject: First draft of skin weights in .mesh asset --- indra/llmath/llvolume.cpp | 135 ++++++++++++++++------------------------------ indra/llmath/llvolume.h | 6 +++ 2 files changed, 51 insertions(+), 90 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 52a3fb2195..c563af592f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -47,8 +47,6 @@ #include "llvolume.h" #include "llstl.h" #include "llsdserialize.h" -#include "zlib/zlib.h" - #define DEBUG_SILHOUETTE_BINORMALS 0 #define DEBUG_SILHOUETTE_NORMALS 0 // TomY: Use this to display normals using the silhouette @@ -1964,97 +1962,15 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) { - U8* result = NULL; - U32 cur_size = 0; - - { - //input stream is now pointing at a zlib compressed block of LLSD - //decompress block - z_stream strm; - - const U32 CHUNK = 65536; - - U8 *in = new U8[size]; - is.read((char*) in, size); - - U8 out[CHUNK]; - - strm.zalloc = Z_NULL; - strm.zfree = Z_NULL; - strm.opaque = Z_NULL; - strm.avail_in = size; - strm.next_in = in; - - S32 ret = inflateInit(&strm); - - if (ret != Z_OK) - { - llerrs << "WTF?" 
<< llendl; - } - - do - { - strm.avail_out = CHUNK; - strm.next_out = out; - ret = inflate(&strm, Z_NO_FLUSH); - if (ret == Z_STREAM_ERROR) - { - inflateEnd(&strm); - free(result); - delete [] in; - return false; - } - - switch (ret) - { - case Z_NEED_DICT: - ret = Z_DATA_ERROR; - case Z_DATA_ERROR: - case Z_MEM_ERROR: - inflateEnd(&strm); - free(result); - delete [] in; - return false; - break; - } - - U32 have = CHUNK-strm.avail_out; - - result = (U8*) realloc(result, cur_size + have); - memcpy(result+cur_size, out, have); - cur_size += have; - - } while (strm.avail_out == 0); - - inflateEnd(&strm); - delete [] in; - - if (ret != Z_STREAM_END) - { - free(result); - return false; - } - } - - //result now points to the decompressed LLSD block - + //input stream is now pointing at a zlib compressed block of LLSD + //decompress block LLSD mdl; - + if (!unzip_llsd(mdl, is, size)) { - std::string res_str((char*) result, cur_size); - std::istringstream istr(res_str); - - if (!LLSDSerialize::deserialize(mdl, istr, cur_size)) - { - llwarns << "not a valid mesh asset!" << llendl; - return false; - } + llwarns << "not a valid mesh asset!" << llendl; + return false; } - - - free(result); - - + { U32 face_count = mdl.size(); @@ -2094,11 +2010,50 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) U32 num_verts = pos.size()/(3*2); face.mVertices.resize(num_verts); + if (mdl[i].has("Weights")) + { + face.mWeights.resize(num_verts); + LLSD::Binary weights = mdl[i]["Weights"]; + + LLSD::Binary::iterator iter = weights.begin(); + + U32 cur_vertex = 0; + while (iter != weights.end()) + { + const S32 END_INFLUENCES = 0xFF; + U8 joint = *(iter++); + + U32 cur_influence = 0; + while (joint != END_INFLUENCES) + { + U16 influence = *(iter++); + influence = influence << 8; + influence |= *(iter++); + + F32 w = llmin((F32) influence / 65535.f, 0.99999f); + face.mWeights[cur_vertex].mV[cur_influence++] = (F32) joint + w; + + if (cur_influence >= 4) + { + joint = END_INFLUENCES; + } + else + { + joint = *(iter++); + } + } + + cur_vertex++; + iter++; + } + } + LLVector3 min_pos; LLVector3 max_pos; LLVector2 min_tc; LLVector2 max_tc; + min_pos.setValue(mdl[i]["PositionDomain"]["Min"]); max_pos.setValue(mdl[i]["PositionDomain"]["Max"]); min_tc.setValue(mdl[i]["TexCoord0Domain"]["Min"]); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 60c1569e55..c6a156ae37 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -49,6 +49,7 @@ class LLVolume; //#include "vmath.h" #include "v2math.h" #include "v3math.h" +#include "v4math.h" #include "llquaternion.h" #include "llstrider.h" #include "v4coloru.h" @@ -887,6 +888,11 @@ public: std::vector mTriStrip; std::vector mEdge; + //list of skin weights for rigged volumes + // format is mWeights[vertex_index].mV[influence] = . + // mWeights.size() should be empty or match mVertices.size() + std::vector mWeights; + private: BOOL createUnCutCubeCap(LLVolume* volume, BOOL partial_build = FALSE); BOOL createCap(LLVolume* volume, BOOL partial_build = FALSE); -- cgit v1.2.3 From 47ffcdb93d6e2ac1f9d497e43e0213c98d129254 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 6 Apr 2010 16:24:08 -0500 Subject: Rigged attachments (almost works). 
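The weight stream unpacked above stores, per vertex, a run of (joint, influence) pairs: one byte of joint index, two bytes of influence, at most four pairs, closed by a 0xFF joint byte unless all four slots are used. Joint and weight end up folded into a single float per influence, with the joint index in the integer part and the weight in the fraction. A self-contained sketch of that per-vertex decode (the function name and typedefs are illustrative; the influence bytes are read low byte first, an assumption here that matches the byte-ordering fix a few commits further down):

    #include <vector>
    #include <algorithm>
    #include <cstddef>

    typedef unsigned char  U8;   // stand-ins for the llcommon typedefs
    typedef unsigned short U16;

    // Decode one vertex worth of skin weights from the "Weights" blob.
    // Returns the next read position; out_count receives the number of
    // influences (at most 4) written to out[].
    static std::size_t decode_vertex_weights(const std::vector<U8>& blob,
                                             std::size_t idx,
                                             float out[4],
                                             int& out_count)
    {
        const U8 END_INFLUENCES = 0xFF;
        out_count = 0;

        U8 joint = blob[idx++];
        while (joint != END_INFLUENCES && out_count < 4)
        {
            // Influence is a 16-bit fixed-point weight, low byte first.
            U16 influence = blob[idx++];
            influence |= (U16)(blob[idx++]) << 8;

            // Keep the fraction strictly below 1 so the joint index
            // survives a later floor().
            float w = std::min(influence / 65535.0f, 0.99999f);
            out[out_count++] = (float)joint + w;

            // A vertex with four influences carries no terminator byte.
            if (out_count < 4)
            {
                joint = blob[idx++];
            }
        }
        return idx;
    }

The unpack loop in llvolume.cpp walks the whole blob this way, advancing one vertex at a time and warning when the vertex count and the blob length do not line up.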
--- indra/llmath/llvolume.cpp | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index c563af592f..fdd48b9e9e 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2013,22 +2013,23 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) if (mdl[i].has("Weights")) { face.mWeights.resize(num_verts); + LLSD::Binary weights = mdl[i]["Weights"]; - LLSD::Binary::iterator iter = weights.begin(); + U32 idx = 0; U32 cur_vertex = 0; - while (iter != weights.end()) + while (idx < weights.size() && cur_vertex < num_verts) { - const S32 END_INFLUENCES = 0xFF; - U8 joint = *(iter++); + const U8 END_INFLUENCES = 0xFF; + U8 joint = weights[idx++]; U32 cur_influence = 0; while (joint != END_INFLUENCES) { - U16 influence = *(iter++); + U16 influence = weights[idx++]; influence = influence << 8; - influence |= *(iter++); + influence |= weights[idx++]; F32 w = llmin((F32) influence / 65535.f, 0.99999f); face.mWeights[cur_vertex].mV[cur_influence++] = (F32) joint + w; @@ -2039,13 +2040,18 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } else { - joint = *(iter++); + joint = weights[idx++]; } } cur_vertex++; - iter++; } + + if (cur_vertex != num_verts || idx != weights.size()) + { + llwarns << "Vertex weight count does not match vertex count!" << llendl; + } + } LLVector3 min_pos; -- cgit v1.2.3 From bd216c96498585ca01e1634877d4b184f20fa45f Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 22 Apr 2010 16:52:05 -0500 Subject: Fix for shadows from skyboxes hitting the ground. --- indra/llmath/llcamera.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llcamera.h b/indra/llmath/llcamera.h index 0c81067919..d6c5f7bbb1 100644 --- a/indra/llmath/llcamera.h +++ b/indra/llmath/llcamera.h @@ -143,7 +143,7 @@ private: public: LLVector3 mAgentFrustum[8]; //8 corners of 6-plane frustum F32 mFrustumCornerDist; //distance to corner of frustum against far clip plane - LLPlane getAgentPlane(U32 idx) { return mAgentPlanes[idx].p; } + LLPlane& getAgentPlane(U32 idx) { return mAgentPlanes[idx].p; } public: LLCamera(); -- cgit v1.2.3 From 07c0389f50ccef13ad2699e149dc4b87de3dbd70 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 27 Apr 2010 01:50:06 -0500 Subject: Proper byte ordering when decoding skin weights. --- indra/llmath/llvolume.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index fdd48b9e9e..9d2d157c76 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2028,8 +2028,7 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) while (joint != END_INFLUENCES) { U16 influence = weights[idx++]; - influence = influence << 8; - influence |= weights[idx++]; + influence |= ((U16) weights[idx++] << 8); F32 w = llmin((F32) influence / 65535.f, 0.99999f); face.mWeights[cur_vertex].mV[cur_influence++] = (F32) joint + w; -- cgit v1.2.3 From d71716aa6dde434b6356cfe85e3a8fce376056dd Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 28 Apr 2010 02:53:12 -0500 Subject: Make LLVolume::createSide a little faster. 
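Besides caching vertex pointers in the inner loops, the change below moves the bounding-box update out of the vertex-generation passes into one sweep over the finished vertex array, and tightens the normal pass: each triangle adds its raw (area-weighted) cross-product normal to its three corners, with one alternating corner getting an extra contribution so the two triangles of a quad weigh in evenly. A standalone sketch of that accumulation pattern, with illustrative stand-in types instead of LLVector3 (the final normalize is included only to keep the sketch self-contained):

    #include <vector>
    #include <cmath>
    #include <cstddef>

    // Minimal stand-in for LLVector3.
    struct Vec3 { float v[3]; };

    static Vec3 sub(const Vec3& a, const Vec3& b)
    {
        Vec3 r = { { a.v[0] - b.v[0], a.v[1] - b.v[1], a.v[2] - b.v[2] } };
        return r;
    }

    static Vec3 cross(const Vec3& a, const Vec3& b)
    {
        Vec3 r = { { a.v[1] * b.v[2] - a.v[2] * b.v[1],
                     a.v[2] * b.v[0] - a.v[0] * b.v[2],
                     a.v[0] * b.v[1] - a.v[1] * b.v[0] } };
        return r;
    }

    static void add_to(Vec3& a, const Vec3& b)
    {
        a.v[0] += b.v[0]; a.v[1] += b.v[1]; a.v[2] += b.v[2];
    }

    // Accumulate unnormalized triangle normals into per-vertex normals.
    // Every other triangle donates an extra contribution to one corner
    // so the two halves of a quad count equally.
    void accumulate_vertex_normals(const std::vector<Vec3>& pos,
                                   const std::vector<unsigned short>& idx,
                                   std::vector<Vec3>& out_normals)
    {
        Vec3 zero = { { 0.f, 0.f, 0.f } };
        out_normals.assign(pos.size(), zero);

        for (std::size_t tri = 0; tri * 3 + 2 < idx.size(); ++tri)
        {
            unsigned short i0 = idx[tri * 3 + 0];
            unsigned short i1 = idx[tri * 3 + 1];
            unsigned short i2 = idx[tri * 3 + 2];

            // Edge cross product: length scales with triangle area, so
            // larger triangles weigh more in the average.
            Vec3 n = cross(sub(pos[i0], pos[i1]), sub(pos[i0], pos[i2]));

            add_to(out_normals[i0], n);
            add_to(out_normals[i1], n);
            add_to(out_normals[i2], n);

            // Quad balancing: alternate which corner gets the extra vote.
            add_to(out_normals[(tri & 1) ? i2 : i1], n);
        }

        for (std::size_t i = 0; i < out_normals.size(); ++i)
        {
            Vec3& n = out_normals[i];
            float len = std::sqrt(n.v[0] * n.v[0] + n.v[1] * n.v[1] + n.v[2] * n.v[2]);
            if (len > 0.f)
            {
                n.v[0] /= len; n.v[1] /= len; n.v[2] /= len;
            }
        }
    }

The sketch works on plain arrays; the real loop below does the same accumulation directly on mVertices through a small array of cached VertexData pointers, and defers normalization until after the wrap and stitch adjustments.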
--- indra/llmath/llvolume.cpp | 82 ++++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 37 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 9d2d157c76..5ffc61ce9c 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -87,6 +87,8 @@ const F32 SKEW_MAX = 0.95f; const F32 SCULPT_MIN_AREA = 0.002f; const S32 SCULPT_MIN_AREA_DETAIL = 1; +#define GEN_TRI_STRIP 0 + BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { LLVector3 test = (pt2-pt1)%(pt3-pt2); @@ -5079,7 +5081,9 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) if (!partial_build) { +#if GEN_TRI_STRIP mTriStrip.clear(); +#endif S32 idxs[] = {0,1,(grid_size+1)+1,(grid_size+1)+1,(grid_size+1),0}; for(S32 gx = 0;gx 2) ? mNumS/2 : mNumS; @@ -5771,15 +5779,6 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mVertices[cur_vertex].mNormal = LLVector3(0,0,0); mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - - if (cur_vertex == 0) - { - face_min = face_max = mesh[i].mPos; - } - else - { - update_min_max(face_min, face_max, mesh[i].mPos); - } cur_vertex++; @@ -5813,12 +5812,22 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mVertices[cur_vertex].mNormal = LLVector3(0,0,0); mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - update_min_max(face_min,face_max,mesh[i].mPos); - cur_vertex++; } } + + //get bounding box for this side + LLVector3& face_min = mExtents[0]; + LLVector3& face_max = mExtents[1]; + mCenter.clearVec(); + + face_min = face_max = mVertices[0].mPosition; + for (U32 i = 1; i < mVertices.size(); ++i) + { + update_min_max(face_min, face_max, mVertices[i].mPosition); + } + mCenter = (face_min + face_max) * 0.5f; S32 cur_index = 0; @@ -5827,13 +5836,17 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { +#if GEN_TRI_STRIP mTriStrip.clear(); +#endif // Now we generate the indices. 
for (t = 0; t < (mNumT-1); t++) { +#if GEN_TRI_STRIP //prepend terminating index to strip mTriStrip.push_back(mNumS*t); +#endif for (s = 0; s < (mNumS-1); s++) { @@ -5844,6 +5857,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mIndices[cur_index++] = s+1 + mNumS*t; //bottom right mIndices[cur_index++] = s+1 + mNumS*(t+1); //top right +#if GEN_TRI_STRIP if (s == 0) { mTriStrip.push_back(s+mNumS*t); @@ -5851,6 +5865,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mTriStrip.push_back(s+1+mNumS*t); mTriStrip.push_back(s+1+mNumS*(t+1)); +#endif mEdge[cur_edge++] = (mNumS-1)*2*t+s*2+1; //bottom left/top right neighbor face if (t < mNumT-2) { //top right/top left neighbor face @@ -5892,44 +5907,37 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mEdge[cur_edge++] = (mNumS-1)*2*t+s*2; //top right/bottom left neighbor face } +#if GEN_TRI_STRIP //append terminating vertex to strip mTriStrip.push_back(mNumS-1+mNumS*(t+1)); +#endif } +#if GEN_TRI_STRIP if (mTriStrip.size()%2 == 1) { mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); } +#endif } //generate normals for (U32 i = 0; i < mIndices.size()/3; i++) //for each triangle { - const S32 i0 = mIndices[i*3+0]; - const S32 i1 = mIndices[i*3+1]; - const S32 i2 = mIndices[i*3+2]; - const VertexData& v0 = mVertices[i0]; - const VertexData& v1 = mVertices[i1]; - const VertexData& v2 = mVertices[i2]; + const U16* idx = &(mIndices[i*3]); + + VertexData* v[] = + { &mVertices[idx[0]], &mVertices[idx[1]], &mVertices[idx[2]] }; //calculate triangle normal - LLVector3 norm = (v0.mPosition-v1.mPosition) % (v0.mPosition-v2.mPosition); + LLVector3 norm = (v[0]->mPosition-v[1]->mPosition) % (v[0]->mPosition-v[2]->mPosition); - for (U32 j = 0; j < 3; j++) - { //add triangle normal to vertices - const S32 idx = mIndices[i*3+j]; - mVertices[idx].mNormal += norm; // * (weight_sum - d[j])/weight_sum; - } + v[0]->mNormal += norm; + v[1]->mNormal += norm; + v[2]->mNormal += norm; //even out quad contributions - if ((i & 1) == 0) - { - mVertices[i2].mNormal += norm; - } - else - { - mVertices[i1].mNormal += norm; - } + v[i%2+1]->mNormal += norm; } // adjust normals based on wrapping and stitching -- cgit v1.2.3 From 4a501bb437f86175f6e8e2d015969595f55fa705 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 28 Apr 2010 02:53:12 -0500 Subject: Make LLVolume::createSide a little faster. (transplanted from 4d43e3b83ccffd725ec6cb234ddcfa0833f17a9f) --- indra/llmath/llvolume.cpp | 129 +++++++++++++++++++++++++++------------------- 1 file changed, 76 insertions(+), 53 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 39b7453ffc..3c3356f41d 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1,25 +1,31 @@ /** * @file llvolume.cpp * - * $LicenseInfo:firstyear=2002&license=viewerlgpl$ - * Second Life Viewer Source Code - * Copyright (C) 2010, Linden Research, Inc. + * $LicenseInfo:firstyear=2002&license=viewergpl$ + * + * Copyright (c) 2002-2009, Linden Research, Inc. * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; - * version 2.1 of the License only. 
+ * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. * - * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. * $/LicenseInfo$ */ @@ -80,6 +86,8 @@ const F32 SKEW_MAX = 0.95f; const F32 SCULPT_MIN_AREA = 0.002f; const S32 SCULPT_MIN_AREA_DETAIL = 1; +#define GEN_TRI_STRIP 0 + BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { LLVector3 test = (pt2-pt1)%(pt3-pt2); @@ -1682,7 +1690,7 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; generate(); - if (mParams.getSculptID().isNull()) + if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) { createVolumeFaces(); } @@ -1858,6 +1866,11 @@ void LLVolume::createVolumeFaces() LLProfile::Face& face = mProfilep->mFaces[i]; vf.mBeginS = face.mIndex; vf.mNumS = face.mCount; + if (vf.mNumS < 0) + { + llerrs << "Volume face corruption detected." << llendl; + } + vf.mBeginT = 0; vf.mNumT= getPath().mPath.size(); vf.mID = i; @@ -1901,6 +1914,10 @@ void LLVolume::createVolumeFaces() if (face.mFlat && vf.mNumS > 2) { //flat inner faces have to copy vert normals vf.mNumS = vf.mNumS*2; + if (vf.mNumS < 0) + { + llerrs << "Volume face corruption detected." << llendl; + } } } else @@ -4515,7 +4532,9 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) if (!partial_build) { +#if GEN_TRI_STRIP mTriStrip.clear(); +#endif S32 idxs[] = {0,1,(grid_size+1)+1,(grid_size+1)+1,(grid_size+1),0}; for(S32 gx = 0;gx 2) ? 
mNumS/2 : mNumS; @@ -5167,15 +5190,6 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mVertices[cur_vertex].mNormal = LLVector3(0,0,0); mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - - if (cur_vertex == 0) - { - face_min = face_max = mesh[i].mPos; - } - else - { - update_min_max(face_min, face_max, mesh[i].mPos); - } cur_vertex++; @@ -5209,12 +5223,22 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mVertices[cur_vertex].mNormal = LLVector3(0,0,0); mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - update_min_max(face_min,face_max,mesh[i].mPos); - cur_vertex++; } } + + //get bounding box for this side + LLVector3& face_min = mExtents[0]; + LLVector3& face_max = mExtents[1]; + mCenter.clearVec(); + + face_min = face_max = mVertices[0].mPosition; + for (U32 i = 1; i < mVertices.size(); ++i) + { + update_min_max(face_min, face_max, mVertices[i].mPosition); + } + mCenter = (face_min + face_max) * 0.5f; S32 cur_index = 0; @@ -5223,13 +5247,17 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { +#if GEN_TRI_STRIP mTriStrip.clear(); +#endif // Now we generate the indices. for (t = 0; t < (mNumT-1); t++) { +#if GEN_TRI_STRIP //prepend terminating index to strip mTriStrip.push_back(mNumS*t); +#endif for (s = 0; s < (mNumS-1); s++) { @@ -5240,6 +5268,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mIndices[cur_index++] = s+1 + mNumS*t; //bottom right mIndices[cur_index++] = s+1 + mNumS*(t+1); //top right +#if GEN_TRI_STRIP if (s == 0) { mTriStrip.push_back(s+mNumS*t); @@ -5247,6 +5276,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mTriStrip.push_back(s+1+mNumS*t); mTriStrip.push_back(s+1+mNumS*(t+1)); +#endif mEdge[cur_edge++] = (mNumS-1)*2*t+s*2+1; //bottom left/top right neighbor face if (t < mNumT-2) { //top right/top left neighbor face @@ -5288,44 +5318,37 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mEdge[cur_edge++] = (mNumS-1)*2*t+s*2; //top right/bottom left neighbor face } +#if GEN_TRI_STRIP //append terminating vertex to strip mTriStrip.push_back(mNumS-1+mNumS*(t+1)); +#endif } +#if GEN_TRI_STRIP if (mTriStrip.size()%2 == 1) { mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); } +#endif } //generate normals for (U32 i = 0; i < mIndices.size()/3; i++) //for each triangle { - const S32 i0 = mIndices[i*3+0]; - const S32 i1 = mIndices[i*3+1]; - const S32 i2 = mIndices[i*3+2]; - const VertexData& v0 = mVertices[i0]; - const VertexData& v1 = mVertices[i1]; - const VertexData& v2 = mVertices[i2]; + const U16* idx = &(mIndices[i*3]); + + VertexData* v[] = + { &mVertices[idx[0]], &mVertices[idx[1]], &mVertices[idx[2]] }; //calculate triangle normal - LLVector3 norm = (v0.mPosition-v1.mPosition) % (v0.mPosition-v2.mPosition); + LLVector3 norm = (v[0]->mPosition-v[1]->mPosition) % (v[0]->mPosition-v[2]->mPosition); - for (U32 j = 0; j < 3; j++) - { //add triangle normal to vertices - const S32 idx = mIndices[i*3+j]; - mVertices[idx].mNormal += norm; // * (weight_sum - d[j])/weight_sum; - } + v[0]->mNormal += norm; + v[1]->mNormal += norm; + v[2]->mNormal += norm; //even out quad contributions - if ((i & 1) == 0) - { - mVertices[i2].mNormal += norm; - } - else - { - mVertices[i1].mNormal += norm; - } + v[i%2+1]->mNormal += norm; } // adjust normals based on wrapping and stitching -- cgit v1.2.3 From 8919f4811a7dcaf47dc58159e0ba4ba042183325 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 21 May 2010 23:55:18 -0500 Subject: 
blah --- indra/llmath/llvolume.cpp | 262 +++++++++++++++++++++++++++++++++------------- indra/llmath/llvolume.h | 27 +++-- indra/llmath/v3math.h | 15 +++ 3 files changed, 226 insertions(+), 78 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 5ffc61ce9c..4e342b0b48 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -43,10 +43,12 @@ #include "v4math.h" #include "m4math.h" #include "m3math.h" +#include "llmatrix4a.h" #include "lldarray.h" #include "llvolume.h" #include "llstl.h" #include "llsdserialize.h" +#include "llvector4a.h" #define DEBUG_SILHOUETTE_BINORMALS 0 #define DEBUG_SILHOUETTE_NORMALS 0 // TomY: Use this to display normals using the silhouette @@ -1992,11 +1994,10 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) LLVolumeFace& face = mVolumeFaces[i]; - face.mHasBinormals = false; - //copy out indices - face.mIndices.resize(idx.size()/2); - if (idx.empty() || face.mIndices.size() < 3) + face.resizeIndices(idx.size()/2); + + if (idx.empty() || face.mNumIndices < 3) { //why is there an empty index list? llerrs <<"WTF?" << llendl; continue; @@ -2010,7 +2011,7 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) //copy out vertices U32 num_verts = pos.size()/(3*2); - face.mVertices.resize(num_verts); + face.resizeVertices(num_verts); if (mdl[i].has("Weights")) { @@ -2059,7 +2060,6 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) LLVector3 max_pos; LLVector2 min_tc; LLVector2 max_tc; - min_pos.setValue(mdl[i]["PositionDomain"]["Min"]); max_pos.setValue(mdl[i]["PositionDomain"]["Max"]); @@ -2074,36 +2074,44 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) min = max = LLVector3(0,0,0); + F32* pos_out = face.mPositions; + F32* norm_out = face.mNormals; + F32* tc_out = face.mTexCoords; + for (U32 j = 0; j < num_verts; ++j) { U16* v = (U16*) &(pos[j*3*2]); - face.mVertices[j].mPosition.setVec( - (F32) v[0] / 65535.f * pos_range.mV[0] + min_pos.mV[0], - (F32) v[1] / 65535.f * pos_range.mV[1] + min_pos.mV[1], - (F32) v[2] / 65535.f * pos_range.mV[2] + min_pos.mV[2]); + pos_out[0] = (F32) v[0] / 65535.f * pos_range.mV[0] + min_pos.mV[0]; + pos_out[1] = (F32) v[1] / 65535.f * pos_range.mV[1] + min_pos.mV[1]; + pos_out[2] = (F32) v[2] / 65535.f * pos_range.mV[2] + min_pos.mV[2]; + if (j == 0) { - min = max = face.mVertices[j].mPosition; + min = max = LLVector3(pos_out); } else { - update_min_max(min,max,face.mVertices[j].mPosition); + update_min_max(min,max,pos_out); } + pos_out += 4; + U16* n = (U16*) &(norm[j*3*2]); - face.mVertices[j].mNormal.setVec( - (F32) n[0] / 65535.f * 2.f - 1.f, - (F32) n[1] / 65535.f * 2.f - 1.f, - (F32) n[2] / 65535.f * 2.f - 1.f); + + norm_out[0] = (F32) n[0] / 65535.f * 2.f - 1.f; + norm_out[1] = (F32) n[1] / 65535.f * 2.f - 1.f; + norm_out[2] = (F32) n[2] / 65535.f * 2.f - 1.f; + norm_out += 4; U16* t = (U16*) &(tc[j*2*2]); - face.mVertices[j].mTexCoord.setVec( - (F32) t[0] / 65535.f * tc_range.mV[0] + min_tc.mV[0], - (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]); + tc_out[0] = (F32) t[0] / 65535.f * tc_range.mV[0] + min_tc.mV[0]; + tc_out[1] = (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]; + + tc_out += 8; } @@ -2133,24 +2141,29 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) if (do_reflect_x) { - for (S32 i = 0; i < face.mVertices.size(); i++) + LLVector4a* p = (LLVector4a*) face.mPositions; + LLVector4a* n = (LLVector4a*) face.mNormals; + + for (S32 i = 0; i < face.mNumVertices; i++) { - 
face.mVertices[i].mPosition.mV[VX] *= -1.0f; - face.mVertices[i].mNormal.mV[VX] *= -1.0f; + p[i].mul(-1.0f); + n[i].mul(-1.0f); } } if (do_invert_normals) { - for (S32 i = 0; i < face.mVertices.size(); i++) + LLVector4a* n = (LLVector4a*) face.mNormals; + + for (S32 i = 0; i < face.mNumVertices; i++) { - face.mVertices[i].mNormal *= -1.0f; + n[i].mul(-1.0f); } } if (do_reverse_triangles) { - for (U32 j = 0; j < face.mIndices.size(); j += 3) + for (U32 j = 0; j < face.mNumIndices; j += 3) { // swap the 2nd and 3rd index S32 swap = face.mIndices[j+1]; @@ -2215,9 +2228,28 @@ void LLVolume::makeTetrahedron() tetrahedron_set_normal(cv); - face.mVertices.push_back(cv[0]); - face.mVertices.push_back(cv[1]); - face.mVertices.push_back(cv[2]); + face.resizeVertices(12); + face.resizeIndices(12); + + LLVector4a* v = (LLVector4a*) face.mPositions; + LLVector4a* n = (LLVector4a*) face.mNormals; + LLVector2* tc = (LLVector2*) face.mTexCoords; + + v[0].load3(cv[0].mPosition.mV); + v[1].load3(cv[1].mPosition.mV); + v[2].load3(cv[2].mPosition.mV); + v += 3; + + n[0].load3(cv[0].mNormal.mV); + n[1].load3(cv[1].mNormal.mV); + n[2].load3(cv[2].mNormal.mV); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; + //side 2 cv[0].mPosition = p[3]; @@ -2226,9 +2258,20 @@ void LLVolume::makeTetrahedron() tetrahedron_set_normal(cv); - face.mVertices.push_back(cv[0]); - face.mVertices.push_back(cv[1]); - face.mVertices.push_back(cv[2]); + v[0].load3(cv[0].mPosition.mV); + v[1].load3(cv[1].mPosition.mV); + v[2].load3(cv[2].mPosition.mV); + v += 3; + + n[0].load3(cv[0].mNormal.mV); + n[1].load3(cv[1].mNormal.mV); + n[2].load3(cv[2].mNormal.mV); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; //side 3 cv[0].mPosition = p[3]; @@ -2237,9 +2280,20 @@ void LLVolume::makeTetrahedron() tetrahedron_set_normal(cv); - face.mVertices.push_back(cv[0]); - face.mVertices.push_back(cv[1]); - face.mVertices.push_back(cv[2]); + v[0].load3(cv[0].mPosition.mV); + v[1].load3(cv[1].mPosition.mV); + v[2].load3(cv[2].mPosition.mV); + v += 3; + + n[0].load3(cv[0].mNormal.mV); + n[1].load3(cv[1].mNormal.mV); + n[2].load3(cv[2].mNormal.mV); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; //side 4 cv[0].mPosition = p[2]; @@ -2248,14 +2302,25 @@ void LLVolume::makeTetrahedron() tetrahedron_set_normal(cv); - face.mVertices.push_back(cv[0]); - face.mVertices.push_back(cv[1]); - face.mVertices.push_back(cv[2]); + v[0].load3(cv[0].mPosition.mV); + v[1].load3(cv[1].mPosition.mV); + v[2].load3(cv[2].mPosition.mV); + v += 3; + + n[0].load3(cv[0].mNormal.mV); + n[1].load3(cv[1].mNormal.mV); + n[2].load3(cv[2].mNormal.mV); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; //set index buffer - for (U32 i = 0; i < 12; i++) + for (U16 i = 0; i < 12; i++) { - face.mIndices.push_back(i); + face.mIndices[i] = i; } mVolumeFaces.push_back(face); @@ -3831,7 +3896,7 @@ S32 LLVolume::getNumTriangles() const for (S32 i = 0; i < getNumVolumeFaces(); ++i) { - triangle_count += getVolumeFace(i).mIndices.size()/3; + triangle_count += getVolumeFace(i).mNumIndices/3; } return triangle_count; @@ -3844,13 +3909,22 @@ S32 LLVolume::getNumTriangles() const void LLVolume::generateSilhouetteVertices(std::vector &vertices, std::vector &normals, std::vector &segments, - const LLVector3& obj_cam_vec, - const LLMatrix4& mat, - const LLMatrix3& norm_mat, + const 
LLVector3& obj_cam_vec_in, + const LLMatrix4& mat_in, + const LLMatrix3& norm_mat_in, S32 face_mask) { LLMemType m1(LLMemType::MTYPE_VOLUME); + LLMatrix4a mat; + mat.loadu(mat_in); + + LLMatrix4a norm_mat; + norm_mat.loadu(norm_mat_in); + + LLVector4a obj_cam_vec; + obj_cam_vec.load3(obj_cam_vec_in.mV); + vertices.clear(); normals.clear(); segments.clear(); @@ -3868,7 +3942,7 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, LLVolumeFace& face = *iter; if (!(face_mask & (0x1 << cur_index++)) || - face.mIndices.empty() || face.mEdge.empty()) + face.mNumIndices == 0 || face.mEdge.empty()) { continue; } @@ -3885,7 +3959,7 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, #if DEBUG_SILHOUETTE_EDGE_MAP //for each triangle - U32 count = face.mIndices.size(); + U32 count = face.mNumIndices; for (U32 j = 0; j < count/3; j++) { //get vertices S32 v1 = face.mIndices[j*3+0]; @@ -3938,7 +4012,7 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, #elif DEBUG_SILHOUETTE_NORMALS //for each vertex - for (U32 j = 0; j < face.mVertices.size(); j++) { + for (U32 j = 0; j < face.mNumVertices; j++) { vertices.push_back(face.mVertices[j].mPosition); vertices.push_back(face.mVertices[j].mPosition + face.mVertices[j].mNormal*0.1f); normals.push_back(LLVector3(0,0,1)); @@ -3964,26 +4038,36 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, //for each triangle std::vector fFacing; - vector_append(fFacing, face.mIndices.size()/3); - for (U32 j = 0; j < face.mIndices.size()/3; j++) + vector_append(fFacing, face.mNumIndices/3); + + LLVector4a* v = (LLVector4a*) face.mPositions; + LLVector4a* n = (LLVector4a*) face.mNormals; + + for (U32 j = 0; j < face.mNumIndices/3; j++) { //approximate normal S32 v1 = face.mIndices[j*3+0]; S32 v2 = face.mIndices[j*3+1]; S32 v3 = face.mIndices[j*3+2]; - LLVector3 norm = (face.mVertices[v1].mPosition - face.mVertices[v2].mPosition) % - (face.mVertices[v2].mPosition - face.mVertices[v3].mPosition); - - if (norm.magVecSquared() < 0.00000001f) + LLVector4a c1,c2; + c1.setSub(v[v1], v[v2]); + c2.setSub(v[v2], v[v3]); + + LLVector4a norm; + + norm.setCross3(c1, c2); + + if (norm.dot3(norm) < 0.00000001f) { fFacing[j] = AWAY | TOWARDS; } else { //get view vector - LLVector3 view = (obj_cam_vec-face.mVertices[v1].mPosition); - bool away = view * norm > 0.0f; + LLVector4a view; + view.setSub(obj_cam_vec, v[v1]); + bool away = view.dot3(norm) > 0.0f; if (away) { fFacing[j] = AWAY; @@ -3996,7 +4080,7 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, } //for each triangle - for (U32 j = 0; j < face.mIndices.size()/3; j++) + for (U32 j = 0; j < face.mNumIndices/3; j++) { if (fFacing[j] == (AWAY | TOWARDS)) { //this is a degenerate triangle @@ -4029,9 +4113,14 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, S32 v1 = face.mIndices[j*3+k]; S32 v2 = face.mIndices[j*3+((k+1)%3)]; - vertices.push_back(face.mVertices[v1].mPosition*mat); - LLVector3 norm1 = face.mVertices[v1].mNormal * norm_mat; - norm1.normVec(); + LLVector4a t; + mat.affineTransform(v[v1], t); + vertices.push_back(LLVector3(t[0], t[1], t[2])); + + norm_mat.rotate(n[v1], t); + + t.normalize3Fast(); + LLVector3 norm1 = LLVector3(t[0], t[1], t[2]); normals.push_back(norm1); vertices.push_back(face.mVertices[v2].mPosition*mat); @@ -4088,7 +4177,7 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, genBinormals(i); } - for (U32 tri = 0; tri < face.mIndices.size()/3; tri++) + for (U32 tri = 0; tri < 
face.mNumIndices/3; tri++) { S32 index1 = face.mIndices[tri*3+0]; S32 index2 = face.mIndices[tri*3+1]; @@ -4928,7 +5017,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) VertexMapData::PointMap point_map; //remove redundant vertices - for (U32 i = 0; i < mIndices.size(); ++i) + for (U32 i = 0; i < mNumIndices; ++i) { U16 index = mIndices[i]; @@ -4953,7 +5042,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) if (!found) { new_face.mVertices.push_back(cv); - U16 index = (U16) new_face.mVertices.size()-1; + U16 index = (U16) new_face.mNumVertices-1; new_face.mIndices.push_back(index); VertexMapData d; @@ -5053,7 +5142,7 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) mVertices.clear(); } - S32 vtop = mVertices.size(); + S32 vtop = mNumVertices; for(int gx = 0;gx 65536) + if (face.mNumVertices + mNumVertices > 65536) { llerrs << "Cannot append face -- 16-bit overflow will occur." << llendl; } - for (U32 i = 0; i < face.mVertices.size(); ++i) + for (U32 i = 0; i < face.mNumVertices; ++i) { VertexData v = face.mVertices[i]; v.mPosition = v.mPosition*mat; @@ -5676,7 +5796,7 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat, LLMatrix } - for (U32 i = 0; i < face.mIndices.size(); ++i) + for (U32 i = 0; i < face.mNumIndices; ++i) { mIndices.push_back(face.mIndices[i]+offset); } @@ -5823,7 +5943,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mCenter.clearVec(); face_min = face_max = mVertices[0].mPosition; - for (U32 i = 1; i < mVertices.size(); ++i) + for (U32 i = 1; i < mNumVertices; ++i) { update_min_max(face_min, face_max, mVertices[i].mPosition); } @@ -5922,7 +6042,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } //generate normals - for (U32 i = 0; i < mIndices.size()/3; i++) //for each triangle + for (U32 i = 0; i < mNumIndices/3; i++) //for each triangle { const U16* idx = &(mIndices[i*3]); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index c6a156ae37..17be642d4a 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -794,20 +794,28 @@ public: LLVolumeFace() : mID(0), mTypeMask(0), - mHasBinormals(FALSE), mBeginS(0), mBeginT(0), mNumS(0), - mNumT(0) + mNumT(0), + mNumVertices(0), + mNumIndices(0), + mPositions(NULL), + mNormals(NULL), + mBinormals(NULL), + mTexCoords(NULL), + mIndices(NULL) { } BOOL create(LLVolume* volume, BOOL partial_build = FALSE); void createBinormals(); - void makeTriStrip(); void appendFace(const LLVolumeFace& face, LLMatrix4& transform, LLMatrix4& normal_tranform); + void resizeVertices(S32 num_verts); + void resizeIndices(S32 num_indices); + class VertexData { public: @@ -873,7 +881,6 @@ public: S32 mID; U32 mTypeMask; LLVector3 mCenter; - BOOL mHasBinormals; // Only used for INNER/OUTER faces S32 mBeginS; @@ -883,9 +890,15 @@ public: LLVector3 mExtents[2]; //minimum and maximum point of face - std::vector mVertices; - std::vector mIndices; - std::vector mTriStrip; + S32 mNumVertices; + S32 mNumIndices; + + F32* mPositions; + F32* mNormals; + F32* mBinormals; + F32* mTexCoords; + U16* mIndices; + std::vector mEdge; //list of skin weights for rigged volumes diff --git a/indra/llmath/v3math.h b/indra/llmath/v3math.h index 76dd938887..0e7d72e958 100644 --- a/indra/llmath/v3math.h +++ b/indra/llmath/v3math.h @@ -532,6 +532,21 @@ inline void update_min_max(LLVector3& min, LLVector3& max, const LLVector3& pos) } } +inline void update_min_max(LLVector3& min, LLVector3& max, const F32* pos) +{ + for (U32 i = 0; i < 3; i++) + { + if 
(min.mV[i] > pos[i]) + { + min.mV[i] = pos[i]; + } + if (max.mV[i] < pos[i]) + { + max.mV[i] = pos[i]; + } + } +} + inline F32 angle_between(const LLVector3& a, const LLVector3& b) { LLVector3 an = a; -- cgit v1.2.3 From 4d57cb3c0975ff0bcea0d6fb3498f2d90962ff16 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sat, 22 May 2010 04:35:02 -0500 Subject: Vectorize/memory align buffers in llvolumeface WIP --- indra/llmath/llvolume.cpp | 720 +++++++++++++++++++++++----------------------- indra/llmath/llvolume.h | 35 ++- 2 files changed, 386 insertions(+), 369 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 4e342b0b48..01fe2be371 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1,4 +1,5 @@ /** + * @file llvolume.cpp * * $LicenseInfo:firstyear=2002&license=viewergpl$ @@ -89,8 +90,6 @@ const F32 SKEW_MAX = 0.95f; const F32 SCULPT_MIN_AREA = 0.002f; const S32 SCULPT_MIN_AREA_DETAIL = 1; -#define GEN_TRI_STRIP 0 - BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { LLVector3 test = (pt2-pt1)%(pt3-pt2); @@ -134,21 +133,25 @@ BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, con // and returns the intersection point along dir in intersection_t. // Moller-Trumbore algorithm -BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, +BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided) { F32 u, v, t; /* find vectors for two edges sharing vert0 */ - LLVector3 edge1 = vert1 - vert0; + LLVector4a edge1; + edge1.setSub(vert1, vert0); - LLVector3 edge2 = vert2 - vert0;; + + LLVector4a edge2; + edge2.setSub(vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ - LLVector3 pvec = dir % edge2; - + LLVector4a pvec; + pvec.setCross3(dir, edge2); + /* if determinant is near zero, ray lies in plane of triangle */ - F32 det = edge1 * pvec; + F32 det = edge1.dot3(pvec); if (!two_sided) { @@ -158,10 +161,11 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons } /* calculate distance from vert0 to ray origin */ - LLVector3 tvec = orig - vert0; + LLVector4a tvec; + tvec.setSub(orig, vert0); /* calculate U parameter and test bounds */ - u = tvec * pvec; + u = tvec.dot3(pvec); if (u < 0.f || u > det) { @@ -169,17 +173,18 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons } /* prepare to test V parameter */ - LLVector3 qvec = tvec % edge1; + LLVector4a qvec; + qvec.setCross3(tvec, edge1); /* calculate V parameter and test bounds */ - v = dir * qvec; + v = dir.dot3(qvec); if (v < 0.f || u + v > det) { return FALSE; } /* calculate t, scale parameters, ray intersects triangle */ - t = edge2 * qvec; + t = edge2.dot3(qvec); F32 inv_det = 1.0 / det; t *= inv_det; u *= inv_det; @@ -195,20 +200,22 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons F32 inv_det = 1.0 / det; /* calculate distance from vert0 to ray origin */ - LLVector3 tvec = orig - vert0; + LLVector4a tvec; + tvec.setSub(orig, vert0); /* calculate U parameter and test bounds */ - u = (tvec * pvec) * inv_det; + u = (tvec.dot3(pvec)) * inv_det; if (u < 0.f || u > 1.f) { return FALSE; } /* prepare to 
test V parameter */ - LLVector3 qvec = tvec - edge1; + LLVector4a qvec; + qvec.setSub(tvec, edge1); /* calculate V parameter and test bounds */ - v = (dir * qvec) * inv_det; + v = (dir.dot3(qvec)) * inv_det; if (v < 0.f || u + v > 1.f) { @@ -216,7 +223,7 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons } /* calculate t, ray intersects triangle */ - t = (edge2 * qvec) * inv_det; + t = (edge2.dot3(qvec)) * inv_det; } if (intersection_a != NULL) @@ -4120,13 +4127,14 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, norm_mat.rotate(n[v1], t); t.normalize3Fast(); - LLVector3 norm1 = LLVector3(t[0], t[1], t[2]); - normals.push_back(norm1); + normals.push_back(LLVector3(t[0], t[1], t[2])); - vertices.push_back(face.mVertices[v2].mPosition*mat); - LLVector3 norm2 = face.mVertices[v2].mNormal * norm_mat; - norm2.normVec(); - normals.push_back(norm2); + mat.affineTransform(v[v2], t); + vertices.push_back(LLVector3(t[0], t[1], t[2])); + + norm_mat.rotate(n[v2], t); + t.normalize3Fast(); + normals.push_back(LLVector3(t[0], t[1], t[2])); segments.push_back(vertices.size()); } @@ -4177,6 +4185,10 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, genBinormals(i); } + LLVector4a starta, dira; + + LLVector4a* p = (LLVector4a*) face.mPositions; + for (U32 tri = 0; tri < face.mNumIndices/3; tri++) { S32 index1 = face.mIndices[tri*3+0]; @@ -4185,15 +4197,15 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, F32 a, b, t; - if (LLTriangleRayIntersect(face.mVertices[index1].mPosition, - face.mVertices[index2].mPosition, - face.mVertices[index3].mPosition, - start, dir, &a, &b, &t, FALSE)) + if (LLTriangleRayIntersect(p[index1], + p[index2], + p[index3], + starta, dira, &a, &b, &t, FALSE)) { if ((t >= 0.f) && // if hit is after start (t <= 1.f) && // and before end (t < closest_t)) // and this hit is closer - { + { closest_t = t; hit_face = i; @@ -4201,27 +4213,35 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, { *intersection = start + dir * closest_t; } - + + if (tex_coord != NULL) - { - *tex_coord = ((1.f - a - b) * face.mVertices[index1].mTexCoord + - a * face.mVertices[index2].mTexCoord + - b * face.mVertices[index3].mTexCoord); + { + LLVector2* tc = (LLVector2*) face.mTexCoords; + *tex_coord = ((1.f - a - b) * tc[index1] + + a * tc[index2] + + b * tc[index3]); } if (normal != NULL) - { - *normal = ((1.f - a - b) * face.mVertices[index1].mNormal + - a * face.mVertices[index2].mNormal + - b * face.mVertices[index3].mNormal); + { + LLVector4* norm = (LLVector4*) face.mNormals; + + *normal = ((1.f - a - b) * LLVector3(norm[index1]) + + a * LLVector3(norm[index2]) + + b * LLVector3(norm[index3])); } if (bi_normal != NULL) - { - *bi_normal = ((1.f - a - b) * face.mVertices[index1].mBinormal + - a * face.mVertices[index2].mBinormal + - b * face.mVertices[index3].mBinormal); + { + LLVector4* binormal = (LLVector4*) face.mBinormals; + if (binormal) + { + *bi_normal = ((1.f - a - b) * LLVector3(binormal[index1]) + + a * LLVector3(binormal[index2]) + + b * LLVector3(binormal[index3])); + } } } @@ -4992,6 +5012,14 @@ std::ostream& operator<<(std::ostream &s, const LLVolume *volumep) return s; } +LLVolumeFace::~LLVolumeFace() +{ + _mm_free(mPositions); + _mm_free(mNormals); + _mm_free(mTexCoords); + _mm_free(mIndices); + _mm_free(mBinormals); +} BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) { @@ -5012,6 +5040,7 @@ BOOL LLVolumeFace::create(LLVolume* 
volume, BOOL partial_build) void LLVolumeFace::optimize(F32 angle_cutoff) { +#if 0 //disabling until a vectorized version is available LLVolumeFace new_face; VertexMapData::PointMap point_map; @@ -5063,6 +5092,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) mVertices = new_face.mVertices; mIndices = new_face.mIndices; +#endif } void LerpPlanarVertex(LLVolumeFace::VertexData& v0, @@ -5127,20 +5157,22 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) corners[1].mTexCoord=corners[2].mTexCoord; corners[2].mTexCoord=swap; } - baseVert.mBinormal = calc_binormal_from_triangle( + + LLVector4a binormal; + + calc_binormal_from_triangle( binormal, corners[0].mPosition, corners[0].mTexCoord, corners[1].mPosition, corners[1].mTexCoord, corners[2].mPosition, corners[2].mTexCoord); - for(int t = 0; t < 4; t++){ - corners[t].mBinormal = baseVert.mBinormal; - corners[t].mNormal = baseVert.mNormal; - } - mHasBinormals = TRUE; - if (partial_build) - { - mVertices.clear(); - } + S32 size = (grid_size+1)*(grid_size+1); + resizeVertices(size); + allocateBinormals(size); + + LLVector4a* pos = (LLVector4a*) mPositions; + LLVector4a* norm = (LLVector4a*) mNormals; + LLVector4a* binorm = (LLVector4a*) mBinormals; + LLVector2* tc = (LLVector2*) mTexCoords; S32 vtop = mNumVertices; for(int gx = 0;gx=0;i--) { - mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); - } - -#if GEN_TRI_STRIP - if (gy == 0) - { - mTriStrip.push_back((gx+1)*(grid_size+1)); - mTriStrip.push_back((gx+1)*(grid_size+1)); - mTriStrip.push_back(gx*(grid_size+1)); - } - - mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); - mTriStrip.push_back(gy+1+gx*(grid_size+1)); - - - if (gy == grid_size-1) - { - mTriStrip.push_back(gy+1+gx*(grid_size+1)); - } -#endif + *out++ = (vtop+(gy*(grid_size+1))+gx+idxs[i]); + } } else { for(S32 i=0;i<6;i++) { - mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); - } - -#if GEN_TRI_STRIP - if (gy == 0) - { - mTriStrip.push_back(gx*(grid_size+1)); - mTriStrip.push_back(gx*(grid_size+1)); - mTriStrip.push_back((gx+1)*(grid_size+1)); + *out++ = (vtop+(gy*(grid_size+1))+gx+idxs[i]); } - - mTriStrip.push_back(gy+1+gx*(grid_size+1)); - mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); - - if (gy == grid_size-1) - { - mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); - } -#endif } } } - -#if GEN_TRI_STRIP - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } -#endif } return TRUE; @@ -5267,11 +5262,25 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) num_vertices = profile.size(); num_indices = (profile.size() - 2)*3; - mVertices.resize(num_vertices); + if (!(mTypeMask & HOLLOW_MASK) && !(mTypeMask & OPEN_MASK)) + { + resizeVertices(num_vertices+1); + allocateBinormals(num_vertices+1); - if (!partial_build) + if (!partial_build) + { + resizeIndices(num_indices+3); + } + } + else { - mIndices.resize(num_indices); + resizeVertices(num_vertices); + allocateBinormals(num_vertices); + + if (!partial_build) + { + resizeIndices(num_indices); + } } S32 max_s = volume->getProfile().getTotal(); @@ -5298,79 +5307,87 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) LLVector3& min = mExtents[0]; LLVector3& max = mExtents[1]; + LLVector2* tc = (LLVector2*) mTexCoords; + LLVector4a* pos = (LLVector4a*) mPositions; + LLVector4a* norm = (LLVector4a*) mNormals; + LLVector4a* binorm = (LLVector4a*) mBinormals; + // Copy the vertices into the array for (S32 i = 0; i < num_vertices; i++) { if (mTypeMask & TOP_MASK) { - 
mVertices[i].mTexCoord.mV[0] = profile[i].mV[0]+0.5f; - mVertices[i].mTexCoord.mV[1] = profile[i].mV[1]+0.5f; + tc[i].mV[0] = profile[i].mV[0]+0.5f; + tc[i].mV[1] = profile[i].mV[1]+0.5f; } else { // Mirror for underside. - mVertices[i].mTexCoord.mV[0] = profile[i].mV[0]+0.5f; - mVertices[i].mTexCoord.mV[1] = 0.5f - profile[i].mV[1]; + tc[i].mV[0] = profile[i].mV[0]+0.5f; + tc[i].mV[1] = 0.5f - profile[i].mV[1]; } - mVertices[i].mPosition = mesh[i + offset].mPos; + pos[i].load3(mesh[i + offset].mPos.mV); if (i == 0) { - min = max = mVertices[i].mPosition; - min_uv = max_uv = mVertices[i].mTexCoord; + min = max = mesh[i+offset].mPos; + min_uv = max_uv = tc[i]; } else { - update_min_max(min,max, mVertices[i].mPosition); - update_min_max(min_uv, max_uv, mVertices[i].mTexCoord); + update_min_max(min,max, mesh[i+offset].mPos); + update_min_max(min_uv, max_uv, tc[i]); } } mCenter = (min+max)*0.5f; cuv = (min_uv + max_uv)*0.5f; - LLVector3 binormal = calc_binormal_from_triangle( + LLVector4a binormal; + calc_binormal_from_triangle(binormal, mCenter, cuv, - mVertices[0].mPosition, mVertices[0].mTexCoord, - mVertices[1].mPosition, mVertices[1].mTexCoord); - binormal.normVec(); + mesh[0+offset].mPos, tc[0], + mesh[1+offset].mPos, tc[1]); + binormal.normalize3Fast(); + + LLVector4a normal; + LLVector4a d0, d1; + LLVector4a center; + + center.load3(mCenter.mV); - LLVector3 d0; - LLVector3 d1; - LLVector3 normal; + d0.setSub(center, pos[0]); + d1.setSub(center, pos[1]); - d0 = mCenter-mVertices[0].mPosition; - d1 = mCenter-mVertices[1].mPosition; + if (mTypeMask & TOP_MASK) + { + normal.setCross3(d0, d1); + } + else + { + normal.setCross3(d1, d0); + } - normal = (mTypeMask & TOP_MASK) ? (d0%d1) : (d1%d0); - normal.normVec(); + normal.normalize3Fast(); VertexData vd; vd.mPosition = mCenter; - vd.mNormal = normal; - vd.mBinormal = binormal; vd.mTexCoord = cuv; if (!(mTypeMask & HOLLOW_MASK) && !(mTypeMask & OPEN_MASK)) { - mVertices.push_back(vd); + pos[num_vertices].load4a((F32*) ¢er.mQ); + tc[num_vertices] = cuv; num_vertices++; - if (!partial_build) - { - vector_append(mIndices, 3); - } } - for (S32 i = 0; i < num_vertices; i++) { - mVertices[i].mBinormal = binormal; - mVertices[i].mNormal = normal; + binorm[i].load4a((F32*) &binormal.mQ); + norm[i].load4a((F32*) &normal.mQ); } - mHasBinormals = TRUE; - if (partial_build) { return TRUE; @@ -5478,8 +5495,6 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) pt2--; } } - - makeTriStrip(); } else { @@ -5584,8 +5599,6 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) pt2--; } } - - makeTriStrip(); } } else @@ -5607,131 +5620,63 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) mIndices[3*i+v2] = i + 1; } -#if GEN_TRI_STRIP - //make tri strip - if (mTypeMask & OPEN_MASK) - { - makeTriStrip(); - } - else - { - S32 j = num_vertices-2; - if (mTypeMask & TOP_MASK) - { - mTriStrip.push_back(0); - for (S32 i = 0; i <= j; ++i) - { - mTriStrip.push_back(i); - if (i != j) - { - mTriStrip.push_back(j); - } - --j; - } - } - else - { - mTriStrip.push_back(j); - for (S32 i = 0; i <= j; ++i) - { - if (i != j) - { - mTriStrip.push_back(j); - } - mTriStrip.push_back(i); - --j; - } - } - - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } - } -#endif } return TRUE; } -void LLVolumeFace::makeTriStrip() -{ -#if GEN_TRI_STRIP - for (U32 i = 0; i < mNumIndices; i+=3) - { - U16 i0 = mIndices[i]; - U16 i1 = mIndices[i+1]; - U16 
i2 = mIndices[i+2]; - - if ((i/3)%2 == 1) - { - mTriStrip.push_back(i0); - mTriStrip.push_back(i0); - mTriStrip.push_back(i1); - mTriStrip.push_back(i2); - mTriStrip.push_back(i2); - } - else - { - mTriStrip.push_back(i2); - mTriStrip.push_back(i2); - mTriStrip.push_back(i1); - mTriStrip.push_back(i0); - mTriStrip.push_back(i0); - } - } - - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } -#endif -} - void LLVolumeFace::createBinormals() { LLMemType m1(LLMemType::MTYPE_VOLUME); - if (!mHasBinormals) + if (!mBinormals) { + allocateBinormals(mNumVertices); + //generate binormals + LLStrider pos; + pos = (LLVector3*) mPositions; + pos.setStride(16); + + LLVector2* tc = (LLVector2*) mTexCoords; + LLVector4a* binorm = (LLVector4a*) mBinormals; + for (U32 i = 0; i < mNumIndices/3; i++) { //for each triangle - const VertexData& v0 = mVertices[mIndices[i*3+0]]; - const VertexData& v1 = mVertices[mIndices[i*3+1]]; - const VertexData& v2 = mVertices[mIndices[i*3+2]]; + const U16& i0 = mIndices[i*3+0]; + const U16& i1 = mIndices[i*3+1]; + const U16& i2 = mIndices[i*3+2]; //calculate binormal - LLVector3 binorm = calc_binormal_from_triangle(v0.mPosition, v0.mTexCoord, - v1.mPosition, v1.mTexCoord, - v2.mPosition, v2.mTexCoord); + LLVector4a binormal; + calc_binormal_from_triangle(binormal, + pos[i0], tc[i0], + pos[i1], tc[i1], + pos[i2], tc[i2]); - for (U32 j = 0; j < 3; j++) - { //add triangle normal to vertices - mVertices[mIndices[i*3+j]].mBinormal += binorm; // * (weight_sum - d[j])/weight_sum; - } + + //add triangle normal to vertices + binorm[i0].add(binormal); + binorm[i1].add(binormal); + binorm[i2].add(binormal); //even out quad contributions if (i % 2 == 0) { - mVertices[mIndices[i*3+2]].mBinormal += binorm; + binorm[i2].add(binormal); } else { - mVertices[mIndices[i*3+1]].mBinormal += binorm; + binorm[i1].add(binormal); } } //normalize binormals for (U32 i = 0; i < mNumVertices; i++) { - mVertices[i].mBinormal.normVec(); - mVertices[i].mNormal.normVec(); + binorm[i].normalize3Fast(); } - - mHasBinormals = TRUE; } } @@ -5754,6 +5699,13 @@ void LLVolumeFace::resizeVertices(S32 num_verts) mNumVertices = num_verts; } +void LLVolumeFace::allocateBinormals(S32 num_verts) +{ + _mm_free(mBinormals); + mBinormals = (F32*) _mm_malloc(num_verts*16, 16); +} + + void LLVolumeFace::resizeIndices(S32 num_indices) { _mm_free(mIndices); @@ -5761,44 +5713,107 @@ void LLVolumeFace::resizeIndices(S32 num_indices) //pad index block end to allow for QWORD reads S32 size = ((num_indices*2) + 0xF) & ~0xF; - mIndices = (U16*) _mm_malloc(size); + mIndices = (U16*) _mm_malloc(size,16); mNumIndices = num_indices; } -void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat, LLMatrix4& norm_mat) +void LLVolumeFace::fillFromLegacyData(std::vector& v, std::vector& idx) +{ + resizeVertices(v.size()); + resizeIndices(idx.size()); + + for (U32 i = 0; i < v.size(); ++i) + { + for (U32 j = 0; j < 3; ++j) + { + mPositions[i*4+j] = v[i].mPosition[j]; + mNormals[i*4+j] = v[i].mNormal[j]; + } + + mTexCoords[i*2+0] = v[i].mTexCoord.mV[0]; + mTexCoords[i*2+1] = v[i].mTexCoord.mV[1]; + } + + for (U32 i = 0; i < idx.size(); ++i) + { + mIndices[i] = idx[i]; + } +} + +void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMatrix4& norm_mat_in) { U16 offset = mNumVertices; - if (face.mNumVertices + mNumVertices > 65536) + S32 new_count = face.mNumVertices + mNumVertices; + + if (new_count > 65536) { llerrs << "Cannot append face -- 16-bit overflow will occur." 
<< llendl; } + + F32* new_pos = (F32*) _mm_malloc(new_count*16, 16); + F32* new_norm = (F32*) _mm_malloc(new_count*16, 16); + F32* new_tc = (F32*) _mm_malloc((new_count*8+0xF) & ~0xF, 16); + + LLVector4a::memcpyNonAliased16(new_pos, mPositions, new_count*4); + LLVector4a::memcpyNonAliased16(new_norm, mNormals, new_count*4); + LLVector4a::memcpyNonAliased16(new_tc, mTexCoords, new_count*2); + + _mm_free(mPositions); + _mm_free(mNormals); + _mm_free(mTexCoords); + + mPositions = new_pos; + mNormals = new_norm; + mTexCoords = new_tc; + + mNumVertices = new_count; + + LLVector4a* dst_pos = (LLVector4a*) mPositions+offset; + LLVector2* dst_tc = (LLVector2*) mTexCoords+offset; + LLVector4a* dst_norm = (LLVector4a*) mNormals+offset; + + LLVector4a* src_pos = (LLVector4a*) face.mPositions; + LLVector2* src_tc = (LLVector2*) face.mTexCoords; + LLVector4a* src_norm = (LLVector4a*) face.mNormals; + + LLMatrix4a mat, norm_mat; + mat.loadu(mat_in); + norm_mat.loadu(norm_mat_in); + for (U32 i = 0; i < face.mNumVertices; ++i) { - VertexData v = face.mVertices[i]; - v.mPosition = v.mPosition*mat; - v.mNormal = v.mNormal * norm_mat; + mat.affineTransform(src_pos[i], dst_pos[i]); + norm_mat.rotate(src_norm[i], dst_norm[i]); + dst_norm[i].normalize3Fast(); - v.mNormal.normalize(); - - mVertices.push_back(v); + dst_tc[i] = src_tc[i]; if (offset == 0 && i == 0) { - mExtents[0] = mExtents[1] = v.mPosition; + mExtents[0] = mExtents[1] = LLVector3((F32*) &(dst_pos[i].mQ)); } else { - update_min_max(mExtents[0], mExtents[1], v.mPosition); + update_min_max(mExtents[0], mExtents[1], (F32*) &(dst_pos[i].mQ)); } } - + + new_count = mNumIndices + face.mNumIndices; + U16* new_indices = (U16*) _mm_malloc((new_count*2+0xF) & ~0xF, 16); + LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, new_count/2); + _mm_free(mIndices); + mIndices = new_indices; + mNumIndices = new_count; + + U16* dst_idx = mIndices+offset; + for (U32 i = 0; i < face.mNumIndices; ++i) { - mIndices.push_back(face.mIndices[i]+offset); + dst_idx[i] = face.mIndices[i]+offset; } } @@ -5828,21 +5843,20 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) num_vertices = mNumS*mNumT; num_indices = (mNumS-1)*(mNumT-1)*6; - mVertices.resize(num_vertices); - if (!partial_build) { - mIndices.resize(num_indices); + resizeVertices(num_vertices); + resizeIndices(num_indices); if ((volume->getParams().getSculptType() & LL_SCULPT_TYPE_MASK) != LL_SCULPT_TYPE_MESH) { mEdge.resize(num_indices); } } - else - { - mHasBinormals = FALSE; - } + + LLVector4a* pos = (LLVector4a*) mPositions; + LLVector4a* norm = (LLVector4a*) mNormals; + LLVector2* tc = (LLVector2*) mTexCoords; S32 begin_stex = llfloor( profile[mBeginS].mV[2] ); S32 num_s = ((mTypeMask & INNER_MASK) && (mTypeMask & FLAT_MASK) && mNumS > 2) ? 
mNumS/2 : mNumS; @@ -5894,21 +5908,21 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) i = mBeginS + s + max_s*t; } - mVertices[cur_vertex].mPosition = mesh[i].mPos; - mVertices[cur_vertex].mTexCoord = LLVector2(ss,tt); + pos[cur_vertex].load3(mesh[i].mPos.mV); + tc[cur_vertex] = LLVector2(ss,tt); - mVertices[cur_vertex].mNormal = LLVector3(0,0,0); - mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); + norm[cur_vertex].clear(); cur_vertex++; if ((mTypeMask & INNER_MASK) && (mTypeMask & FLAT_MASK) && mNumS > 2 && s > 0) { - mVertices[cur_vertex].mPosition = mesh[i].mPos; - mVertices[cur_vertex].mTexCoord = LLVector2(ss,tt); + + pos[cur_vertex].load3(mesh[i].mPos.mV); + tc[cur_vertex] = LLVector2(ss,tt); - mVertices[cur_vertex].mNormal = LLVector3(0,0,0); - mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); + norm[cur_vertex].clear(); + cur_vertex++; } } @@ -5926,12 +5940,10 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) i = mBeginS + s + max_s*t; ss = profile[mBeginS + s].mV[2] - begin_stex; - mVertices[cur_vertex].mPosition = mesh[i].mPos; - mVertices[cur_vertex].mTexCoord = LLVector2(ss,tt); - - mVertices[cur_vertex].mNormal = LLVector3(0,0,0); - mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - + pos[cur_vertex].load3(mesh[i].mPos.mV); + tc[cur_vertex] = LLVector2(ss,tt); + norm[cur_vertex].clear(); + cur_vertex++; } } @@ -5942,10 +5954,11 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) LLVector3& face_max = mExtents[1]; mCenter.clearVec(); - face_min = face_max = mVertices[0].mPosition; + face_min = face_max = LLVector3((F32*) &(pos[i].mQ)); + for (U32 i = 1; i < mNumVertices; ++i) { - update_min_max(face_min, face_max, mVertices[i].mPosition); + update_min_max(face_min, face_max, (F32*) &(pos[i].mQ)); } mCenter = (face_min + face_max) * 0.5f; @@ -5956,18 +5969,9 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { -#if GEN_TRI_STRIP - mTriStrip.clear(); -#endif - // Now we generate the indices. 
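			// [Illustrative sketch, not verbatim from this hunk] The side mesh is
			// laid out row-major: mNumS vertices per row, mNumT rows, so vertex
			// (s,t) lives at index s + mNumS*t.  Each of the (mNumS-1)*(mNumT-1)
			// quads becomes two triangles (6 indices), which is where the
			// num_indices = (mNumS-1)*(mNumT-1)*6 figure above comes from.
			// A minimal standalone version of the same layout, with assumed
			// names and winding:
			//
			//   for (S32 t = 0; t < rows - 1; ++t)
			//     for (S32 s = 0; s < cols - 1; ++s)
			//     {
			//       U16 bl = s     + cols * t;        // bottom left
			//       U16 br = s + 1 + cols * t;        // bottom right
			//       U16 tl = s     + cols * (t + 1);  // top left
			//       U16 tr = s + 1 + cols * (t + 1);  // top right
			//       push(bl); push(tr); push(tl);     // first triangle
			//       push(bl); push(br); push(tr);     // second triangle
			//     }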
for (t = 0; t < (mNumT-1); t++) { -#if GEN_TRI_STRIP - //prepend terminating index to strip - mTriStrip.push_back(mNumS*t); -#endif - for (s = 0; s < (mNumS-1); s++) { mIndices[cur_index++] = s + mNumS*t; //bottom left @@ -5977,16 +5981,6 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mIndices[cur_index++] = s+1 + mNumS*t; //bottom right mIndices[cur_index++] = s+1 + mNumS*(t+1); //top right -#if GEN_TRI_STRIP - if (s == 0) - { - mTriStrip.push_back(s+mNumS*t); - mTriStrip.push_back(s+mNumS*(t+1)); - } - mTriStrip.push_back(s+1+mNumS*t); - mTriStrip.push_back(s+1+mNumS*(t+1)); -#endif - mEdge[cur_edge++] = (mNumS-1)*2*t+s*2+1; //bottom left/top right neighbor face if (t < mNumT-2) { //top right/top left neighbor face mEdge[cur_edge++] = (mNumS-1)*2*(t+1)+s*2+1; @@ -6027,52 +6021,55 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mEdge[cur_edge++] = (mNumS-1)*2*t+s*2; //top right/bottom left neighbor face } -#if GEN_TRI_STRIP - //append terminating vertex to strip - mTriStrip.push_back(mNumS-1+mNumS*(t+1)); -#endif } - -#if GEN_TRI_STRIP - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } -#endif } //generate normals for (U32 i = 0; i < mNumIndices/3; i++) //for each triangle { const U16* idx = &(mIndices[i*3]); - - VertexData* v[] = - { &mVertices[idx[0]], &mVertices[idx[1]], &mVertices[idx[2]] }; - - //calculate triangle normal - LLVector3 norm = (v[0]->mPosition-v[1]->mPosition) % (v[0]->mPosition-v[2]->mPosition); + - v[0]->mNormal += norm; - v[1]->mNormal += norm; - v[2]->mNormal += norm; + LLVector4a* v[] = + { pos+idx[0], pos+idx[1], pos+idx[2] }; + + LLVector4a* n[] = + { norm+idx[0], norm+idx[1], norm+idx[2] }; + + //calculate triangle normal + LLVector4a a, b, c; + + a.setSub(*v[0], *v[1]); + b.setSub(*v[0], *v[2]); + c.setCross3(a,b); + n[0]->add(c); + n[1]->add(c); + n[2]->add(c); + //even out quad contributions - v[i%2+1]->mNormal += norm; + n[i%2+1]->add(c); } // adjust normals based on wrapping and stitching - BOOL s_bottom_converges = ((mVertices[0].mPosition - mVertices[mNumS*(mNumT-2)].mPosition).magVecSquared() < 0.000001f); - BOOL s_top_converges = ((mVertices[mNumS-1].mPosition - mVertices[mNumS*(mNumT-2)+mNumS-1].mPosition).magVecSquared() < 0.000001f); + LLVector4a top; + top.setSub(pos[0], pos[mNumS*(mNumT-2)]); + BOOL s_bottom_converges = (top.dot3(top) < 0.000001f); + + top.setSub(pos[mNumS-1], pos[mNumS*(mNumT-2)+mNumS-1]); + BOOL s_top_converges = (top.dot3(top) < 0.000001f); + if (sculpt_stitching == LL_SCULPT_TYPE_NONE) // logic for non-sculpt volumes { if (volume->getPath().isOpen() == FALSE) { //wrap normals on T for (S32 i = 0; i < mNumS; i++) { - LLVector3 norm = mVertices[i].mNormal + mVertices[mNumS*(mNumT-1)+i].mNormal; - mVertices[i].mNormal = norm; - mVertices[mNumS*(mNumT-1)+i].mNormal = norm; + LLVector4a n; + n.setAdd(norm[i], norm[mNumS*(mNumT-1)+i]); + norm[i] = n; + norm[mNumS*(mNumT-1)+i] = n; } } @@ -6080,9 +6077,10 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { //wrap normals on S for (S32 i = 0; i < mNumT; i++) { - LLVector3 norm = mVertices[mNumS*i].mNormal + mVertices[mNumS*i+mNumS-1].mNormal; - mVertices[mNumS * i].mNormal = norm; - mVertices[mNumS * i+mNumS-1].mNormal = norm; + LLVector4a n; + n.setAdd(norm[mNumS*i], norm[mNumS*i+mNumS-1]); + norm[mNumS * i] = n; + norm[mNumS * i+mNumS-1] = n; } } @@ -6093,7 +6091,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { //all lower S have same normal for 
(S32 i = 0; i < mNumT; i++) { - mVertices[mNumS*i].mNormal = LLVector3(1,0,0); + norm[mNumS*i].set(1,0,0); } } @@ -6101,7 +6099,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { //all upper S have same normal for (S32 i = 0; i < mNumT; i++) { - mVertices[mNumS*i+mNumS-1].mNormal = LLVector3(-1,0,0); + norm[mNumS*i+mNumS-1].set(-1,0,0); } } } @@ -6129,30 +6127,33 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { // average normals for north pole - LLVector3 average(0.0, 0.0, 0.0); + LLVector4a average; + average.clear(); + for (S32 i = 0; i < mNumS; i++) { - average += mVertices[i].mNormal; + average.add(norm[i]); } // set average for (S32 i = 0; i < mNumS; i++) { - mVertices[i].mNormal = average; + norm[i] = average; } // average normals for south pole - average = LLVector3(0.0, 0.0, 0.0); + average.clear(); + for (S32 i = 0; i < mNumS; i++) { - average += mVertices[i + mNumS * (mNumT - 1)].mNormal; + average.add(norm[i + mNumS * (mNumT - 1)]); } // set average for (S32 i = 0; i < mNumS; i++) { - mVertices[i + mNumS * (mNumT - 1)].mNormal = average; + norm[i + mNumS * (mNumT - 1)] = average; } } @@ -6162,23 +6163,22 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { for (S32 i = 0; i < mNumT; i++) { - LLVector3 norm = mVertices[mNumS*i].mNormal + mVertices[mNumS*i+mNumS-1].mNormal; - mVertices[mNumS * i].mNormal = norm; - mVertices[mNumS * i+mNumS-1].mNormal = norm; + LLVector4a n; + n.setAdd(norm[mNumS*i], norm[mNumS*i+mNumS-1]); + norm[mNumS * i] = n; + norm[mNumS * i+mNumS-1] = n; } } - - if (wrap_t) { for (S32 i = 0; i < mNumS; i++) { - LLVector3 norm = mVertices[i].mNormal + mVertices[mNumS*(mNumT-1)+i].mNormal; - mVertices[i].mNormal = norm; - mVertices[mNumS*(mNumT-1)+i].mNormal = norm; + LLVector4a n; + n.setAdd(norm[i], norm[mNumS*(mNumT-1)+i]); + norm[i] = n; + norm[mNumS*(mNumT-1)+i] = n; } - } } @@ -6188,7 +6188,8 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) // Finds binormal based on three vertices with texture coordinates. // Fills in dummy values if the triangle has degenerate texture coordinates. 
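// [Editorial gloss on the math below] For each world axis k in {x,y,z} the
// code builds three points (pos_k, u, v) and takes the cross product r_k of
// two edge vectors, i.e. the normal (a,b,c) of the plane
// a*pos_k + b*u + c*v = d through the triangle in (coordinate, u, v) space.
// Solving that plane for pos_k gives d(pos_k)/dv = -c/a = -r_k[VZ]/r_k[VX],
// so (-r0[VZ]/r0[VX], -r1[VZ]/r1[VX], -r2[VZ]/r2[VX]) is the surface direction
// along which the v texture coordinate increases, which is the binormal
// (bitangent).  If any r_k[VX] is zero the texture mapping is degenerate and
// the dummy value (0,1,0) is used instead.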
-LLVector3 calc_binormal_from_triangle( +void calc_binormal_from_triangle(LLVector4a& binormal, + const LLVector3& pos0, const LLVector2& tex0, const LLVector3& pos1, @@ -6196,33 +6197,42 @@ LLVector3 calc_binormal_from_triangle( const LLVector3& pos2, const LLVector2& tex2) { - LLVector3 rx0( pos0.mV[VX], tex0.mV[VX], tex0.mV[VY] ); - LLVector3 rx1( pos1.mV[VX], tex1.mV[VX], tex1.mV[VY] ); - LLVector3 rx2( pos2.mV[VX], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a rx0; rx0.set( pos0.mV[VX], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a rx1; rx1.set( pos1.mV[VX], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a rx2; rx2.set( pos2.mV[VX], tex2.mV[VX], tex2.mV[VY] ); - LLVector3 ry0( pos0.mV[VY], tex0.mV[VX], tex0.mV[VY] ); - LLVector3 ry1( pos1.mV[VY], tex1.mV[VX], tex1.mV[VY] ); - LLVector3 ry2( pos2.mV[VY], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a ry0; ry0.set( pos0.mV[VY], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a ry1; ry1.set( pos1.mV[VY], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a ry2; ry2.set( pos2.mV[VY], tex2.mV[VX], tex2.mV[VY] ); - LLVector3 rz0( pos0.mV[VZ], tex0.mV[VX], tex0.mV[VY] ); - LLVector3 rz1( pos1.mV[VZ], tex1.mV[VX], tex1.mV[VY] ); - LLVector3 rz2( pos2.mV[VZ], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a rz0; rz0.set( pos0.mV[VZ], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a rz1; rz1.set( pos1.mV[VZ], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a rz2; rz2.set( pos2.mV[VZ], tex2.mV[VX], tex2.mV[VY] ); - LLVector3 r0 = (rx0 - rx1) % (rx0 - rx2); - LLVector3 r1 = (ry0 - ry1) % (ry0 - ry2); - LLVector3 r2 = (rz0 - rz1) % (rz0 - rz2); + LLVector4a lhs, rhs; + + LLVector4a r0; + lhs.setSub(rx0, rx1); rhs.setSub(rx0, rx2); + r0.setCross3(lhs, rhs); + + LLVector4a r1; + lhs.setSub(ry0, ry1); rhs.setSub(ry0, ry2); + r1.setCross3(lhs, rhs); + + LLVector4a r2; + lhs.setSub(rz0, rz1); rhs.setSub(rz0, rz2); + r2.setCross3(lhs, rhs); - if( r0.mV[VX] && r1.mV[VX] && r2.mV[VX] ) + if( r0[VX] && r1[VX] && r2[VX] ) { - LLVector3 binormal( - -r0.mV[VZ] / r0.mV[VX], - -r1.mV[VZ] / r1.mV[VX], - -r2.mV[VZ] / r2.mV[VX]); + binormal.set( + -r0[VZ] / r0[VX], + -r1[VZ] / r1[VX], + -r2[VZ] / r2[VX]); // binormal.normVec(); - return binormal; } else { - return LLVector3( 0, 1 , 0 ); + binormal.set( 0, 1 , 0 ); } } diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 17be642d4a..911db6f94b 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -40,6 +40,7 @@ class LLPathParams; class LLVolumeParams; class LLProfile; class LLPath; +class LLVector4a; class LLVolumeFace; class LLVolume; @@ -791,6 +792,19 @@ public: class LLVolumeFace { public: + class VertexData + { + public: + LLVector3 mPosition; + LLVector3 mNormal; + LLVector3 mBinormal; + LLVector2 mTexCoord; + + bool operator<(const VertexData& rhs) const; + bool operator==(const VertexData& rhs) const; + bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; + }; + LLVolumeFace() : mID(0), mTypeMask(0), @@ -808,26 +822,18 @@ public: { } + ~LLVolumeFace(); + BOOL create(LLVolume* volume, BOOL partial_build = FALSE); void createBinormals(); void appendFace(const LLVolumeFace& face, LLMatrix4& transform, LLMatrix4& normal_tranform); void resizeVertices(S32 num_verts); + void allocateBinormals(S32 num_verts); void resizeIndices(S32 num_indices); + void fillFromLegacyData(std::vector& v, std::vector& idx); - class VertexData - { - public: - LLVector3 mPosition; - LLVector3 mNormal; - LLVector3 mBinormal; - LLVector2 mTexCoord; - - bool operator<(const VertexData& rhs) const; - bool operator==(const VertexData& rhs) const; - bool 
compareNormal(const VertexData& rhs, F32 angle_cutoff) const; - }; class VertexMapData : public LLVolumeFace::VertexData { @@ -1051,7 +1057,8 @@ public: std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params); -LLVector3 calc_binormal_from_triangle( +void calc_binormal_from_triangle( + LLVector4a& binormal, const LLVector3& pos0, const LLVector2& tex0, const LLVector3& pos1, @@ -1060,7 +1067,7 @@ LLVector3 calc_binormal_from_triangle( const LLVector2& tex2); BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size); -BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, +BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided); -- cgit v1.2.3 From d7cab99ba74b214c557d9b5e02a7800b6a25c109 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sat, 22 May 2010 12:37:53 -0500 Subject: Fix for a couple dumb mistakes. --- indra/llmath/llvolume.cpp | 38 +++++++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 11 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 01fe2be371..88969af4bd 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5206,7 +5206,7 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) if (!partial_build) { - resizeIndices(grid_size*6); + resizeIndices(grid_size*grid_size*6); U16* out = mIndices; @@ -5689,12 +5689,21 @@ void LLVolumeFace::resizeVertices(S32 num_verts) mBinormals = NULL; - mPositions = (F32*) _mm_malloc(num_verts*16, 16); - mNormals = (F32*) _mm_malloc(num_verts*16, 16); + if (num_verts) + { + mPositions = (F32*) _mm_malloc(num_verts*16, 16); + mNormals = (F32*) _mm_malloc(num_verts*16, 16); - //pad texture coordinate block end to allow for QWORD reads - S32 size = ((num_verts*8) + 0xF) & ~0xF; - mTexCoords = (F32*) _mm_malloc(size, 16); + //pad texture coordinate block end to allow for QWORD reads + S32 size = ((num_verts*8) + 0xF) & ~0xF; + mTexCoords = (F32*) _mm_malloc(size, 16); + } + else + { + mPositions = NULL; + mNormals = NULL; + mTexCoords = NULL; + } mNumVertices = num_verts; } @@ -5710,10 +5719,17 @@ void LLVolumeFace::resizeIndices(S32 num_indices) { _mm_free(mIndices); - //pad index block end to allow for QWORD reads - S32 size = ((num_indices*2) + 0xF) & ~0xF; - - mIndices = (U16*) _mm_malloc(size,16); + if (num_indices) + { + //pad index block end to allow for QWORD reads + S32 size = ((num_indices*2) + 0xF) & ~0xF; + + mIndices = (U16*) _mm_malloc(size,16); + } + else + { + mIndices = NULL; + } mNumIndices = num_indices; } @@ -5954,7 +5970,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) LLVector3& face_max = mExtents[1]; mCenter.clearVec(); - face_min = face_max = LLVector3((F32*) &(pos[i].mQ)); + face_min = face_max = LLVector3((F32*) &(pos[0].mQ)); for (U32 i = 1; i < mNumVertices; ++i) { -- cgit v1.2.3 From 8c32e3bf29337e330a313d0e4865ebd03ad9ca50 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 24 May 2010 14:03:10 -0500 Subject: Fix for bad indexes on cube faces. Extra validation on vertex buffers. 
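
For reference (a summary inferred from the hunks below, not the author's
wording): the uncut cube cap now writes indices relative to the face's own
vertex block, so the old vtop base offset is dropped, and the raycast path
builds binormals on demand before dereferencing face.mBinormals.  In sketch
form:

    // after resizeVertices()/resizeIndices(), indices are face-local:
    *out++ = ((gy * (grid_size + 1)) + gx + idxs[i]);
    // previously they were offset into a shared vertex pool:
    *out++ = (vtop + (gy * (grid_size + 1)) + gx + idxs[i]);
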
--- indra/llmath/llvolume.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 88969af4bd..31544016db 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4173,7 +4173,7 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, for (S32 i = start_face; i <= end_face; i++) { - const LLVolumeFace &face = getVolumeFace((U32)i); + LLVolumeFace &face = mVolumeFaces[i]; LLVector3 box_center = (face.mExtents[0] + face.mExtents[1]) / 2.f; LLVector3 box_size = face.mExtents[1] - face.mExtents[0]; @@ -4235,6 +4235,10 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, if (bi_normal != NULL) { + if (!face.mBinormals) + { + face.createBinormals(); + } LLVector4* binormal = (LLVector4*) face.mBinormals; if (binormal) { @@ -5174,7 +5178,6 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) LLVector4a* binorm = (LLVector4a*) mBinormals; LLVector2* tc = (LLVector2*) mTexCoords; - S32 vtop = mNumVertices; for(int gx = 0;gx=0;i--) { - *out++ = (vtop+(gy*(grid_size+1))+gx+idxs[i]); + *out++ = ((gy*(grid_size+1))+gx+idxs[i]); } } else { for(S32 i=0;i<6;i++) { - *out++ = (vtop+(gy*(grid_size+1))+gx+idxs[i]); + *out++ = ((gy*(grid_size+1))+gx+idxs[i]); } } } -- cgit v1.2.3 From a2eb86b00927439afcf27219e38e58eba421294f Mon Sep 17 00:00:00 2001 From: "Matthew Breindel (Falcon)" Date: Mon, 24 May 2010 13:37:59 -0700 Subject: Ack. Fixed a bunch of stupid type mistakes in llvector4a. --- indra/llmath/CMakeLists.txt | 2 ++ indra/llmath/llvolume.cpp | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/CMakeLists.txt b/indra/llmath/CMakeLists.txt index e93fe90650..367486eee7 100644 --- a/indra/llmath/CMakeLists.txt +++ b/indra/llmath/CMakeLists.txt @@ -62,6 +62,8 @@ set(llmath_HEADER_FILES llv4matrix3.h llv4matrix4.h llv4vector3.h + llvector4a.h + llmatrix4a.h llvolume.h llvolumemgr.h llsdutil_math.h diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 88969af4bd..d7d36d901d 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4126,14 +4126,14 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, norm_mat.rotate(n[v1], t); - t.normalize3Fast(); + t.normalize3fast(); normals.push_back(LLVector3(t[0], t[1], t[2])); mat.affineTransform(v[v2], t); vertices.push_back(LLVector3(t[0], t[1], t[2])); norm_mat.rotate(n[v2], t); - t.normalize3Fast(); + t.normalize3fast(); normals.push_back(LLVector3(t[0], t[1], t[2])); segments.push_back(vertices.size()); @@ -5349,7 +5349,7 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) mCenter, cuv, mesh[0+offset].mPos, tc[0], mesh[1+offset].mPos, tc[1]); - binormal.normalize3Fast(); + binormal.normalize3fast(); LLVector4a normal; LLVector4a d0, d1; @@ -5369,7 +5369,7 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) normal.setCross3(d1, d0); } - normal.normalize3Fast(); + normal.normalize3fast(); VertexData vd; vd.mPosition = mCenter; @@ -5675,7 +5675,7 @@ void LLVolumeFace::createBinormals() //normalize binormals for (U32 i = 0; i < mNumVertices; i++) { - binorm[i].normalize3Fast(); + binorm[i].normalize3fast(); } } } @@ -5803,7 +5803,7 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat { mat.affineTransform(src_pos[i], dst_pos[i]); norm_mat.rotate(src_norm[i], dst_norm[i]); - 
dst_norm[i].normalize3Fast(); + dst_norm[i].normalize3fast(); dst_tc[i] = src_tc[i]; -- cgit v1.2.3 From c0b654dd4bee466a2ccbf050e532fb4a05acc549 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 24 May 2010 17:33:41 -0500 Subject: Fix for bad feeding of vectorized raycast. --- indra/llmath/llvolume.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 2ff1463b7c..f05e6eb9d9 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4187,6 +4187,9 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, LLVector4a starta, dira; + starta.load3(start.mV); + dira.load3(dir.mV); + LLVector4a* p = (LLVector4a*) face.mPositions; for (U32 tri = 0; tri < face.mNumIndices/3; tri++) @@ -4235,17 +4238,10 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, if (bi_normal != NULL) { - if (!face.mBinormals) - { - face.createBinormals(); - } LLVector4* binormal = (LLVector4*) face.mBinormals; - if (binormal) - { - *bi_normal = ((1.f - a - b) * LLVector3(binormal[index1]) + + *bi_normal = ((1.f - a - b) * LLVector3(binormal[index1]) + a * LLVector3(binormal[index2]) + b * LLVector3(binormal[index3])); - } } } -- cgit v1.2.3 From e6fe3b1f1aa888e4594c89154ef895b3cf5498e9 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 25 May 2010 03:55:01 -0500 Subject: Better vectorization of various things. Turn off debug gl by default. --- indra/llmath/llvolume.cpp | 383 ++++++++++++++++++++++++++++++---------------- indra/llmath/llvolume.h | 110 ++++++++++--- 2 files changed, 333 insertions(+), 160 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index f05e6eb9d9..d8fbc081fa 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -237,6 +237,21 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co return TRUE; } +//helper for non-aligned vectors +BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, + F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided) +{ + LLVector4a vert0a, vert1a, vert2a, origa, dira; + vert0a.load3(vert0.mV); + vert1a.load3(vert1.mV); + vert2a.load3(vert2.mV); + origa.load3(orig.mV); + dira.load3(dir.mV); + + return LLTriangleRayIntersect(vert0a, vert1a, vert2a, origa, dira, + intersection_a, intersection_b, intersection_t, two_sided); +} + //------------------------------------------------------------------- // statics @@ -1889,15 +1904,15 @@ bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)co bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const { bool retval = false; - if (rhs.mPosition == mPosition && rhs.mTexCoord == mTexCoord) + if (rhs.mData[POSITION].equal3(mData[POSITION]) && rhs.mTexCoord == mTexCoord) { if (angle_cutoff > 1.f) { - retval = (mNormal == rhs.mNormal); + retval = (mData[NORMAL].equal3(rhs.mData[NORMAL])); } else { - F32 cur_angle = rhs.mNormal*mNormal; + F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]); retval = cur_angle > angle_cutoff; } } @@ -2081,9 +2096,9 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) min = max = LLVector3(0,0,0); - F32* pos_out = face.mPositions; - F32* norm_out = face.mNormals; - F32* tc_out = face.mTexCoords; + F32* pos_out = (F32*) 
face.mPositions; + F32* norm_out = (F32*) face.mNormals; + F32* tc_out = (F32*) face.mTexCoords; for (U32 j = 0; j < num_verts; ++j) { @@ -2188,13 +2203,15 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) void tetrahedron_set_normal(LLVolumeFace::VertexData* cv) { - LLVector3 nrm = (cv[1].mPosition-cv[0].mPosition)%(cv[2].mPosition-cv[0].mPosition); - - nrm.normVec(); - - cv[0].mNormal = nrm; - cv[1].mNormal = nrm; - cv[2].mNormal = nrm; + LLVector4a v0; + v0.setSub(cv[1].getPosition(), cv[0].getNormal()); + LLVector4a v1; + v1.setSub(cv[2].getNormal(), cv[0].getPosition()); + + cv[0].getNormal().setCross3(v0,v1); + cv[0].getNormal().normalize3fast(); + cv[1].setNormal(cv[0].getNormal()); + cv[2].setNormal(cv[1].getNormal()); } BOOL LLVolume::isTetrahedron() @@ -2209,12 +2226,12 @@ void LLVolume::makeTetrahedron() LLVolumeFace face; F32 x = 0.25f; - LLVector3 p[] = + LLVector4a p[] = { //unit tetrahedron corners - LLVector3(x,x,x), - LLVector3(-x,-x,x), - LLVector3(-x,x,-x), - LLVector3(x,-x,-x) + LLVector4a(x,x,x), + LLVector4a(-x,-x,x), + LLVector4a(-x,x,-x), + LLVector4a(x,-x,-x) }; face.mExtents[0].setVec(-x,-x,-x); @@ -2229,9 +2246,9 @@ void LLVolume::makeTetrahedron() //side 1 - cv[0].mPosition = p[1]; - cv[1].mPosition = p[0]; - cv[2].mPosition = p[2]; + cv[0].setPosition(p[1]); + cv[1].setPosition(p[0]); + cv[2].setPosition(p[2]); tetrahedron_set_normal(cv); @@ -2242,14 +2259,14 @@ void LLVolume::makeTetrahedron() LLVector4a* n = (LLVector4a*) face.mNormals; LLVector2* tc = (LLVector2*) face.mTexCoords; - v[0].load3(cv[0].mPosition.mV); - v[1].load3(cv[1].mPosition.mV); - v[2].load3(cv[2].mPosition.mV); + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); v += 3; - n[0].load3(cv[0].mNormal.mV); - n[1].load3(cv[1].mNormal.mV); - n[2].load3(cv[2].mNormal.mV); + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); n += 3; tc[0] = cv[0].mTexCoord; @@ -2259,20 +2276,20 @@ void LLVolume::makeTetrahedron() //side 2 - cv[0].mPosition = p[3]; - cv[1].mPosition = p[0]; - cv[2].mPosition = p[1]; + cv[0].setPosition(p[3]); + cv[1].setPosition(p[0]); + cv[2].setPosition(p[1]); tetrahedron_set_normal(cv); - v[0].load3(cv[0].mPosition.mV); - v[1].load3(cv[1].mPosition.mV); - v[2].load3(cv[2].mPosition.mV); + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); v += 3; - n[0].load3(cv[0].mNormal.mV); - n[1].load3(cv[1].mNormal.mV); - n[2].load3(cv[2].mNormal.mV); + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); n += 3; tc[0] = cv[0].mTexCoord; @@ -2281,20 +2298,20 @@ void LLVolume::makeTetrahedron() tc += 3; //side 3 - cv[0].mPosition = p[3]; - cv[1].mPosition = p[1]; - cv[2].mPosition = p[2]; + cv[0].setPosition(p[3]); + cv[1].setPosition(p[1]); + cv[2].setPosition(p[2]); tetrahedron_set_normal(cv); - v[0].load3(cv[0].mPosition.mV); - v[1].load3(cv[1].mPosition.mV); - v[2].load3(cv[2].mPosition.mV); + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); v += 3; - n[0].load3(cv[0].mNormal.mV); - n[1].load3(cv[1].mNormal.mV); - n[2].load3(cv[2].mNormal.mV); + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); n += 3; tc[0] = cv[0].mTexCoord; @@ -2303,20 +2320,20 @@ void LLVolume::makeTetrahedron() tc += 3; //side 4 - cv[0].mPosition = p[2]; - cv[1].mPosition = p[0]; - cv[2].mPosition = p[3]; + cv[0].setPosition(p[2]); + cv[1].setPosition(p[0]); + cv[2].setPosition(p[3]); 
tetrahedron_set_normal(cv); - v[0].load3(cv[0].mPosition.mV); - v[1].load3(cv[1].mPosition.mV); - v[2].load3(cv[2].mPosition.mV); + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); v += 3; - n[0].load3(cv[0].mNormal.mV); - n[1].load3(cv[1].mNormal.mV); - n[2].load3(cv[2].mNormal.mV); + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); n += 3; tc[0] = cv[0].mTexCoord; @@ -3974,9 +3991,9 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, S32 v3 = face.mIndices[j*3+2]; //get current face center - LLVector3 cCenter = (face.mVertices[v1].mPosition + - face.mVertices[v2].mPosition + - face.mVertices[v3].mPosition) / 3.0f; + LLVector3 cCenter = (face.mVertices[v1].getPosition() + + face.mVertices[v2].getPosition() + + face.mVertices[v3].getPosition()) / 3.0f; //for each edge for (S32 k = 0; k < 3; k++) { @@ -3994,9 +4011,9 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, v3 = face.mIndices[nIndex*3+2]; //get neighbor face center - LLVector3 nCenter = (face.mVertices[v1].mPosition + - face.mVertices[v2].mPosition + - face.mVertices[v3].mPosition) / 3.0f; + LLVector3 nCenter = (face.mVertices[v1].getPosition() + + face.mVertices[v2].getPosition() + + face.mVertices[v3].getPosition()) / 3.0f; //draw line vertices.push_back(cCenter); @@ -4020,14 +4037,14 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, //for each vertex for (U32 j = 0; j < face.mNumVertices; j++) { - vertices.push_back(face.mVertices[j].mPosition); - vertices.push_back(face.mVertices[j].mPosition + face.mVertices[j].mNormal*0.1f); + vertices.push_back(face.mVertices[j].getPosition()); + vertices.push_back(face.mVertices[j].getPosition() + face.mVertices[j].getNormal()*0.1f); normals.push_back(LLVector3(0,0,1)); normals.push_back(LLVector3(0,0,1)); segments.push_back(vertices.size()); #if DEBUG_SILHOUETTE_BINORMALS - vertices.push_back(face.mVertices[j].mPosition); - vertices.push_back(face.mVertices[j].mPosition + face.mVertices[j].mBinormal*0.1f); + vertices.push_back(face.mVertices[j].getPosition()); + vertices.push_back(face.mVertices[j].getPosition() + face.mVertices[j].mBinormal*0.1f); normals.push_back(LLVector3(0,0,1)); normals.push_back(LLVector3(0,0,1)); segments.push_back(vertices.size()); @@ -5038,9 +5055,15 @@ BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) } } +void LLVolumeFace::getVertexData(U16 index, LLVolumeFace::VertexData& cv) +{ + cv.setPosition(mPositions[index]); + cv.setNormal(mNormals[index]); + cv.mTexCoord = mTexCoords[index]; +} + void LLVolumeFace::optimize(F32 angle_cutoff) { -#if 0 //disabling until a vectorized version is available LLVolumeFace new_face; VertexMapData::PointMap point_map; @@ -5050,10 +5073,11 @@ void LLVolumeFace::optimize(F32 angle_cutoff) { U16 index = mIndices[i]; - LLVolumeFace::VertexData cv = mVertices[index]; - + LLVolumeFace::VertexData cv; + getVertexData(index, cv); + BOOL found = FALSE; - VertexMapData::PointMap::iterator point_iter = point_map.find(cv.mPosition); + VertexMapData::PointMap::iterator point_iter = point_map.find(cv.getPosition()); if (point_iter != point_map.end()) { //duplicate point might exist for (U32 j = 0; j < point_iter->second.size(); ++j) @@ -5062,7 +5086,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) if (tv.compareNormal(cv, angle_cutoff)) { found = TRUE; - new_face.mIndices.push_back((point_iter->second)[j].mIndex); + new_face.pushIndex((point_iter->second)[j].mIndex); break; } } @@ -5070,14 +5094,14 @@ 
void LLVolumeFace::optimize(F32 angle_cutoff) if (!found) { - new_face.mVertices.push_back(cv); + new_face.pushVertex(cv); U16 index = (U16) new_face.mNumVertices-1; - new_face.mIndices.push_back(index); + new_face.pushIndex(index); VertexMapData d; - d.mPosition = cv.mPosition; + d.setPosition(cv.getPosition()); d.mTexCoord = cv.mTexCoord; - d.mNormal = cv.mNormal; + d.setNormal(cv.getNormal()); d.mIndex = index; if (point_iter != point_map.end()) { @@ -5085,14 +5109,23 @@ void LLVolumeFace::optimize(F32 angle_cutoff) } else { - point_map[d.mPosition].push_back(d); + point_map[d.getPosition()].push_back(d); } } } - mVertices = new_face.mVertices; - mIndices = new_face.mIndices; -#endif + swapData(new_face); +} + +void LLVolumeFace::swapData(LLVolumeFace& rhs) +{ + llswap(rhs.mPositions, mPositions); + llswap(rhs.mNormals, mNormals); + llswap(rhs.mBinormals, mBinormals); + llswap(rhs.mTexCoords, mTexCoords); + llswap(rhs.mIndices,mIndices); + llswap(rhs.mNumVertices, mNumVertices); + llswap(rhs.mNumIndices, mNumIndices); } void LerpPlanarVertex(LLVolumeFace::VertexData& v0, @@ -5102,10 +5135,21 @@ void LerpPlanarVertex(LLVolumeFace::VertexData& v0, F32 coef01, F32 coef02) { - vout.mPosition = v0.mPosition + ((v1.mPosition-v0.mPosition)*coef01)+((v2.mPosition-v0.mPosition)*coef02); + + LLVector4a lhs; + lhs.setSub(v1.getPosition(), v0.getPosition()); + lhs.mul(coef01); + LLVector4a rhs; + rhs.setSub(v2.getPosition(), v0.getPosition()); + rhs.mul(coef02); + + rhs.add(lhs); + rhs.add(v0.getPosition()); + + vout.setPosition(rhs); + vout.mTexCoord = v0.mTexCoord + ((v1.mTexCoord-v0.mTexCoord)*coef01)+((v2.mTexCoord-v0.mTexCoord)*coef02); - vout.mNormal = v0.mNormal; - vout.mBinormal = v0.mBinormal; + vout.setNormal(v0.getNormal()); } BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) @@ -5137,16 +5181,22 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) VertexData corners[4]; VertexData baseVert; for(int t = 0; t < 4; t++){ - corners[t].mPosition = mesh[offset + (grid_size*t)].mPos; + corners[t].getPosition().load3( mesh[offset + (grid_size*t)].mPos.mV); corners[t].mTexCoord.mV[0] = profile[grid_size*t].mV[0]+0.5f; corners[t].mTexCoord.mV[1] = 0.5f - profile[grid_size*t].mV[1]; } - baseVert.mNormal = - ((corners[1].mPosition-corners[0].mPosition) % - (corners[2].mPosition-corners[1].mPosition)); - baseVert.mNormal.normVec(); + + { + LLVector4a lhs; + lhs.setSub(corners[1].getPosition(), corners[0].getPosition()); + LLVector4a rhs; + rhs.setSub(corners[2].getPosition(), corners[1].getPosition()); + baseVert.getNormal().setCross3(lhs, rhs); + baseVert.getNormal().normalize3fast(); + } + if(!(mTypeMask & TOP_MASK)){ - baseVert.mNormal *= -1.0f; + baseVert.getNormal().mul(-1.0f); }else{ //Swap the UVs on the U(X) axis for top face LLVector2 swap; @@ -5161,9 +5211,9 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) LLVector4a binormal; calc_binormal_from_triangle( binormal, - corners[0].mPosition, corners[0].mTexCoord, - corners[1].mPosition, corners[1].mTexCoord, - corners[2].mPosition, corners[2].mTexCoord); + corners[0].getPosition(), corners[0].mTexCoord, + corners[1].getPosition(), corners[1].mTexCoord, + corners[2].getPosition(), corners[2].mTexCoord); S32 size = (grid_size+1)*(grid_size+1); resizeVertices(size); @@ -5185,18 +5235,18 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) (F32)gx/(F32)grid_size, (F32)gy/(F32)grid_size); - (*pos++).load3(newVert.mPosition.mV); - 
(*norm++).load3(baseVert.mNormal.mV); - (*tc++) = newVert.mTexCoord; - (*binorm++).load4a((F32*) &binormal.mQ); + *pos++ = newVert.getPosition(); + *norm++ = baseVert.getNormal(); + *tc++ = newVert.mTexCoord; + *binorm++ = binormal; if (gx == 0 && gy == 0) { - min = max = newVert.mPosition; + min = max = LLVector3(newVert.getPosition().getF32()); } else { - update_min_max(min,max,newVert.mPosition); + update_min_max(min,max,newVert.getPosition().getF32()); } } } @@ -5343,18 +5393,19 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) mCenter = (min+max)*0.5f; cuv = (min_uv + max_uv)*0.5f; + LLVector4a center; + center.load3(mCenter.mV); + LLVector4a binormal; calc_binormal_from_triangle(binormal, - mCenter, cuv, - mesh[0+offset].mPos, tc[0], - mesh[1+offset].mPos, tc[1]); + center, cuv, + pos[0], tc[0], + pos[1], tc[1]); binormal.normalize3fast(); LLVector4a normal; LLVector4a d0, d1; - LLVector4a center; - - center.load3(mCenter.mV); + d0.setSub(center, pos[0]); d1.setSub(center, pos[1]); @@ -5371,7 +5422,7 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) normal.normalize3fast(); VertexData vd; - vd.mPosition = mCenter; + vd.getPosition().load3(mCenter.mV); vd.mTexCoord = cuv; if (!(mTypeMask & HOLLOW_MASK) && !(mTypeMask & OPEN_MASK)) @@ -5634,10 +5685,7 @@ void LLVolumeFace::createBinormals() allocateBinormals(mNumVertices); //generate binormals - LLStrider pos; - pos = (LLVector3*) mPositions; - pos.setStride(16); - + LLVector4a* pos = mPositions; LLVector2* tc = (LLVector2*) mTexCoords; LLVector4a* binorm = (LLVector4a*) mBinormals; @@ -5690,12 +5738,12 @@ void LLVolumeFace::resizeVertices(S32 num_verts) if (num_verts) { - mPositions = (F32*) _mm_malloc(num_verts*16, 16); - mNormals = (F32*) _mm_malloc(num_verts*16, 16); + mPositions = (LLVector4a*) _mm_malloc(num_verts*16, 16); + mNormals = (LLVector4a*) _mm_malloc(num_verts*16, 16); //pad texture coordinate block end to allow for QWORD reads S32 size = ((num_verts*8) + 0xF) & ~0xF; - mTexCoords = (F32*) _mm_malloc(size, 16); + mTexCoords = (LLVector2*) _mm_malloc(size, 16); } else { @@ -5707,10 +5755,61 @@ void LLVolumeFace::resizeVertices(S32 num_verts) mNumVertices = num_verts; } +void LLVolumeFace::pushVertex(const LLVolumeFace::VertexData& cv) +{ + pushVertex(cv.getPosition(), cv.getNormal(), cv.mTexCoord); +} + +void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, const LLVector2& tc) +{ + S32 new_verts = mNumVertices+1; + S32 new_size = new_verts*16; + + //positions + LLVector4a* dst = (LLVector4a*) _mm_malloc(new_size, 16); + if (mPositions) + { + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, new_size/4); + _mm_free(mPositions); + } + mPositions = dst; + + //normals + dst = (LLVector4a*) _mm_malloc(new_size, 16); + if (mNormals) + { + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, new_size/4); + _mm_free(mNormals); + } + mNormals = dst; + + //tex coords + new_size = ((new_verts*8)+0xF) & ~0xF; + + { + LLVector2* dst = (LLVector2*) _mm_malloc(new_size, 16); + if (mTexCoords) + { + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, new_size/4); + _mm_free(mTexCoords); + } + } + + //just clear binormals + _mm_free(mBinormals); + mBinormals = NULL; + + mPositions[mNumVertices] = pos; + mNormals[mNumVertices] = norm; + mTexCoords[mNumVertices] = tc; + + mNumVertices++; +} + void LLVolumeFace::allocateBinormals(S32 num_verts) { _mm_free(mBinormals); - mBinormals = (F32*) _mm_malloc(num_verts*16, 16); + mBinormals = (LLVector4a*) 
_mm_malloc(num_verts*16, 16); } @@ -5733,6 +5832,23 @@ void LLVolumeFace::resizeIndices(S32 num_indices) mNumIndices = num_indices; } +void LLVolumeFace::pushIndex(const U16& idx) +{ + S32 new_count = mNumIndices + 1; + S32 new_size = ((new_count*2)+0xF) & ~0xF; + + S32 old_size = (mNumIndices+0xF) & ~0xF; + if (new_size != old_size) + { + U16* dst = (U16*) _mm_malloc(new_size, 16); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, new_size/4); + _mm_free(mIndices); + mIndices = dst; + } + + mIndices[mNumIndices++] = idx; +} + void LLVolumeFace::fillFromLegacyData(std::vector& v, std::vector& idx) { resizeVertices(v.size()); @@ -5740,14 +5856,9 @@ void LLVolumeFace::fillFromLegacyData(std::vector& v, for (U32 i = 0; i < v.size(); ++i) { - for (U32 j = 0; j < 3; ++j) - { - mPositions[i*4+j] = v[i].mPosition[j]; - mNormals[i*4+j] = v[i].mNormal[j]; - } - - mTexCoords[i*2+0] = v[i].mTexCoord.mV[0]; - mTexCoords[i*2+1] = v[i].mTexCoord.mV[1]; + mPositions[i] = v[i].getPosition(); + mNormals[i] = v[i].getNormal(); + mTexCoords[i] = v[i].mTexCoord; } for (U32 i = 0; i < idx.size(); ++i) @@ -5768,13 +5879,13 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat } - F32* new_pos = (F32*) _mm_malloc(new_count*16, 16); - F32* new_norm = (F32*) _mm_malloc(new_count*16, 16); - F32* new_tc = (F32*) _mm_malloc((new_count*8+0xF) & ~0xF, 16); + LLVector4a* new_pos = (LLVector4a*) _mm_malloc(new_count*16, 16); + LLVector4a* new_norm = (LLVector4a*) _mm_malloc(new_count*16, 16); + LLVector2* new_tc = (LLVector2*) _mm_malloc((new_count*8+0xF) & ~0xF, 16); - LLVector4a::memcpyNonAliased16(new_pos, mPositions, new_count*4); - LLVector4a::memcpyNonAliased16(new_norm, mNormals, new_count*4); - LLVector4a::memcpyNonAliased16(new_tc, mTexCoords, new_count*2); + LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, new_count*4); + LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, new_count*4); + LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, new_count*2); _mm_free(mPositions); _mm_free(mNormals); @@ -6205,24 +6316,24 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) // Fills in dummy values if the triangle has degenerate texture coordinates. 
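// [Illustrative caller, mirroring the createBinormals() hunk above] With the
// reworked signature the binormal is returned through an aligned out-parameter
// and positions are passed as LLVector4a:
//
//   LLVector4a binormal;
//   calc_binormal_from_triangle(binormal,
//       pos[i0], tc[i0],
//       pos[i1], tc[i1],
//       pos[i2], tc[i2]);
//   binormal.normalize3fast();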
void calc_binormal_from_triangle(LLVector4a& binormal, - const LLVector3& pos0, + const LLVector4a& pos0, const LLVector2& tex0, - const LLVector3& pos1, + const LLVector4a& pos1, const LLVector2& tex1, - const LLVector3& pos2, + const LLVector4a& pos2, const LLVector2& tex2) { - LLVector4a rx0; rx0.set( pos0.mV[VX], tex0.mV[VX], tex0.mV[VY] ); - LLVector4a rx1; rx1.set( pos1.mV[VX], tex1.mV[VX], tex1.mV[VY] ); - LLVector4a rx2; rx2.set( pos2.mV[VX], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a rx0( pos0[VX], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a rx1( pos1[VX], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a rx2( pos2[VX], tex2.mV[VX], tex2.mV[VY] ); - LLVector4a ry0; ry0.set( pos0.mV[VY], tex0.mV[VX], tex0.mV[VY] ); - LLVector4a ry1; ry1.set( pos1.mV[VY], tex1.mV[VX], tex1.mV[VY] ); - LLVector4a ry2; ry2.set( pos2.mV[VY], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a ry0( pos0[VY], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a ry1( pos1[VY], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a ry2( pos2[VY], tex2.mV[VX], tex2.mV[VY] ); - LLVector4a rz0; rz0.set( pos0.mV[VZ], tex0.mV[VX], tex0.mV[VY] ); - LLVector4a rz1; rz1.set( pos1.mV[VZ], tex1.mV[VX], tex1.mV[VY] ); - LLVector4a rz2; rz2.set( pos2.mV[VZ], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a rz0( pos0[VZ], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a rz1( pos1[VZ], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a rz2( pos2[VZ], tex2.mV[VX], tex2.mV[VY] ); LLVector4a lhs, rhs; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 911db6f94b..aa58d6d114 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -40,7 +40,6 @@ class LLPathParams; class LLVolumeParams; class LLProfile; class LLPath; -class LLVector4a; class LLVolumeFace; class LLVolume; @@ -56,6 +55,7 @@ class LLVolume; #include "v4coloru.h" #include "llrefcount.h" #include "llfile.h" +#include "llvector4a.h" //============================================================================ @@ -794,15 +794,74 @@ class LLVolumeFace public: class VertexData { + enum + { + POSITION = 0, + NORMAL = 1 + }; + + private: + void init() + { + mData = (LLVector4a*) _mm_malloc(32, 16); + } public: - LLVector3 mPosition; - LLVector3 mNormal; - LLVector3 mBinormal; + VertexData() + { + init(); + } + + VertexData(const VertexData& rhs) + { + init(); + LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8); + mTexCoord = rhs.mTexCoord; + } + + ~VertexData() + { + _mm_free(mData); + } + + LLVector4a& getPosition() + { + return mData[POSITION]; + } + + LLVector4a& getNormal() + { + return mData[NORMAL]; + } + + const LLVector4a& getPosition() const + { + return mData[POSITION]; + } + + const LLVector4a& getNormal() const + { + return mData[NORMAL]; + } + + + void setPosition(const LLVector4a& pos) + { + mData[POSITION] = pos; + } + + void setNormal(const LLVector4a& norm) + { + mData[NORMAL] = norm; + } + LLVector2 mTexCoord; bool operator<(const VertexData& rhs) const; bool operator==(const VertexData& rhs) const; bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; + + private: + LLVector4a* mData; }; LLVolumeFace() : @@ -834,6 +893,13 @@ public: void resizeIndices(S32 num_indices); void fillFromLegacyData(std::vector& v, std::vector& idx); + void pushVertex(const VertexData& cv); + void pushVertex(const LLVector4a& pos, const LLVector4a& norm, const LLVector2& tc); + void pushIndex(const U16& idx); + + void swapData(LLVolumeFace& rhs); + + void getVertexData(U16 indx, LLVolumeFace::VertexData& cv); class VertexMapData : public LLVolumeFace::VertexData { @@ -842,28 +908,20 @@ 
public: bool operator==(const LLVolumeFace::VertexData& rhs) const { - return mPosition == rhs.mPosition && + return getPosition().equal3(rhs.getPosition()) && mTexCoord == rhs.mTexCoord && - mNormal == rhs.mNormal; + getNormal().equal3(rhs.getNormal()); } struct ComparePosition { - bool operator()(const LLVector3& a, const LLVector3& b) const + bool operator()(const LLVector4a& a, const LLVector4a& b) const { - if (a.mV[0] != b.mV[0]) - { - return a.mV[0] < b.mV[0]; - } - if (a.mV[1] != b.mV[1]) - { - return a.mV[1] < b.mV[1]; - } - return a.mV[2] < b.mV[2]; + return a.less3(b); } }; - typedef std::map, VertexMapData::ComparePosition > PointMap; + typedef std::map, VertexMapData::ComparePosition > PointMap; }; void optimize(F32 angle_cutoff = 2.f); @@ -899,10 +957,10 @@ public: S32 mNumVertices; S32 mNumIndices; - F32* mPositions; - F32* mNormals; - F32* mBinormals; - F32* mTexCoords; + LLVector4a* mPositions; + LLVector4a* mNormals; + LLVector4a* mBinormals; + LLVector2* mTexCoords; U16* mIndices; std::vector mEdge; @@ -1059,14 +1117,18 @@ std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params); void calc_binormal_from_triangle( LLVector4a& binormal, - const LLVector3& pos0, + const LLVector4a& pos0, const LLVector2& tex0, - const LLVector3& pos1, + const LLVector4a& pos1, const LLVector2& tex1, - const LLVector3& pos2, + const LLVector4a& pos2, const LLVector2& tex2); BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size); + +BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, + F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided); + BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided); -- cgit v1.2.3 From c98b1b3fd9341834978aff0e841714e206d28c0a Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 26 May 2010 03:29:19 -0500 Subject: Fully aligned llvolume --- indra/llmath/llvolume.cpp | 353 +++++++++++++++++++++++++++++++++++----------- indra/llmath/llvolume.h | 99 ++++--------- indra/llmath/v3math.h | 1 - 3 files changed, 302 insertions(+), 151 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index d8fbc081fa..9b6e2488e6 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -107,22 +107,27 @@ BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLV BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size) { - float fAWdU[3]; - LLVector3 dir; - LLVector3 diff; + return LLLineSegmentBoxIntersect(start.mV, end.mV, center.mV, size.mV); +} + +BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* center, const F32* size) +{ + F32 fAWdU[3]; + F32 dir[3]; + F32 diff[3]; for (U32 i = 0; i < 3; i++) { - dir.mV[i] = 0.5f * (end.mV[i] - start.mV[i]); - diff.mV[i] = (0.5f * (end.mV[i] + start.mV[i])) - center.mV[i]; - fAWdU[i] = fabsf(dir.mV[i]); - if(fabsf(diff.mV[i])>size.mV[i] + fAWdU[i]) return false; + dir[i] = 0.5f * (end[i] - start[i]); + diff[i] = (0.5f * (end[i] + start[i])) - center[i]; + fAWdU[i] = fabsf(dir[i]); + if(fabsf(diff[i])>size[i] + fAWdU[i]) return false; } float f; - f = dir.mV[1] * diff.mV[2] - dir.mV[2] 
* diff.mV[1]; if(fabsf(f)>size.mV[1]*fAWdU[2] + size.mV[2]*fAWdU[1]) return false; - f = dir.mV[2] * diff.mV[0] - dir.mV[0] * diff.mV[2]; if(fabsf(f)>size.mV[0]*fAWdU[2] + size.mV[2]*fAWdU[0]) return false; - f = dir.mV[0] * diff.mV[1] - dir.mV[1] * diff.mV[0]; if(fabsf(f)>size.mV[0]*fAWdU[1] + size.mV[1]*fAWdU[0]) return false; + f = dir[1] * diff[2] - dir[2] * diff[1]; if(fabsf(f)>size[1]*fAWdU[2] + size[2]*fAWdU[1]) return false; + f = dir[2] * diff[0] - dir[0] * diff[2]; if(fabsf(f)>size[0]*fAWdU[2] + size[2]*fAWdU[0]) return false; + f = dir[0] * diff[1] - dir[1] * diff[0]; if(fabsf(f)>size[0]*fAWdU[1] + size[1]*fAWdU[0]) return false; return true; } @@ -1869,6 +1874,59 @@ BOOL LLVolume::generate() return FALSE; } +void LLVolumeFace::VertexData::init() +{ + mData = (LLVector4a*) _mm_malloc(32, 16); +} + +LLVolumeFace::VertexData::VertexData() +{ + init(); +} + +LLVolumeFace::VertexData::VertexData(const VertexData& rhs) +{ + init(); + LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8); + mTexCoord = rhs.mTexCoord; +} + +LLVolumeFace::VertexData::~VertexData() +{ + _mm_free(mData); +} + +LLVector4a& LLVolumeFace::VertexData::getPosition() +{ + return mData[POSITION]; +} + +LLVector4a& LLVolumeFace::VertexData::getNormal() +{ + return mData[NORMAL]; +} + +const LLVector4a& LLVolumeFace::VertexData::getPosition() const +{ + return mData[POSITION]; +} + +const LLVector4a& LLVolumeFace::VertexData::getNormal() const +{ + return mData[NORMAL]; +} + + +void LLVolumeFace::VertexData::setPosition(const LLVector4a& pos) +{ + mData[POSITION] = pos; +} + +void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) +{ + mData[NORMAL] = norm; +} + bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const { const U8* l = (const U8*) this; @@ -2037,7 +2095,7 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) if (mdl[i].has("Weights")) { - face.mWeights.resize(num_verts); + face.allocateWeights(num_verts); LLSD::Binary weights = mdl[i]["Weights"]; @@ -2050,13 +2108,15 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) U8 joint = weights[idx++]; U32 cur_influence = 0; + LLVector4 wght(0,0,0,0); + while (joint != END_INFLUENCES) { U16 influence = weights[idx++]; influence |= ((U16) weights[idx++] << 8); F32 w = llmin((F32) influence / 65535.f, 0.99999f); - face.mWeights[cur_vertex].mV[cur_influence++] = (F32) joint + w; + wght.mV[cur_influence++] = (F32) joint + w; if (cur_influence >= 4) { @@ -2068,6 +2128,8 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } } + face.mWeights[cur_vertex].loadua(wght.mV); + cur_vertex++; } @@ -2078,62 +2140,70 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } - LLVector3 min_pos; - LLVector3 max_pos; + LLVector3 minp; + LLVector3 maxp; LLVector2 min_tc; LLVector2 max_tc; - min_pos.setValue(mdl[i]["PositionDomain"]["Min"]); - max_pos.setValue(mdl[i]["PositionDomain"]["Max"]); + minp.setValue(mdl[i]["PositionDomain"]["Min"]); + maxp.setValue(mdl[i]["PositionDomain"]["Max"]); + LLVector4a min_pos, max_pos; + min_pos.load3(minp.mV); + max_pos.load3(maxp.mV); + min_tc.setValue(mdl[i]["TexCoord0Domain"]["Min"]); max_tc.setValue(mdl[i]["TexCoord0Domain"]["Max"]); - LLVector3 pos_range = max_pos - min_pos; + LLVector4a pos_range; + pos_range.setSub(max_pos, min_pos); LLVector2 tc_range = max_tc - min_tc; - LLVector3& min = face.mExtents[0]; - LLVector3& max = face.mExtents[1]; - - min = max = LLVector3(0,0,0); + LLVector4a& min = face.mExtents[0]; + LLVector4a& max = 
face.mExtents[1]; - F32* pos_out = (F32*) face.mPositions; - F32* norm_out = (F32*) face.mNormals; - F32* tc_out = (F32*) face.mTexCoords; + min.clear(); + max.clear(); + + LLVector4a* pos_out = face.mPositions; + LLVector4a* norm_out = face.mNormals; + LLVector2* tc_out = face.mTexCoords; for (U32 j = 0; j < num_verts; ++j) { U16* v = (U16*) &(pos[j*3*2]); - pos_out[0] = (F32) v[0] / 65535.f * pos_range.mV[0] + min_pos.mV[0]; - pos_out[1] = (F32) v[1] / 65535.f * pos_range.mV[1] + min_pos.mV[1]; - pos_out[2] = (F32) v[2] / 65535.f * pos_range.mV[2] + min_pos.mV[2]; - + pos_out->set((F32) v[0], (F32) v[1], (F32) v[2]); + pos_out->div(65535.f); + pos_out->mul(pos_range); + pos_out->add(min_pos); if (j == 0) { - min = max = LLVector3(pos_out); + min = *pos_out; + max = min; } else { - update_min_max(min,max,pos_out); + min.setMin(*pos_out); + max.setMax(*pos_out); } - pos_out += 4; + pos_out++; U16* n = (U16*) &(norm[j*3*2]); - - norm_out[0] = (F32) n[0] / 65535.f * 2.f - 1.f; - norm_out[1] = (F32) n[1] / 65535.f * 2.f - 1.f; - norm_out[2] = (F32) n[2] / 65535.f * 2.f - 1.f; - norm_out += 4; + norm_out->set((F32) n[0], (F32) n[1], (F32) n[2]); + norm_out->div(65535.f); + norm_out->mul(2.f); + norm_out->sub(1.f); + norm_out++; U16* t = (U16*) &(tc[j*2*2]); - tc_out[0] = (F32) t[0] / 65535.f * tc_range.mV[0] + min_tc.mV[0]; - tc_out[1] = (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]; + tc_out->mV[0] = (F32) t[0] / 65535.f * tc_range.mV[0] + min_tc.mV[0]; + tc_out->mV[1] = (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]; - tc_out += 8; + tc_out++; } @@ -2234,8 +2304,8 @@ void LLVolume::makeTetrahedron() LLVector4a(x,-x,-x) }; - face.mExtents[0].setVec(-x,-x,-x); - face.mExtents[1].setVec(x,x,x); + face.mExtents[0].splat(-x); + face.mExtents[1].splat(x); LLVolumeFace::VertexData cv[3]; @@ -4165,6 +4235,18 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, S32 face, LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) +{ + LLVector4a starta, enda; + starta.load3(start.mV); + enda.load3(end.mV); + + return lineSegmentIntersect(starta, enda, face, intersection, tex_coord, normal, bi_normal); + +} + +S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, + S32 face, + LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) { S32 hit_face = -1; @@ -4182,7 +4264,8 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, end_face = face; } - LLVector3 dir = end - start; + LLVector4a dir; + dir.setSub(end, start); F32 closest_t = 2.f; // must be larger than 1 @@ -4192,21 +4275,20 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, { LLVolumeFace &face = mVolumeFaces[i]; - LLVector3 box_center = (face.mExtents[0] + face.mExtents[1]) / 2.f; - LLVector3 box_size = face.mExtents[1] - face.mExtents[0]; + LLVector4a box_center; + box_center.setAdd(face.mExtents[0], face.mExtents[1]); + box_center.mul(0.5f); + + LLVector4a box_size; + box_size.setSub(face.mExtents[1], face.mExtents[0]); - if (LLLineSegmentBoxIntersect(start, end, box_center, box_size)) + if (LLLineSegmentBoxIntersect(start.getF32(), end.getF32(), box_center.getF32(), box_size.getF32())) { if (bi_normal != NULL) // if the caller wants binormals, we may need to generate them { genBinormals(i); } - LLVector4a starta, dira; - - starta.load3(start.mV); - dira.load3(dir.mV); - LLVector4a* p = 
(LLVector4a*) face.mPositions; for (U32 tri = 0; tri < face.mNumIndices/3; tri++) @@ -4220,7 +4302,7 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, if (LLTriangleRayIntersect(p[index1], p[index2], p[index3], - starta, dira, &a, &b, &t, FALSE)) + start, dir, &a, &b, &t, FALSE)) { if ((t >= 0.f) && // if hit is after start (t <= 1.f) && // and before end @@ -4231,7 +4313,10 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, if (intersection != NULL) { - *intersection = start + dir * closest_t; + LLVector4a intersect = dir; + intersect.mul(closest_t); + intersect.add(start); + intersection->set(intersect.getF32()); } @@ -5029,6 +5114,107 @@ std::ostream& operator<<(std::ostream &s, const LLVolume *volumep) return s; } +LLVolumeFace::LLVolumeFace() : + mID(0), + mTypeMask(0), + mBeginS(0), + mBeginT(0), + mNumS(0), + mNumT(0), + mNumVertices(0), + mNumIndices(0), + mPositions(NULL), + mNormals(NULL), + mBinormals(NULL), + mTexCoords(NULL), + mIndices(NULL), + mWeights(NULL) +{ + mExtents = (LLVector4a*) _mm_malloc(48, 16); + mCenter = mExtents+2; +} + +LLVolumeFace::LLVolumeFace(const LLVolumeFace& src) +{ + mExtents = (LLVector4a*) _mm_malloc(48, 16); + mCenter = mExtents+2; + *this = src; +} + +LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) +{ + if (&src == this) + { //self assignment, do nothing + return *this; + } + + mID = src.mID; + mTypeMask = src.mTypeMask; + mBeginS = src.mBeginS; + mBeginT = src.mBeginT; + mNumS = src.mNumS; + mNumT = src.mNumT; + + mNumVertices = 0; + mNumIndices = 0; + mPositions = NULL; + mNormals = NULL; + mBinormals = NULL; + mTexCoords = NULL; + mWeights = NULL; + mIndices = NULL; + + LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 12); + + resizeVertices(src.mNumVertices); + resizeIndices(src.mNumIndices); + + if (mNumVertices) + { + S32 vert_size = mNumVertices*4; + S32 tc_size = (mNumVertices*8+0xF) & ~0xF; + tc_size /= 4; + + LLVector4a::memcpyNonAliased16((F32*) mPositions, (F32*) src.mPositions, vert_size); + LLVector4a::memcpyNonAliased16((F32*) mNormals, (F32*) src.mNormals, vert_size); + LLVector4a::memcpyNonAliased16((F32*) mTexCoords, (F32*) src.mTexCoords, vert_size); + + if (src.mBinormals) + { + allocateBinormals(src.mNumVertices); + LLVector4a::memcpyNonAliased16((F32*) mBinormals, (F32*) src.mBinormals, vert_size); + } + else + { + _mm_free(mBinormals); + mBinormals = NULL; + } + + if (src.mWeights) + { + allocateWeights(src.mNumVertices); + LLVector4a::memcpyNonAliased16((F32*) mWeights, (F32*) src.mWeights, vert_size); + } + else + { + _mm_free(mWeights); + mWeights = NULL; + } + } + + if (mNumIndices) + { + S32 idx_size = (mNumIndices*2+0xF) & ~0xF; + idx_size /= 4; + + LLVector4a::memcpyNonAliased16((F32*) mIndices, (F32*) src.mIndices, idx_size); + } + + + //delete + return *this; +} + LLVolumeFace::~LLVolumeFace() { _mm_free(mPositions); @@ -5036,6 +5222,8 @@ LLVolumeFace::~LLVolumeFace() _mm_free(mTexCoords); _mm_free(mIndices); _mm_free(mBinormals); + _mm_free(mWeights); + _mm_free(mExtents); } BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) @@ -5169,8 +5357,8 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) num_vertices = (grid_size+1)*(grid_size+1); num_indices = quad_count * 4; - LLVector3& min = mExtents[0]; - LLVector3& max = mExtents[1]; + LLVector4a& min = mExtents[0]; + LLVector4a& max = mExtents[1]; S32 offset = 0; if (mTypeMask & TOP_MASK) @@ -5242,16 +5430,18 @@ BOOL 
LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) if (gx == 0 && gy == 0) { - min = max = LLVector3(newVert.getPosition().getF32()); + min = max = newVert.getPosition(); } else { - update_min_max(min,max,newVert.getPosition().getF32()); + min.setMin(newVert.getPosition()); + max.setMax(newVert.getPosition()); } } } - mCenter = (min + max) * 0.5f; + mCenter->setAdd(min, max); + mCenter->mul(0.5f); if (!partial_build) { @@ -5335,7 +5525,7 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) S32 max_s = volume->getProfile().getTotal(); S32 max_t = volume->getPath().mPath.size(); - mCenter.clearVec(); + mCenter->clear(); S32 offset = 0; if (mTypeMask & TOP_MASK) @@ -5353,8 +5543,8 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) LLVector2 cuv; LLVector2 min_uv, max_uv; - LLVector3& min = mExtents[0]; - LLVector3& max = mExtents[1]; + LLVector4a& min = mExtents[0]; + LLVector4a& max = mExtents[1]; LLVector2* tc = (LLVector2*) mTexCoords; LLVector4a* pos = (LLVector4a*) mPositions; @@ -5380,25 +5570,24 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) if (i == 0) { - min = max = mesh[i+offset].mPos; + min = max = pos[i]; min_uv = max_uv = tc[i]; } else { - update_min_max(min,max, mesh[i+offset].mPos); + update_min_max(min,max,pos[i]); update_min_max(min_uv, max_uv, tc[i]); } } - mCenter = (min+max)*0.5f; - cuv = (min_uv + max_uv)*0.5f; + mCenter->setAdd(min, max); + mCenter->mul(0.5f); - LLVector4a center; - center.load3(mCenter.mV); + cuv = (min_uv + max_uv)*0.5f; LLVector4a binormal; calc_binormal_from_triangle(binormal, - center, cuv, + *mCenter, cuv, pos[0], tc[0], pos[1], tc[1]); binormal.normalize3fast(); @@ -5407,8 +5596,8 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) LLVector4a d0, d1; - d0.setSub(center, pos[0]); - d1.setSub(center, pos[1]); + d0.setSub(*mCenter, pos[0]); + d1.setSub(*mCenter, pos[1]); if (mTypeMask & TOP_MASK) { @@ -5422,12 +5611,12 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) normal.normalize3fast(); VertexData vd; - vd.getPosition().load3(mCenter.mV); + vd.setPosition(*mCenter); vd.mTexCoord = cuv; if (!(mTypeMask & HOLLOW_MASK) && !(mTypeMask & OPEN_MASK)) { - pos[num_vertices].load4a((F32*) ¢er.mQ); + pos[num_vertices] = *mCenter; tc[num_vertices] = cuv; num_vertices++; } @@ -5812,6 +6001,11 @@ void LLVolumeFace::allocateBinormals(S32 num_verts) mBinormals = (LLVector4a*) _mm_malloc(num_verts*16, 16); } +void LLVolumeFace::allocateWeights(S32 num_verts) +{ + _mm_free(mWeights); + mWeights = (LLVector4a*) _mm_malloc(num_verts*16, 16); +} void LLVolumeFace::resizeIndices(S32 num_indices) { @@ -5919,11 +6113,11 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat if (offset == 0 && i == 0) { - mExtents[0] = mExtents[1] = LLVector3((F32*) &(dst_pos[i].mQ)); + mExtents[0] = mExtents[1] = dst_pos[i]; } else { - update_min_max(mExtents[0], mExtents[1], (F32*) &(dst_pos[i].mQ)); + update_min_max(mExtents[0], mExtents[1], dst_pos[i]); } } @@ -6076,18 +6270,19 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) //get bounding box for this side - LLVector3& face_min = mExtents[0]; - LLVector3& face_max = mExtents[1]; - mCenter.clearVec(); + LLVector4a& face_min = mExtents[0]; + LLVector4a& face_max = mExtents[1]; + mCenter->clear(); - face_min = face_max = LLVector3((F32*) &(pos[0].mQ)); + face_min = face_max = pos[0]; for (U32 i = 1; i < mNumVertices; ++i) { - update_min_max(face_min, face_max, 
(F32*) &(pos[i].mQ)); + update_min_max(face_min, face_max, pos[i]); } - mCenter = (face_min + face_max) * 0.5f; + mCenter->setAdd(face_min, face_max); + mCenter->mul(0.5f); S32 cur_index = 0; S32 cur_edge = 0; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index aa58d6d114..7c63266aab 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -50,12 +50,13 @@ class LLVolume; #include "v2math.h" #include "v3math.h" #include "v4math.h" +#include "llvector4a.h" #include "llquaternion.h" #include "llstrider.h" #include "v4coloru.h" #include "llrefcount.h" #include "llfile.h" -#include "llvector4a.h" + //============================================================================ @@ -801,59 +802,19 @@ public: }; private: - void init() - { - mData = (LLVector4a*) _mm_malloc(32, 16); - } + void init(); public: - VertexData() - { - init(); - } - - VertexData(const VertexData& rhs) - { - init(); - LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8); - mTexCoord = rhs.mTexCoord; - } - - ~VertexData() - { - _mm_free(mData); - } - - LLVector4a& getPosition() - { - return mData[POSITION]; - } - - LLVector4a& getNormal() - { - return mData[NORMAL]; - } - - const LLVector4a& getPosition() const - { - return mData[POSITION]; - } - - const LLVector4a& getNormal() const - { - return mData[NORMAL]; - } + VertexData(); + VertexData(const VertexData& rhs); + ~VertexData(); + LLVector4a& getPosition(); + LLVector4a& getNormal(); + const LLVector4a& getPosition() const; + const LLVector4a& getNormal() const; + void setPosition(const LLVector4a& pos); + void setNormal(const LLVector4a& norm); - void setPosition(const LLVector4a& pos) - { - mData[POSITION] = pos; - } - - void setNormal(const LLVector4a& norm) - { - mData[NORMAL] = norm; - } - LLVector2 mTexCoord; bool operator<(const VertexData& rhs) const; @@ -864,22 +825,9 @@ public: LLVector4a* mData; }; - LLVolumeFace() : - mID(0), - mTypeMask(0), - mBeginS(0), - mBeginT(0), - mNumS(0), - mNumT(0), - mNumVertices(0), - mNumIndices(0), - mPositions(NULL), - mNormals(NULL), - mBinormals(NULL), - mTexCoords(NULL), - mIndices(NULL) - { - } + LLVolumeFace(); + LLVolumeFace(const LLVolumeFace& src); + LLVolumeFace& operator=(const LLVolumeFace& rhs); ~LLVolumeFace(); @@ -890,6 +838,7 @@ public: void resizeVertices(S32 num_verts); void allocateBinormals(S32 num_verts); + void allocateWeights(S32 num_verts); void resizeIndices(S32 num_indices); void fillFromLegacyData(std::vector& v, std::vector& idx); @@ -944,15 +893,15 @@ public: public: S32 mID; U32 mTypeMask; - LLVector3 mCenter; - + // Only used for INNER/OUTER faces S32 mBeginS; S32 mBeginT; S32 mNumS; S32 mNumT; - LLVector3 mExtents[2]; //minimum and maximum point of face + LLVector4a* mExtents; //minimum and maximum point of face + LLVector4a* mCenter; S32 mNumVertices; S32 mNumIndices; @@ -968,7 +917,7 @@ public: //list of skin weights for rigged volumes // format is mWeights[vertex_index].mV[influence] = . 
// mWeights.size() should be empty or match mVertices.size() - std::vector mWeights; + LLVector4a* mWeights; private: BOOL createUnCutCubeCap(LLVolume* volume, BOOL partial_build = FALSE); @@ -1051,6 +1000,13 @@ public: LLVector3* normal = NULL, // return the surface normal at the intersection point LLVector3* bi_normal = NULL // return the surface bi-normal at the intersection point ); + + S32 lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, + S32 face = 1, + LLVector3* intersection = NULL, + LLVector2* tex_coord = NULL, + LLVector3* normal = NULL, + LLVector3* bi_normal = NULL); // The following cleans up vertices and triangles, // getting rid of degenerate triangles and duplicate vertices, @@ -1124,6 +1080,7 @@ void calc_binormal_from_triangle( const LLVector4a& pos2, const LLVector2& tex2); +BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* center, const F32* size); BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size); BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, diff --git a/indra/llmath/v3math.h b/indra/llmath/v3math.h index 0e7d72e958..75c860a91e 100644 --- a/indra/llmath/v3math.h +++ b/indra/llmath/v3math.h @@ -36,7 +36,6 @@ #include "llerror.h" #include "llmath.h" - #include "llsd.h" class LLVector2; class LLVector4; -- cgit v1.2.3 From 0e7f4dc5cef8a97cb1dd08aa2f79538ced267888 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sat, 29 May 2010 05:37:38 -0500 Subject: Octree per LLVolumeFace WIP --- indra/llmath/lloctree.h | 18 ++ indra/llmath/llvolume.cpp | 625 ++++++++++++++++++++++++++++++++++++---------- indra/llmath/llvolume.h | 45 ++-- 3 files changed, 539 insertions(+), 149 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 2f34fb1bb0..8bba12783f 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -72,6 +72,13 @@ public: virtual void visit(const LLOctreeNode* branch) = 0; }; +template +class LLOctreeTravelerDepthFirst : public LLOctreeTraveler +{ +public: + virtual void traverse(const LLOctreeNode* node); +}; + template class LLOctreeNode : public LLTreeNode { @@ -710,4 +717,15 @@ void LLOctreeTraveler::traverse(const LLOctreeNode* node) traverse(node->getChild(i)); } } + +template +void LLOctreeTravelerDepthFirst::traverse(const LLOctreeNode* node) +{ + for (U32 i = 0; i < node->getChildCount(); i++) + { + traverse(node->getChild(i)); + } + node->accept(this); +} + #endif diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 9b6e2488e6..d261811aa2 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -45,6 +45,7 @@ #include "m4math.h" #include "m3math.h" #include "llmatrix4a.h" +#include "lloctree.h" #include "lldarray.h" #include "llvolume.h" #include "llstl.h" @@ -132,6 +133,51 @@ BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* cent return true; } +BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size) +{ + LLVector4a fAWdU; + LLVector4a dir; + LLVector4a diff; + + dir.setSub(end, start); + dir.mul(0.5f); + + diff.setAdd(end,start); + diff.mul(0.5f); + diff.sub(center); + fAWdU.setAbs(dir); + + LLVector4a rhs; + rhs.setAdd(size, fAWdU); + + LLVector4a lhs; + lhs.setAbs(diff); + + S32 grt = lhs.greaterThan4(rhs).getComparisonMask(); + + if (grt & 
0x7) + { + return false; + } + + LLVector4a f; + f.setCross3(dir, diff); + f.setAbs(f); + + LLVector4a v0; v0.mQ = _mm_shuffle_ps(size.mQ, size.mQ, _MM_SHUFFLE(3,1,0,0)); + LLVector4a v1; v1.mQ = _mm_shuffle_ps(fAWdU.mQ, fAWdU.mQ, _MM_SHUFFLE(3,2,2,1)); + lhs.setMul(v0, v1); + + v0.mQ = _mm_shuffle_ps(size.mQ, size.mQ, _MM_SHUFFLE(3,2,2,1)); + v1.mQ = _mm_shuffle_ps(fAWdU.mQ, fAWdU.mQ, _MM_SHUFFLE(3,1,0,0)); + rhs.setMul(v0, v1); + rhs.add(lhs); + + grt = f.greaterThan4(rhs).getComparisonMask(); + + return (grt & 0x7) ? false : true; +} + // intersect test between triangle vert0, vert1, vert2 and a ray from orig in direction dir. // returns TRUE if intersecting and returns barycentric coordinates in intersection_a, intersection_b, @@ -139,15 +185,13 @@ BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* cent // Moller-Trumbore algorithm BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, - F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided) + F32& intersection_a, F32& intersection_b, F32& intersection_t) { - F32 u, v, t; /* find vectors for two edges sharing vert0 */ LLVector4a edge1; edge1.setSub(vert1, vert0); - LLVector4a edge2; edge2.setSub(vert2, vert0); @@ -156,87 +200,116 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co pvec.setCross3(dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ - F32 det = edge1.dot3(pvec); - - if (!two_sided) + LLVector4a det; + det.setAllDot3(edge1, pvec); + + if (det.greaterEqual4(LLVector4a::getApproximatelyZero()).getComparisonMask()) { - if (det < F_APPROXIMATELY_ZERO) - { - return FALSE; - } - /* calculate distance from vert0 to ray origin */ LLVector4a tvec; tvec.setSub(orig, vert0); /* calculate U parameter and test bounds */ - u = tvec.dot3(pvec); + LLVector4a u; + u.setAllDot3(tvec,pvec); - if (u < 0.f || u > det) + if (u.greaterEqual4(LLVector4a::getZero()).getComparisonMask() && + u.lessEqual4(det).getComparisonMask()) { - return FALSE; + /* prepare to test V parameter */ + LLVector4a qvec; + qvec.setCross3(tvec, edge1); + + /* calculate V parameter and test bounds */ + LLVector4a v; + v.setAllDot3(dir, qvec); + + + //if (!(v < 0.f || u + v > det)) + + LLVector4a sum_uv; + sum_uv.setAdd(u, v); + + S32 v_gequal = v.greaterEqual4(LLVector4a::getZero()).getComparisonMask(); + S32 sum_lequal = sum_uv.lessEqual4(det).getComparisonMask(); + + if (v_gequal && sum_lequal) + { + /* calculate t, scale parameters, ray intersects triangle */ + LLVector4a t; + t.setAllDot3(edge2,qvec); + + t.div(det); + u.div(det); + v.div(det); + + intersection_a = u[0]; + intersection_b = v[0]; + intersection_t = t[0]; + return TRUE; + } } - - /* prepare to test V parameter */ - LLVector4a qvec; - qvec.setCross3(tvec, edge1); + } - /* calculate V parameter and test bounds */ - v = dir.dot3(qvec); - if (v < 0.f || u + v > det) - { - return FALSE; - } + return FALSE; +} - /* calculate t, scale parameters, ray intersects triangle */ - t = edge2.dot3(qvec); - F32 inv_det = 1.0 / det; - t *= inv_det; - u *= inv_det; - v *= inv_det; - } +BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t) +{ + F32 u, v, t; - else // two sided - { - if (det > -F_APPROXIMATELY_ZERO && det < F_APPROXIMATELY_ZERO) - { - 
return FALSE; - } - F32 inv_det = 1.0 / det; + /* find vectors for two edges sharing vert0 */ + LLVector4a edge1; + edge1.setSub(vert1, vert0); + + + LLVector4a edge2; + edge2.setSub(vert2, vert0); - /* calculate distance from vert0 to ray origin */ - LLVector4a tvec; - tvec.setSub(orig, vert0); - - /* calculate U parameter and test bounds */ - u = (tvec.dot3(pvec)) * inv_det; - if (u < 0.f || u > 1.f) - { - return FALSE; - } + /* begin calculating determinant - also used to calculate U parameter */ + LLVector4a pvec; + pvec.setCross3(dir, edge2); - /* prepare to test V parameter */ - LLVector4a qvec; - qvec.setSub(tvec, edge1); - - /* calculate V parameter and test bounds */ - v = (dir.dot3(qvec)) * inv_det; - - if (v < 0.f || u + v > 1.f) - { - return FALSE; - } + /* if determinant is near zero, ray lies in plane of triangle */ + F32 det = edge1.dot3(pvec); + + + if (det > -F_APPROXIMATELY_ZERO && det < F_APPROXIMATELY_ZERO) + { + return FALSE; + } + + F32 inv_det = 1.f / det; + + /* calculate distance from vert0 to ray origin */ + LLVector4a tvec; + tvec.setSub(orig, vert0); + + /* calculate U parameter and test bounds */ + u = (tvec.dot3(pvec)) * inv_det; + if (u < 0.f || u > 1.f) + { + return FALSE; + } - /* calculate t, ray intersects triangle */ - t = (edge2.dot3(qvec)) * inv_det; + /* prepare to test V parameter */ + tvec.sub(edge1); + + /* calculate V parameter and test bounds */ + v = (dir.dot3(tvec)) * inv_det; + + if (v < 0.f || u + v > 1.f) + { + return FALSE; } + + /* calculate t, ray intersects triangle */ + t = (edge2.dot3(tvec)) * inv_det; - if (intersection_a != NULL) - *intersection_a = u; - if (intersection_b != NULL) - *intersection_b = v; - if (intersection_t != NULL) - *intersection_t = t; + intersection_a = u; + intersection_b = v; + intersection_t = t; return TRUE; @@ -244,7 +317,7 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co //helper for non-aligned vectors BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, - F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided) + F32& intersection_a, F32& intersection_b, F32& intersection_t, BOOL two_sided) { LLVector4a vert0a, vert1a, vert2a, origa, dira; vert0a.load3(vert0.mV); @@ -253,11 +326,130 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons origa.load3(orig.mV); dira.load3(dir.mV); - return LLTriangleRayIntersect(vert0a, vert1a, vert2a, origa, dira, - intersection_a, intersection_b, intersection_t, two_sided); + if (two_sided) + { + return LLTriangleRayIntersectTwoSided(vert0a, vert1a, vert2a, origa, dira, + intersection_a, intersection_b, intersection_t); + } + else + { + return LLTriangleRayIntersect(vert0a, vert1a, vert2a, origa, dira, + intersection_a, intersection_b, intersection_t); + } } +class LLVolumeOctreeListener : public LLOctreeListener +{ +public: + + LLVolumeOctreeListener(LLOctreeNode* node) + { + node->addListener(this); + + mBounds = (LLVector4a*) _mm_malloc(sizeof(LLVector4a)*4, 16); + mExtents = mBounds+2; + } + + ~LLVolumeOctreeListener() + { + _mm_free(mBounds); + } + + //LISTENER FUNCTIONS + virtual void handleChildAddition(const LLOctreeNode* parent, + LLOctreeNode* child) + { + new LLVolumeOctreeListener(child); + } + + virtual void handleStateChange(const LLTreeNode* node) { } + virtual void handleChildRemoval(const LLOctreeNode* parent, + const LLOctreeNode* child) { } + virtual void handleInsertion(const 
LLTreeNode* node, LLVolumeFace::Triangle* tri) { } + virtual void handleRemoval(const LLTreeNode* node, LLVolumeFace::Triangle* tri) { } + virtual void handleDestruction(const LLTreeNode* node) { } + + +public: + LLVector4a* mBounds; // bounding box (center, size) of this node and all its children (tight fit to objects) + LLVector4a* mExtents; // extents (min, max) of this node and all its children +}; + +class LLVolumeOctreeRebound : public LLOctreeTravelerDepthFirst +{ +public: + const LLVolumeFace* mFace; + + LLVolumeOctreeRebound(const LLVolumeFace* face) + { + mFace = face; + } + + virtual void visit(const LLOctreeNode* branch) + { + LLVolumeOctreeListener* node = (LLVolumeOctreeListener*) branch->getListener(0); + + LLVector4a& min = node->mExtents[0]; + LLVector4a& max = node->mExtents[1]; + + if (branch->getElementCount() != 0) + { + const LLVolumeFace::Triangle* tri = *(branch->getData().begin()); + + min = *(tri->mV[0]); + max = *(tri->mV[0]); + + for (LLOctreeNode::const_element_iter iter = + branch->getData().begin(); iter != branch->getData().end(); ++iter) + { + //stretch by triangles in node + tri = *iter; + + min.setMin(*tri->mV[0]); + min.setMin(*tri->mV[1]); + min.setMin(*tri->mV[2]); + + max.setMax(*tri->mV[0]); + max.setMax(*tri->mV[1]); + max.setMax(*tri->mV[2]); + } + + for (S32 i = 0; i < branch->getChildCount(); ++i) + { //stretch by child extents + LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); + min.setMin(child->mExtents[0]); + max.setMax(child->mExtents[1]); + } + } + else if (branch->getChildCount() != 0) + { + LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(0)->getListener(0); + + min = child->mExtents[0]; + max = child->mExtents[1]; + + for (S32 i = 1; i < branch->getChildCount(); ++i) + { //stretch by child extents + child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); + min.setMin(child->mExtents[0]); + max.setMax(child->mExtents[1]); + } + } + else + { + llerrs << "WTF? 
Empty leaf" << llendl; + } + + node->mBounds[0].setAdd(min, max); + node->mBounds[0].mul(0.5f); + + node->mBounds[1].setSub(max,min); + node->mBounds[1].mul(0.5f); + } +}; + + //------------------------------------------------------------------- // statics //------------------------------------------------------------------- @@ -4244,6 +4436,114 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, } +class LLOctreeTriangleRayIntersect : public LLOctreeTraveler +{ +public: + const LLVolumeFace* mFace; + LLVector4a mStart; + LLVector4a mDir; + LLVector4a mEnd; + LLVector3* mIntersection; + LLVector2* mTexCoord; + LLVector3* mNormal; + LLVector3* mBinormal; + F32* mClosestT; + bool mHitFace; + + LLOctreeTriangleRayIntersect(const LLVector4a& start, const LLVector4a& dir, + const LLVolumeFace* face, F32* closest_t, + LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) + : mFace(face), + mStart(start), + mDir(dir), + mIntersection(intersection), + mTexCoord(tex_coord), + mNormal(normal), + mBinormal(bi_normal), + mClosestT(closest_t), + mHitFace(false) + { + mEnd.setAdd(mStart, mDir); + } + + void traverse(const LLOctreeNode* node) + { + LLVolumeOctreeListener* vl = (LLVolumeOctreeListener*) node->getListener(0); + + /*const F32* start = mStart.getF32(); + const F32* end = mEnd.getF32(); + const F32* center = vl->mBounds[0].getF32(); + const F32* size = vl->mBounds[1].getF32();*/ + + if (LLLineSegmentBoxIntersect(mStart, mEnd, vl->mBounds[0], vl->mBounds[1])) + { + node->accept(this); + for (S32 i = 0; i < node->getChildCount(); ++i) + { + traverse(node->getChild(i)); + } + } + } + + void visit(const LLOctreeNode* node) + { + for (LLOctreeNode::const_element_iter iter = + node->getData().begin(); iter != node->getData().end(); ++iter) + { + const LLVolumeFace::Triangle* tri = *iter; + + F32 a, b, t; + + if (LLTriangleRayIntersect(*tri->mV[0], *tri->mV[1], *tri->mV[2], + mStart, mDir, a, b, t)) + { + if ((t >= 0.f) && // if hit is after start + (t <= 1.f) && // and before end + (t < *mClosestT)) // and this hit is closer + { + *mClosestT = t; + mHitFace = true; + + if (mIntersection != NULL) + { + LLVector4a intersect = mDir; + intersect.mul(*mClosestT); + intersect.add(mStart); + mIntersection->set(intersect.getF32()); + } + + + if (mTexCoord != NULL) + { + LLVector2* tc = (LLVector2*) mFace->mTexCoords; + *mTexCoord = ((1.f - a - b) * tc[tri->mIndex[0]] + + a * tc[tri->mIndex[1]] + + b * tc[tri->mIndex[2]]); + + } + + if (mNormal != NULL) + { + LLVector4* norm = (LLVector4*) mFace->mNormals; + + *mNormal = ((1.f - a - b) * LLVector3(norm[tri->mIndex[0]]) + + a * LLVector3(norm[tri->mIndex[1]]) + + b * LLVector3(norm[tri->mIndex[2]])); + } + + if (mBinormal != NULL) + { + LLVector4* binormal = (LLVector4*) mFace->mBinormals; + *mBinormal = ((1.f - a - b) * LLVector3(binormal[tri->mIndex[0]]) + + a * LLVector3(binormal[tri->mIndex[1]]) + + b * LLVector3(binormal[tri->mIndex[2]])); + } + } + } + } + } +}; + S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, S32 face, LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) @@ -4288,66 +4588,19 @@ S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& en { genBinormals(i); } - - LLVector4a* p = (LLVector4a*) face.mPositions; - for (U32 tri = 0; tri < face.mNumIndices/3; tri++) + if (!face.mOctree) { - S32 index1 = face.mIndices[tri*3+0]; - S32 index2 = face.mIndices[tri*3+1]; - S32 index3 = 
face.mIndices[tri*3+2]; - - F32 a, b, t; + face.createOctree(); + } - if (LLTriangleRayIntersect(p[index1], - p[index2], - p[index3], - start, dir, &a, &b, &t, FALSE)) - { - if ((t >= 0.f) && // if hit is after start - (t <= 1.f) && // and before end - (t < closest_t)) // and this hit is closer - { - closest_t = t; - hit_face = i; - - if (intersection != NULL) - { - LLVector4a intersect = dir; - intersect.mul(closest_t); - intersect.add(start); - intersection->set(intersect.getF32()); - } - - - if (tex_coord != NULL) - { - LLVector2* tc = (LLVector2*) face.mTexCoords; - *tex_coord = ((1.f - a - b) * tc[index1] + - a * tc[index2] + - b * tc[index3]); - - } - - if (normal != NULL) - { - LLVector4* norm = (LLVector4*) face.mNormals; - - *normal = ((1.f - a - b) * LLVector3(norm[index1]) + - a * LLVector3(norm[index2]) + - b * LLVector3(norm[index3])); - } - - if (bi_normal != NULL) - { - LLVector4* binormal = (LLVector4*) face.mBinormals; - *bi_normal = ((1.f - a - b) * LLVector3(binormal[index1]) + - a * LLVector3(binormal[index2]) + - b * LLVector3(binormal[index3])); - } + LLVector4a* p = (LLVector4a*) face.mPositions; - } - } + LLOctreeTriangleRayIntersect intersect(start, dir, &face, &closest_t, intersection, tex_coord, normal, bi_normal); + intersect.traverse(face.mOctree); + if (intersect.mHitFace) + { + hit_face = i; } } } @@ -5128,13 +5381,29 @@ LLVolumeFace::LLVolumeFace() : mBinormals(NULL), mTexCoords(NULL), mIndices(NULL), - mWeights(NULL) + mWeights(NULL), + mOctree(NULL) { mExtents = (LLVector4a*) _mm_malloc(48, 16); mCenter = mExtents+2; } LLVolumeFace::LLVolumeFace(const LLVolumeFace& src) +: mID(0), + mTypeMask(0), + mBeginS(0), + mBeginT(0), + mNumS(0), + mNumT(0), + mNumVertices(0), + mNumIndices(0), + mPositions(NULL), + mNormals(NULL), + mBinormals(NULL), + mTexCoords(NULL), + mIndices(NULL), + mWeights(NULL), + mOctree(NULL) { mExtents = (LLVector4a*) _mm_malloc(48, 16); mCenter = mExtents+2; @@ -5157,13 +5426,9 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) mNumVertices = 0; mNumIndices = 0; - mPositions = NULL; - mNormals = NULL; - mBinormals = NULL; - mTexCoords = NULL; - mWeights = NULL; - mIndices = NULL; + freeData(); + LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 12); resizeVertices(src.mNumVertices); @@ -5179,6 +5444,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) LLVector4a::memcpyNonAliased16((F32*) mNormals, (F32*) src.mNormals, vert_size); LLVector4a::memcpyNonAliased16((F32*) mTexCoords, (F32*) src.mTexCoords, vert_size); + if (src.mBinormals) { allocateBinormals(src.mNumVertices); @@ -5216,18 +5482,38 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } LLVolumeFace::~LLVolumeFace() +{ + _mm_free(mExtents); + mExtents = NULL; + + freeData(); +} + +void LLVolumeFace::freeData() { _mm_free(mPositions); + mPositions = NULL; _mm_free(mNormals); + mNormals = NULL; _mm_free(mTexCoords); + mTexCoords = NULL; _mm_free(mIndices); + mIndices = NULL; _mm_free(mBinormals); + mBinormals = NULL; _mm_free(mWeights); - _mm_free(mExtents); + mWeights = NULL; + + delete mOctree; + mOctree = NULL; } BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) { + //tree for this face is no longer valid + delete mOctree; + mOctree = NULL; + if (mTypeMask & CAP_MASK) { return createCap(volume, partial_build); @@ -5250,6 +5536,18 @@ void LLVolumeFace::getVertexData(U16 index, LLVolumeFace::VertexData& cv) cv.mTexCoord = mTexCoords[index]; } +bool LLVolumeFace::VertexMapData::operator==(const 
LLVolumeFace::VertexData& rhs) const +{ + return getPosition().equal3(rhs.getPosition()) && + mTexCoord == rhs.mTexCoord && + getNormal().equal3(rhs.getNormal()); +} + +bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector4a& a, const LLVector4a& b) const +{ + return a.less3(b); +} + void LLVolumeFace::optimize(F32 angle_cutoff) { LLVolumeFace new_face; @@ -5305,6 +5603,65 @@ void LLVolumeFace::optimize(F32 angle_cutoff) swapData(new_face); } + +void LLVolumeFace::createOctree() +{ + mOctree = new LLOctreeRoot(LLVector3d(0,0,0), LLVector3d(1,1,1), NULL); + new LLVolumeOctreeListener(mOctree); + + for (U32 i = 0; i < mNumIndices; i+= 3) + { + Triangle* tri = new Triangle(); + + const LLVector4a& v0 = mPositions[mIndices[i]]; + const LLVector4a& v1 = mPositions[mIndices[i+1]]; + const LLVector4a& v2 = mPositions[mIndices[i+2]]; + + tri->mV[0] = &v0; + tri->mV[1] = &v1; + tri->mV[2] = &v2; + + tri->mIndex[0] = mIndices[i]; + tri->mIndex[1] = mIndices[i+1]; + tri->mIndex[2] = mIndices[i+2]; + + LLVector4a min = v0; + min.setMin(v1); + min.setMin(v2); + + LLVector4a max = v0; + max.setMax(v1); + max.setMax(v2); + + LLVector4a center; + center.setAdd(min, max); + center.mul(0.5f); + + + tri->mPositionGroup.setVec(center[0], center[1], center[2]); + + LLVector4a size; + size.setSub(max,min); + + tri->mRadius = size.length3() * 0.5f; + + mOctree->insert(tri); + } + + LLVolumeOctreeRebound rebound(this); + rebound.traverse(mOctree); +} + +const LLVector3d& LLVolumeFace::Triangle::getPositionGroup() const +{ + return mPositionGroup; +} + +const F64& LLVolumeFace::Triangle::getBinRadius() const +{ + return mRadius; +} + void LLVolumeFace::swapData(LLVolumeFace& rhs) { llswap(rhs.mPositions, mPositions); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 7c63266aab..a40a21b405 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -40,6 +40,9 @@ class LLPathParams; class LLVolumeParams; class LLProfile; class LLPath; + +template class LLOctreeNode; +class LLVector4a; class LLVolumeFace; class LLVolume; @@ -49,15 +52,14 @@ class LLVolume; //#include "vmath.h" #include "v2math.h" #include "v3math.h" +#include "v3dmath.h" #include "v4math.h" -#include "llvector4a.h" #include "llquaternion.h" #include "llstrider.h" #include "v4coloru.h" #include "llrefcount.h" #include "llfile.h" - //============================================================================ const S32 MIN_DETAIL_FACES = 6; @@ -830,6 +832,9 @@ public: LLVolumeFace& operator=(const LLVolumeFace& rhs); ~LLVolumeFace(); +private: + void freeData(); +public: BOOL create(LLVolume* volume, BOOL partial_build = FALSE); void createBinormals(); @@ -855,26 +860,19 @@ public: public: U16 mIndex; - bool operator==(const LLVolumeFace::VertexData& rhs) const - { - return getPosition().equal3(rhs.getPosition()) && - mTexCoord == rhs.mTexCoord && - getNormal().equal3(rhs.getNormal()); - } + bool operator==(const LLVolumeFace::VertexData& rhs) const; struct ComparePosition { - bool operator()(const LLVector4a& a, const LLVector4a& b) const - { - return a.less3(b); - } + bool operator()(const LLVector4a& a, const LLVector4a& b) const; }; typedef std::map, VertexMapData::ComparePosition > PointMap; }; void optimize(F32 angle_cutoff = 2.f); - + void createOctree(); + enum { SINGLE_MASK = 0x0001, @@ -919,6 +917,21 @@ public: // mWeights.size() should be empty or match mVertices.size() LLVector4a* mWeights; + class Triangle : public LLRefCount + { + public: + const LLVector4a* mV[3]; + U16 mIndex[3]; + + 
LLVector3d mPositionGroup;
+ F64 mRadius;
+
+ virtual const LLVector3d& getPositionGroup() const;
+ virtual const F64& getBinRadius() const;
+ };
+
+ LLOctreeNode* mOctree;
+
 private:
 BOOL createUnCutCubeCap(LLVolume* volume, BOOL partial_build = FALSE);
 BOOL createCap(LLVolume* volume, BOOL partial_build = FALSE);
@@ -1084,10 +1097,12 @@ BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* cent
 BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size);
+BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size);
 BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir,
- F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided);
+ F32& intersection_a, F32& intersection_b, F32& intersection_t, BOOL two_sided);
 BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir,
- F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided);
+ F32& intersection_a, F32& intersection_b, F32& intersection_t);
+BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir,
+ F32& intersection_a, F32& intersection_b, F32& intersection_t);
-- cgit v1.2.3

From 9a869d630162292864e01fdd1707efc609fbd6b4 Mon Sep 17 00:00:00 2001
From: Dave Parks
Date: Sat, 29 May 2010 19:55:13 -0500
Subject: Octree driven raycast works, time to profile.

---
 indra/llmath/CMakeLists.txt | 2 +
 indra/llmath/lltreenode.h | 3 +
 indra/llmath/llvolume.cpp | 202 ++------------------------------------------
 indra/llmath/llvolume.h | 2 +
 4 files changed, 14 insertions(+), 195 deletions(-)

(limited to 'indra/llmath')

diff --git a/indra/llmath/CMakeLists.txt b/indra/llmath/CMakeLists.txt
index 367486eee7..dda07133d5 100644
--- a/indra/llmath/CMakeLists.txt
+++ b/indra/llmath/CMakeLists.txt
@@ -22,6 +22,7 @@ set(llmath_SOURCE_FILES
 llsphere.cpp
 llvolume.cpp
 llvolumemgr.cpp
+ llvolumeoctree.cpp
 llsdutil_math.cpp
 m3math.cpp
 m4math.cpp
@@ -66,6 +67,7 @@ set(llmath_HEADER_FILES
 llmatrix4a.h
 llvolume.h
 llvolumemgr.h
+ llvolumeoctree.h
 llsdutil_math.h
 m3math.h
 m4math.h
diff --git a/indra/llmath/lltreenode.h b/indra/llmath/lltreenode.h
index ee9836241a..e6d2521b2a 100644
--- a/indra/llmath/lltreenode.h
+++ b/indra/llmath/lltreenode.h
@@ -34,6 +34,9 @@
 #include "stdtypes.h"
 #include "xform.h"
+#include "llpointer.h"
+#include "llrefcount.h"
+
 #include 
 template class LLTreeNode;
diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp
index d261811aa2..c4172de651 100644
--- a/indra/llmath/llvolume.cpp
+++ b/indra/llmath/llvolume.cpp
@@ -48,6 +48,7 @@
 #include "lloctree.h"
 #include "lldarray.h"
 #include "llvolume.h"
+#include "llvolumeoctree.h"
 #include "llstl.h"
 #include "llsdserialize.h"
 #include "llvector4a.h"
@@ -133,50 +134,6 @@ BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* cent
 return true;
 }
-BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size)
-{
- LLVector4a fAWdU;
- LLVector4a dir;
- LLVector4a diff;
-
- dir.setSub(end, start);
- dir.mul(0.5f);
-
- diff.setAdd(end,start);
- diff.mul(0.5f);
- diff.sub(center);
- fAWdU.setAbs(dir);
-
- LLVector4a rhs;
- rhs.setAdd(size, fAWdU);
-
- LLVector4a lhs;
- lhs.setAbs(diff);
-
- S32 grt = lhs.greaterThan4(rhs).getComparisonMask();
-
- if (grt & 
- return false; - } - - LLVector4a f; - f.setCross3(dir, diff); - f.setAbs(f); - - LLVector4a v0; v0.mQ = _mm_shuffle_ps(size.mQ, size.mQ, _MM_SHUFFLE(3,1,0,0)); - LLVector4a v1; v1.mQ = _mm_shuffle_ps(fAWdU.mQ, fAWdU.mQ, _MM_SHUFFLE(3,2,2,1)); - lhs.setMul(v0, v1); - - v0.mQ = _mm_shuffle_ps(size.mQ, size.mQ, _MM_SHUFFLE(3,2,2,1)); - v1.mQ = _mm_shuffle_ps(fAWdU.mQ, fAWdU.mQ, _MM_SHUFFLE(3,1,0,0)); - rhs.setMul(v0, v1); - rhs.add(lhs); - - grt = f.greaterThan4(rhs).getComparisonMask(); - - return (grt & 0x7) ? false : true; -} // intersect test between triangle vert0, vert1, vert2 and a ray from orig in direction dir. @@ -203,7 +160,7 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co LLVector4a det; det.setAllDot3(edge1, pvec); - if (det.greaterEqual4(LLVector4a::getApproximatelyZero()).getComparisonMask()) + if (det.greaterEqual4(LLVector4a::getApproximatelyZero()).getComparisonMask() & 0x7) { /* calculate distance from vert0 to ray origin */ LLVector4a tvec; @@ -213,8 +170,8 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co LLVector4a u; u.setAllDot3(tvec,pvec); - if (u.greaterEqual4(LLVector4a::getZero()).getComparisonMask() && - u.lessEqual4(det).getComparisonMask()) + if ((u.greaterEqual4(LLVector4a::getZero()).getComparisonMask() & 0x7) && + (u.lessEqual4(det).getComparisonMask() & 0x7)) { /* prepare to test V parameter */ LLVector4a qvec; @@ -230,10 +187,10 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co LLVector4a sum_uv; sum_uv.setAdd(u, v); - S32 v_gequal = v.greaterEqual4(LLVector4a::getZero()).getComparisonMask(); - S32 sum_lequal = sum_uv.lessEqual4(det).getComparisonMask(); + S32 v_gequal = v.greaterEqual4(LLVector4a::getZero()).getComparisonMask() & 0x7; + S32 sum_lequal = sum_uv.lessEqual4(det).getComparisonMask() & 0x7; - if (v_gequal && sum_lequal) + if (v_gequal && sum_lequal) { /* calculate t, scale parameters, ray intersects triangle */ LLVector4a t; @@ -338,44 +295,6 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons } } - -class LLVolumeOctreeListener : public LLOctreeListener -{ -public: - - LLVolumeOctreeListener(LLOctreeNode* node) - { - node->addListener(this); - - mBounds = (LLVector4a*) _mm_malloc(sizeof(LLVector4a)*4, 16); - mExtents = mBounds+2; - } - - ~LLVolumeOctreeListener() - { - _mm_free(mBounds); - } - - //LISTENER FUNCTIONS - virtual void handleChildAddition(const LLOctreeNode* parent, - LLOctreeNode* child) - { - new LLVolumeOctreeListener(child); - } - - virtual void handleStateChange(const LLTreeNode* node) { } - virtual void handleChildRemoval(const LLOctreeNode* parent, - const LLOctreeNode* child) { } - virtual void handleInsertion(const LLTreeNode* node, LLVolumeFace::Triangle* tri) { } - virtual void handleRemoval(const LLTreeNode* node, LLVolumeFace::Triangle* tri) { } - virtual void handleDestruction(const LLTreeNode* node) { } - - -public: - LLVector4a* mBounds; // bounding box (center, size) of this node and all its children (tight fit to objects) - LLVector4a* mExtents; // extents (min, max) of this node and all its children -}; - class LLVolumeOctreeRebound : public LLOctreeTravelerDepthFirst { public: @@ -4436,113 +4355,6 @@ S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, } -class LLOctreeTriangleRayIntersect : public LLOctreeTraveler -{ -public: - const LLVolumeFace* mFace; - LLVector4a mStart; - LLVector4a mDir; - LLVector4a mEnd; - LLVector3* mIntersection; - 
LLVector2* mTexCoord; - LLVector3* mNormal; - LLVector3* mBinormal; - F32* mClosestT; - bool mHitFace; - - LLOctreeTriangleRayIntersect(const LLVector4a& start, const LLVector4a& dir, - const LLVolumeFace* face, F32* closest_t, - LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) - : mFace(face), - mStart(start), - mDir(dir), - mIntersection(intersection), - mTexCoord(tex_coord), - mNormal(normal), - mBinormal(bi_normal), - mClosestT(closest_t), - mHitFace(false) - { - mEnd.setAdd(mStart, mDir); - } - - void traverse(const LLOctreeNode* node) - { - LLVolumeOctreeListener* vl = (LLVolumeOctreeListener*) node->getListener(0); - - /*const F32* start = mStart.getF32(); - const F32* end = mEnd.getF32(); - const F32* center = vl->mBounds[0].getF32(); - const F32* size = vl->mBounds[1].getF32();*/ - - if (LLLineSegmentBoxIntersect(mStart, mEnd, vl->mBounds[0], vl->mBounds[1])) - { - node->accept(this); - for (S32 i = 0; i < node->getChildCount(); ++i) - { - traverse(node->getChild(i)); - } - } - } - - void visit(const LLOctreeNode* node) - { - for (LLOctreeNode::const_element_iter iter = - node->getData().begin(); iter != node->getData().end(); ++iter) - { - const LLVolumeFace::Triangle* tri = *iter; - - F32 a, b, t; - - if (LLTriangleRayIntersect(*tri->mV[0], *tri->mV[1], *tri->mV[2], - mStart, mDir, a, b, t)) - { - if ((t >= 0.f) && // if hit is after start - (t <= 1.f) && // and before end - (t < *mClosestT)) // and this hit is closer - { - *mClosestT = t; - mHitFace = true; - - if (mIntersection != NULL) - { - LLVector4a intersect = mDir; - intersect.mul(*mClosestT); - intersect.add(mStart); - mIntersection->set(intersect.getF32()); - } - - - if (mTexCoord != NULL) - { - LLVector2* tc = (LLVector2*) mFace->mTexCoords; - *mTexCoord = ((1.f - a - b) * tc[tri->mIndex[0]] + - a * tc[tri->mIndex[1]] + - b * tc[tri->mIndex[2]]); - - } - - if (mNormal != NULL) - { - LLVector4* norm = (LLVector4*) mFace->mNormals; - - *mNormal = ((1.f - a - b) * LLVector3(norm[tri->mIndex[0]]) + - a * LLVector3(norm[tri->mIndex[1]]) + - b * LLVector3(norm[tri->mIndex[2]])); - } - - if (mBinormal != NULL) - { - LLVector4* binormal = (LLVector4*) mFace->mBinormals; - *mBinormal = ((1.f - a - b) * LLVector3(binormal[tri->mIndex[0]]) + - a * LLVector3(binormal[tri->mIndex[1]]) + - b * LLVector3(binormal[tri->mIndex[2]])); - } - } - } - } - } -}; S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, S32 face, diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index a40a21b405..0ae8aa19ca 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -42,6 +42,7 @@ class LLProfile; class LLPath; template class LLOctreeNode; + class LLVector4a; class LLVolumeFace; class LLVolume; @@ -1095,6 +1096,7 @@ void calc_binormal_from_triangle( BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* center, const F32* size); BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size); +BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size); BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, F32& intersection_a, F32& intersection_b, F32& intersection_t, BOOL two_sided); -- cgit v1.2.3 From 26ba00b5554d20ee958693ced87b36fa7f6e3d99 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 3 Jun 2010 12:52:28 
-0500 Subject: Vectorized octree and much of llspatialpartition and lldrawable. Octree driven raycast. --- indra/llmath/llcamera.cpp | 216 +++++++++++++++++------------------------ indra/llmath/llcamera.h | 29 +++--- indra/llmath/lloctree.h | 239 ++++++++++++++++++++++++---------------------- indra/llmath/llvolume.cpp | 31 +++--- indra/llmath/llvolume.h | 16 +--- 5 files changed, 247 insertions(+), 284 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp index 487ed6451f..6b56e4870e 100644 --- a/indra/llmath/llcamera.cpp +++ b/indra/llmath/llcamera.cpp @@ -48,10 +48,10 @@ LLCamera::LLCamera() : mPlaneCount(6), mFrustumCornerDist(0.f) { + alignPlanes(); calculateFrustumPlanes(); } - LLCamera::LLCamera(F32 vertical_fov_rads, F32 aspect_ratio, S32 view_height_in_pixels, F32 near_plane, F32 far_plane) : LLCoordFrame(), mViewHeightInPixels(view_height_in_pixels), @@ -59,6 +59,7 @@ LLCamera::LLCamera(F32 vertical_fov_rads, F32 aspect_ratio, S32 view_height_in_p mPlaneCount(6), mFrustumCornerDist(0.f) { + alignPlanes(); mAspect = llclamp(aspect_ratio, MIN_ASPECT_RATIO, MAX_ASPECT_RATIO); mNearPlane = llclamp(near_plane, MIN_NEAR_PLANE, MAX_NEAR_PLANE); if(far_plane < 0) far_plane = DEFAULT_FAR_PLANE; @@ -67,6 +68,23 @@ LLCamera::LLCamera(F32 vertical_fov_rads, F32 aspect_ratio, S32 view_height_in_p setView(vertical_fov_rads); } +LLCamera::~LLCamera() +{ + +} + +const LLCamera& LLCamera::operator=(const LLCamera& rhs) +{ + memcpy(this, &rhs, sizeof(LLCamera)); + alignPlanes(); + LLVector4a::memcpyNonAliased16((F32*) mAgentPlanes, (F32*) rhs.mAgentPlanes, 4*7); + return *this; +} + +void LLCamera::alignPlanes() +{ + mAgentPlanes = (LLPlane*) LL_NEXT_ALIGNED_ADDRESS(mAgentPlaneBuffer); +} // ---------------- LLCamera::getFoo() member functions ---------------- @@ -91,8 +109,8 @@ F32 LLCamera::getMaxView() const void LLCamera::setUserClipPlane(LLPlane plane) { mPlaneCount = 7; - mAgentPlanes[6].p = plane; - mAgentPlanes[6].mask = calcPlaneMask(plane); + mAgentPlanes[6] = plane; + mPlaneMask[6] = calcPlaneMask(plane); } void LLCamera::disableUserClipPlane() @@ -164,129 +182,66 @@ size_t LLCamera::readFrustumFromBuffer(const char *buffer) // ---------------- test methods ---------------- -S32 LLCamera::AABBInFrustum(const LLVector3 ¢er, const LLVector3& radius) -{ - static const LLVector3 scaler[] = { - LLVector3(-1,-1,-1), - LLVector3( 1,-1,-1), - LLVector3(-1, 1,-1), - LLVector3( 1, 1,-1), - LLVector3(-1,-1, 1), - LLVector3( 1,-1, 1), - LLVector3(-1, 1, 1), - LLVector3( 1, 1, 1) +S32 LLCamera::AABBInFrustum(const LLVector4a ¢er, const LLVector4a& radius) +{ + static const LLVector4a scaler[] = { + LLVector4a(-1,-1,-1), + LLVector4a( 1,-1,-1), + LLVector4a(-1, 1,-1), + LLVector4a( 1, 1,-1), + LLVector4a(-1,-1, 1), + LLVector4a( 1,-1, 1), + LLVector4a(-1, 1, 1), + LLVector4a( 1, 1, 1) }; U8 mask = 0; S32 result = 2; - /*if (mFrustumCornerDist > 0.f && radius.magVecSquared() > mFrustumCornerDist * mFrustumCornerDist) - { //box is larger than frustum, check frustum quads against box planes - - static const LLVector3 dir[] = - { - LLVector3(1, 0, 0), - LLVector3(-1, 0, 0), - LLVector3(0, 1, 0), - LLVector3(0, -1, 0), - LLVector3(0, 0, 1), - LLVector3(0, 0, -1) - }; - - U32 quads[] = + for (U32 i = 0; i < mPlaneCount; i++) + { + mask = mPlaneMask[i]; + if (mask == 0xff) { - 0, 1, 2, 3, - 0, 1, 5, 4, - 2, 3, 7, 6, - 3, 0, 7, 4, - 1, 2, 6, 4, - 4, 5, 6, 7 - }; - - result = 0; - - BOOL total_inside = TRUE; - for (U32 i = 0; i < 6; i++) - { - LLVector3 p = 
center + radius.scaledVec(dir[i]); - F32 d = -p*dir[i]; - - for (U32 j = 0; j < 6; j++) - { //for each quad - F32 dist = mAgentFrustum[quads[j*4+0]]*dir[i] + d; - if (dist > 0) - { //at least one frustum point is outside the AABB - total_inside = FALSE; - for (U32 k = 1; k < 4; k++) - { //for each other point on quad - if ( mAgentFrustum[quads[j*4+k]]*dir[i]+d <= 0.f) - { //quad is straddling some plane of AABB - return 1; - } - } - } - else - { - for (U32 k = 1; k < 4; k++) - { - if (mAgentFrustum[quads[j*4+k]]*dir[i]+d > 0.f) - { - return 1; - } - } - } - } + continue; } - if (total_inside) + const LLPlane& p = mAgentPlanes[i]; + const LLVector4a& n = reinterpret_cast(p); + float d = p.mV[3]; + LLVector4a rscale; + rscale.setMul(radius, scaler[mask]); + + LLVector4a minp, maxp; + minp.setSub(center, rscale); + maxp.setAdd(center, rscale); + + if (n.dot3(minp) > -d) { - result = 1; + return 0; } - } - else*/ - { - for (U32 i = 0; i < mPlaneCount; i++) + + if (n.dot3(maxp) > -d) { - mask = mAgentPlanes[i].mask; - if (mask == 0xff) - { - continue; - } - LLPlane p = mAgentPlanes[i].p; - LLVector3 n = LLVector3(p); - float d = p.mV[3]; - LLVector3 rscale = radius.scaledVec(scaler[mask]); - - LLVector3 minp = center - rscale; - LLVector3 maxp = center + rscale; - - if (n * minp > -d) - { - return 0; - } - - if (n * maxp > -d) - { - result = 1; - } + result = 1; } } - return result; } -S32 LLCamera::AABBInFrustumNoFarClip(const LLVector3 ¢er, const LLVector3& radius) -{ - static const LLVector3 scaler[] = { - LLVector3(-1,-1,-1), - LLVector3( 1,-1,-1), - LLVector3(-1, 1,-1), - LLVector3( 1, 1,-1), - LLVector3(-1,-1, 1), - LLVector3( 1,-1, 1), - LLVector3(-1, 1, 1), - LLVector3( 1, 1, 1) + +S32 LLCamera::AABBInFrustumNoFarClip(const LLVector4a& center, const LLVector4a& radius) +{ + static const LLVector4a scaler[] = { + LLVector4a(-1,-1,-1), + LLVector4a( 1,-1,-1), + LLVector4a(-1, 1,-1), + LLVector4a( 1, 1,-1), + LLVector4a(-1,-1, 1), + LLVector4a( 1,-1, 1), + LLVector4a(-1, 1, 1), + LLVector4a( 1, 1, 1) }; U8 mask = 0; @@ -299,25 +254,28 @@ S32 LLCamera::AABBInFrustumNoFarClip(const LLVector3 ¢er, const LLVector3& r continue; } - mask = mAgentPlanes[i].mask; + mask = mPlaneMask[i]; if (mask == 0xff) { continue; } - LLPlane p = mAgentPlanes[i].p; - LLVector3 n = LLVector3(p); + + const LLPlane& p = mAgentPlanes[i]; + const LLVector4a& n = reinterpret_cast(p); float d = p.mV[3]; - LLVector3 rscale = radius.scaledVec(scaler[mask]); + LLVector4a rscale; + rscale.setMul(radius, scaler[mask]); - LLVector3 minp = center - rscale; - LLVector3 maxp = center + rscale; + LLVector4a minp, maxp; + minp.setSub(center, rscale); + maxp.setAdd(center, rscale); - if (n * minp > -d) + if (n.dot3(minp) > -d) { return 0; } - if (n * maxp > -d) + if (n.dot3(maxp) > -d) { result = 1; } @@ -447,12 +405,12 @@ int LLCamera::sphereInFrustum(const LLVector3 &sphere_center, const F32 radius) int res = 2; for (int i = 0; i < 6; i++) { - if (mAgentPlanes[i].mask == 0xff) + if (mPlaneMask[i] == 0xff) { continue; } - float d = mAgentPlanes[i].p.dist(sphere_center); + float d = mAgentPlanes[i].dist(sphere_center); if (d > radius) { @@ -644,12 +602,14 @@ void LLCamera::ignoreAgentFrustumPlane(S32 idx) return; } - mAgentPlanes[idx].mask = 0xff; - mAgentPlanes[idx].p.clearVec(); + mPlaneMask[idx] = 0xff; + mAgentPlanes[idx].clearVec(); } void LLCamera::calcAgentFrustumPlanes(LLVector3* frust) { + alignPlanes(); + for (int i = 0; i < 8; i++) { mAgentFrustum[i] = frust[i]; @@ -662,27 +622,27 @@ void 
LLCamera::calcAgentFrustumPlanes(LLVector3* frust) //order of planes is important, keep most likely to fail in the front of the list //near - frust[0], frust[1], frust[2] - mAgentPlanes[2].p = planeFromPoints(frust[0], frust[1], frust[2]); + mAgentPlanes[2] = planeFromPoints(frust[0], frust[1], frust[2]); //far - mAgentPlanes[5].p = planeFromPoints(frust[5], frust[4], frust[6]); + mAgentPlanes[5] = planeFromPoints(frust[5], frust[4], frust[6]); //left - mAgentPlanes[0].p = planeFromPoints(frust[4], frust[0], frust[7]); + mAgentPlanes[0] = planeFromPoints(frust[4], frust[0], frust[7]); //right - mAgentPlanes[1].p = planeFromPoints(frust[1], frust[5], frust[6]); + mAgentPlanes[1] = planeFromPoints(frust[1], frust[5], frust[6]); //top - mAgentPlanes[4].p = planeFromPoints(frust[3], frust[2], frust[6]); + mAgentPlanes[4] = planeFromPoints(frust[3], frust[2], frust[6]); //bottom - mAgentPlanes[3].p = planeFromPoints(frust[1], frust[0], frust[4]); + mAgentPlanes[3] = planeFromPoints(frust[1], frust[0], frust[4]); //cache plane octant facing mask for use in AABBInFrustum for (U32 i = 0; i < mPlaneCount; i++) { - mAgentPlanes[i].mask = calcPlaneMask(mAgentPlanes[i].p); + mPlaneMask[i] = calcPlaneMask(mAgentPlanes[i]); } } diff --git a/indra/llmath/llcamera.h b/indra/llmath/llcamera.h index d6c5f7bbb1..c40e819dcf 100644 --- a/indra/llmath/llcamera.h +++ b/indra/llmath/llcamera.h @@ -37,6 +37,7 @@ #include "llmath.h" #include "llcoordframe.h" #include "llplane.h" +#include "llvector4a.h" const F32 DEFAULT_FIELD_OF_VIEW = 60.f * DEG_TO_RAD; const F32 DEFAULT_ASPECT_RATIO = 640.f / 480.f; @@ -79,6 +80,14 @@ class LLCamera : public LLCoordFrame { public: + + LLCamera(const LLCamera& rhs) + { + *this = rhs; + } + + const LLCamera& operator=(const LLCamera& rhs); + enum { PLANE_LEFT = 0, PLANE_RIGHT = 1, @@ -129,13 +138,9 @@ private: LLPlane mWorldPlanes[PLANE_NUM]; LLPlane mHorizPlanes[HORIZ_PLANE_NUM]; - struct frustum_plane - { - frustum_plane() : mask(0) {} - LLPlane p; - U8 mask; - }; - frustum_plane mAgentPlanes[7]; //frustum planes in agent space a la gluUnproject (I'm a bastard, I know) - DaveP + LLPlane* mAgentPlanes; //frustum planes in agent space a la gluUnproject (I'm a bastard, I know) - DaveP + U8 mAgentPlaneBuffer[sizeof(LLPlane)*8]; + U8 mPlaneMask[7]; U32 mPlaneCount; //defaults to 6, if setUserClipPlane is called, uses user supplied clip plane in @@ -143,12 +148,14 @@ private: public: LLVector3 mAgentFrustum[8]; //8 corners of 6-plane frustum F32 mFrustumCornerDist; //distance to corner of frustum against far clip plane - LLPlane& getAgentPlane(U32 idx) { return mAgentPlanes[idx].p; } + LLPlane& getAgentPlane(U32 idx) { return mAgentPlanes[idx]; } public: LLCamera(); LLCamera(F32 vertical_fov_rads, F32 aspect_ratio, S32 view_height_in_pixels, F32 near_plane, F32 far_plane); - virtual ~LLCamera(){} // no-op virtual destructor + virtual ~LLCamera(); + + void alignPlanes(); void setUserClipPlane(LLPlane plane); void disableUserClipPlane(); @@ -199,8 +206,8 @@ public: S32 sphereInFrustum(const LLVector3 ¢er, const F32 radius) const; S32 pointInFrustum(const LLVector3 &point) const { return sphereInFrustum(point, 0.0f); } S32 sphereInFrustumFull(const LLVector3 ¢er, const F32 radius) const { return sphereInFrustum(center, radius); } - S32 AABBInFrustum(const LLVector3 ¢er, const LLVector3& radius); - S32 AABBInFrustumNoFarClip(const LLVector3 ¢er, const LLVector3& radius); + S32 AABBInFrustum(const LLVector4a& center, const LLVector4a& radius); + S32 AABBInFrustumNoFarClip(const LLVector4a& 
center, const LLVector4a& radius); //does a quick 'n dirty sphere-sphere check S32 sphereInFrustumQuick(const LLVector3 &sphere_center, const F32 radius); diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 8bba12783f..ae2259dba0 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -35,6 +35,7 @@ #include "lltreenode.h" #include "v3math.h" +#include "llvector4a.h" #include #include @@ -44,7 +45,7 @@ #define OCT_ERRS LL_WARNS("OctreeErrors") #endif -#define LL_OCTREE_PARANOIA_CHECK 0 +#define LL_OCTREE_PARANOIA_CHECK 1 #if LL_DARWIN #define LL_OCTREE_MAX_CAPACITY 32 #else @@ -94,23 +95,22 @@ public: typedef LLOctreeNode oct_node; typedef LLOctreeListener oct_listener; - static const U8 OCTANT_POSITIVE_X = 0x01; - static const U8 OCTANT_POSITIVE_Y = 0x02; - static const U8 OCTANT_POSITIVE_Z = 0x04; - - LLOctreeNode( LLVector3d center, - LLVector3d size, + LLOctreeNode( const LLVector4a& center, + const LLVector4a& size, BaseType* parent, - U8 octant = 255) + S32 octant = -1) : mParent((oct_node*)parent), - mCenter(center), - mSize(size), mOctant(octant) { + mD = (LLVector4a*) _mm_malloc(sizeof(LLVector4a)*4, 16); + + mD[CENTER] = center; + mD[SIZE] = size; + updateMinMax(); - if ((mOctant == 255) && mParent) + if ((mOctant == -1) && mParent) { - mOctant = ((oct_node*) mParent)->getOctant(mCenter.mdV); + mOctant = ((oct_node*) mParent)->getOctant(mD[CENTER]); } clearChildren(); @@ -124,43 +124,30 @@ public: { delete getChild(i); } + + _mm_free(mD); } inline const BaseType* getParent() const { return mParent; } - inline void setParent(BaseType* parent) { mParent = (oct_node*) parent; } - inline const LLVector3d& getCenter() const { return mCenter; } - inline const LLVector3d& getSize() const { return mSize; } - inline void setCenter(LLVector3d center) { mCenter = center; } - inline void setSize(LLVector3d size) { mSize = size; } - inline oct_node* getNodeAt(T* data) { return getNodeAt(data->getPositionGroup(), data->getBinRadius()); } - inline U8 getOctant() const { return mOctant; } - inline void setOctant(U8 octant) { mOctant = octant; } + inline void setParent(BaseType* parent) { mParent = (oct_node*) parent; } + inline const LLVector4a& getCenter() const { return mD[CENTER]; } + inline const LLVector4a& getSize() const { return mD[SIZE]; } + inline void setCenter(const LLVector4a& center) { mD[CENTER] = center; } + inline void setSize(const LLVector4a& size) { mD[SIZE] = size; } + inline oct_node* getNodeAt(T* data) { return getNodeAt(data->getPositionGroup(), data->getBinRadius()); } + inline S32 getOctant() const { return mOctant; } + inline void setOctant(S32 octant) { mOctant = octant; } inline const oct_node* getOctParent() const { return (const oct_node*) getParent(); } inline oct_node* getOctParent() { return (oct_node*) getParent(); } - U8 getOctant(const F64 pos[]) const //get the octant pos is in + S32 getOctant(const LLVector4a& pos) const //get the octant pos is in { - U8 ret = 0; - - if (pos[0] > mCenter.mdV[0]) - { - ret |= OCTANT_POSITIVE_X; - } - if (pos[1] > mCenter.mdV[1]) - { - ret |= OCTANT_POSITIVE_Y; - } - if (pos[2] > mCenter.mdV[2]) - { - ret |= OCTANT_POSITIVE_Z; - } - - return ret; + return pos.greaterThan4(mD[CENTER]).getComparisonMask() & 0x7; } - inline bool isInside(const LLVector3d& pos, const F64& rad) const + inline bool isInside(const LLVector4a& pos, const F32& rad) const { - return rad <= mSize.mdV[0]*2.0 && isInside(pos); + return rad <= mD[SIZE][0]*2.f && isInside(pos); } inline bool isInside(T* data) const @@ -168,29 
+155,27 @@ public: return isInside(data->getPositionGroup(), data->getBinRadius()); } - bool isInside(const LLVector3d& pos) const + bool isInside(const LLVector4a& pos) const { - const F64& x = pos.mdV[0]; - const F64& y = pos.mdV[1]; - const F64& z = pos.mdV[2]; - - if (x > mMax.mdV[0] || x <= mMin.mdV[0] || - y > mMax.mdV[1] || y <= mMin.mdV[1] || - z > mMax.mdV[2] || z <= mMin.mdV[2]) + S32 gt = pos.greaterThan4(mD[MAX]).getComparisonMask() & 0x7; + if (gt) { return false; } - + + S32 lt = pos.lessEqual4(mD[MIN]).getComparisonMask() & 0x7; + if (lt) + { + return false; + } + return true; } void updateMinMax() { - for (U32 i = 0; i < 3; i++) - { - mMax.mdV[i] = mCenter.mdV[i] + mSize.mdV[i]; - mMin.mdV[i] = mCenter.mdV[i] - mSize.mdV[i]; - } + mD[MAX].setAdd(mD[CENTER], mD[SIZE]); + mD[MIN].setSub(mD[CENTER], mD[SIZE]); } inline oct_listener* getOctListener(U32 index) @@ -203,34 +188,34 @@ public: return contains(xform->getBinRadius()); } - bool contains(F64 radius) + bool contains(F32 radius) { if (mParent == NULL) { //root node contains nothing return false; } - F64 size = mSize.mdV[0]; - F64 p_size = size * 2.0; + F32 size = mD[SIZE][0]; + F32 p_size = size * 2.f; - return (radius <= 0.001 && size <= 0.001) || + return (radius <= 0.001f && size <= 0.001f) || (radius <= p_size && radius > size); } - static void pushCenter(LLVector3d ¢er, const LLVector3d &size, const T* data) + static void pushCenter(LLVector4a ¢er, const LLVector4a &size, const T* data) { - const LLVector3d& pos = data->getPositionGroup(); - for (U32 i = 0; i < 3; i++) - { - if (pos.mdV[i] > center.mdV[i]) - { - center.mdV[i] += size.mdV[i]; - } - else - { - center.mdV[i] -= size.mdV[i]; - } - } + const LLVector4a& pos = data->getPositionGroup(); + + LLVector4a gt = pos.greaterThan4(center); + + LLVector4a up; + up.mQ = _mm_and_ps(size.mQ, gt.mQ); + + LLVector4a down; + down.mQ = _mm_andnot_ps(gt.mQ, size.mQ); + + center.add(up); + center.sub(down); } void accept(oct_traveler* visitor) { visitor->visit(this); } @@ -249,21 +234,21 @@ public: void accept(tree_traveler* visitor) const { visitor->visit(this); } void accept(oct_traveler* visitor) const { visitor->visit(this); } - oct_node* getNodeAt(const LLVector3d& pos, const F64& rad) + oct_node* getNodeAt(const LLVector4a& pos, const F32& rad) { LLOctreeNode* node = this; if (node->isInside(pos, rad)) { //do a quick search by octant - U8 octant = node->getOctant(pos.mdV); + S32 octant = node->getOctant(pos); BOOL keep_going = TRUE; //traverse the tree until we find a node that has no node //at the appropriate octant or is smaller than the object. //by definition, that node is the smallest node that contains // the data - while (keep_going && node->getSize().mdV[0] >= rad) + while (keep_going && node->getSize()[0] >= rad) { keep_going = FALSE; for (U32 i = 0; i < node->getChildCount() && !keep_going; i++) @@ -271,7 +256,7 @@ public: if (node->getChild(i)->getOctant() == octant) { node = node->getChild(i); - octant = node->getOctant(pos.mdV); + octant = node->getOctant(pos); keep_going = TRUE; } } @@ -289,7 +274,7 @@ public: { if (data == NULL) { - //OCT_ERRS << "!!! INVALID ELEMENT ADDED TO OCTREE BRANCH !!!" << llendl; + OCT_ERRS << "!!! INVALID ELEMENT ADDED TO OCTREE BRANCH !!!" 
<< llendl; return false; } LLOctreeNode* parent = getOctParent(); @@ -299,7 +284,7 @@ public: { if (getElementCount() < LL_OCTREE_MAX_CAPACITY && (contains(data->getBinRadius()) || - (data->getBinRadius() > getSize().mdV[0] && + (data->getBinRadius() > getSize()[0] && parent && parent->getElementCount() >= LL_OCTREE_MAX_CAPACITY))) { //it belongs here #if LL_OCTREE_PARANOIA_CHECK @@ -330,16 +315,22 @@ public: } //it's here, but no kids are in the right place, make a new kid - LLVector3d center(getCenter()); - LLVector3d size(getSize()*0.5); + LLVector4a center = getCenter(); + LLVector4a size = getSize(); + size.mul(0.5f); //push center in direction of data LLOctreeNode::pushCenter(center, size, data); // handle case where floating point number gets too small - if( llabs(center.mdV[0] - getCenter().mdV[0]) < F_APPROXIMATELY_ZERO && - llabs(center.mdV[1] - getCenter().mdV[1]) < F_APPROXIMATELY_ZERO && - llabs(center.mdV[2] - getCenter().mdV[2]) < F_APPROXIMATELY_ZERO) + LLVector4a val; + val.setSub(center, getCenter()); + val.setAbs(val); + LLVector4a app_zero; + app_zero.mQ = F_APPROXIMATELY_ZERO_4A; + S32 lt = val.lessThan4(app_zero).getComparisonMask() & 0x7; + + if( lt == 0x7 ) { mData.insert(data); BaseType::insert(data); @@ -357,7 +348,7 @@ public: //make sure no existing node matches this position for (U32 i = 0; i < getChildCount(); i++) { - if (mChild[i]->getCenter() == center) + if (mChild[i]->getCenter().equal3(center)) { OCT_ERRS << "Octree detected duplicate child center and gave up." << llendl; return false; @@ -375,7 +366,7 @@ public: else { //it's not in here, give it to the root - //OCT_ERRS << "Octree insertion failed, starting over from root!" << llendl; + OCT_ERRS << "Octree insertion failed, starting over from root!" << llendl; oct_node* node = this; @@ -482,13 +473,19 @@ public: void addChild(oct_node* child, BOOL silent = FALSE) { #if LL_OCTREE_PARANOIA_CHECK + + if (child->getSize().equal3(getSize())) + { + OCT_ERRS << "Child size is same as parent size!" << llendl; + } + for (U32 i = 0; i < getChildCount(); i++) { - if(mChild[i]->getSize() != child->getSize()) + if(!mChild[i]->getSize().equal3(child->getSize())) { OCT_ERRS <<"Invalid octree child size." << llendl; } - if (mChild[i]->getCenter() == child->getCenter()) + if (mChild[i]->getCenter().equal3(child->getCenter())) { OCT_ERRS <<"Duplicate octree child position." << llendl; } @@ -513,7 +510,7 @@ public: } } - void removeChild(U8 index, BOOL destroy = FALSE) + void removeChild(S32 index, BOOL destroy = FALSE) { for (U32 i = 0; i < this->getListenerCount(); i++) { @@ -554,18 +551,26 @@ public: } } - //OCT_ERRS << "Octree failed to delete requested child." << llendl; + OCT_ERRS << "Octree failed to delete requested child." << llendl; } protected: + typedef enum + { + CENTER = 0, + SIZE = 1, + MAX = 2, + MIN = 3 + } eDName; + + LLVector4a* mD; + + oct_node* mParent; + S32 mOctant; + child_list mChild; element_list mData; - oct_node* mParent; - LLVector3d mCenter; - LLVector3d mSize; - LLVector3d mMax; - LLVector3d mMin; - U8 mOctant; + }; //just like a regular node, except it might expand on insert and compress on balance @@ -576,9 +581,9 @@ public: typedef LLOctreeNode BaseType; typedef LLOctreeNode oct_node; - LLOctreeRoot( LLVector3d center, - LLVector3d size, - BaseType* parent) + LLOctreeRoot(const LLVector4a& center, + const LLVector4a& size, + BaseType* parent) : BaseType(center, size, parent) { } @@ -619,28 +624,33 @@ public: { if (data == NULL) { - //OCT_ERRS << "!!! 
INVALID ELEMENT ADDED TO OCTREE ROOT !!!" << llendl; + OCT_ERRS << "!!! INVALID ELEMENT ADDED TO OCTREE ROOT !!!" << llendl; return false; } if (data->getBinRadius() > 4096.0) { - //OCT_ERRS << "!!! ELEMENT EXCEEDS MAXIMUM SIZE IN OCTREE ROOT !!!" << llendl; + OCT_ERRS << "!!! ELEMENT EXCEEDS MAXIMUM SIZE IN OCTREE ROOT !!!" << llendl; return false; } - const F64 MAX_MAG = 1024.0*1024.0; + LLVector4a MAX_MAG; + MAX_MAG.splat(1024.f*1024.f); + + const LLVector4a& v = data->getPositionGroup(); + + LLVector4a val; + val.setSub(v, mD[CENTER]); + val.setAbs(val); + S32 lt = val.lessThan4(MAX_MAG).getComparisonMask() & 0x7; - const LLVector3d& v = data->getPositionGroup(); - if (!(fabs(v.mdV[0]-this->mCenter.mdV[0]) < MAX_MAG && - fabs(v.mdV[1]-this->mCenter.mdV[1]) < MAX_MAG && - fabs(v.mdV[2]-this->mCenter.mdV[2]) < MAX_MAG)) + if (lt != 0x7) { - //OCT_ERRS << "!!! ELEMENT EXCEEDS RANGE OF SPATIAL PARTITION !!!" << llendl; + OCT_ERRS << "!!! ELEMENT EXCEEDS RANGE OF SPATIAL PARTITION !!!" << llendl; return false; } - if (this->getSize().mdV[0] > data->getBinRadius() && isInside(data->getPositionGroup())) + if (this->getSize()[0] > data->getBinRadius() && isInside(data->getPositionGroup())) { //we got it, just act like a branch oct_node* node = getNodeAt(data); @@ -656,31 +666,34 @@ public: else if (this->getChildCount() == 0) { //first object being added, just wrap it up - while (!(this->getSize().mdV[0] > data->getBinRadius() && isInside(data->getPositionGroup()))) + while (!(this->getSize()[0] > data->getBinRadius() && isInside(data->getPositionGroup()))) { - LLVector3d center, size; + LLVector4a center, size; center = this->getCenter(); size = this->getSize(); LLOctreeNode::pushCenter(center, size, data); this->setCenter(center); - this->setSize(size*2); + size.mul(2.f); + this->setSize(size); this->updateMinMax(); } LLOctreeNode::insert(data); } else { - while (!(this->getSize().mdV[0] > data->getBinRadius() && isInside(data->getPositionGroup()))) + while (!(this->getSize()[0] > data->getBinRadius() && isInside(data->getPositionGroup()))) { //the data is outside the root node, we need to grow - LLVector3d center(this->getCenter()); - LLVector3d size(this->getSize()); + LLVector4a center(this->getCenter()); + LLVector4a size(this->getSize()); //expand this node - LLVector3d newcenter(center); + LLVector4a newcenter(center); LLOctreeNode::pushCenter(newcenter, size, data); this->setCenter(newcenter); - this->setSize(size*2); + LLVector4a size2 = size; + size2.mul(2.f); + this->setSize(size2); this->updateMinMax(); //copy our children to a new branch diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index c4172de651..72833c019f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -295,7 +295,7 @@ BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, cons } } -class LLVolumeOctreeRebound : public LLOctreeTravelerDepthFirst +class LLVolumeOctreeRebound : public LLOctreeTravelerDepthFirst { public: const LLVolumeFace* mFace; @@ -305,7 +305,7 @@ public: mFace = face; } - virtual void visit(const LLOctreeNode* branch) + virtual void visit(const LLOctreeNode* branch) { LLVolumeOctreeListener* node = (LLVolumeOctreeListener*) branch->getListener(0); @@ -314,12 +314,12 @@ public: if (branch->getElementCount() != 0) { - const LLVolumeFace::Triangle* tri = *(branch->getData().begin()); + const LLVolumeTriangle* tri = *(branch->getData().begin()); min = *(tri->mV[0]); max = *(tri->mV[0]); - for (LLOctreeNode::const_element_iter iter = + for 
(LLOctreeNode::const_element_iter iter = branch->getData().begin(); iter != branch->getData().end(); ++iter) { //stretch by triangles in node @@ -4394,7 +4394,7 @@ S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& en LLVector4a box_size; box_size.setSub(face.mExtents[1], face.mExtents[0]); - if (LLLineSegmentBoxIntersect(start.getF32(), end.getF32(), box_center.getF32(), box_size.getF32())) + if (LLLineSegmentBoxIntersect(start, end, box_center, box_size)) { if (bi_normal != NULL) // if the caller wants binormals, we may need to generate them { @@ -5418,12 +5418,17 @@ void LLVolumeFace::optimize(F32 angle_cutoff) void LLVolumeFace::createOctree() { - mOctree = new LLOctreeRoot(LLVector3d(0,0,0), LLVector3d(1,1,1), NULL); + LLVector4a center; + LLVector4a size; + center.splat(0.f); + size.splat(1.f); + + mOctree = new LLOctreeRoot(center, size, NULL); new LLVolumeOctreeListener(mOctree); for (U32 i = 0; i < mNumIndices; i+= 3) { - Triangle* tri = new Triangle(); + LLPointer tri = new LLVolumeTriangle(); const LLVector4a& v0 = mPositions[mIndices[i]]; const LLVector4a& v1 = mPositions[mIndices[i+1]]; @@ -5449,8 +5454,7 @@ void LLVolumeFace::createOctree() center.setAdd(min, max); center.mul(0.5f); - - tri->mPositionGroup.setVec(center[0], center[1], center[2]); + *tri->mPositionGroup = center; LLVector4a size; size.setSub(max,min); @@ -5464,15 +5468,6 @@ void LLVolumeFace::createOctree() rebound.traverse(mOctree); } -const LLVector3d& LLVolumeFace::Triangle::getPositionGroup() const -{ - return mPositionGroup; -} - -const F64& LLVolumeFace::Triangle::getBinRadius() const -{ - return mRadius; -} void LLVolumeFace::swapData(LLVolumeFace& rhs) { diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 0ae8aa19ca..c49d1c650d 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -46,6 +46,7 @@ template class LLOctreeNode; class LLVector4a; class LLVolumeFace; class LLVolume; +class LLVolumeTriangle; #include "lldarray.h" #include "lluuid.h" @@ -918,20 +919,7 @@ public: // mWeights.size() should be empty or match mVertices.size() LLVector4a* mWeights; - class Triangle : public LLRefCount - { - public: - const LLVector4a* mV[3]; - U16 mIndex[3]; - - LLVector3d mPositionGroup; - F64 mRadius; - - virtual const LLVector3d& getPositionGroup() const; - virtual const F64& getBinRadius() const; - }; - - LLOctreeNode* mOctree; + LLOctreeNode* mOctree; private: BOOL createUnCutCubeCap(LLVolume* volume, BOOL partial_build = FALSE); -- cgit v1.2.3 From 7db6a5a64d8eacb63b45dc7cc48bdd681f3e19af Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 4 Jun 2010 00:10:05 -0500 Subject: Disable octree paranoia checks. 
--- indra/llmath/lloctree.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index ae2259dba0..f61ce6ce05 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -45,7 +45,7 @@ #define OCT_ERRS LL_WARNS("OctreeErrors") #endif -#define LL_OCTREE_PARANOIA_CHECK 1 +#define LL_OCTREE_PARANOIA_CHECK 0 #if LL_DARWIN #define LL_OCTREE_MAX_CAPACITY 32 #else -- cgit v1.2.3 From 087b7499082c7f0ae867990a102bc8f90a83471d Mon Sep 17 00:00:00 2001 From: Tofu Linden Date: Fri, 4 Jun 2010 08:46:00 +0100 Subject: fix scoping issues for gcc --- indra/llmath/lloctree.h | 2 +- indra/llmath/llvolume.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index f61ce6ce05..df77971204 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -640,7 +640,7 @@ public: const LLVector4a& v = data->getPositionGroup(); LLVector4a val; - val.setSub(v, mD[CENTER]); + val.setSub(v, BaseType::mD[BaseType::CENTER]); val.setAbs(val); S32 lt = val.lessThan4(MAX_MAG).getComparisonMask() & 0x7; diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 72833c019f..ef1ab57036 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4406,7 +4406,7 @@ S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& en face.createOctree(); } - LLVector4a* p = (LLVector4a*) face.mPositions; + //LLVector4a* p = (LLVector4a*) face.mPositions; LLOctreeTriangleRayIntersect intersect(start, dir, &face, &closest_t, intersection, tex_coord, normal, bi_normal); intersect.traverse(face.mOctree); -- cgit v1.2.3 From dc2f50642bf6c785263d91ca53ec337f3898b990 Mon Sep 17 00:00:00 2001 From: Tofu Linden Date: Fri, 4 Jun 2010 08:54:03 +0100 Subject: lots of _mm_malloc and _mm_free -> ll_aligned_malloc_16 and ll_aligned_free_16 more to come. 
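For readers following the rename in this patch: ll_aligned_malloc_16 and ll_aligned_free_16 are thin wrappers over a 16-byte-aligned heap allocator (declared in llcommon's llmemory.h, which a later commit in this series adds to llvolume.cpp's includes). The sketch below is illustrative only; the platform branches are assumptions rather than the shipped definitions, but they show why the wrappers are drop-in replacements for the _mm_malloc/_mm_free intrinsic calls they retire:

#include <cstdlib>      // posix_memalign, free
#if LL_WINDOWS
#include <malloc.h>     // _aligned_malloc, _aligned_free
#endif

// LLVector4a is fetched with aligned SSE loads, so every heap block that
// backs one must start on a 16-byte boundary.
inline void* ll_aligned_malloc_16(size_t size)
{
#if LL_WINDOWS
    return _aligned_malloc(size, 16);
#else
    void* ptr = NULL;
    if (posix_memalign(&ptr, 16, size) != 0)
    {
        ptr = NULL;
    }
    return ptr;
#endif
}

inline void ll_aligned_free_16(void* ptr)
{
#if LL_WINDOWS
    _aligned_free(ptr);
#else
    free(ptr);          // posix_memalign memory is released with plain free()
#endif
}

Funneling the alignment policy through one pair of functions, instead of raw _mm_malloc/_mm_free calls scattered across lloctree.h and llvolume.cpp, is what lets the allocator change later without touching every call site.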
--- indra/llmath/lloctree.h | 4 +-- indra/llmath/llvolume.cpp | 86 +++++++++++++++++++++++------------------------ 2 files changed, 45 insertions(+), 45 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index df77971204..59828ae565 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -102,7 +102,7 @@ public: : mParent((oct_node*)parent), mOctant(octant) { - mD = (LLVector4a*) _mm_malloc(sizeof(LLVector4a)*4, 16); + mD = (LLVector4a*) ll_aligned_malloc_16(sizeof(LLVector4a)*4); mD[CENTER] = center; mD[SIZE] = size; @@ -125,7 +125,7 @@ public: delete getChild(i); } - _mm_free(mD); + ll_aligned_free_16(mD); } inline const BaseType* getParent() const { return mParent; } diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index ef1ab57036..a8684759f3 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1987,7 +1987,7 @@ BOOL LLVolume::generate() void LLVolumeFace::VertexData::init() { - mData = (LLVector4a*) _mm_malloc(32, 16); + mData = (LLVector4a*) ll_aligned_malloc_16(32); } LLVolumeFace::VertexData::VertexData() @@ -2004,7 +2004,7 @@ LLVolumeFace::VertexData::VertexData(const VertexData& rhs) LLVolumeFace::VertexData::~VertexData() { - _mm_free(mData); + ll_aligned_free_16(mData); } LLVector4a& LLVolumeFace::VertexData::getPosition() @@ -5196,7 +5196,7 @@ LLVolumeFace::LLVolumeFace() : mWeights(NULL), mOctree(NULL) { - mExtents = (LLVector4a*) _mm_malloc(48, 16); + mExtents = (LLVector4a*) ll_aligned_malloc_16(48); mCenter = mExtents+2; } @@ -5217,7 +5217,7 @@ LLVolumeFace::LLVolumeFace(const LLVolumeFace& src) mWeights(NULL), mOctree(NULL) { - mExtents = (LLVector4a*) _mm_malloc(48, 16); + mExtents = (LLVector4a*) ll_aligned_malloc_16(48); mCenter = mExtents+2; *this = src; } @@ -5264,7 +5264,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } else { - _mm_free(mBinormals); + ll_aligned_free_16(mBinormals); mBinormals = NULL; } @@ -5275,7 +5275,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } else { - _mm_free(mWeights); + ll_aligned_free_16(mWeights); mWeights = NULL; } } @@ -5295,7 +5295,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) LLVolumeFace::~LLVolumeFace() { - _mm_free(mExtents); + ll_aligned_free_16(mExtents); mExtents = NULL; freeData(); @@ -5303,17 +5303,17 @@ LLVolumeFace::~LLVolumeFace() void LLVolumeFace::freeData() { - _mm_free(mPositions); + ll_aligned_free_16(mPositions); mPositions = NULL; - _mm_free(mNormals); + ll_aligned_free_16(mNormals); mNormals = NULL; - _mm_free(mTexCoords); + ll_aligned_free_16(mTexCoords); mTexCoords = NULL; - _mm_free(mIndices); + ll_aligned_free_16(mIndices); mIndices = NULL; - _mm_free(mBinormals); + ll_aligned_free_16(mBinormals); mBinormals = NULL; - _mm_free(mWeights); + ll_aligned_free_16(mWeights); mWeights = NULL; delete mOctree; @@ -6082,21 +6082,21 @@ void LLVolumeFace::createBinormals() void LLVolumeFace::resizeVertices(S32 num_verts) { - _mm_free(mPositions); - _mm_free(mNormals); - _mm_free(mBinormals); - _mm_free(mTexCoords); + ll_aligned_free_16(mPositions); + ll_aligned_free_16(mNormals); + ll_aligned_free_16(mBinormals); + ll_aligned_free_16(mTexCoords); mBinormals = NULL; if (num_verts) { - mPositions = (LLVector4a*) _mm_malloc(num_verts*16, 16); - mNormals = (LLVector4a*) _mm_malloc(num_verts*16, 16); + mPositions = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); + mNormals = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); //pad texture coordinate block 
end to allow for QWORD reads S32 size = ((num_verts*8) + 0xF) & ~0xF; - mTexCoords = (LLVector2*) _mm_malloc(size, 16); + mTexCoords = (LLVector2*) ll_aligned_malloc_16(size); } else { @@ -6119,20 +6119,20 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con S32 new_size = new_verts*16; //positions - LLVector4a* dst = (LLVector4a*) _mm_malloc(new_size, 16); + LLVector4a* dst = (LLVector4a*) ll_aligned_malloc_16(new_size); if (mPositions) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, new_size/4); - _mm_free(mPositions); + ll_aligned_free_16(mPositions); } mPositions = dst; //normals - dst = (LLVector4a*) _mm_malloc(new_size, 16); + dst = (LLVector4a*) ll_aligned_malloc_16(new_size); if (mNormals) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, new_size/4); - _mm_free(mNormals); + ll_aligned_free_16(mNormals); } mNormals = dst; @@ -6140,16 +6140,16 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con new_size = ((new_verts*8)+0xF) & ~0xF; { - LLVector2* dst = (LLVector2*) _mm_malloc(new_size, 16); + LLVector2* dst = (LLVector2*) ll_aligned_malloc_16(new_size); if (mTexCoords) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, new_size/4); - _mm_free(mTexCoords); + ll_aligned_free_16(mTexCoords); } } //just clear binormals - _mm_free(mBinormals); + ll_aligned_free_16(mBinormals); mBinormals = NULL; mPositions[mNumVertices] = pos; @@ -6161,26 +6161,26 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con void LLVolumeFace::allocateBinormals(S32 num_verts) { - _mm_free(mBinormals); - mBinormals = (LLVector4a*) _mm_malloc(num_verts*16, 16); + ll_aligned_free_16(mBinormals); + mBinormals = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); } void LLVolumeFace::allocateWeights(S32 num_verts) { - _mm_free(mWeights); - mWeights = (LLVector4a*) _mm_malloc(num_verts*16, 16); + ll_aligned_free_16(mWeights); + mWeights = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); } void LLVolumeFace::resizeIndices(S32 num_indices) { - _mm_free(mIndices); + ll_aligned_free_16(mIndices); if (num_indices) { //pad index block end to allow for QWORD reads S32 size = ((num_indices*2) + 0xF) & ~0xF; - mIndices = (U16*) _mm_malloc(size,16); + mIndices = (U16*) ll_aligned_malloc_16(size); } else { @@ -6198,9 +6198,9 @@ void LLVolumeFace::pushIndex(const U16& idx) S32 old_size = (mNumIndices+0xF) & ~0xF; if (new_size != old_size) { - U16* dst = (U16*) _mm_malloc(new_size, 16); + U16* dst = (U16*) ll_aligned_malloc_16(new_size); LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, new_size/4); - _mm_free(mIndices); + ll_aligned_free_16(mIndices); mIndices = dst; } @@ -6237,17 +6237,17 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat } - LLVector4a* new_pos = (LLVector4a*) _mm_malloc(new_count*16, 16); - LLVector4a* new_norm = (LLVector4a*) _mm_malloc(new_count*16, 16); - LLVector2* new_tc = (LLVector2*) _mm_malloc((new_count*8+0xF) & ~0xF, 16); + LLVector4a* new_pos = (LLVector4a*) ll_aligned_malloc_16(new_count*16); + LLVector4a* new_norm = (LLVector4a*) ll_aligned_malloc_16(new_count*16); + LLVector2* new_tc = (LLVector2*) ll_aligned_malloc_16((new_count*8+0xF) & ~0xF); LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, new_count*4); LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, new_count*4); LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, new_count*2); - _mm_free(mPositions); - 
_mm_free(mNormals); - _mm_free(mTexCoords); + ll_aligned_free_16(mPositions); + ll_aligned_free_16(mNormals); + ll_aligned_free_16(mTexCoords); mPositions = new_pos; mNormals = new_norm; @@ -6287,9 +6287,9 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat new_count = mNumIndices + face.mNumIndices; - U16* new_indices = (U16*) _mm_malloc((new_count*2+0xF) & ~0xF, 16); + U16* new_indices = (U16*) ll_aligned_malloc_16((new_count*2+0xF) & ~0xF); LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, new_count/2); - _mm_free(mIndices); + ll_aligned_free_16(mIndices); mIndices = new_indices; mNumIndices = new_count; -- cgit v1.2.3 From 6ca40c7afbd794b161904b63b88a95273ae80c9c Mon Sep 17 00:00:00 2001 From: Tofu Linden Date: Fri, 4 Jun 2010 09:10:00 +0100 Subject: more needed #includes --- indra/llmath/llvolume.cpp | 1 + 1 file changed, 1 insertion(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index a8684759f3..ce7e20e9b4 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -31,6 +31,7 @@ */ #include "linden_common.h" +#include "llmemory.h" #include "llmath.h" #include -- cgit v1.2.3 From a8f0e47fd5deee1e45b4126ee43955a7bc68bb5d Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 4 Jun 2010 12:07:55 -0500 Subject: Normal debug display and fix for bad bump mapping and planar texture coordinates. --- indra/llmath/llvolume.cpp | 2 ++ 1 file changed, 2 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 72833c019f..8cb9475994 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -6076,6 +6076,8 @@ void LLVolumeFace::createBinormals() for (U32 i = 0; i < mNumVertices; i++) { binorm[i].normalize3fast(); + //bump map/planar projection code requires normals to be normalized + mNormals[i].normalize3fast(); } } } -- cgit v1.2.3 From f9b13d8f8510b1f7f02fcf92685471461b79858e Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 10 Jun 2010 00:45:48 -0500 Subject: Add "LL_MESH_ENABLED" preprocessor flag for toggling mesh code. Couple of merge fixes. --- indra/llmath/llvolume.cpp | 13 ++++++++----- indra/llmath/llvolume.h | 7 +++++++ 2 files changed, 15 insertions(+), 5 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 09ab47b890..05868e3517 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2546,10 +2546,12 @@ S32 LLVolume::getNumFaces() const { U8 sculpt_type = (mParams.getSculptType() & LL_SCULPT_TYPE_MASK); +#if LL_MESH_ENABLED if (sculpt_type == LL_SCULPT_TYPE_MESH) { return LL_SCULPT_MESH_MAX_FACES; } +#endif return (S32)mProfilep->mFaces.size(); } @@ -2922,11 +2924,6 @@ void LLVolume::sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, LLMemType m1(LLMemType::MTYPE_VOLUME); U8 sculpt_type = mParams.getSculptType(); - if (sculpt_type & LL_SCULPT_TYPE_MASK == LL_SCULPT_TYPE_MESH) - { - llerrs << "WTF?" 
<< llendl; - } - BOOL data_is_empty = FALSE; if (sculpt_width == 0 || sculpt_height == 0 || sculpt_components < 3 || sculpt_data == NULL) @@ -4135,10 +4132,12 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, normals.clear(); segments.clear(); +#if LL_MESH_ENABLED if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH) { return; } +#endif S32 cur_index = 0; //for each face @@ -6335,10 +6334,14 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) resizeVertices(num_vertices); resizeIndices(num_indices); +#if LL_MESH_ENABLED if ((volume->getParams().getSculptType() & LL_SCULPT_TYPE_MASK) != LL_SCULPT_TYPE_MESH) { mEdge.resize(num_indices); } +#else + mEdge.resize(num_indices); +#endif } LLVector4a* pos = (LLVector4a*) mPositions; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index c49d1c650d..0782944079 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -41,6 +41,8 @@ class LLVolumeParams; class LLProfile; class LLPath; +#define LL_MESH_ENABLED 0 + template class LLOctreeNode; class LLVector4a; @@ -190,10 +192,15 @@ const U8 LL_SCULPT_TYPE_SPHERE = 1; const U8 LL_SCULPT_TYPE_TORUS = 2; const U8 LL_SCULPT_TYPE_PLANE = 3; const U8 LL_SCULPT_TYPE_CYLINDER = 4; +#if LL_MESH_ENABLED const U8 LL_SCULPT_TYPE_MESH = 5; const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | LL_SCULPT_TYPE_CYLINDER | LL_SCULPT_TYPE_MESH; +#else +const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | + LL_SCULPT_TYPE_CYLINDER; +#endif const U8 LL_SCULPT_FLAG_INVERT = 64; const U8 LL_SCULPT_FLAG_MIRROR = 128; -- cgit v1.2.3 From 9440f84dca48e3e2f6022dbf5f916d6439c7a826 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 10 Jun 2010 14:58:02 -0500 Subject: Potential fix for busted binormals on cubes. --- indra/llmath/llvolume.cpp | 2 ++ 1 file changed, 2 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 05868e3517..ba5d5e7ca1 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5566,6 +5566,8 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) corners[0].getPosition(), corners[0].mTexCoord, corners[1].getPosition(), corners[1].mTexCoord, corners[2].getPosition(), corners[2].mTexCoord); + + binormal.normalize3fast(); S32 size = (grid_size+1)*(grid_size+1); resizeVertices(size); -- cgit v1.2.3 From d2d49e3d84956ec4efb9f1a7a308d530c7fc4737 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 10 Jun 2010 15:02:41 -0500 Subject: Fix for memcpyNonAliased16 issues. --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 09ab47b890..cafa1e5c44 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5255,7 +5255,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) LLVector4a::memcpyNonAliased16((F32*) mPositions, (F32*) src.mPositions, vert_size); LLVector4a::memcpyNonAliased16((F32*) mNormals, (F32*) src.mNormals, vert_size); - LLVector4a::memcpyNonAliased16((F32*) mTexCoords, (F32*) src.mTexCoords, vert_size); + LLVector4a::memcpyNonAliased16((F32*) mTexCoords, (F32*) src.mTexCoords, tc_size); if (src.mBinormals) -- cgit v1.2.3 From 3ea0018dc6f94c029d7dbf13bdd5b0bc0558c8d8 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 10 Jun 2010 15:07:30 -0500 Subject: Enable meshes. 
--- indra/llmath/llvolume.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 0782944079..98db7f31c0 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -41,7 +41,7 @@ class LLVolumeParams; class LLProfile; class LLPath; -#define LL_MESH_ENABLED 0 +#define LL_MESH_ENABLED 1 template class LLOctreeNode; -- cgit v1.2.3 From 2ff888d2bc7424e70a836adb8ec4cf27df75ab7b Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 10 Jun 2010 21:47:49 -0500 Subject: Unused variable. --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index ba5d5e7ca1..ac3ed5d63e 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2544,9 +2544,9 @@ void LLVolume::copyVolumeFaces(LLVolume* volume) S32 LLVolume::getNumFaces() const { +#if LL_MESH_ENABLED U8 sculpt_type = (mParams.getSculptType() & LL_SCULPT_TYPE_MASK); -#if LL_MESH_ENABLED if (sculpt_type == LL_SCULPT_TYPE_MESH) { return LL_SCULPT_MESH_MAX_FACES; -- cgit v1.2.3 From 6e37ec08f678451a526f34218cb070d117cdf60a Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 14 Jun 2010 23:13:10 -0500 Subject: Builds with LLConvexDecompInter as a static lib. --- indra/llmath/llvolume.cpp | 8 ++++---- indra/llmath/llvolume.h | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 10cef533b0..53f484fb79 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5355,9 +5355,9 @@ bool LLVolumeFace::VertexMapData::operator==(const LLVolumeFace::VertexData& rhs getNormal().equal3(rhs.getNormal()); } -bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector4a& a, const LLVector4a& b) const +bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a, const LLVector3& b) const { - return a.less3(b); + return a < b; } void LLVolumeFace::optimize(F32 angle_cutoff) @@ -5375,7 +5375,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) getVertexData(index, cv); BOOL found = FALSE; - VertexMapData::PointMap::iterator point_iter = point_map.find(cv.getPosition()); + VertexMapData::PointMap::iterator point_iter = point_map.find(LLVector3(cv.getPosition().getF32())); if (point_iter != point_map.end()) { //duplicate point might exist for (U32 j = 0; j < point_iter->second.size(); ++j) @@ -5407,7 +5407,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) } else { - point_map[d.getPosition()].push_back(d); + point_map[LLVector3(d.getPosition().getF32())].push_back(d); } } } diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 98db7f31c0..4aef3be973 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -873,10 +873,10 @@ public: struct ComparePosition { - bool operator()(const LLVector4a& a, const LLVector4a& b) const; + bool operator()(const LLVector3& a, const LLVector3& b) const; }; - typedef std::map, VertexMapData::ComparePosition > PointMap; + typedef std::map, VertexMapData::ComparePosition > PointMap; }; void optimize(F32 angle_cutoff = 2.f); -- cgit v1.2.3 From 66e353812f4732c77206322d271b2346dd74feec Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 15 Jun 2010 19:16:39 -0500 Subject: Get meshes working post-SSE pass. 
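Context for the VertexData changes in this patch: after the SSE pass a VertexData owns a heap-allocated, 16-byte-aligned block behind its mData pointer. That breaks two things addressed below: the byte-wise operator< and operator== (walking the object as U8 compares the pointer's address and any padding instead of the vertex values), and the compiler-generated assignment operator (it would copy the mData pointer itself, leaving two objects aliasing one block that the destructor then frees twice). A minimal illustration of the comparison half, using made-up names rather than the viewer's:

#include <cstring>

struct Vert
{
    float* data;    // heap storage; the address differs between equal-valued verts
    float  u, v;
};

// Broken once the struct holds a pointer: memcmp sees the pointer bits
// (and padding), so two verts with identical contents can compare unequal.
bool bytewise_equal(const Vert& a, const Vert& b)
{
    return std::memcmp(&a, &b, sizeof(Vert)) == 0;
}

// What the patch switches to: compare the values the members refer to.
bool memberwise_equal(const Vert& a, const Vert& b)
{
    return a.data[0] == b.data[0] && a.u == b.u && a.v == b.v;
}

The same reasoning motivates the explicit operator= and the guarded init(): copying a VertexData has to duplicate the pointed-to data, never the pointer.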
--- indra/llmath/llvolume.cpp | 89 +++++++++++++++++++++++++++-------------------- indra/llmath/llvolume.h | 2 ++ 2 files changed, 54 insertions(+), 37 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 62e488452a..a4022c842d 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1988,19 +1988,33 @@ BOOL LLVolume::generate() void LLVolumeFace::VertexData::init() { - mData = (LLVector4a*) ll_aligned_malloc_16(32); + if (!mData) + { + mData = (LLVector4a*) ll_aligned_malloc_16(32); + } } LLVolumeFace::VertexData::VertexData() { + mData = NULL; init(); } LLVolumeFace::VertexData::VertexData(const VertexData& rhs) { - init(); - LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8); - mTexCoord = rhs.mTexCoord; + mData = NULL; + *this = rhs; +} + +const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolumeFace::VertexData& rhs) +{ + if (this != &rhs) + { + init(); + LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8); + mTexCoord = rhs.mTexCoord; + } + return *this; } LLVolumeFace::VertexData::~VertexData() @@ -2041,34 +2055,29 @@ void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const { - const U8* l = (const U8*) this; - const U8* r = (const U8*) &rhs; + if (mData[POSITION].notEqual3(rhs.getPosition())) + { + return mData[POSITION].less3(rhs.getPosition()); + } - for (U32 i = 0; i < sizeof(VertexData); ++i) + if (mData[NORMAL].notEqual3(rhs.getNormal())) { - if (l[i] != r[i]) - { - return r[i] < l[i]; - } + return mData[NORMAL].less3(rhs.getNormal()); } - + + if (mTexCoord != rhs.mTexCoord) + { + return mTexCoord < rhs.mTexCoord; + } + return false; } bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const { - const U8* l = (const U8*) this; - const U8* r = (const U8*) &rhs; - - for (U32 i = 0; i < sizeof(VertexData); ++i) - { - if (l[i] != r[i]) - { - return false; - } - } - - return true; + return mData[POSITION].equal3(rhs.getPosition()) && + mData[NORMAL].equal3(rhs.getNormal()) && + mTexCoord == rhs.mTexCoord; } bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const @@ -6244,10 +6253,13 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat LLVector4a* new_pos = (LLVector4a*) ll_aligned_malloc_16(new_count*16); LLVector4a* new_norm = (LLVector4a*) ll_aligned_malloc_16(new_count*16); LLVector2* new_tc = (LLVector2*) ll_aligned_malloc_16((new_count*8+0xF) & ~0xF); - - LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, new_count*4); - LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, new_count*4); - LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, new_count*2); + + if (mNumVertices > 0) + { + LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, mNumVertices*4); + LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, mNumVertices*4); + LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, mNumVertices*2); + } ll_aligned_free_16(mPositions); ll_aligned_free_16(mNormals); @@ -6259,13 +6271,13 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat mNumVertices = new_count; - LLVector4a* dst_pos = (LLVector4a*) mPositions+offset; - LLVector2* dst_tc = (LLVector2*) mTexCoords+offset; - LLVector4a* dst_norm = (LLVector4a*) mNormals+offset; + LLVector4a* dst_pos = 
mPositions+offset; + LLVector2* dst_tc = mTexCoords+offset; + LLVector4a* dst_norm = mNormals+offset; - LLVector4a* src_pos = (LLVector4a*) face.mPositions; - LLVector2* src_tc = (LLVector2*) face.mTexCoords; - LLVector4a* src_norm = (LLVector4a*) face.mNormals; + const LLVector4a* src_pos = face.mPositions; + const LLVector2* src_tc = face.mTexCoords; + const LLVector4a* src_norm = face.mNormals; LLMatrix4a mat, norm_mat; mat.loadu(mat_in); @@ -6292,13 +6304,16 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat new_count = mNumIndices + face.mNumIndices; U16* new_indices = (U16*) ll_aligned_malloc_16((new_count*2+0xF) & ~0xF); - LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, new_count/2); + if (mNumIndices > 0) + { + LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, llmax(mNumIndices/2, 1)); + } + ll_aligned_free_16(mIndices); mIndices = new_indices; + U16* dst_idx = mIndices+mNumIndices; mNumIndices = new_count; - U16* dst_idx = mIndices+offset; - for (U32 i = 0; i < face.mNumIndices; ++i) { dst_idx[i] = face.mIndices[i]+offset; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 4aef3be973..af28337f57 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -817,6 +817,8 @@ public: public: VertexData(); VertexData(const VertexData& rhs); + const VertexData& operator=(const VertexData& rhs); + ~VertexData(); LLVector4a& getPosition(); LLVector4a& getNormal(); -- cgit v1.2.3 From 6f2bd694d9a21174648b4e4f76d6d078aa88265f Mon Sep 17 00:00:00 2001 From: Matthew Breindel Date: Sat, 26 Jun 2010 12:35:05 -0700 Subject: Fixed mac build (given LL_MESH_ENABLED set to 0). --- indra/llmath/llvolume.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index af28337f57..9cce94e6cf 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -41,7 +41,7 @@ class LLVolumeParams; class LLProfile; class LLPath; -#define LL_MESH_ENABLED 1 +#define LL_MESH_ENABLED 0 template class LLOctreeNode; -- cgit v1.2.3 From 46768c3c6c263ec27a80c854734ce0b61d29686f Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 12 Jul 2010 07:39:23 -0500 Subject: Merge? 
--- indra/llmath/llvolume.cpp | 186 ++++++++++++++++++++++++++++------------------ 1 file changed, 112 insertions(+), 74 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index a4022c842d..38944d3855 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5245,6 +5245,10 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) mNumS = src.mNumS; mNumT = src.mNumT; + mExtents[0] = src.mExtents[0]; + mExtents[1] = src.mExtents[1]; + *mCenter = *src.mCenter; + mNumVertices = 0; mNumIndices = 0; @@ -5297,7 +5301,6 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) LLVector4a::memcpyNonAliased16((F32*) mIndices, (F32*) src.mIndices, idx_size); } - //delete return *this; } @@ -5535,88 +5538,100 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) S32 offset = 0; if (mTypeMask & TOP_MASK) + { offset = (max_t-1) * max_s; + } else + { offset = mBeginS; - - VertexData corners[4]; - VertexData baseVert; - for(int t = 0; t < 4; t++){ - corners[t].getPosition().load3( mesh[offset + (grid_size*t)].mPos.mV); - corners[t].mTexCoord.mV[0] = profile[grid_size*t].mV[0]+0.5f; - corners[t].mTexCoord.mV[1] = 0.5f - profile[grid_size*t].mV[1]; } { - LLVector4a lhs; - lhs.setSub(corners[1].getPosition(), corners[0].getPosition()); - LLVector4a rhs; - rhs.setSub(corners[2].getPosition(), corners[1].getPosition()); - baseVert.getNormal().setCross3(lhs, rhs); - baseVert.getNormal().normalize3fast(); - } + VertexData corners[4]; + VertexData baseVert; + for(S32 t = 0; t < 4; t++) + { + corners[t].getPosition().load3( mesh[offset + (grid_size*t)].mPos.mV); + corners[t].mTexCoord.mV[0] = profile[grid_size*t].mV[0]+0.5f; + corners[t].mTexCoord.mV[1] = 0.5f - profile[grid_size*t].mV[1]; + } - if(!(mTypeMask & TOP_MASK)){ - baseVert.getNormal().mul(-1.0f); - }else{ - //Swap the UVs on the U(X) axis for top face - LLVector2 swap; - swap = corners[0].mTexCoord; - corners[0].mTexCoord=corners[3].mTexCoord; - corners[3].mTexCoord=swap; - swap = corners[1].mTexCoord; - corners[1].mTexCoord=corners[2].mTexCoord; - corners[2].mTexCoord=swap; - } + { + LLVector4a lhs; + lhs.setSub(corners[1].getPosition(), corners[0].getPosition()); + LLVector4a rhs; + rhs.setSub(corners[2].getPosition(), corners[1].getPosition()); + baseVert.getNormal().setCross3(lhs, rhs); + baseVert.getNormal().normalize3fast(); + } - LLVector4a binormal; - - calc_binormal_from_triangle( binormal, - corners[0].getPosition(), corners[0].mTexCoord, - corners[1].getPosition(), corners[1].mTexCoord, - corners[2].getPosition(), corners[2].mTexCoord); - - binormal.normalize3fast(); + if(!(mTypeMask & TOP_MASK)) + { + baseVert.getNormal().mul(-1.0f); + } + else + { + //Swap the UVs on the U(X) axis for top face + LLVector2 swap; + swap = corners[0].mTexCoord; + corners[0].mTexCoord=corners[3].mTexCoord; + corners[3].mTexCoord=swap; + swap = corners[1].mTexCoord; + corners[1].mTexCoord=corners[2].mTexCoord; + corners[2].mTexCoord=swap; + } - S32 size = (grid_size+1)*(grid_size+1); - resizeVertices(size); - allocateBinormals(size); + LLVector4a binormal; + + calc_binormal_from_triangle( binormal, + corners[0].getPosition(), corners[0].mTexCoord, + corners[1].getPosition(), corners[1].mTexCoord, + corners[2].getPosition(), corners[2].mTexCoord); + + binormal.normalize3fast(); - LLVector4a* pos = (LLVector4a*) mPositions; - LLVector4a* norm = (LLVector4a*) mNormals; - LLVector4a* binorm = (LLVector4a*) mBinormals; - LLVector2* tc = (LLVector2*) 
mTexCoords; + S32 size = (grid_size+1)*(grid_size+1); + resizeVertices(size); + allocateBinormals(size); - for(int gx = 0;gxsetAdd(min, max); - mCenter->mul(0.5f); + mCenter->setAdd(min, max); + mCenter->mul(0.5f); + } if (!partial_build) { @@ -6249,73 +6264,96 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat llerrs << "Cannot append face -- 16-bit overflow will occur." << llendl; } - + if (face.mNumVertices == 0) + { + llerrs << "Cannot append empty face." << llendl; + } + + //allocate new buffer space LLVector4a* new_pos = (LLVector4a*) ll_aligned_malloc_16(new_count*16); LLVector4a* new_norm = (LLVector4a*) ll_aligned_malloc_16(new_count*16); LLVector2* new_tc = (LLVector2*) ll_aligned_malloc_16((new_count*8+0xF) & ~0xF); + if (mNumVertices > 0) - { + { //copy old buffers LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, mNumVertices*4); LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, mNumVertices*4); LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, mNumVertices*2); } + //free old buffer space ll_aligned_free_16(mPositions); ll_aligned_free_16(mNormals); ll_aligned_free_16(mTexCoords); + //point to new buffers mPositions = new_pos; mNormals = new_norm; mTexCoords = new_tc; mNumVertices = new_count; + //get destination address of appended face LLVector4a* dst_pos = mPositions+offset; LLVector2* dst_tc = mTexCoords+offset; LLVector4a* dst_norm = mNormals+offset; + //get source addresses of appended face const LLVector4a* src_pos = face.mPositions; const LLVector2* src_tc = face.mTexCoords; const LLVector4a* src_norm = face.mNormals; + //load aligned matrices LLMatrix4a mat, norm_mat; mat.loadu(mat_in); norm_mat.loadu(norm_mat_in); for (U32 i = 0; i < face.mNumVertices; ++i) { + //transform appended face position and store mat.affineTransform(src_pos[i], dst_pos[i]); + + //transform appended face normal and store norm_mat.rotate(src_norm[i], dst_norm[i]); dst_norm[i].normalize3fast(); + //copy appended face texture coordinate dst_tc[i] = src_tc[i]; if (offset == 0 && i == 0) - { + { //initialize bounding box mExtents[0] = mExtents[1] = dst_pos[i]; } else { + //stretch bounding box update_min_max(mExtents[0], mExtents[1], dst_pos[i]); } } new_count = mNumIndices + face.mNumIndices; + + //allocate new index buffer U16* new_indices = (U16*) ll_aligned_malloc_16((new_count*2+0xF) & ~0xF); if (mNumIndices > 0) - { - LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, llmax(mNumIndices/2, 1)); + { //copy old index buffer + LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, llmax(mNumIndices/2, 4)); } + //free old index buffer ll_aligned_free_16(mIndices); + + //point to new index buffer mIndices = new_indices; + + //get destination address into new index buffer U16* dst_idx = mIndices+mNumIndices; mNumIndices = new_count; for (U32 i = 0; i < face.mNumIndices; ++i) - { + { //copy indices, offsetting by old vertex count dst_idx[i] = face.mIndices[i]+offset; } } -- cgit v1.2.3 From 2dd3a6be720ed6ce7c17415fc8d81869cf46f3a0 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 13 Jul 2010 12:02:14 -0500 Subject: Fix for mesh upload, consolidate generating bad indices, and normal generation. 
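The pushVertex/pushIndex changes in this patch fix a grow-by-one reallocation that sized its copy from the new, larger buffer (new_size/4) instead of the old one, reading one element past the end of the source allocation on every push; pushIndex also computed the old buffer size without the two-bytes-per-index factor. The safe shape of that pattern, sketched here with illustrative names only:

#include <cstdlib>
#include <cstring>

// Grow a buffer from old_count to new_count elements, copying only the
// bytes that actually exist in the old allocation.
void* grow_buffer(void* old_buf, size_t old_count, size_t new_count, size_t elem_size)
{
    void* new_buf = malloc(new_count * elem_size);
    if (new_buf && old_buf)
    {
        memcpy(new_buf, old_buf, old_count * elem_size);    // old_count, never new_count
    }
    free(old_buf);
    return new_buf;
}

The viewer's version additionally rounds sizes up to 16-byte multiples and goes through the aligned allocator, but the old-size-versus-new-size copy is the actual fix.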
--- indra/llmath/llvolume.cpp | 36 +++++++++++++++++++++++++++--------- indra/llmath/llvolume.h | 2 +- 2 files changed, 28 insertions(+), 10 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 38944d3855..51bcfb38d4 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5369,7 +5369,17 @@ bool LLVolumeFace::VertexMapData::operator==(const LLVolumeFace::VertexData& rhs bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a, const LLVector3& b) const { - return a < b; + if (a.mV[0] != b.mV[0]) + { + return a.mV[0] < b.mV[0]; + } + + if (a.mV[1] != b.mV[1]) + { + return a.mV[1] < b.mV[1]; + } + + return a.mV[2] < b.mV[2]; } void LLVolumeFace::optimize(F32 angle_cutoff) @@ -6145,12 +6155,13 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con { S32 new_verts = mNumVertices+1; S32 new_size = new_verts*16; - + S32 old_size = mNumVertices*16; + //positions LLVector4a* dst = (LLVector4a*) ll_aligned_malloc_16(new_size); if (mPositions) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, new_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, old_size/4); ll_aligned_free_16(mPositions); } mPositions = dst; @@ -6159,22 +6170,25 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con dst = (LLVector4a*) ll_aligned_malloc_16(new_size); if (mNormals) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, new_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, old_size/4); ll_aligned_free_16(mNormals); } mNormals = dst; //tex coords new_size = ((new_verts*8)+0xF) & ~0xF; + old_size = ((mNumVertices*8)+0xF) & ~0xF; + dst = (LLVector4a*) ll_aligned_malloc_16(new_size); { LLVector2* dst = (LLVector2*) ll_aligned_malloc_16(new_size); if (mTexCoords) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, new_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, old_size/4); ll_aligned_free_16(mTexCoords); } } + mTexCoords = (LLVector2*) dst; //just clear binormals ll_aligned_free_16(mBinormals); @@ -6223,12 +6237,15 @@ void LLVolumeFace::pushIndex(const U16& idx) S32 new_count = mNumIndices + 1; S32 new_size = ((new_count*2)+0xF) & ~0xF; - S32 old_size = (mNumIndices+0xF) & ~0xF; + S32 old_size = ((mNumIndices*2)+0xF) & ~0xF; if (new_size != old_size) { U16* dst = (U16*) ll_aligned_malloc_16(new_size); - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, new_size/4); - ll_aligned_free_16(mIndices); + if (mIndices) + { + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, old_size/4); + ll_aligned_free_16(mIndices); + } mIndices = dst; } @@ -6339,7 +6356,8 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat U16* new_indices = (U16*) ll_aligned_malloc_16((new_count*2+0xF) & ~0xF); if (mNumIndices > 0) { //copy old index buffer - LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, llmax(mNumIndices/2, 4)); + S32 old_size = (mNumIndices*2+0xF) & ~0xF; + LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, old_size/4); } //free old index buffer diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 9cce94e6cf..af28337f57 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -41,7 +41,7 @@ class LLVolumeParams; class LLProfile; class LLPath; -#define LL_MESH_ENABLED 0 +#define LL_MESH_ENABLED 1 template class LLOctreeNode; -- cgit v1.2.3 From 
93ea3d2850067c23ff07f0ffb362b73247840e9a Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 15 Jul 2010 14:17:51 -0500 Subject: Merge cleanup. --- indra/llmath/llvolume.cpp | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 25db58491e..0976487cbd 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -93,8 +93,6 @@ const F32 SKEW_MAX = 0.95f; const F32 SCULPT_MIN_AREA = 0.002f; const S32 SCULPT_MIN_AREA_DETAIL = 1; -#define GEN_TRI_STRIP 0 - BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { LLVector3 test = (pt2-pt1)%(pt3-pt2); @@ -5663,8 +5661,6 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) { *out++ = ((gy*(grid_size+1))+gx+idxs[i]); } -#if GEN_TRI_STRIP -#endif } else { @@ -5672,14 +5668,9 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) { *out++ = ((gy*(grid_size+1))+gx+idxs[i]); } -#if GEN_TRI_STRIP -#endif } - } - -#if GEN_TRI_STRIP + } } -#endif } return TRUE; @@ -6539,13 +6530,9 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { -#if GEN_TRI_STRIP -#endif // Now we generate the indices. for (t = 0; t < (mNumT-1); t++) { -#if GEN_TRI_STRIP -#endif for (s = 0; s < (mNumS-1); s++) { mIndices[cur_index++] = s + mNumS*t; //bottom left @@ -6555,8 +6542,6 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mIndices[cur_index++] = s+1 + mNumS*t; //bottom right mIndices[cur_index++] = s+1 + mNumS*(t+1); //top right -#if GEN_TRI_STRIP -#endif mEdge[cur_edge++] = (mNumS-1)*2*t+s*2+1; //bottom left/top right neighbor face if (t < mNumT-2) { //top right/top left neighbor face mEdge[cur_edge++] = (mNumS-1)*2*(t+1)+s*2+1; @@ -6597,11 +6582,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mEdge[cur_edge++] = (mNumS-1)*2*t+s*2; //top right/bottom left neighbor face } -#if GEN_TRI_STRIP -#endif -#if GEN_TRI_STRIP } -#endif } //generate normals @@ -6684,7 +6665,6 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } } } - else // logic for sculpt volumes { BOOL average_poles = FALSE; -- cgit v1.2.3 From 129e31373eaef9cbd74451bfeb4ad62b64d17250 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 6 Aug 2010 11:57:03 -0500 Subject: Better < operator for LLVolumeFace::VertexData --- indra/llmath/llvolume.cpp | 40 +++++++++++++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 7 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 0976487cbd..bba0a6d089 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2055,22 +2055,48 @@ void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const { - if (mData[POSITION].notEqual3(rhs.getPosition())) + const F32* lp = this->getPosition().getF32(); + const F32* rp = rhs.getPosition().getF32(); + + if (lp[0] != rp[0]) + { + return lp[0] < rp[0]; + } + + if (rp[1] != lp[1]) + { + return lp[1] < rp[1]; + } + + if (rp[2] != lp[2]) + { + return lp[2] < rp[2]; + } + + lp = getNormal().getF32(); + rp = rhs.getNormal().getF32(); + + if (lp[0] != rp[0]) + { + return lp[0] < rp[0]; + } + + if (rp[1] != lp[1]) { - return mData[POSITION].less3(rhs.getPosition()); + return lp[1] < rp[1]; } - if 
(mData[NORMAL].notEqual3(rhs.getNormal())) + if (rp[2] != lp[2]) { - return mData[NORMAL].less3(rhs.getNormal()); + return lp[2] < rp[2]; } - if (mTexCoord != rhs.mTexCoord) + if (mTexCoord.mV[0] != rhs.mTexCoord.mV[0]) { - return mTexCoord < rhs.mTexCoord; + return mTexCoord.mV[0] < rhs.mTexCoord.mV[0]; } - return false; + return mTexCoord.mV[1] < rhs.mTexCoord.mV[1]; } bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const -- cgit v1.2.3 From 2fea1d5d33ec1b41a3cfa4307a1bfa58d8014f88 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 19 Aug 2010 12:25:15 -0500 Subject: Integrate SIMD API from oreh/server-trunk-oreh --- indra/llmath/CMakeLists.txt | 246 ++--- indra/llmath/llcamera.cpp | 2 +- indra/llmath/llmath.h | 1034 +++++++++--------- indra/llmath/lloctree.h | 19 +- indra/llmath/llquantize.h | 310 +++--- indra/llmath/llquaternion.cpp | 1921 +++++++++++++++++----------------- indra/llmath/llquaternion.h | 1184 ++++++++++----------- indra/llmath/llvolume.cpp | 123 +-- indra/llmath/tests/v2math_test.cpp | 6 +- indra/llmath/tests/v3color_test.cpp | 6 +- indra/llmath/tests/v3dmath_test.cpp | 2 +- indra/llmath/tests/v3math_test.cpp | 4 +- indra/llmath/tests/v4color_test.cpp | 4 +- indra/llmath/tests/v4coloru_test.cpp | 2 +- indra/llmath/tests/v4math_test.cpp | 4 +- indra/llmath/v2math.cpp | 2 +- indra/llmath/v2math.h | 8 +- indra/llmath/v3color.h | 10 +- indra/llmath/v3dmath.h | 10 +- indra/llmath/v3math.h | 10 +- indra/llmath/v4color.h | 8 +- indra/llmath/v4coloru.h | 4 +- indra/llmath/v4math.h | 8 +- 23 files changed, 2466 insertions(+), 2461 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/CMakeLists.txt b/indra/llmath/CMakeLists.txt index dda07133d5..8d85765eb8 100644 --- a/indra/llmath/CMakeLists.txt +++ b/indra/llmath/CMakeLists.txt @@ -1,118 +1,128 @@ -# -*- cmake -*- - -project(llmath) - -include(00-Common) -include(LLCommon) - -include_directories( - ${LLCOMMON_INCLUDE_DIRS} - ) - -set(llmath_SOURCE_FILES - llbbox.cpp - llbboxlocal.cpp - llcamera.cpp - llcoordframe.cpp - llline.cpp - llmodularmath.cpp - llperlin.cpp - llquaternion.cpp - llrect.cpp - llsphere.cpp - llvolume.cpp - llvolumemgr.cpp - llvolumeoctree.cpp - llsdutil_math.cpp - m3math.cpp - m4math.cpp - raytrace.cpp - v2math.cpp - v3color.cpp - v3dmath.cpp - v3math.cpp - v4color.cpp - v4coloru.cpp - v4math.cpp - xform.cpp - ) - -set(llmath_HEADER_FILES - CMakeLists.txt - - camera.h - coordframe.h - llbbox.h - llbboxlocal.h - llcamera.h - llcoord.h - llcoordframe.h - llinterp.h - llline.h - llmath.h - llmodularmath.h - lloctree.h - llperlin.h - llplane.h - llquantize.h - llquaternion.h - llrect.h - llsphere.h - lltreenode.h - llv4math.h - llv4matrix3.h - llv4matrix4.h - llv4vector3.h - llvector4a.h - llmatrix4a.h - llvolume.h - llvolumemgr.h - llvolumeoctree.h - llsdutil_math.h - m3math.h - m4math.h - raytrace.h - v2math.h - v3color.h - v3dmath.h - v3math.h - v4color.h - v4coloru.h - v4math.h - xform.h - ) - -set_source_files_properties(${llmath_HEADER_FILES} - PROPERTIES HEADER_FILE_ONLY TRUE) - -list(APPEND llmath_SOURCE_FILES ${llmath_HEADER_FILES}) - -add_library (llmath ${llmath_SOURCE_FILES}) - -# Add tests -if (LL_TESTS) - include(LLAddBuildTest) - # UNIT TESTS - SET(llmath_TEST_SOURCE_FILES - llbboxlocal.cpp - llmodularmath.cpp - llrect.cpp - v2math.cpp - v3color.cpp - v4color.cpp - v4coloru.cpp - ) - LL_ADD_PROJECT_UNIT_TESTS(llmath "${llmath_TEST_SOURCE_FILES}") - - # INTEGRATION TESTS - set(test_libs llmath llcommon ${LLCOMMON_LIBRARIES} ${WINDOWS_LIBRARIES}) - # 
TODO: Some of these need refactoring to be proper Unit tests rather than Integration tests. - LL_ADD_INTEGRATION_TEST(llbbox llbbox.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(llquaternion llquaternion.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(mathmisc "" "${test_libs}") - LL_ADD_INTEGRATION_TEST(m3math "" "${test_libs}") - LL_ADD_INTEGRATION_TEST(v3dmath v3dmath.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(v3math v3math.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(v4math v4math.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(xform xform.cpp "${test_libs}") -endif (LL_TESTS) +# -*- cmake -*- + +project(llmath) + +include(00-Common) +include(LLCommon) + +include_directories( + ${LLCOMMON_INCLUDE_DIRS} + ) + +set(llmath_SOURCE_FILES + llbbox.cpp + llbboxlocal.cpp + llcamera.cpp + llcoordframe.cpp + llline.cpp + llmatrix3a.cpp + llmodularmath.cpp + llperlin.cpp + llquaternion.cpp + llrect.cpp + llsphere.cpp + llvector4a.cpp + llvolume.cpp + llvolumemgr.cpp + llvolumeoctree.cpp + llsdutil_math.cpp + m3math.cpp + m4math.cpp + raytrace.cpp + v2math.cpp + v3color.cpp + v3dmath.cpp + v3math.cpp + v4color.cpp + v4coloru.cpp + v4math.cpp + xform.cpp + ) + +set(llmath_HEADER_FILES + CMakeLists.txt + + camera.h + coordframe.h + llbbox.h + llbboxlocal.h + llcamera.h + llcoord.h + llcoordframe.h + llinterp.h + llline.h + llmath.h + llmatrix3a.h + llmatrix3a.inl + llmodularmath.h + lloctree.h + llperlin.h + llplane.h + llquantize.h + llquaternion.h + llquaternion2.h + llquaternion2.inl + llrect.h + llsimdmath.h + llsimdtypes.h + llsimdtypes.inl + llsphere.h + lltreenode.h + llvector4a.h + llvector4a.inl + llvector4logical.h + llv4math.h + llv4matrix3.h + llv4matrix4.h + llv4vector3.h + llvolume.h + llvolumemgr.h + llvolumeoctree.h + llsdutil_math.h + m3math.h + m4math.h + raytrace.h + v2math.h + v3color.h + v3dmath.h + v3math.h + v4color.h + v4coloru.h + v4math.h + xform.h + ) + +set_source_files_properties(${llmath_HEADER_FILES} + PROPERTIES HEADER_FILE_ONLY TRUE) + +list(APPEND llmath_SOURCE_FILES ${llmath_HEADER_FILES}) + +add_library (llmath ${llmath_SOURCE_FILES}) + +# Add tests +if (LL_TESTS) + include(LLAddBuildTest) + # UNIT TESTS + SET(llmath_TEST_SOURCE_FILES + llbboxlocal.cpp + llmodularmath.cpp + llrect.cpp + v2math.cpp + v3color.cpp + v4color.cpp + v4coloru.cpp + ) + LL_ADD_PROJECT_UNIT_TESTS(llmath "${llmath_TEST_SOURCE_FILES}") + + # INTEGRATION TESTS + set(test_libs llmath llcommon ${LLCOMMON_LIBRARIES} ${WINDOWS_LIBRARIES}) + # TODO: Some of these need refactoring to be proper Unit tests rather than Integration tests. 
+ LL_ADD_INTEGRATION_TEST(llbbox llbbox.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(llquaternion llquaternion.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(mathmisc "" "${test_libs}") + LL_ADD_INTEGRATION_TEST(m3math "" "${test_libs}") + LL_ADD_INTEGRATION_TEST(v3dmath v3dmath.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(v3math v3math.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(v4math v4math.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(xform xform.cpp "${test_libs}") +endif (LL_TESTS) diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp index 6b56e4870e..beb5c48624 100644 --- a/indra/llmath/llcamera.cpp +++ b/indra/llmath/llcamera.cpp @@ -77,7 +77,7 @@ const LLCamera& LLCamera::operator=(const LLCamera& rhs) { memcpy(this, &rhs, sizeof(LLCamera)); alignPlanes(); - LLVector4a::memcpyNonAliased16((F32*) mAgentPlanes, (F32*) rhs.mAgentPlanes, 4*7); + LLVector4a::memcpyNonAliased16((F32*) mAgentPlanes, (F32*) rhs.mAgentPlanes, 4*7*sizeof(F32)); return *this; } diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h index c3c15e1374..742bbc4751 100644 --- a/indra/llmath/llmath.h +++ b/indra/llmath/llmath.h @@ -1,525 +1,509 @@ -/** - * @file llmath.h - * @brief Useful math constants and macros. - * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#ifndef LLMATH_H -#define LLMATH_H - -#include -#include -#include -#include "lldefs.h" -//#include "llstl.h" // *TODO: Remove when LLString is gone -//#include "llstring.h" // *TODO: Remove when LLString is gone -// lltut.h uses is_approx_equal_fraction(). This was moved to its own header -// file in llcommon so we can use lltut.h for llcommon tests without making -// llcommon depend on llmath. -#include "is_approx_equal_fraction.h" - -// work around for Windows & older gcc non-standard function names. 
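Worth pausing on the llcamera.cpp hunk a little further up: the length passed to LLVector4a::memcpyNonAliased16 grows from 4*7 to 4*7*sizeof(F32), which only makes sense if the helper expects a byte count rather than a float count, meaning the old call copied just a quarter of the agent plane data. A minimal sketch of that bug class using plain memcpy; the names and layout below are illustrative only, not the viewer's API.

#include <cstring>

// Stand-in for the seven agent planes of four floats each (hypothetical layout).
const int NUM_PLANES = 7;
float dst[4 * NUM_PLANES];
float src[4 * NUM_PLANES];

void copy_planes_short()
{
    // BUG: memcpy counts bytes, so this moves only 28 bytes -- 7 of the 28 floats.
    std::memcpy(dst, src, 4 * NUM_PLANES);
}

void copy_planes_full()
{
    // Fix: scale the element count by sizeof(float), as the patch does.
    std::memcpy(dst, src, 4 * NUM_PLANES * sizeof(float));
}

int main()
{
    copy_planes_short();
    copy_planes_full();
    return 0;
}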
-#if LL_WINDOWS -#include -#define llisnan(val) _isnan(val) -#define llfinite(val) _finite(val) -#elif (LL_LINUX && __GNUC__ <= 2) -#define llisnan(val) isnan(val) -#define llfinite(val) isfinite(val) -#elif LL_SOLARIS -#define llisnan(val) isnan(val) -#define llfinite(val) (val <= std::numeric_limits::max()) -#else -#define llisnan(val) std::isnan(val) -#define llfinite(val) std::isfinite(val) -#endif - -// Single Precision Floating Point Routines -#ifndef sqrtf -#define sqrtf(x) ((F32)sqrt((F64)(x))) -#endif -#ifndef fsqrtf -#define fsqrtf(x) sqrtf(x) -#endif - -#ifndef cosf -#define cosf(x) ((F32)cos((F64)(x))) -#endif -#ifndef sinf -#define sinf(x) ((F32)sin((F64)(x))) -#endif -#ifndef tanf -#define tanf(x) ((F32)tan((F64)(x))) -#endif -#ifndef acosf -#define acosf(x) ((F32)acos((F64)(x))) -#endif - -#ifndef powf -#define powf(x,y) ((F32)pow((F64)(x),(F64)(y))) -#endif -#ifndef expf -#define expf(x) ((F32)exp((F64)(x))) -#endif - -const F32 GRAVITY = -9.8f; - -// mathematical constants -const F32 F_PI = 3.1415926535897932384626433832795f; -const F32 F_TWO_PI = 6.283185307179586476925286766559f; -const F32 F_PI_BY_TWO = 1.5707963267948966192313216916398f; -const F32 F_SQRT_TWO_PI = 2.506628274631000502415765284811f; -const F32 F_E = 2.71828182845904523536f; -const F32 F_SQRT2 = 1.4142135623730950488016887242097f; -const F32 F_SQRT3 = 1.73205080756888288657986402541f; -const F32 OO_SQRT2 = 0.7071067811865475244008443621049f; -const F32 DEG_TO_RAD = 0.017453292519943295769236907684886f; -const F32 RAD_TO_DEG = 57.295779513082320876798154814105f; -const F32 F_APPROXIMATELY_ZERO = 0.00001f; -const F32 F_LN2 = 0.69314718056f; -const F32 OO_LN2 = 1.4426950408889634073599246810019f; - -const F32 F_ALMOST_ZERO = 0.0001f; -const F32 F_ALMOST_ONE = 1.0f - F_ALMOST_ZERO; - -// BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above? -const F32 FP_MAG_THRESHOLD = 0.0000001f; - -// TODO: Replace with logic like is_approx_equal -inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); } - -// These functions work by interpreting sign+exp+mantissa as an unsigned -// integer. -// For example: -// x = 1 00000010 00000000000000000000000 -// y = 1 00000001 11111111111111111111111 -// -// interpreted as ints = -// x = 10000001000000000000000000000000 -// y = 10000000111111111111111111111111 -// which is clearly a different of 1 in the least significant bit -// Values with the same exponent can be trivially shown to work. -// -// WARNING: Denormals of opposite sign do not work -// x = 1 00000000 00000000000000000000001 -// y = 0 00000000 00000000000000000000001 -// Although these values differ by 2 in the LSB, the sign bit makes -// the int comparison fail. 
-// -// WARNING: NaNs can compare equal -// There is no special treatment of exceptional values like NaNs -// -// WARNING: Infinity is comparable with F32_MAX and negative -// infinity is comparable with F32_MIN - -inline BOOL is_approx_equal(F32 x, F32 y) -{ - const S32 COMPARE_MANTISSA_UP_TO_BIT = 0x02; - return (std::abs((S32) ((U32&)x - (U32&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); -} - -inline BOOL is_approx_equal(F64 x, F64 y) -{ - const S64 COMPARE_MANTISSA_UP_TO_BIT = 0x02; - return (std::abs((S32) ((U64&)x - (U64&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); -} - -inline S32 llabs(const S32 a) -{ - return S32(std::labs(a)); -} - -inline F32 llabs(const F32 a) -{ - return F32(std::fabs(a)); -} - -inline F64 llabs(const F64 a) -{ - return F64(std::fabs(a)); -} - -inline S32 lltrunc( F32 f ) -{ -#if LL_WINDOWS && !defined( __INTEL_COMPILER ) - // Avoids changing the floating point control word. - // Add or subtract 0.5 - epsilon and then round - const static U32 zpfp[] = { 0xBEFFFFFF, 0x3EFFFFFF }; - S32 result; - __asm { - fld f - mov eax, f - shr eax, 29 - and eax, 4 - fadd dword ptr [zpfp + eax] - fistp result - } - return result; -#else - return (S32)f; -#endif -} - -inline S32 lltrunc( F64 f ) -{ - return (S32)f; -} - -inline S32 llfloor( F32 f ) -{ -#if LL_WINDOWS && !defined( __INTEL_COMPILER ) - // Avoids changing the floating point control word. - // Accurate (unlike Stereopsis version) for all values between S32_MIN and S32_MAX and slightly faster than Stereopsis version. - // Add -(0.5 - epsilon) and then round - const U32 zpfp = 0xBEFFFFFF; - S32 result; - __asm { - fld f - fadd dword ptr [zpfp] - fistp result - } - return result; -#else - return (S32)floorf(f); -#endif -} - - -inline S32 llceil( F32 f ) -{ - // This could probably be optimized, but this works. - return (S32)ceil(f); -} - - -#ifndef BOGUS_ROUND -// Use this round. Does an arithmetic round (0.5 always rounds up) -inline S32 llround(const F32 val) -{ - return llfloor(val + 0.5f); -} - -#else // BOGUS_ROUND -// Old llround implementation - does banker's round (toward nearest even in the case of a 0.5. -// Not using this because we don't have a consistent implementation on both platforms, use -// llfloor(val + 0.5f), which is consistent on all platforms. 
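The removed comment just above spells out why the viewer standardizes on llfloor(val + 0.5f): it is an arithmetic round (0.5 always rounds up) and behaves identically on every platform, whereas the x87 fistp path uses the FPU's default round-to-nearest-even ("banker's") mode. A small standalone check of the difference, independent of the Linden types:

#include <cassert>
#include <cmath>

// Arithmetic round, matching llfloor(val + 0.5f): 0.5 always rounds up.
inline int arithmetic_round(float val)
{
    return static_cast<int>(std::floor(val + 0.5f));
}

int main()
{
    assert(arithmetic_round(2.5f) == 3);
    assert(arithmetic_round(3.5f) == 4);
    // Banker's rounding (fistp with the default control word) would give 2 for
    // the first case, which is why the viewer avoids relying on the FPU mode.
    assert(arithmetic_round(-2.5f) == -2);   // floor(-2.0f) == -2
    return 0;
}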
-inline S32 llround(const F32 val) -{ - #if LL_WINDOWS - // Note: assumes that the floating point control word is set to rounding mode (the default) - S32 ret_val; - _asm fld val - _asm fistp ret_val; - return ret_val; - #elif LL_LINUX - // Note: assumes that the floating point control word is set - // to rounding mode (the default) - S32 ret_val; - __asm__ __volatile__( "flds %1 \n\t" - "fistpl %0 \n\t" - : "=m" (ret_val) - : "m" (val) ); - return ret_val; - #else - return llfloor(val + 0.5f); - #endif -} - -// A fast arithmentic round on intel, from Laurent de Soras http://ldesoras.free.fr -inline int round_int(double x) -{ - const float round_to_nearest = 0.5f; - int i; - __asm - { - fld x - fadd st, st (0) - fadd round_to_nearest - fistp i - sar i, 1 - } - return (i); -} -#endif // BOGUS_ROUND - -inline F32 llround( F32 val, F32 nearest ) -{ - return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest; -} - -inline F64 llround( F64 val, F64 nearest ) -{ - return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest; -} - -// these provide minimum peak error -// -// avg error = -0.013049 -// peak error = -31.4 dB -// RMS error = -28.1 dB - -const F32 FAST_MAG_ALPHA = 0.960433870103f; -const F32 FAST_MAG_BETA = 0.397824734759f; - -// these provide minimum RMS error -// -// avg error = 0.000003 -// peak error = -32.6 dB -// RMS error = -25.7 dB -// -//const F32 FAST_MAG_ALPHA = 0.948059448969f; -//const F32 FAST_MAG_BETA = 0.392699081699f; - -inline F32 fastMagnitude(F32 a, F32 b) -{ - a = (a > 0) ? a : -a; - b = (b > 0) ? b : -b; - return(FAST_MAG_ALPHA * llmax(a,b) + FAST_MAG_BETA * llmin(a,b)); -} - - - -//////////////////// -// -// Fast F32/S32 conversions -// -// Culled from www.stereopsis.com/FPU.html - -const F64 LL_DOUBLE_TO_FIX_MAGIC = 68719476736.0*1.5; //2^36 * 1.5, (52-_shiftamt=36) uses limited precisicion to floor -const S32 LL_SHIFT_AMOUNT = 16; //16.16 fixed point representation, - -// Endian dependent code -#ifdef LL_LITTLE_ENDIAN - #define LL_EXP_INDEX 1 - #define LL_MAN_INDEX 0 -#else - #define LL_EXP_INDEX 0 - #define LL_MAN_INDEX 1 -#endif - -/* Deprecated: use llround(), lltrunc(), or llfloor() instead -// ================================================================================================ -// Real2Int -// ================================================================================================ -inline S32 F64toS32(F64 val) -{ - val = val + LL_DOUBLE_TO_FIX_MAGIC; - return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT; -} - -// ================================================================================================ -// Real2Int -// ================================================================================================ -inline S32 F32toS32(F32 val) -{ - return F64toS32 ((F64)val); -} -*/ - -//////////////////////////////////////////////// -// -// Fast exp and log -// - -// Implementation of fast exp() approximation (from a paper by Nicol N. 
Schraudolph -// http://www.inf.ethz.ch/~schraudo/pubs/exp.pdf -static union -{ - double d; - struct - { -#ifdef LL_LITTLE_ENDIAN - S32 j, i; -#else - S32 i, j; -#endif - } n; -} LLECO; // not sure what the name means - -#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer -#define LL_EXP_C (60801) // this value of C good for -4 < y < 4 - -#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d) - - - -inline F32 llfastpow(const F32 x, const F32 y) -{ - return (F32)(LL_FAST_EXP(y * log(x))); -} - - -inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs) -{ - // compute the power of ten - F32 bar = 1.f; - for (S32 i = 0; i < sig_figs; i++) - { - bar *= 10.f; - } - - foo = (F32)llround(foo * bar); - - // shift back - foo /= bar; - return foo; -} - -inline F32 lerp(F32 a, F32 b, F32 u) -{ - return a + ((b - a) * u); -} - -inline F32 lerp2d(F32 x00, F32 x01, F32 x10, F32 x11, F32 u, F32 v) -{ - F32 a = x00 + (x01-x00)*u; - F32 b = x10 + (x11-x10)*u; - F32 r = a + (b-a)*v; - return r; -} - -inline F32 ramp(F32 x, F32 a, F32 b) -{ - return (a == b) ? 0.0f : ((a - x) / (a - b)); -} - -inline F32 rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) -{ - return lerp(y1, y2, ramp(x, x1, x2)); -} - -inline F32 clamp_rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) -{ - if (y1 < y2) - { - return llclamp(rescale(x,x1,x2,y1,y2),y1,y2); - } - else - { - return llclamp(rescale(x,x1,x2,y1,y2),y2,y1); - } -} - - -inline F32 cubic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) -{ - if (x <= x0) - return s0; - - if (x >= x1) - return s1; - - F32 f = (x - x0) / (x1 - x0); - - return s0 + (s1 - s0) * (f * f) * (3.0f - 2.0f * f); -} - -inline F32 cubic_step( F32 x ) -{ - x = llclampf(x); - - return (x * x) * (3.0f - 2.0f * x); -} - -inline F32 quadratic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) -{ - if (x <= x0) - return s0; - - if (x >= x1) - return s1; - - F32 f = (x - x0) / (x1 - x0); - F32 f_squared = f * f; - - return (s0 * (1.f - f_squared)) + ((s1 - s0) * f_squared); -} - -inline F32 llsimple_angle(F32 angle) -{ - while(angle <= -F_PI) - angle += F_TWO_PI; - while(angle > F_PI) - angle -= F_TWO_PI; - return angle; -} - -//SDK - Renamed this to get_lower_power_two, since this is what this actually does. -inline U32 get_lower_power_two(U32 val, U32 max_power_two) -{ - if(!max_power_two) - { - max_power_two = 1 << 31 ; - } - if(max_power_two & (max_power_two - 1)) - { - return 0 ; - } - - for(; val < max_power_two ; max_power_two >>= 1) ; - - return max_power_two ; -} - -// calculate next highest power of two, limited by max_power_two -// This is taken from a brilliant little code snipped on http://acius2.blogspot.com/2007/11/calculating-next-power-of-2.html -// Basically we convert the binary to a solid string of 1's with the same -// number of digits, then add one. We subtract 1 initially to handle -// the case where the number passed in is actually a power of two. -// WARNING: this only works with 32 bit ints. 
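The comment above describes the classic bit-smearing trick: subtract one, OR the value with shifted copies of itself until everything below the top set bit is ones, then add one. A standalone version (without the max_power_two clamp of the original) plus a couple of spot checks:

#include <cassert>
#include <cstdint>

// Round val up to the next power of two; 32-bit values only, as the comment warns.
inline uint32_t next_power_of_two(uint32_t val)
{
    val--;              // so that exact powers of two map to themselves
    val |= val >> 1;    // smear the highest set bit downward...
    val |= val >> 2;
    val |= val >> 4;
    val |= val >> 8;
    val |= val >> 16;   // ...until every lower bit is set
    return val + 1;     // a solid run of ones plus one is the next power of two
}

int main()
{
    assert(next_power_of_two(5)  == 8);
    assert(next_power_of_two(8)  == 8);    // exact power of two is unchanged
    assert(next_power_of_two(33) == 64);
    return 0;
}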
-inline U32 get_next_power_two(U32 val, U32 max_power_two) -{ - if(!max_power_two) - { - max_power_two = 1 << 31 ; - } - - if(val >= max_power_two) - { - return max_power_two; - } - - val--; - val = (val >> 1) | val; - val = (val >> 2) | val; - val = (val >> 4) | val; - val = (val >> 8) | val; - val = (val >> 16) | val; - val++; - - return val; -} - -//get the gaussian value given the linear distance from axis x and guassian value o -inline F32 llgaussian(F32 x, F32 o) -{ - return 1.f/(F_SQRT_TWO_PI*o)*powf(F_E, -(x*x)/(2*o*o)); -} - -#endif +/** + * @file llmath.h + * @brief Useful math constants and macros. + * + * $LicenseInfo:firstyear=2000&license=viewergpl$ + * + * Copyright (c) 2000-2009, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LLMATH_H +#define LLMATH_H + +#include +#include +#include "lldefs.h" +//#include "llstl.h" // *TODO: Remove when LLString is gone +//#include "llstring.h" // *TODO: Remove when LLString is gone +// lltut.h uses is_approx_equal_fraction(). This was moved to its own header +// file in llcommon so we can use lltut.h for llcommon tests without making +// llcommon depend on llmath. +#include "is_approx_equal_fraction.h" + +// work around for Windows & older gcc non-standard function names. +#if LL_WINDOWS +#include +#define llisnan(val) _isnan(val) +#define llfinite(val) _finite(val) +#elif (LL_LINUX && __GNUC__ <= 2) +#define llisnan(val) isnan(val) +#define llfinite(val) isfinite(val) +#elif LL_SOLARIS +#define llisnan(val) isnan(val) +#define llfinite(val) (val <= std::numeric_limits::max()) +#else +#define llisnan(val) std::isnan(val) +#define llfinite(val) std::isfinite(val) +#endif + +// Single Precision Floating Point Routines +// (There used to be more defined here, but they appeared to be redundant and +// were breaking some other includes. 
Removed by Falcon, reviewed by Andrew, 11/25/09) +/*#ifndef tanf +#define tanf(x) ((F32)tan((F64)(x))) +#endif*/ + +const F32 GRAVITY = -9.8f; + +// mathematical constants +const F32 F_PI = 3.1415926535897932384626433832795f; +const F32 F_TWO_PI = 6.283185307179586476925286766559f; +const F32 F_PI_BY_TWO = 1.5707963267948966192313216916398f; +const F32 F_SQRT_TWO_PI = 2.506628274631000502415765284811f; +const F32 F_E = 2.71828182845904523536f; +const F32 F_SQRT2 = 1.4142135623730950488016887242097f; +const F32 F_SQRT3 = 1.73205080756888288657986402541f; +const F32 OO_SQRT2 = 0.7071067811865475244008443621049f; +const F32 DEG_TO_RAD = 0.017453292519943295769236907684886f; +const F32 RAD_TO_DEG = 57.295779513082320876798154814105f; +const F32 F_APPROXIMATELY_ZERO = 0.00001f; +const F32 F_LN2 = 0.69314718056f; +const F32 OO_LN2 = 1.4426950408889634073599246810019f; + +const F32 F_ALMOST_ZERO = 0.0001f; +const F32 F_ALMOST_ONE = 1.0f - F_ALMOST_ZERO; + +// BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above? +const F32 FP_MAG_THRESHOLD = 0.0000001f; + +// TODO: Replace with logic like is_approx_equal +inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); } + +// These functions work by interpreting sign+exp+mantissa as an unsigned +// integer. +// For example: +// x = 1 00000010 00000000000000000000000 +// y = 1 00000001 11111111111111111111111 +// +// interpreted as ints = +// x = 10000001000000000000000000000000 +// y = 10000000111111111111111111111111 +// which is clearly a different of 1 in the least significant bit +// Values with the same exponent can be trivially shown to work. +// +// WARNING: Denormals of opposite sign do not work +// x = 1 00000000 00000000000000000000001 +// y = 0 00000000 00000000000000000000001 +// Although these values differ by 2 in the LSB, the sign bit makes +// the int comparison fail. +// +// WARNING: NaNs can compare equal +// There is no special treatment of exceptional values like NaNs +// +// WARNING: Infinity is comparable with F32_MAX and negative +// infinity is comparable with F32_MIN + +inline BOOL is_approx_equal(F32 x, F32 y) +{ + const S32 COMPARE_MANTISSA_UP_TO_BIT = 0x02; + return (std::abs((S32) ((U32&)x - (U32&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); +} + +inline BOOL is_approx_equal(F64 x, F64 y) +{ + const S64 COMPARE_MANTISSA_UP_TO_BIT = 0x02; + return (std::abs((S32) ((U64&)x - (U64&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); +} + +inline S32 llabs(const S32 a) +{ + return S32(std::labs(a)); +} + +inline F32 llabs(const F32 a) +{ + return F32(std::fabs(a)); +} + +inline F64 llabs(const F64 a) +{ + return F64(std::fabs(a)); +} + +inline S32 lltrunc( F32 f ) +{ +#if LL_WINDOWS && !defined( __INTEL_COMPILER ) + // Avoids changing the floating point control word. + // Add or subtract 0.5 - epsilon and then round + const static U32 zpfp[] = { 0xBEFFFFFF, 0x3EFFFFFF }; + S32 result; + __asm { + fld f + mov eax, f + shr eax, 29 + and eax, 4 + fadd dword ptr [zpfp + eax] + fistp result + } + return result; +#else + return (S32)f; +#endif +} + +inline S32 lltrunc( F64 f ) +{ + return (S32)f; +} + +inline S32 llfloor( F32 f ) +{ +#if LL_WINDOWS && !defined( __INTEL_COMPILER ) + // Avoids changing the floating point control word. + // Accurate (unlike Stereopsis version) for all values between S32_MIN and S32_MAX and slightly faster than Stereopsis version. 
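Stepping back to the is_approx_equal pair re-added above: it measures the distance between two floats in units in the last place (ULPs) by reinterpreting their bit patterns as integers, with the documented caveats about opposite-sign values and NaNs. A sketch of the same idea using memcpy instead of the reference cast (an editorial variation, not the viewer's code):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Approximate equality as a ULP distance, mirroring is_approx_equal above.
// Like the original, it mishandles opposite-sign values and NaNs.
inline bool approx_equal_ulps(float x, float y, int32_t max_ulps = 2)
{
    int32_t ix, iy;
    std::memcpy(&ix, &x, sizeof(ix));
    std::memcpy(&iy, &y, sizeof(iy));
    return std::abs(ix - iy) < max_ulps;
}

int main()
{
    float a = 1.0f;
    float b = std::nextafterf(a, 2.0f);   // one representable step above 1.0f
    float c = std::nextafterf(b, 2.0f);   // two steps above
    assert(approx_equal_ulps(a, b));      // 1 ULP apart: treated as equal
    assert(!approx_equal_ulps(a, c));     // 2 ULPs apart: not equal
    return 0;
}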
+ // Add -(0.5 - epsilon) and then round + const U32 zpfp = 0xBEFFFFFF; + S32 result; + __asm { + fld f + fadd dword ptr [zpfp] + fistp result + } + return result; +#else + return (S32)floor(f); +#endif +} + + +inline S32 llceil( F32 f ) +{ + // This could probably be optimized, but this works. + return (S32)ceil(f); +} + + +#ifndef BOGUS_ROUND +// Use this round. Does an arithmetic round (0.5 always rounds up) +inline S32 llround(const F32 val) +{ + return llfloor(val + 0.5f); +} + +#else // BOGUS_ROUND +// Old llround implementation - does banker's round (toward nearest even in the case of a 0.5. +// Not using this because we don't have a consistent implementation on both platforms, use +// llfloor(val + 0.5f), which is consistent on all platforms. +inline S32 llround(const F32 val) +{ + #if LL_WINDOWS + // Note: assumes that the floating point control word is set to rounding mode (the default) + S32 ret_val; + _asm fld val + _asm fistp ret_val; + return ret_val; + #elif LL_LINUX + // Note: assumes that the floating point control word is set + // to rounding mode (the default) + S32 ret_val; + __asm__ __volatile__( "flds %1 \n\t" + "fistpl %0 \n\t" + : "=m" (ret_val) + : "m" (val) ); + return ret_val; + #else + return llfloor(val + 0.5f); + #endif +} + +// A fast arithmentic round on intel, from Laurent de Soras http://ldesoras.free.fr +inline int round_int(double x) +{ + const float round_to_nearest = 0.5f; + int i; + __asm + { + fld x + fadd st, st (0) + fadd round_to_nearest + fistp i + sar i, 1 + } + return (i); +} +#endif // BOGUS_ROUND + +inline F32 llround( F32 val, F32 nearest ) +{ + return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest; +} + +inline F64 llround( F64 val, F64 nearest ) +{ + return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest; +} + +// these provide minimum peak error +// +// avg error = -0.013049 +// peak error = -31.4 dB +// RMS error = -28.1 dB + +const F32 FAST_MAG_ALPHA = 0.960433870103f; +const F32 FAST_MAG_BETA = 0.397824734759f; + +// these provide minimum RMS error +// +// avg error = 0.000003 +// peak error = -32.6 dB +// RMS error = -25.7 dB +// +//const F32 FAST_MAG_ALPHA = 0.948059448969f; +//const F32 FAST_MAG_BETA = 0.392699081699f; + +inline F32 fastMagnitude(F32 a, F32 b) +{ + a = (a > 0) ? a : -a; + b = (b > 0) ? 
b : -b; + return(FAST_MAG_ALPHA * llmax(a,b) + FAST_MAG_BETA * llmin(a,b)); +} + + + +//////////////////// +// +// Fast F32/S32 conversions +// +// Culled from www.stereopsis.com/FPU.html + +const F64 LL_DOUBLE_TO_FIX_MAGIC = 68719476736.0*1.5; //2^36 * 1.5, (52-_shiftamt=36) uses limited precisicion to floor +const S32 LL_SHIFT_AMOUNT = 16; //16.16 fixed point representation, + +// Endian dependent code +#ifdef LL_LITTLE_ENDIAN + #define LL_EXP_INDEX 1 + #define LL_MAN_INDEX 0 +#else + #define LL_EXP_INDEX 0 + #define LL_MAN_INDEX 1 +#endif + +/* Deprecated: use llround(), lltrunc(), or llfloor() instead +// ================================================================================================ +// Real2Int +// ================================================================================================ +inline S32 F64toS32(F64 val) +{ + val = val + LL_DOUBLE_TO_FIX_MAGIC; + return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT; +} + +// ================================================================================================ +// Real2Int +// ================================================================================================ +inline S32 F32toS32(F32 val) +{ + return F64toS32 ((F64)val); +} +*/ + +//////////////////////////////////////////////// +// +// Fast exp and log +// + +// Implementation of fast exp() approximation (from a paper by Nicol N. Schraudolph +// http://www.inf.ethz.ch/~schraudo/pubs/exp.pdf +static union +{ + double d; + struct + { +#ifdef LL_LITTLE_ENDIAN + S32 j, i; +#else + S32 i, j; +#endif + } n; +} LLECO; // not sure what the name means + +#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer +#define LL_EXP_C (60801) // this value of C good for -4 < y < 4 + +#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d) + + + +inline F32 llfastpow(const F32 x, const F32 y) +{ + return (F32)(LL_FAST_EXP(y * log(x))); +} + + +inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs) +{ + // compute the power of ten + F32 bar = 1.f; + for (S32 i = 0; i < sig_figs; i++) + { + bar *= 10.f; + } + + //F32 new_foo = (F32)llround(foo * bar); + // the llround() implementation sucks. Don't us it. + + F32 sign = (foo > 0.f) ? 1.f : -1.f; + F32 new_foo = F32( S64(foo * bar + sign * 0.5f)); + new_foo /= bar; + + return new_foo; +} + +inline F32 lerp(F32 a, F32 b, F32 u) +{ + return a + ((b - a) * u); +} + +inline F32 lerp2d(F32 x00, F32 x01, F32 x10, F32 x11, F32 u, F32 v) +{ + F32 a = x00 + (x01-x00)*u; + F32 b = x10 + (x11-x10)*u; + F32 r = a + (b-a)*v; + return r; +} + +inline F32 ramp(F32 x, F32 a, F32 b) +{ + return (a == b) ? 
0.0f : ((a - x) / (a - b)); +} + +inline F32 rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) +{ + return lerp(y1, y2, ramp(x, x1, x2)); +} + +inline F32 clamp_rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) +{ + if (y1 < y2) + { + return llclamp(rescale(x,x1,x2,y1,y2),y1,y2); + } + else + { + return llclamp(rescale(x,x1,x2,y1,y2),y2,y1); + } +} + + +inline F32 cubic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) +{ + if (x <= x0) + return s0; + + if (x >= x1) + return s1; + + F32 f = (x - x0) / (x1 - x0); + + return s0 + (s1 - s0) * (f * f) * (3.0f - 2.0f * f); +} + +inline F32 cubic_step( F32 x ) +{ + x = llclampf(x); + + return (x * x) * (3.0f - 2.0f * x); +} + +inline F32 quadratic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) +{ + if (x <= x0) + return s0; + + if (x >= x1) + return s1; + + F32 f = (x - x0) / (x1 - x0); + F32 f_squared = f * f; + + return (s0 * (1.f - f_squared)) + ((s1 - s0) * f_squared); +} + +inline F32 llsimple_angle(F32 angle) +{ + while(angle <= -F_PI) + angle += F_TWO_PI; + while(angle > F_PI) + angle -= F_TWO_PI; + return angle; +} + +//SDK - Renamed this to get_lower_power_two, since this is what this actually does. +inline U32 get_lower_power_two(U32 val, U32 max_power_two) +{ + if(!max_power_two) + { + max_power_two = 1 << 31 ; + } + if(max_power_two & (max_power_two - 1)) + { + return 0 ; + } + + for(; val < max_power_two ; max_power_two >>= 1) ; + + return max_power_two ; +} + +// calculate next highest power of two, limited by max_power_two +// This is taken from a brilliant little code snipped on http://acius2.blogspot.com/2007/11/calculating-next-power-of-2.html +// Basically we convert the binary to a solid string of 1's with the same +// number of digits, then add one. We subtract 1 initially to handle +// the case where the number passed in is actually a power of two. +// WARNING: this only works with 32 bit ints. 
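The LL_FAST_EXP macro above is Schraudolph's approximation: build the IEEE-754 bit pattern of roughly e^y directly, by writing a scaled and biased exponent into the high 32 bits of a double. A standalone sketch with the same constants, using memcpy instead of the static union; it assumes a little-endian layout and, like the macro, is only a few-percent-accurate approximation intended for modest y (the comment above suggests -4 < y < 4):

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

inline double fast_exp(double y)
{
    // 2^20 / ln(2) rescales y into exponent-field units; 1072693248 is the
    // double exponent bias (1023 << 20); 60801 is the correction constant
    // from the paper, matching LL_EXP_A and LL_EXP_C above.
    const double a = 1048576.0 / 0.6931471805599453;
    int32_t hi = static_cast<int32_t>(a * y) + (1072693248 - 60801);
    int64_t bits = static_cast<int64_t>(hi) << 32;   // little-endian: the high word holds sign/exponent
    double result;
    std::memcpy(&result, &bits, sizeof(result));
    return result;
}

int main()
{
    for (double y : { -2.0, -0.5, 0.0, 0.5, 2.0 })
    {
        std::printf("exp(%+.1f) = %8.4f  fast_exp = %8.4f\n", y, std::exp(y), fast_exp(y));
    }
    return 0;
}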
+inline U32 get_next_power_two(U32 val, U32 max_power_two) +{ + if(!max_power_two) + { + max_power_two = 1 << 31 ; + } + + if(val >= max_power_two) + { + return max_power_two; + } + + val--; + val = (val >> 1) | val; + val = (val >> 2) | val; + val = (val >> 4) | val; + val = (val >> 8) | val; + val = (val >> 16) | val; + val++; + + return val; +} + +//get the gaussian value given the linear distance from axis x and guassian value o +inline F32 llgaussian(F32 x, F32 o) +{ + return 1.f/(F_SQRT_TWO_PI*o)*powf(F_E, -(x*x)/(2*o*o)); +} + +// Include simd math header +#include "llsimdmath.h" + +#endif diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 59828ae565..432e9fbcd8 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -142,7 +142,7 @@ public: S32 getOctant(const LLVector4a& pos) const //get the octant pos is in { - return pos.greaterThan4(mD[CENTER]).getComparisonMask() & 0x7; + return pos.greaterThan(mD[CENTER]).getGatheredBits() & 0x7; } inline bool isInside(const LLVector4a& pos, const F32& rad) const @@ -157,13 +157,13 @@ public: bool isInside(const LLVector4a& pos) const { - S32 gt = pos.greaterThan4(mD[MAX]).getComparisonMask() & 0x7; + S32 gt = pos.greaterThan(mD[MAX]).getGatheredBits() & 0x7; if (gt) { return false; } - S32 lt = pos.lessEqual4(mD[MIN]).getComparisonMask() & 0x7; + S32 lt = pos.lessEqual(mD[MIN]).getGatheredBits() & 0x7; if (lt) { return false; @@ -206,13 +206,13 @@ public: { const LLVector4a& pos = data->getPositionGroup(); - LLVector4a gt = pos.greaterThan4(center); + LLVector4a gt = pos.greaterThan(center); LLVector4a up; - up.mQ = _mm_and_ps(size.mQ, gt.mQ); + up = _mm_and_ps(size, gt); LLVector4a down; - down.mQ = _mm_andnot_ps(gt.mQ, size.mQ); + down = _mm_andnot_ps(gt, size); center.add(up); center.sub(down); @@ -326,9 +326,8 @@ public: LLVector4a val; val.setSub(center, getCenter()); val.setAbs(val); - LLVector4a app_zero; - app_zero.mQ = F_APPROXIMATELY_ZERO_4A; - S32 lt = val.lessThan4(app_zero).getComparisonMask() & 0x7; + + S32 lt = val.lessThan(LLVector4a::getEpsilon()).getGatheredBits() & 0x7; if( lt == 0x7 ) { @@ -642,7 +641,7 @@ public: LLVector4a val; val.setSub(v, BaseType::mD[BaseType::CENTER]); val.setAbs(val); - S32 lt = val.lessThan4(MAX_MAG).getComparisonMask() & 0x7; + S32 lt = val.lessThan(MAX_MAG).getGatheredBits() & 0x7; if (lt != 0x7) { diff --git a/indra/llmath/llquantize.h b/indra/llmath/llquantize.h index 2192427f07..000d8a060f 100644 --- a/indra/llmath/llquantize.h +++ b/indra/llmath/llquantize.h @@ -1,152 +1,158 @@ -/** - * @file llquantize.h - * @brief useful routines for quantizing floats to various length ints - * and back out again - * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#ifndef LL_LLQUANTIZE_H -#define LL_LLQUANTIZE_H - -const U16 U16MAX = 65535; -const F32 OOU16MAX = 1.f/(F32)(U16MAX); - -const U8 U8MAX = 255; -const F32 OOU8MAX = 1.f/(F32)(U8MAX); - -const U8 FIRSTVALIDCHAR = 54; -const U8 MAXSTRINGVAL = U8MAX - FIRSTVALIDCHAR; //we don't allow newline or null - - -inline U16 F32_to_U16_ROUND(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // round the value. Sreturn the U16 - return (U16)(llround(val*U16MAX)); -} - - -inline U16 F32_to_U16(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // return the U16 - return (U16)(llfloor(val*U16MAX)); -} - -inline F32 U16_to_F32(U16 ival, F32 lower, F32 upper) -{ - F32 val = ival*OOU16MAX; - F32 delta = (upper - lower); - val *= delta; - val += lower; - - F32 max_error = delta*OOU16MAX; - - // make sure that zero's come through as zero - if (fabsf(val) < max_error) - val = 0.f; - - return val; -} - - -inline U8 F32_to_U8_ROUND(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // return the rounded U8 - return (U8)(llround(val*U8MAX)); -} - - -inline U8 F32_to_U8(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // return the U8 - return (U8)(llfloor(val*U8MAX)); -} - -inline F32 U8_to_F32(U8 ival, F32 lower, F32 upper) -{ - F32 val = ival*OOU8MAX; - F32 delta = (upper - lower); - val *= delta; - val += lower; - - F32 max_error = delta*OOU8MAX; - - // make sure that zero's come through as zero - if (fabsf(val) < max_error) - val = 0.f; - - return val; -} - -inline U8 F32_TO_STRING(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); //[lower, upper] - // make sure that the value is positive and normalized to <0, 1> - val -= lower; //[0, upper-lower] - val /= (upper - lower); //[0,1] - val = val * MAXSTRINGVAL; //[0, MAXSTRINGVAL] - val = floor(val + 0.5f); //[0, MAXSTRINGVAL] - - U8 stringVal = (U8)(val) + FIRSTVALIDCHAR; //[FIRSTVALIDCHAR, MAXSTRINGVAL + FIRSTVALIDCHAR] - return stringVal; -} - -inline F32 STRING_TO_F32(U8 ival, F32 lower, F32 upper) -{ - // remove empty space left for NULL, newline, etc. 
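The F32_to_U16_ROUND / U16_to_F32 pair above (and the U8 variants) all follow one pattern: clamp to [lower, upper], normalize to [0, 1], scale by the integer maximum, and on the way back snap anything within one quantization step of zero back to exactly zero. A standalone round trip of the 16-bit case, using plain types rather than the viewer's:

#include <cassert>
#include <cmath>

const unsigned QMAX = 65535;   // stand-in for U16MAX

inline unsigned short quantize16(float val, float lower, float upper)
{
    val = (val < lower) ? lower : (val > upper) ? upper : val;   // clamp
    val = (val - lower) / (upper - lower);                       // normalize to [0, 1]
    return (unsigned short)std::floor(val * QMAX + 0.5f);        // round
}

inline float dequantize16(unsigned short ival, float lower, float upper)
{
    float delta = upper - lower;
    float val = ival * (1.f / QMAX) * delta + lower;
    if (std::fabs(val) < delta / QMAX)   // snap near-zero results to zero, as U16_to_F32 does
        val = 0.f;
    return val;
}

int main()
{
    float orig = 0.3f;
    unsigned short q = quantize16(orig, -1.f, 1.f);
    float back = dequantize16(q, -1.f, 1.f);
    assert(std::fabs(back - orig) <= 2.f / QMAX);   // within one step of the [-1, 1] range
    return 0;
}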
- ival -= FIRSTVALIDCHAR; //[0, MAXSTRINGVAL] - - F32 val = (F32)ival * (1.f / (F32)MAXSTRINGVAL); //[0, 1] - F32 delta = (upper - lower); - val *= delta; //[0, upper - lower] - val += lower; //[lower, upper] - - return val; -} - -#endif +/** + * @file llquantize.h + * @brief useful routines for quantizing floats to various length ints + * and back out again + * + * $LicenseInfo:firstyear=2001&license=viewergpl$ + * + * Copyright (c) 2001-2009, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_LLQUANTIZE_H +#define LL_LLQUANTIZE_H + +const U16 U16MAX = 65535; +LL_ALIGN_16( const F32 F_U16MAX_4A[4] ) = { 65535.f, 65535.f, 65535.f, 65535.f }; + +const F32 OOU16MAX = 1.f/(F32)(U16MAX); +LL_ALIGN_16( const F32 F_OOU16MAX_4A[4] ) = { OOU16MAX, OOU16MAX, OOU16MAX, OOU16MAX }; + +const U8 U8MAX = 255; +LL_ALIGN_16( const F32 F_U8MAX_4A[4] ) = { 255.f, 255.f, 255.f, 255.f }; + +const F32 OOU8MAX = 1.f/(F32)(U8MAX); +LL_ALIGN_16( const F32 F_OOU8MAX_4A[4] ) = { OOU8MAX, OOU8MAX, OOU8MAX, OOU8MAX }; + +const U8 FIRSTVALIDCHAR = 54; +const U8 MAXSTRINGVAL = U8MAX - FIRSTVALIDCHAR; //we don't allow newline or null + + +inline U16 F32_to_U16_ROUND(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // round the value. 
Sreturn the U16 + return (U16)(llround(val*U16MAX)); +} + + +inline U16 F32_to_U16(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // return the U16 + return (U16)(llfloor(val*U16MAX)); +} + +inline F32 U16_to_F32(U16 ival, F32 lower, F32 upper) +{ + F32 val = ival*OOU16MAX; + F32 delta = (upper - lower); + val *= delta; + val += lower; + + F32 max_error = delta*OOU16MAX; + + // make sure that zero's come through as zero + if (fabsf(val) < max_error) + val = 0.f; + + return val; +} + + +inline U8 F32_to_U8_ROUND(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // return the rounded U8 + return (U8)(llround(val*U8MAX)); +} + + +inline U8 F32_to_U8(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // return the U8 + return (U8)(llfloor(val*U8MAX)); +} + +inline F32 U8_to_F32(U8 ival, F32 lower, F32 upper) +{ + F32 val = ival*OOU8MAX; + F32 delta = (upper - lower); + val *= delta; + val += lower; + + F32 max_error = delta*OOU8MAX; + + // make sure that zero's come through as zero + if (fabsf(val) < max_error) + val = 0.f; + + return val; +} + +inline U8 F32_TO_STRING(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); //[lower, upper] + // make sure that the value is positive and normalized to <0, 1> + val -= lower; //[0, upper-lower] + val /= (upper - lower); //[0,1] + val = val * MAXSTRINGVAL; //[0, MAXSTRINGVAL] + val = floor(val + 0.5f); //[0, MAXSTRINGVAL] + + U8 stringVal = (U8)(val) + FIRSTVALIDCHAR; //[FIRSTVALIDCHAR, MAXSTRINGVAL + FIRSTVALIDCHAR] + return stringVal; +} + +inline F32 STRING_TO_F32(U8 ival, F32 lower, F32 upper) +{ + // remove empty space left for NULL, newline, etc. + ival -= FIRSTVALIDCHAR; //[0, MAXSTRINGVAL] + + F32 val = (F32)ival * (1.f / (F32)MAXSTRINGVAL); //[0, 1] + F32 delta = (upper - lower); + val *= delta; //[0, upper - lower] + val += lower; //[lower, upper] + + return val; +} + +#endif diff --git a/indra/llmath/llquaternion.cpp b/indra/llmath/llquaternion.cpp index fdcc19d657..efdc10e2c6 100644 --- a/indra/llmath/llquaternion.cpp +++ b/indra/llmath/llquaternion.cpp @@ -1,960 +1,961 @@ -/** - * @file llquaternion.cpp - * @brief LLQuaternion class implementation. - * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#include "linden_common.h" - -#include "llquaternion.h" - -#include "llmath.h" // for F_PI -//#include "vmath.h" -#include "v3math.h" -#include "v3dmath.h" -#include "v4math.h" -#include "m4math.h" -#include "m3math.h" -#include "llquantize.h" - -// WARNING: Don't use this for global const definitions! using this -// at the top of a *.cpp file might not give you what you think. -const LLQuaternion LLQuaternion::DEFAULT; - -// Constructors - -LLQuaternion::LLQuaternion(const LLMatrix4 &mat) -{ - *this = mat.quaternion(); - normalize(); -} - -LLQuaternion::LLQuaternion(const LLMatrix3 &mat) -{ - *this = mat.quaternion(); - normalize(); -} - -LLQuaternion::LLQuaternion(F32 angle, const LLVector4 &vec) -{ - LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX] * s; - mQ[VY] = v.mV[VY] * s; - mQ[VZ] = v.mV[VZ] * s; - mQ[VW] = c; - normalize(); -} - -LLQuaternion::LLQuaternion(F32 angle, const LLVector3 &vec) -{ - LLVector3 v(vec); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX] * s; - mQ[VY] = v.mV[VY] * s; - mQ[VZ] = v.mV[VZ] * s; - mQ[VW] = c; - normalize(); -} - -LLQuaternion::LLQuaternion(const LLVector3 &x_axis, - const LLVector3 &y_axis, - const LLVector3 &z_axis) -{ - LLMatrix3 mat; - mat.setRows(x_axis, y_axis, z_axis); - *this = mat.quaternion(); - normalize(); -} - -// Quatizations -void LLQuaternion::quantize16(F32 lower, F32 upper) -{ - F32 x = mQ[VX]; - F32 y = mQ[VY]; - F32 z = mQ[VZ]; - F32 s = mQ[VS]; - - x = U16_to_F32(F32_to_U16_ROUND(x, lower, upper), lower, upper); - y = U16_to_F32(F32_to_U16_ROUND(y, lower, upper), lower, upper); - z = U16_to_F32(F32_to_U16_ROUND(z, lower, upper), lower, upper); - s = U16_to_F32(F32_to_U16_ROUND(s, lower, upper), lower, upper); - - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = s; - - normalize(); -} - -void LLQuaternion::quantize8(F32 lower, F32 upper) -{ - mQ[VX] = U8_to_F32(F32_to_U8_ROUND(mQ[VX], lower, upper), lower, upper); - mQ[VY] = U8_to_F32(F32_to_U8_ROUND(mQ[VY], lower, upper), lower, upper); - mQ[VZ] = U8_to_F32(F32_to_U8_ROUND(mQ[VZ], lower, upper), lower, upper); - mQ[VS] = U8_to_F32(F32_to_U8_ROUND(mQ[VS], lower, upper), lower, upper); - - normalize(); -} - -// LLVector3 Magnitude and Normalization Functions - - -// Set LLQuaternion routines - -const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, F32 x, F32 y, F32 z) -{ - LLVector3 vec(x, y, z); - vec.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = vec.mV[VX]*s; - mQ[VY] = vec.mV[VY]*s; - mQ[VZ] = vec.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector3 &vec) -{ - LLVector3 v(vec); - v.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = 
v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector4 &vec) -{ - LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setEulerAngles(F32 roll, F32 pitch, F32 yaw) -{ - LLMatrix3 rot_mat(roll, pitch, yaw); - rot_mat.orthogonalize(); - *this = rot_mat.quaternion(); - - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::set(const LLMatrix3 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::set(const LLMatrix4 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::setQuat(F32 angle, F32 x, F32 y, F32 z) -{ - LLVector3 vec(x, y, z); - vec.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = vec.mV[VX]*s; - mQ[VY] = vec.mV[VY]*s; - mQ[VZ] = vec.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector3 &vec) -{ - LLVector3 v(vec); - v.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector4 &vec) -{ - LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(F32 roll, F32 pitch, F32 yaw) -{ - LLMatrix3 rot_mat(roll, pitch, yaw); - rot_mat.orthogonalize(); - *this = rot_mat.quaternion(); - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(const LLMatrix3 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(const LLMatrix4 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -//#if 1 -// // NOTE: LLQuaternion's are actually inverted with respect to -// // the matrices, so this code also assumes inverted quaternions -// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied -// // in reverse order (yaw,pitch,roll). 
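All of the constructors and setAngleAxis / setQuat overloads in this block reduce to a single identity: a rotation by some angle about an axis is the quaternion (axis * sin(angle/2), cos(angle/2)), with the axis normalized first. A condensed standalone version using illustrative types rather than LLQuaternion:

#include <cassert>
#include <cmath>

struct Quat { float x, y, z, w; };

// Axis-angle to quaternion, the half-angle form used by the code above.
// The axis does not need to be unit length; it is normalized here.
inline Quat from_axis_angle(float angle, float ax, float ay, float az)
{
    float len = std::sqrt(ax * ax + ay * ay + az * az);
    float s = std::sin(angle * 0.5f) / len;
    return Quat{ ax * s, ay * s, az * s, std::cos(angle * 0.5f) };
}

int main()
{
    // 180 degrees about +z should give (0, 0, 1, 0) up to rounding,
    // even when the axis is passed in at a non-unit length.
    const float pi = 3.14159265358979f;
    Quat q = from_axis_angle(pi, 0.f, 0.f, 2.f);
    assert(std::fabs(q.z - 1.f) < 1e-5f && std::fabs(q.w) < 1e-5f);
    return 0;
}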
-// F64 cosX = cos(roll); -// F64 cosY = cos(pitch); -// F64 cosZ = cos(yaw); -// -// F64 sinX = sin(roll); -// F64 sinY = sin(pitch); -// F64 sinZ = sin(yaw); -// -// mQ[VW] = (F32)sqrt(cosY*cosZ - sinX*sinY*sinZ + cosX*cosZ + cosX*cosY + 1.0)*.5; -// if (fabs(mQ[VW]) < F_APPROXIMATELY_ZERO) -// { -// // null rotation, any axis will do -// mQ[VX] = 0.0f; -// mQ[VY] = 1.0f; -// mQ[VZ] = 0.0f; -// } -// else -// { -// F32 inv_s = 1.0f / (4.0f * mQ[VW]); -// mQ[VX] = (F32)-(-sinX*cosY - cosX*sinY*sinZ - sinX*cosZ) * inv_s; -// mQ[VY] = (F32)-(-cosX*sinY*cosZ + sinX*sinZ - sinY) * inv_s; -// mQ[VZ] = (F32)-(-cosY*sinZ - sinX*sinY*cosZ - cosX*sinZ) * inv_s; -// } -// -//#else // This only works on a certain subset of roll/pitch/yaw -// -// F64 cosX = cosf(roll/2.0); -// F64 cosY = cosf(pitch/2.0); -// F64 cosZ = cosf(yaw/2.0); -// -// F64 sinX = sinf(roll/2.0); -// F64 sinY = sinf(pitch/2.0); -// F64 sinZ = sinf(yaw/2.0); -// -// mQ[VW] = (F32)(cosX*cosY*cosZ + sinX*sinY*sinZ); -// mQ[VX] = (F32)(sinX*cosY*cosZ - cosX*sinY*sinZ); -// mQ[VY] = (F32)(cosX*sinY*cosZ + sinX*cosY*sinZ); -// mQ[VZ] = (F32)(cosX*cosY*sinZ - sinX*sinY*cosZ); -//#endif -// -// normalize(); -// return (*this); -} - -// SJB: This code is correct for a logicly stored (non-transposed) matrix; -// Our matrices are stored transposed, OpenGL style, so this generates the -// INVERSE matrix, or the CORRECT matrix form an INVERSE quaternion. -// Because we use similar logic in LLMatrix3::quaternion(), -// we are internally consistant so everything works OK :) -LLMatrix3 LLQuaternion::getMatrix3(void) const -{ - LLMatrix3 mat; - F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; - - xx = mQ[VX] * mQ[VX]; - xy = mQ[VX] * mQ[VY]; - xz = mQ[VX] * mQ[VZ]; - xw = mQ[VX] * mQ[VW]; - - yy = mQ[VY] * mQ[VY]; - yz = mQ[VY] * mQ[VZ]; - yw = mQ[VY] * mQ[VW]; - - zz = mQ[VZ] * mQ[VZ]; - zw = mQ[VZ] * mQ[VW]; - - mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); - mat.mMatrix[0][1] = 2.f * ( xy + zw ); - mat.mMatrix[0][2] = 2.f * ( xz - yw ); - - mat.mMatrix[1][0] = 2.f * ( xy - zw ); - mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); - mat.mMatrix[1][2] = 2.f * ( yz + xw ); - - mat.mMatrix[2][0] = 2.f * ( xz + yw ); - mat.mMatrix[2][1] = 2.f * ( yz - xw ); - mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); - - return mat; -} - -LLMatrix4 LLQuaternion::getMatrix4(void) const -{ - LLMatrix4 mat; - F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; - - xx = mQ[VX] * mQ[VX]; - xy = mQ[VX] * mQ[VY]; - xz = mQ[VX] * mQ[VZ]; - xw = mQ[VX] * mQ[VW]; - - yy = mQ[VY] * mQ[VY]; - yz = mQ[VY] * mQ[VZ]; - yw = mQ[VY] * mQ[VW]; - - zz = mQ[VZ] * mQ[VZ]; - zw = mQ[VZ] * mQ[VW]; - - mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); - mat.mMatrix[0][1] = 2.f * ( xy + zw ); - mat.mMatrix[0][2] = 2.f * ( xz - yw ); - - mat.mMatrix[1][0] = 2.f * ( xy - zw ); - mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); - mat.mMatrix[1][2] = 2.f * ( yz + xw ); - - mat.mMatrix[2][0] = 2.f * ( xz + yw ); - mat.mMatrix[2][1] = 2.f * ( yz - xw ); - mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); - - // TODO -- should we set the translation portion to zero? - - return mat; -} - - - - -// Other useful methods - - -// calculate the shortest rotation from a to b -void LLQuaternion::shortestArc(const LLVector3 &a, const LLVector3 &b) -{ - // Make a local copy of both vectors. - LLVector3 vec_a = a; - LLVector3 vec_b = b; - - // Make sure neither vector is zero length. Also normalize - // the vectors while we are at it. 
- F32 vec_a_mag = vec_a.normalize(); - F32 vec_b_mag = vec_b.normalize(); - if (vec_a_mag < F_APPROXIMATELY_ZERO || - vec_b_mag < F_APPROXIMATELY_ZERO) - { - // Can't calculate a rotation from this. - // Just return ZERO_ROTATION instead. - loadIdentity(); - return; - } - - // Create an axis to rotate around, and the cos of the angle to rotate. - LLVector3 axis = vec_a % vec_b; - F32 cos_theta = vec_a * vec_b; - - // Check the angle between the vectors to see if they are parallel or anti-parallel. - if (cos_theta > 1.0 - F_APPROXIMATELY_ZERO) - { - // a and b are parallel. No rotation is necessary. - loadIdentity(); - } - else if (cos_theta < -1.0 + F_APPROXIMATELY_ZERO) - { - // a and b are anti-parallel. - // Rotate 180 degrees around some orthogonal axis. - // Find the projection of the x-axis onto a, and try - // using the vector between the projection and the x-axis - // as the orthogonal axis. - LLVector3 proj = vec_a.mV[VX] / (vec_a * vec_a) * vec_a; - LLVector3 ortho_axis(1.f, 0.f, 0.f); - ortho_axis -= proj; - - // Turn this into an orthonormal axis. - F32 ortho_length = ortho_axis.normalize(); - // If the axis' length is 0, then our guess at an orthogonal axis - // was wrong (a is parallel to the x-axis). - if (ortho_length < F_APPROXIMATELY_ZERO) - { - // Use the z-axis instead. - ortho_axis.setVec(0.f, 0.f, 1.f); - } - - // Construct a quaternion from this orthonormal axis. - mQ[VX] = ortho_axis.mV[VX]; - mQ[VY] = ortho_axis.mV[VY]; - mQ[VZ] = ortho_axis.mV[VZ]; - mQ[VW] = 0.f; - } - else - { - // a and b are NOT parallel or anti-parallel. - // Return the rotation between these vectors. - F32 theta = (F32)acos(cos_theta); - - setAngleAxis(theta, axis); - } -} - -// constrains rotation to a cone angle specified in radians -const LLQuaternion &LLQuaternion::constrain(F32 radians) -{ - const F32 cos_angle_lim = cosf( radians/2 ); // mQ[VW] limit - const F32 sin_angle_lim = sinf( radians/2 ); // rotation axis length limit - - if (mQ[VW] < 0.f) - { - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - mQ[VW] *= -1.f; - } - - // if rotation angle is greater than limit (cos is less than limit) - if( mQ[VW] < cos_angle_lim ) - { - mQ[VW] = cos_angle_lim; - F32 axis_len = sqrtf( mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] ); // sin(theta/2) - F32 axis_mult_fact = sin_angle_lim / axis_len; - mQ[VX] *= axis_mult_fact; - mQ[VY] *= axis_mult_fact; - mQ[VZ] *= axis_mult_fact; - } - - return *this; -} - -// Operators - -std::ostream& operator<<(std::ostream &s, const LLQuaternion &a) -{ - s << "{ " - << a.mQ[VX] << ", " << a.mQ[VY] << ", " << a.mQ[VZ] << ", " << a.mQ[VW] - << " }"; - return s; -} - - -// Does NOT renormalize the result -LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b) -{ -// LLQuaternion::mMultCount++; - - LLQuaternion q( - b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], - b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], - b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], - b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] - ); - return q; -} - -/* -LLMatrix4 operator*(const LLMatrix4 &m, const LLQuaternion &q) -{ - LLMatrix4 qmat(q); - return (m*qmat); -} -*/ - - - -LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot) -{ - F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; - F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; - F32 ry = rot.mQ[VW] * a.mV[VY] + 
rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; - F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; - - F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; - F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; - F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; - - return LLVector4(nx, ny, nz, a.mV[VW]); -} - -LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot) -{ - F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; - F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; - F32 ry = rot.mQ[VW] * a.mV[VY] + rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; - F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; - - F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; - F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; - F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; - - return LLVector3(nx, ny, nz); -} - -LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot) -{ - F64 rw = - rot.mQ[VX] * a.mdV[VX] - rot.mQ[VY] * a.mdV[VY] - rot.mQ[VZ] * a.mdV[VZ]; - F64 rx = rot.mQ[VW] * a.mdV[VX] + rot.mQ[VY] * a.mdV[VZ] - rot.mQ[VZ] * a.mdV[VY]; - F64 ry = rot.mQ[VW] * a.mdV[VY] + rot.mQ[VZ] * a.mdV[VX] - rot.mQ[VX] * a.mdV[VZ]; - F64 rz = rot.mQ[VW] * a.mdV[VZ] + rot.mQ[VX] * a.mdV[VY] - rot.mQ[VY] * a.mdV[VX]; - - F64 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; - F64 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; - F64 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; - - return LLVector3d(nx, ny, nz); -} - -F32 dot(const LLQuaternion &a, const LLQuaternion &b) -{ - return a.mQ[VX] * b.mQ[VX] + - a.mQ[VY] * b.mQ[VY] + - a.mQ[VZ] * b.mQ[VZ] + - a.mQ[VW] * b.mQ[VW]; -} - -// DEMO HACK: This lerp is probably inocrrect now due intermediate normalization -// it should look more like the lerp below -#if 0 -// linear interpolation -LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) -{ - LLQuaternion r; - r = t * (q - p) + p; - r.normalize(); - return r; -} -#endif - -// lerp from identity to q -LLQuaternion lerp(F32 t, const LLQuaternion &q) -{ - LLQuaternion r; - r.mQ[VX] = t * q.mQ[VX]; - r.mQ[VY] = t * q.mQ[VY]; - r.mQ[VZ] = t * q.mQ[VZ]; - r.mQ[VW] = t * (q.mQ[VZ] - 1.f) + 1.f; - r.normalize(); - return r; -} - -LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) -{ - LLQuaternion r; - F32 inv_t; - - inv_t = 1.f - t; - - r.mQ[VX] = t * q.mQ[VX] + (inv_t * p.mQ[VX]); - r.mQ[VY] = t * q.mQ[VY] + (inv_t * p.mQ[VY]); - r.mQ[VZ] = t * q.mQ[VZ] + (inv_t * p.mQ[VZ]); - r.mQ[VW] = t * q.mQ[VW] + (inv_t * p.mQ[VW]); - r.normalize(); - return r; -} - - -// spherical linear interpolation -LLQuaternion slerp( F32 u, const LLQuaternion &a, const LLQuaternion &b ) -{ - // cosine theta = dot product of a and b - F32 cos_t = a.mQ[0]*b.mQ[0] + a.mQ[1]*b.mQ[1] + a.mQ[2]*b.mQ[2] + a.mQ[3]*b.mQ[3]; - - // if b is on opposite hemisphere from a, use -a instead - int bflip; - if (cos_t < 0.0f) - { - cos_t = -cos_t; - bflip = TRUE; - } - else - bflip = FALSE; - - // if B is (within precision limits) the same as A, - // just linear interpolate between A and B. 
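The interpolation routines in this stretch (lerp, slerp, nlerp) are the standard quaternion blends: the slerp being removed here negates one weight when the dot product is negative so the interpolation takes the shorter arc, and falls back to a plain lerp when the inputs are nearly identical. A compact standalone equivalent with illustrative types:

#include <cassert>
#include <cmath>

struct Quat { float x, y, z, w; };

inline Quat slerp(float u, const Quat& a, const Quat& b)
{
    float cos_t = a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
    float sign = 1.f;
    if (cos_t < 0.f) { cos_t = -cos_t; sign = -1.f; }   // take the shorter arc

    float alpha, beta;
    if (1.f - cos_t < 1e-5f)
    {
        beta = 1.f - u;      // nearly identical inputs: plain lerp is safer
        alpha = u;
    }
    else
    {
        float theta = std::acos(cos_t);
        float sin_t = std::sin(theta);
        beta  = std::sin(theta - u * theta) / sin_t;
        alpha = std::sin(u * theta) / sin_t;
    }
    beta *= sign;
    return Quat{ beta * a.x + alpha * b.x, beta * a.y + alpha * b.y,
                 beta * a.z + alpha * b.z, beta * a.w + alpha * b.w };
}

int main()
{
    Quat identity{ 0.f, 0.f, 0.f, 1.f };
    Quat z90{ 0.f, 0.f, 0.70710678f, 0.70710678f };   // 90 degrees about +z
    Quat half = slerp(0.5f, identity, z90);           // expect 45 degrees about +z
    assert(std::fabs(half.z - std::sin(3.14159265f / 8.f)) < 1e-4f);
    assert(std::fabs(half.w - std::cos(3.14159265f / 8.f)) < 1e-4f);
    return 0;
}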
- F32 alpha; // interpolant - F32 beta; // 1 - interpolant - if (1.0f - cos_t < 0.00001f) - { - beta = 1.0f - u; - alpha = u; - } - else - { - F32 theta = acosf(cos_t); - F32 sin_t = sinf(theta); - beta = sinf(theta - u*theta) / sin_t; - alpha = sinf(u*theta) / sin_t; - } - - if (bflip) - beta = -beta; - - // interpolate - LLQuaternion ret; - ret.mQ[0] = beta*a.mQ[0] + alpha*b.mQ[0]; - ret.mQ[1] = beta*a.mQ[1] + alpha*b.mQ[1]; - ret.mQ[2] = beta*a.mQ[2] + alpha*b.mQ[2]; - ret.mQ[3] = beta*a.mQ[3] + alpha*b.mQ[3]; - - return ret; -} - -// lerp whenever possible -LLQuaternion nlerp(F32 t, const LLQuaternion &a, const LLQuaternion &b) -{ - if (dot(a, b) < 0.f) - { - return slerp(t, a, b); - } - else - { - return lerp(t, a, b); - } -} - -LLQuaternion nlerp(F32 t, const LLQuaternion &q) -{ - if (q.mQ[VW] < 0.f) - { - return slerp(t, q); - } - else - { - return lerp(t, q); - } -} - -// slerp from identity quaternion to another quaternion -LLQuaternion slerp(F32 t, const LLQuaternion &q) -{ - F32 c = q.mQ[VW]; - if (1.0f == t || 1.0f == c) - { - // the trivial cases - return q; - } - - LLQuaternion r; - F32 s, angle, stq, stp; - - s = (F32) sqrt(1.f - c*c); - - if (c < 0.0f) - { - // when c < 0.0 then theta > PI/2 - // since quat and -quat are the same rotation we invert one of - // p or q to reduce unecessary spins - // A equivalent way to do it is to convert acos(c) as if it had - // been negative, and to negate stp - angle = (F32) acos(-c); - stp = -(F32) sin(angle * (1.f - t)); - stq = (F32) sin(angle * t); - } - else - { - angle = (F32) acos(c); - stp = (F32) sin(angle * (1.f - t)); - stq = (F32) sin(angle * t); - } - - r.mQ[VX] = (q.mQ[VX] * stq) / s; - r.mQ[VY] = (q.mQ[VY] * stq) / s; - r.mQ[VZ] = (q.mQ[VZ] * stq) / s; - r.mQ[VW] = (stp + q.mQ[VW] * stq) / s; - - return r; -} - -LLQuaternion mayaQ(F32 xRot, F32 yRot, F32 zRot, LLQuaternion::Order order) -{ - LLQuaternion xQ( xRot*DEG_TO_RAD, LLVector3(1.0f, 0.0f, 0.0f) ); - LLQuaternion yQ( yRot*DEG_TO_RAD, LLVector3(0.0f, 1.0f, 0.0f) ); - LLQuaternion zQ( zRot*DEG_TO_RAD, LLVector3(0.0f, 0.0f, 1.0f) ); - LLQuaternion ret; - switch( order ) - { - case LLQuaternion::XYZ: - ret = xQ * yQ * zQ; - break; - case LLQuaternion::YZX: - ret = yQ * zQ * xQ; - break; - case LLQuaternion::ZXY: - ret = zQ * xQ * yQ; - break; - case LLQuaternion::XZY: - ret = xQ * zQ * yQ; - break; - case LLQuaternion::YXZ: - ret = yQ * xQ * zQ; - break; - case LLQuaternion::ZYX: - ret = zQ * yQ * xQ; - break; - } - return ret; -} - -const char *OrderToString( const LLQuaternion::Order order ) -{ - const char *p = NULL; - switch( order ) - { - default: - case LLQuaternion::XYZ: - p = "XYZ"; - break; - case LLQuaternion::YZX: - p = "YZX"; - break; - case LLQuaternion::ZXY: - p = "ZXY"; - break; - case LLQuaternion::XZY: - p = "XZY"; - break; - case LLQuaternion::YXZ: - p = "YXZ"; - break; - case LLQuaternion::ZYX: - p = "ZYX"; - break; - } - return p; -} - -LLQuaternion::Order StringToOrder( const char *str ) -{ - if (strncmp(str, "XYZ", 3)==0 || strncmp(str, "xyz", 3)==0) - return LLQuaternion::XYZ; - - if (strncmp(str, "YZX", 3)==0 || strncmp(str, "yzx", 3)==0) - return LLQuaternion::YZX; - - if (strncmp(str, "ZXY", 3)==0 || strncmp(str, "zxy", 3)==0) - return LLQuaternion::ZXY; - - if (strncmp(str, "XZY", 3)==0 || strncmp(str, "xzy", 3)==0) - return LLQuaternion::XZY; - - if (strncmp(str, "YXZ", 3)==0 || strncmp(str, "yxz", 3)==0) - return LLQuaternion::YXZ; - - if (strncmp(str, "ZYX", 3)==0 || strncmp(str, "zyx", 3)==0) - return LLQuaternion::ZYX; - - return 
LLQuaternion::XYZ; -} - -void LLQuaternion::getAngleAxis(F32* angle, LLVector3 &vec) const -{ - F32 cos_a = mQ[VW]; - if (cos_a > 1.0f) cos_a = 1.0f; - if (cos_a < -1.0f) cos_a = -1.0f; - - F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); - - if ( fabs( sin_a ) < 0.0005f ) - sin_a = 1.0f; - else - sin_a = 1.f/sin_a; - - F32 temp_angle = 2.0f * (F32) acos( cos_a ); - if (temp_angle > F_PI) - { - // The (angle,axis) pair should never have angles outside [PI, -PI] - // since we want the _shortest_ (angle,axis) solution. - // Since acos is defined for [0, PI], and we multiply by 2.0, we - // can push the angle outside the acceptible range. - // When this happens we set the angle to the other portion of a - // full 2PI rotation, and negate the axis, which reverses the - // direction of the rotation (by the right-hand rule). - *angle = 2.f * F_PI - temp_angle; - vec.mV[VX] = - mQ[VX] * sin_a; - vec.mV[VY] = - mQ[VY] * sin_a; - vec.mV[VZ] = - mQ[VZ] * sin_a; - } - else - { - *angle = temp_angle; - vec.mV[VX] = mQ[VX] * sin_a; - vec.mV[VY] = mQ[VY] * sin_a; - vec.mV[VZ] = mQ[VZ] * sin_a; - } -} - - -// quaternion does not need to be normalized -void LLQuaternion::getEulerAngles(F32 *roll, F32 *pitch, F32 *yaw) const -{ - LLMatrix3 rot_mat(*this); - rot_mat.orthogonalize(); - rot_mat.getEulerAngles(roll, pitch, yaw); - -// // NOTE: LLQuaternion's are actually inverted with respect to -// // the matrices, so this code also assumes inverted quaternions -// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied -// // in reverse order (yaw,pitch,roll). -// F32 x = -mQ[VX], y = -mQ[VY], z = -mQ[VZ], w = mQ[VW]; -// F64 m20 = 2.0*(x*z-y*w); -// if (1.0f - fabsf(m20) < F_APPROXIMATELY_ZERO) -// { -// *roll = 0.0f; -// *pitch = (F32)asin(m20); -// *yaw = (F32)atan2(2.0*(x*y-z*w), 1.0 - 2.0*(x*x+z*z)); -// } -// else -// { -// *roll = (F32)atan2(-2.0*(y*z+x*w), 1.0-2.0*(x*x+y*y)); -// *pitch = (F32)asin(m20); -// *yaw = (F32)atan2(-2.0*(x*y+z*w), 1.0-2.0*(y*y+z*z)); -// } -} - -// Saves space by using the fact that our quaternions are normalized -LLVector3 LLQuaternion::packToVector3() const -{ - if( mQ[VW] >= 0 ) - { - return LLVector3( mQ[VX], mQ[VY], mQ[VZ] ); - } - else - { - return LLVector3( -mQ[VX], -mQ[VY], -mQ[VZ] ); - } -} - -// Saves space by using the fact that our quaternions are normalized -void LLQuaternion::unpackFromVector3( const LLVector3& vec ) -{ - mQ[VX] = vec.mV[VX]; - mQ[VY] = vec.mV[VY]; - mQ[VZ] = vec.mV[VZ]; - F32 t = 1.f - vec.magVecSquared(); - if( t > 0 ) - { - mQ[VW] = sqrt( t ); - } - else - { - // Need this to avoid trying to find the square root of a negative number due - // to floating point error. - mQ[VW] = 0; - } -} - -BOOL LLQuaternion::parseQuat(const std::string& buf, LLQuaternion* value) -{ - if( buf.empty() || value == NULL) - { - return FALSE; - } - - LLQuaternion quat; - S32 count = sscanf( buf.c_str(), "%f %f %f %f", quat.mQ + 0, quat.mQ + 1, quat.mQ + 2, quat.mQ + 3 ); - if( 4 == count ) - { - value->set( quat ); - return TRUE; - } - - return FALSE; -} - - -// End +/** + * @file llquaternion.cpp + * @brief LLQuaternion class implementation. + * + * $LicenseInfo:firstyear=2000&license=viewergpl$ + * + * Copyright (c) 2000-2009, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#include "linden_common.h" + +#include "llmath.h" // for F_PI + +#include "llquaternion.h" + +//#include "vmath.h" +#include "v3math.h" +#include "v3dmath.h" +#include "v4math.h" +#include "m4math.h" +#include "m3math.h" +#include "llquantize.h" + +// WARNING: Don't use this for global const definitions! using this +// at the top of a *.cpp file might not give you what you think. +const LLQuaternion LLQuaternion::DEFAULT; + +// Constructors + +LLQuaternion::LLQuaternion(const LLMatrix4 &mat) +{ + *this = mat.quaternion(); + normalize(); +} + +LLQuaternion::LLQuaternion(const LLMatrix3 &mat) +{ + *this = mat.quaternion(); + normalize(); +} + +LLQuaternion::LLQuaternion(F32 angle, const LLVector4 &vec) +{ + LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX] * s; + mQ[VY] = v.mV[VY] * s; + mQ[VZ] = v.mV[VZ] * s; + mQ[VW] = c; + normalize(); +} + +LLQuaternion::LLQuaternion(F32 angle, const LLVector3 &vec) +{ + LLVector3 v(vec); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX] * s; + mQ[VY] = v.mV[VY] * s; + mQ[VZ] = v.mV[VZ] * s; + mQ[VW] = c; + normalize(); +} + +LLQuaternion::LLQuaternion(const LLVector3 &x_axis, + const LLVector3 &y_axis, + const LLVector3 &z_axis) +{ + LLMatrix3 mat; + mat.setRows(x_axis, y_axis, z_axis); + *this = mat.quaternion(); + normalize(); +} + +// Quatizations +void LLQuaternion::quantize16(F32 lower, F32 upper) +{ + F32 x = mQ[VX]; + F32 y = mQ[VY]; + F32 z = mQ[VZ]; + F32 s = mQ[VS]; + + x = U16_to_F32(F32_to_U16_ROUND(x, lower, upper), lower, upper); + y = U16_to_F32(F32_to_U16_ROUND(y, lower, upper), lower, upper); + z = U16_to_F32(F32_to_U16_ROUND(z, lower, upper), lower, upper); + s = U16_to_F32(F32_to_U16_ROUND(s, lower, upper), lower, upper); + + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = s; + + normalize(); +} + +void LLQuaternion::quantize8(F32 lower, F32 upper) +{ + mQ[VX] = U8_to_F32(F32_to_U8_ROUND(mQ[VX], lower, upper), lower, upper); + mQ[VY] = U8_to_F32(F32_to_U8_ROUND(mQ[VY], lower, upper), lower, upper); + mQ[VZ] = U8_to_F32(F32_to_U8_ROUND(mQ[VZ], lower, upper), lower, upper); + mQ[VS] = U8_to_F32(F32_to_U8_ROUND(mQ[VS], lower, upper), lower, upper); + + normalize(); +} + +// LLVector3 Magnitude and Normalization 
Functions + + +// Set LLQuaternion routines + +const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, F32 x, F32 y, F32 z) +{ + LLVector3 vec(x, y, z); + vec.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = vec.mV[VX]*s; + mQ[VY] = vec.mV[VY]*s; + mQ[VZ] = vec.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector3 &vec) +{ + LLVector3 v(vec); + v.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector4 &vec) +{ + LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setEulerAngles(F32 roll, F32 pitch, F32 yaw) +{ + LLMatrix3 rot_mat(roll, pitch, yaw); + rot_mat.orthogonalize(); + *this = rot_mat.quaternion(); + + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::set(const LLMatrix3 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::set(const LLMatrix4 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::setQuat(F32 angle, F32 x, F32 y, F32 z) +{ + LLVector3 vec(x, y, z); + vec.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = vec.mV[VX]*s; + mQ[VY] = vec.mV[VY]*s; + mQ[VZ] = vec.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector3 &vec) +{ + LLVector3 v(vec); + v.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector4 &vec) +{ + LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(F32 roll, F32 pitch, F32 yaw) +{ + LLMatrix3 rot_mat(roll, pitch, yaw); + rot_mat.orthogonalize(); + *this = rot_mat.quaternion(); + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(const LLMatrix3 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(const LLMatrix4 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +//#if 1 +// // NOTE: LLQuaternion's are actually inverted with respect to +// // the matrices, so this code also assumes inverted quaternions +// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied +// // in reverse order (yaw,pitch,roll). 
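//	(Illustrative aside added for clarity; not part of the original Linden code.)
//	The disabled block below derives the quaternion directly from trig on the
//	Euler angles, whereas the live setEulerAngles()/setQuat(roll, pitch, yaw)
//	implementations above go through an LLMatrix3. For reference, the plain
//	half-angle product form, assuming the same axis order and sign conventions
//	as the #else branch further down (which the original comments say only
//	covers a subset of roll/pitch/yaw), is:
//
//	    F32 cr = cosf(roll  * 0.5f), sr = sinf(roll  * 0.5f);
//	    F32 cp = cosf(pitch * 0.5f), sp = sinf(pitch * 0.5f);
//	    F32 cy = cosf(yaw   * 0.5f), sy = sinf(yaw   * 0.5f);
//
//	    mQ[VW] = cr*cp*cy + sr*sp*sy;
//	    mQ[VX] = sr*cp*cy - cr*sp*sy;
//	    mQ[VY] = cr*sp*cy + sr*cp*sy;
//	    mQ[VZ] = cr*cp*sy - sr*sp*cy;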
+// F64 cosX = cos(roll); +// F64 cosY = cos(pitch); +// F64 cosZ = cos(yaw); +// +// F64 sinX = sin(roll); +// F64 sinY = sin(pitch); +// F64 sinZ = sin(yaw); +// +// mQ[VW] = (F32)sqrt(cosY*cosZ - sinX*sinY*sinZ + cosX*cosZ + cosX*cosY + 1.0)*.5; +// if (fabs(mQ[VW]) < F_APPROXIMATELY_ZERO) +// { +// // null rotation, any axis will do +// mQ[VX] = 0.0f; +// mQ[VY] = 1.0f; +// mQ[VZ] = 0.0f; +// } +// else +// { +// F32 inv_s = 1.0f / (4.0f * mQ[VW]); +// mQ[VX] = (F32)-(-sinX*cosY - cosX*sinY*sinZ - sinX*cosZ) * inv_s; +// mQ[VY] = (F32)-(-cosX*sinY*cosZ + sinX*sinZ - sinY) * inv_s; +// mQ[VZ] = (F32)-(-cosY*sinZ - sinX*sinY*cosZ - cosX*sinZ) * inv_s; +// } +// +//#else // This only works on a certain subset of roll/pitch/yaw +// +// F64 cosX = cosf(roll/2.0); +// F64 cosY = cosf(pitch/2.0); +// F64 cosZ = cosf(yaw/2.0); +// +// F64 sinX = sinf(roll/2.0); +// F64 sinY = sinf(pitch/2.0); +// F64 sinZ = sinf(yaw/2.0); +// +// mQ[VW] = (F32)(cosX*cosY*cosZ + sinX*sinY*sinZ); +// mQ[VX] = (F32)(sinX*cosY*cosZ - cosX*sinY*sinZ); +// mQ[VY] = (F32)(cosX*sinY*cosZ + sinX*cosY*sinZ); +// mQ[VZ] = (F32)(cosX*cosY*sinZ - sinX*sinY*cosZ); +//#endif +// +// normalize(); +// return (*this); +} + +// SJB: This code is correct for a logicly stored (non-transposed) matrix; +// Our matrices are stored transposed, OpenGL style, so this generates the +// INVERSE matrix, or the CORRECT matrix form an INVERSE quaternion. +// Because we use similar logic in LLMatrix3::quaternion(), +// we are internally consistant so everything works OK :) +LLMatrix3 LLQuaternion::getMatrix3(void) const +{ + LLMatrix3 mat; + F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; + + xx = mQ[VX] * mQ[VX]; + xy = mQ[VX] * mQ[VY]; + xz = mQ[VX] * mQ[VZ]; + xw = mQ[VX] * mQ[VW]; + + yy = mQ[VY] * mQ[VY]; + yz = mQ[VY] * mQ[VZ]; + yw = mQ[VY] * mQ[VW]; + + zz = mQ[VZ] * mQ[VZ]; + zw = mQ[VZ] * mQ[VW]; + + mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); + mat.mMatrix[0][1] = 2.f * ( xy + zw ); + mat.mMatrix[0][2] = 2.f * ( xz - yw ); + + mat.mMatrix[1][0] = 2.f * ( xy - zw ); + mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); + mat.mMatrix[1][2] = 2.f * ( yz + xw ); + + mat.mMatrix[2][0] = 2.f * ( xz + yw ); + mat.mMatrix[2][1] = 2.f * ( yz - xw ); + mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); + + return mat; +} + +LLMatrix4 LLQuaternion::getMatrix4(void) const +{ + LLMatrix4 mat; + F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; + + xx = mQ[VX] * mQ[VX]; + xy = mQ[VX] * mQ[VY]; + xz = mQ[VX] * mQ[VZ]; + xw = mQ[VX] * mQ[VW]; + + yy = mQ[VY] * mQ[VY]; + yz = mQ[VY] * mQ[VZ]; + yw = mQ[VY] * mQ[VW]; + + zz = mQ[VZ] * mQ[VZ]; + zw = mQ[VZ] * mQ[VW]; + + mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); + mat.mMatrix[0][1] = 2.f * ( xy + zw ); + mat.mMatrix[0][2] = 2.f * ( xz - yw ); + + mat.mMatrix[1][0] = 2.f * ( xy - zw ); + mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); + mat.mMatrix[1][2] = 2.f * ( yz + xw ); + + mat.mMatrix[2][0] = 2.f * ( xz + yw ); + mat.mMatrix[2][1] = 2.f * ( yz - xw ); + mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); + + // TODO -- should we set the translation portion to zero? + + return mat; +} + + + + +// Other useful methods + + +// calculate the shortest rotation from a to b +void LLQuaternion::shortestArc(const LLVector3 &a, const LLVector3 &b) +{ + // Make a local copy of both vectors. + LLVector3 vec_a = a; + LLVector3 vec_b = b; + + // Make sure neither vector is zero length. Also normalize + // the vectors while we are at it. 
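//	(Explanatory sketch added for clarity; not part of the original Linden code.)
//	Once both inputs are unit length, the cross product (vec_a % vec_b) computed
//	below points along the rotation axis with magnitude sin(theta), and the dot
//	product (vec_a * vec_b) is cos(theta). The branches that follow special-case
//	theta ~ 0 (identity), theta ~ PI (any axis orthogonal to vec_a will do), and
//	otherwise call setAngleAxis(acos(cos_theta), axis). A minimal usage sketch,
//	with example vectors assumed purely for illustration:
//
//	    LLQuaternion q;
//	    q.shortestArc(LLVector3(1.f, 0.f, 0.f), LLVector3(0.f, 1.f, 0.f));
//	    // q is now a 90 degree rotation about +Z, carrying +X onto +Y.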
+ F32 vec_a_mag = vec_a.normalize(); + F32 vec_b_mag = vec_b.normalize(); + if (vec_a_mag < F_APPROXIMATELY_ZERO || + vec_b_mag < F_APPROXIMATELY_ZERO) + { + // Can't calculate a rotation from this. + // Just return ZERO_ROTATION instead. + loadIdentity(); + return; + } + + // Create an axis to rotate around, and the cos of the angle to rotate. + LLVector3 axis = vec_a % vec_b; + F32 cos_theta = vec_a * vec_b; + + // Check the angle between the vectors to see if they are parallel or anti-parallel. + if (cos_theta > 1.0 - F_APPROXIMATELY_ZERO) + { + // a and b are parallel. No rotation is necessary. + loadIdentity(); + } + else if (cos_theta < -1.0 + F_APPROXIMATELY_ZERO) + { + // a and b are anti-parallel. + // Rotate 180 degrees around some orthogonal axis. + // Find the projection of the x-axis onto a, and try + // using the vector between the projection and the x-axis + // as the orthogonal axis. + LLVector3 proj = vec_a.mV[VX] / (vec_a * vec_a) * vec_a; + LLVector3 ortho_axis(1.f, 0.f, 0.f); + ortho_axis -= proj; + + // Turn this into an orthonormal axis. + F32 ortho_length = ortho_axis.normalize(); + // If the axis' length is 0, then our guess at an orthogonal axis + // was wrong (a is parallel to the x-axis). + if (ortho_length < F_APPROXIMATELY_ZERO) + { + // Use the z-axis instead. + ortho_axis.setVec(0.f, 0.f, 1.f); + } + + // Construct a quaternion from this orthonormal axis. + mQ[VX] = ortho_axis.mV[VX]; + mQ[VY] = ortho_axis.mV[VY]; + mQ[VZ] = ortho_axis.mV[VZ]; + mQ[VW] = 0.f; + } + else + { + // a and b are NOT parallel or anti-parallel. + // Return the rotation between these vectors. + F32 theta = (F32)acos(cos_theta); + + setAngleAxis(theta, axis); + } +} + +// constrains rotation to a cone angle specified in radians +const LLQuaternion &LLQuaternion::constrain(F32 radians) +{ + const F32 cos_angle_lim = cosf( radians/2 ); // mQ[VW] limit + const F32 sin_angle_lim = sinf( radians/2 ); // rotation axis length limit + + if (mQ[VW] < 0.f) + { + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + mQ[VW] *= -1.f; + } + + // if rotation angle is greater than limit (cos is less than limit) + if( mQ[VW] < cos_angle_lim ) + { + mQ[VW] = cos_angle_lim; + F32 axis_len = sqrtf( mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] ); // sin(theta/2) + F32 axis_mult_fact = sin_angle_lim / axis_len; + mQ[VX] *= axis_mult_fact; + mQ[VY] *= axis_mult_fact; + mQ[VZ] *= axis_mult_fact; + } + + return *this; +} + +// Operators + +std::ostream& operator<<(std::ostream &s, const LLQuaternion &a) +{ + s << "{ " + << a.mQ[VX] << ", " << a.mQ[VY] << ", " << a.mQ[VZ] << ", " << a.mQ[VW] + << " }"; + return s; +} + + +// Does NOT renormalize the result +LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b) +{ +// LLQuaternion::mMultCount++; + + LLQuaternion q( + b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], + b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], + b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], + b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] + ); + return q; +} + +/* +LLMatrix4 operator*(const LLMatrix4 &m, const LLQuaternion &q) +{ + LLMatrix4 qmat(q); + return (m*qmat); +} +*/ + + + +LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot) +{ + F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; + F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; + F32 ry = rot.mQ[VW] * a.mV[VY] + 
rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; + F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; + + F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; + F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; + F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; + + return LLVector4(nx, ny, nz, a.mV[VW]); +} + +LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot) +{ + F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; + F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; + F32 ry = rot.mQ[VW] * a.mV[VY] + rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; + F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; + + F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; + F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; + F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; + + return LLVector3(nx, ny, nz); +} + +LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot) +{ + F64 rw = - rot.mQ[VX] * a.mdV[VX] - rot.mQ[VY] * a.mdV[VY] - rot.mQ[VZ] * a.mdV[VZ]; + F64 rx = rot.mQ[VW] * a.mdV[VX] + rot.mQ[VY] * a.mdV[VZ] - rot.mQ[VZ] * a.mdV[VY]; + F64 ry = rot.mQ[VW] * a.mdV[VY] + rot.mQ[VZ] * a.mdV[VX] - rot.mQ[VX] * a.mdV[VZ]; + F64 rz = rot.mQ[VW] * a.mdV[VZ] + rot.mQ[VX] * a.mdV[VY] - rot.mQ[VY] * a.mdV[VX]; + + F64 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; + F64 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; + F64 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; + + return LLVector3d(nx, ny, nz); +} + +F32 dot(const LLQuaternion &a, const LLQuaternion &b) +{ + return a.mQ[VX] * b.mQ[VX] + + a.mQ[VY] * b.mQ[VY] + + a.mQ[VZ] * b.mQ[VZ] + + a.mQ[VW] * b.mQ[VW]; +} + +// DEMO HACK: This lerp is probably inocrrect now due intermediate normalization +// it should look more like the lerp below +#if 0 +// linear interpolation +LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) +{ + LLQuaternion r; + r = t * (q - p) + p; + r.normalize(); + return r; +} +#endif + +// lerp from identity to q +LLQuaternion lerp(F32 t, const LLQuaternion &q) +{ + LLQuaternion r; + r.mQ[VX] = t * q.mQ[VX]; + r.mQ[VY] = t * q.mQ[VY]; + r.mQ[VZ] = t * q.mQ[VZ]; + r.mQ[VW] = t * (q.mQ[VZ] - 1.f) + 1.f; + r.normalize(); + return r; +} + +LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) +{ + LLQuaternion r; + F32 inv_t; + + inv_t = 1.f - t; + + r.mQ[VX] = t * q.mQ[VX] + (inv_t * p.mQ[VX]); + r.mQ[VY] = t * q.mQ[VY] + (inv_t * p.mQ[VY]); + r.mQ[VZ] = t * q.mQ[VZ] + (inv_t * p.mQ[VZ]); + r.mQ[VW] = t * q.mQ[VW] + (inv_t * p.mQ[VW]); + r.normalize(); + return r; +} + + +// spherical linear interpolation +LLQuaternion slerp( F32 u, const LLQuaternion &a, const LLQuaternion &b ) +{ + // cosine theta = dot product of a and b + F32 cos_t = a.mQ[0]*b.mQ[0] + a.mQ[1]*b.mQ[1] + a.mQ[2]*b.mQ[2] + a.mQ[3]*b.mQ[3]; + + // if b is on opposite hemisphere from a, use -a instead + int bflip; + if (cos_t < 0.0f) + { + cos_t = -cos_t; + bflip = TRUE; + } + else + bflip = FALSE; + + // if B is (within precision limits) the same as A, + // just linear interpolate between A and B. 
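//	(Explanatory sketch added for clarity; not part of the original Linden code.)
//	The general branch below uses the standard slerp weights
//
//	    beta  = sin((1 - u) * theta) / sin(theta)
//	    alpha = sin(u * theta)       / sin(theta)
//
//	where cos(theta) = dot(a, b). When a and b are nearly identical, sin(theta)
//	approaches zero, so the code falls back to plain linear weights (1 - u, u)
//	rather than divide by a near-zero value; the bflip flag then negates beta
//	when -a was substituted to take the shorter arc.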
+ F32 alpha; // interpolant + F32 beta; // 1 - interpolant + if (1.0f - cos_t < 0.00001f) + { + beta = 1.0f - u; + alpha = u; + } + else + { + F32 theta = acosf(cos_t); + F32 sin_t = sinf(theta); + beta = sinf(theta - u*theta) / sin_t; + alpha = sinf(u*theta) / sin_t; + } + + if (bflip) + beta = -beta; + + // interpolate + LLQuaternion ret; + ret.mQ[0] = beta*a.mQ[0] + alpha*b.mQ[0]; + ret.mQ[1] = beta*a.mQ[1] + alpha*b.mQ[1]; + ret.mQ[2] = beta*a.mQ[2] + alpha*b.mQ[2]; + ret.mQ[3] = beta*a.mQ[3] + alpha*b.mQ[3]; + + return ret; +} + +// lerp whenever possible +LLQuaternion nlerp(F32 t, const LLQuaternion &a, const LLQuaternion &b) +{ + if (dot(a, b) < 0.f) + { + return slerp(t, a, b); + } + else + { + return lerp(t, a, b); + } +} + +LLQuaternion nlerp(F32 t, const LLQuaternion &q) +{ + if (q.mQ[VW] < 0.f) + { + return slerp(t, q); + } + else + { + return lerp(t, q); + } +} + +// slerp from identity quaternion to another quaternion +LLQuaternion slerp(F32 t, const LLQuaternion &q) +{ + F32 c = q.mQ[VW]; + if (1.0f == t || 1.0f == c) + { + // the trivial cases + return q; + } + + LLQuaternion r; + F32 s, angle, stq, stp; + + s = (F32) sqrt(1.f - c*c); + + if (c < 0.0f) + { + // when c < 0.0 then theta > PI/2 + // since quat and -quat are the same rotation we invert one of + // p or q to reduce unecessary spins + // A equivalent way to do it is to convert acos(c) as if it had + // been negative, and to negate stp + angle = (F32) acos(-c); + stp = -(F32) sin(angle * (1.f - t)); + stq = (F32) sin(angle * t); + } + else + { + angle = (F32) acos(c); + stp = (F32) sin(angle * (1.f - t)); + stq = (F32) sin(angle * t); + } + + r.mQ[VX] = (q.mQ[VX] * stq) / s; + r.mQ[VY] = (q.mQ[VY] * stq) / s; + r.mQ[VZ] = (q.mQ[VZ] * stq) / s; + r.mQ[VW] = (stp + q.mQ[VW] * stq) / s; + + return r; +} + +LLQuaternion mayaQ(F32 xRot, F32 yRot, F32 zRot, LLQuaternion::Order order) +{ + LLQuaternion xQ( xRot*DEG_TO_RAD, LLVector3(1.0f, 0.0f, 0.0f) ); + LLQuaternion yQ( yRot*DEG_TO_RAD, LLVector3(0.0f, 1.0f, 0.0f) ); + LLQuaternion zQ( zRot*DEG_TO_RAD, LLVector3(0.0f, 0.0f, 1.0f) ); + LLQuaternion ret; + switch( order ) + { + case LLQuaternion::XYZ: + ret = xQ * yQ * zQ; + break; + case LLQuaternion::YZX: + ret = yQ * zQ * xQ; + break; + case LLQuaternion::ZXY: + ret = zQ * xQ * yQ; + break; + case LLQuaternion::XZY: + ret = xQ * zQ * yQ; + break; + case LLQuaternion::YXZ: + ret = yQ * xQ * zQ; + break; + case LLQuaternion::ZYX: + ret = zQ * yQ * xQ; + break; + } + return ret; +} + +const char *OrderToString( const LLQuaternion::Order order ) +{ + const char *p = NULL; + switch( order ) + { + default: + case LLQuaternion::XYZ: + p = "XYZ"; + break; + case LLQuaternion::YZX: + p = "YZX"; + break; + case LLQuaternion::ZXY: + p = "ZXY"; + break; + case LLQuaternion::XZY: + p = "XZY"; + break; + case LLQuaternion::YXZ: + p = "YXZ"; + break; + case LLQuaternion::ZYX: + p = "ZYX"; + break; + } + return p; +} + +LLQuaternion::Order StringToOrder( const char *str ) +{ + if (strncmp(str, "XYZ", 3)==0 || strncmp(str, "xyz", 3)==0) + return LLQuaternion::XYZ; + + if (strncmp(str, "YZX", 3)==0 || strncmp(str, "yzx", 3)==0) + return LLQuaternion::YZX; + + if (strncmp(str, "ZXY", 3)==0 || strncmp(str, "zxy", 3)==0) + return LLQuaternion::ZXY; + + if (strncmp(str, "XZY", 3)==0 || strncmp(str, "xzy", 3)==0) + return LLQuaternion::XZY; + + if (strncmp(str, "YXZ", 3)==0 || strncmp(str, "yxz", 3)==0) + return LLQuaternion::YXZ; + + if (strncmp(str, "ZYX", 3)==0 || strncmp(str, "zyx", 3)==0) + return LLQuaternion::ZYX; + + return 
LLQuaternion::XYZ; +} + +void LLQuaternion::getAngleAxis(F32* angle, LLVector3 &vec) const +{ + F32 cos_a = mQ[VW]; + if (cos_a > 1.0f) cos_a = 1.0f; + if (cos_a < -1.0f) cos_a = -1.0f; + + F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); + + if ( fabs( sin_a ) < 0.0005f ) + sin_a = 1.0f; + else + sin_a = 1.f/sin_a; + + F32 temp_angle = 2.0f * (F32) acos( cos_a ); + if (temp_angle > F_PI) + { + // The (angle,axis) pair should never have angles outside [PI, -PI] + // since we want the _shortest_ (angle,axis) solution. + // Since acos is defined for [0, PI], and we multiply by 2.0, we + // can push the angle outside the acceptible range. + // When this happens we set the angle to the other portion of a + // full 2PI rotation, and negate the axis, which reverses the + // direction of the rotation (by the right-hand rule). + *angle = 2.f * F_PI - temp_angle; + vec.mV[VX] = - mQ[VX] * sin_a; + vec.mV[VY] = - mQ[VY] * sin_a; + vec.mV[VZ] = - mQ[VZ] * sin_a; + } + else + { + *angle = temp_angle; + vec.mV[VX] = mQ[VX] * sin_a; + vec.mV[VY] = mQ[VY] * sin_a; + vec.mV[VZ] = mQ[VZ] * sin_a; + } +} + + +// quaternion does not need to be normalized +void LLQuaternion::getEulerAngles(F32 *roll, F32 *pitch, F32 *yaw) const +{ + LLMatrix3 rot_mat(*this); + rot_mat.orthogonalize(); + rot_mat.getEulerAngles(roll, pitch, yaw); + +// // NOTE: LLQuaternion's are actually inverted with respect to +// // the matrices, so this code also assumes inverted quaternions +// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied +// // in reverse order (yaw,pitch,roll). +// F32 x = -mQ[VX], y = -mQ[VY], z = -mQ[VZ], w = mQ[VW]; +// F64 m20 = 2.0*(x*z-y*w); +// if (1.0f - fabsf(m20) < F_APPROXIMATELY_ZERO) +// { +// *roll = 0.0f; +// *pitch = (F32)asin(m20); +// *yaw = (F32)atan2(2.0*(x*y-z*w), 1.0 - 2.0*(x*x+z*z)); +// } +// else +// { +// *roll = (F32)atan2(-2.0*(y*z+x*w), 1.0-2.0*(x*x+y*y)); +// *pitch = (F32)asin(m20); +// *yaw = (F32)atan2(-2.0*(x*y+z*w), 1.0-2.0*(y*y+z*z)); +// } +} + +// Saves space by using the fact that our quaternions are normalized +LLVector3 LLQuaternion::packToVector3() const +{ + if( mQ[VW] >= 0 ) + { + return LLVector3( mQ[VX], mQ[VY], mQ[VZ] ); + } + else + { + return LLVector3( -mQ[VX], -mQ[VY], -mQ[VZ] ); + } +} + +// Saves space by using the fact that our quaternions are normalized +void LLQuaternion::unpackFromVector3( const LLVector3& vec ) +{ + mQ[VX] = vec.mV[VX]; + mQ[VY] = vec.mV[VY]; + mQ[VZ] = vec.mV[VZ]; + F32 t = 1.f - vec.magVecSquared(); + if( t > 0 ) + { + mQ[VW] = sqrt( t ); + } + else + { + // Need this to avoid trying to find the square root of a negative number due + // to floating point error. + mQ[VW] = 0; + } +} + +BOOL LLQuaternion::parseQuat(const std::string& buf, LLQuaternion* value) +{ + if( buf.empty() || value == NULL) + { + return FALSE; + } + + LLQuaternion quat; + S32 count = sscanf( buf.c_str(), "%f %f %f %f", quat.mQ + 0, quat.mQ + 1, quat.mQ + 2, quat.mQ + 3 ); + if( 4 == count ) + { + value->set( quat ); + return TRUE; + } + + return FALSE; +} + + +// End diff --git a/indra/llmath/llquaternion.h b/indra/llmath/llquaternion.h index 0769f29f23..bbd4326483 100644 --- a/indra/llmath/llquaternion.h +++ b/indra/llmath/llquaternion.h @@ -1,590 +1,594 @@ -/** - * @file llquaternion.h - * @brief LLQuaternion class header file. - * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#ifndef LLQUATERNION_H -#define LLQUATERNION_H - -#include "llmath.h" - -class LLVector4; -class LLVector3; -class LLVector3d; -class LLMatrix4; -class LLMatrix3; - -// NOTA BENE: Quaternion code is written assuming Unit Quaternions!!!! -// Moreover, it is written assuming that all vectors and matricies -// passed as arguments are normalized and unitary respectively. -// VERY VERY VERY VERY BAD THINGS will happen if these assumptions fail. - -static const U32 LENGTHOFQUAT = 4; - -class LLQuaternion -{ -public: - F32 mQ[LENGTHOFQUAT]; - - static const LLQuaternion DEFAULT; - - LLQuaternion(); // Initializes Quaternion to (0,0,0,1) - explicit LLQuaternion(const LLMatrix4 &mat); // Initializes Quaternion from Matrix4 - explicit LLQuaternion(const LLMatrix3 &mat); // Initializes Quaternion from Matrix3 - LLQuaternion(F32 x, F32 y, F32 z, F32 w); // Initializes Quaternion to normalize(x, y, z, w) - LLQuaternion(F32 angle, const LLVector4 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) - LLQuaternion(F32 angle, const LLVector3 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) - LLQuaternion(const F32 *q); // Initializes Quaternion to normalize(x, y, z, w) - LLQuaternion(const LLVector3 &x_axis, - const LLVector3 &y_axis, - const LLVector3 &z_axis); // Initializes Quaternion from Matrix3 = [x_axis ; y_axis ; z_axis] - - BOOL isIdentity() const; - BOOL isNotIdentity() const; - BOOL isFinite() const; // checks to see if all values of LLQuaternion are finite - void quantize16(F32 lower, F32 upper); // changes the vector to reflect quatization - void quantize8(F32 lower, F32 upper); // changes the vector to reflect quatization - void loadIdentity(); // Loads the quaternion that represents the identity rotation - - const LLQuaternion& set(F32 x, F32 y, F32 z, F32 w); // Sets Quaternion to normalize(x, y, z, w) - const LLQuaternion& set(const LLQuaternion &quat); // Copies Quaternion - const LLQuaternion& set(const F32 *q); // Sets Quaternion to normalize(quat[VX], quat[VY], quat[VZ], quat[VW]) - const LLQuaternion& set(const LLMatrix3 &mat); // Sets Quaternion to mat2quat(mat) - const LLQuaternion& set(const LLMatrix4 &mat); // Sets Quaternion to mat2quat(mat) - - const LLQuaternion& setAngleAxis(F32 angle, F32 x, F32 y, F32 z); // Sets Quaternion to axis_angle2quat(angle, x, y, z) - const 
LLQuaternion& setAngleAxis(F32 angle, const LLVector3 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) - const LLQuaternion& setAngleAxis(F32 angle, const LLVector4 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) - const LLQuaternion& setEulerAngles(F32 roll, F32 pitch, F32 yaw); // Sets Quaternion to euler2quat(pitch, yaw, roll) - - const LLQuaternion& setQuatInit(F32 x, F32 y, F32 z, F32 w); // deprecated - const LLQuaternion& setQuat(const LLQuaternion &quat); // deprecated - const LLQuaternion& setQuat(const F32 *q); // deprecated - const LLQuaternion& setQuat(const LLMatrix3 &mat); // deprecated - const LLQuaternion& setQuat(const LLMatrix4 &mat); // deprecated - const LLQuaternion& setQuat(F32 angle, F32 x, F32 y, F32 z); // deprecated - const LLQuaternion& setQuat(F32 angle, const LLVector3 &vec); // deprecated - const LLQuaternion& setQuat(F32 angle, const LLVector4 &vec); // deprecated - const LLQuaternion& setQuat(F32 roll, F32 pitch, F32 yaw); // deprecated - - LLMatrix4 getMatrix4(void) const; // Returns the Matrix4 equivalent of Quaternion - LLMatrix3 getMatrix3(void) const; // Returns the Matrix3 equivalent of Quaternion - void getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const; // returns rotation in radians about axis x,y,z - void getAngleAxis(F32* angle, LLVector3 &vec) const; - void getEulerAngles(F32 *roll, F32* pitch, F32 *yaw) const; - - F32 normalize(); // Normalizes Quaternion and returns magnitude - F32 normQuat(); // deprecated - - const LLQuaternion& conjugate(void); // Conjugates Quaternion and returns result - const LLQuaternion& conjQuat(void); // deprecated - - // Other useful methods - const LLQuaternion& transpose(); // transpose (same as conjugate) - const LLQuaternion& transQuat(); // deprecated - - void shortestArc(const LLVector3 &a, const LLVector3 &b); // shortest rotation from a to b - const LLQuaternion& constrain(F32 radians); // constrains rotation to a cone angle specified in radians - - // Standard operators - friend std::ostream& operator<<(std::ostream &s, const LLQuaternion &a); // Prints a - friend LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b); // Addition - friend LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b); // Subtraction - friend LLQuaternion operator-(const LLQuaternion &a); // Negation - friend LLQuaternion operator*(F32 a, const LLQuaternion &q); // Scale - friend LLQuaternion operator*(const LLQuaternion &q, F32 b); // Scale - friend LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b); // Returns a * b - friend LLQuaternion operator~(const LLQuaternion &a); // Returns a* (Conjugate of a) - bool operator==(const LLQuaternion &b) const; // Returns a == b - bool operator!=(const LLQuaternion &b) const; // Returns a != b - - friend const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion &b); // Returns a * b - - friend LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot); // Rotates a by rot - friend LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot); // Rotates a by rot - friend LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot); // Rotates a by rot - - // Non-standard operators - friend F32 dot(const LLQuaternion &a, const LLQuaternion &b); - friend LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from p to q - friend LLQuaternion lerp(F32 t, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from identity to q - friend LLQuaternion 
slerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // spherical linear interpolation from p to q - friend LLQuaternion slerp(F32 t, const LLQuaternion &q); // spherical linear interpolation from identity to q - friend LLQuaternion nlerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // normalized linear interpolation from p to q - friend LLQuaternion nlerp(F32 t, const LLQuaternion &q); // normalized linear interpolation from p to q - - LLVector3 packToVector3() const; // Saves space by using the fact that our quaternions are normalized - void unpackFromVector3(const LLVector3& vec); // Saves space by using the fact that our quaternions are normalized - - enum Order { - XYZ = 0, - YZX = 1, - ZXY = 2, - XZY = 3, - YXZ = 4, - ZYX = 5 - }; - // Creates a quaternions from maya's rotation representation, - // which is 3 rotations (in DEGREES) in the specified order - friend LLQuaternion mayaQ(F32 x, F32 y, F32 z, Order order); - - // Conversions between Order and strings like "xyz" or "ZYX" - friend const char *OrderToString( const Order order ); - friend Order StringToOrder( const char *str ); - - static BOOL parseQuat(const std::string& buf, LLQuaternion* value); - - // For debugging, only - //static U32 mMultCount; -}; - -// checker -inline BOOL LLQuaternion::isFinite() const -{ - return (llfinite(mQ[VX]) && llfinite(mQ[VY]) && llfinite(mQ[VZ]) && llfinite(mQ[VS])); -} - -inline BOOL LLQuaternion::isIdentity() const -{ - return - ( mQ[VX] == 0.f ) && - ( mQ[VY] == 0.f ) && - ( mQ[VZ] == 0.f ) && - ( mQ[VS] == 1.f ); -} - -inline BOOL LLQuaternion::isNotIdentity() const -{ - return - ( mQ[VX] != 0.f ) || - ( mQ[VY] != 0.f ) || - ( mQ[VZ] != 0.f ) || - ( mQ[VS] != 1.f ); -} - - - -inline LLQuaternion::LLQuaternion(void) -{ - mQ[VX] = 0.f; - mQ[VY] = 0.f; - mQ[VZ] = 0.f; - mQ[VS] = 1.f; -} - -inline LLQuaternion::LLQuaternion(F32 x, F32 y, F32 z, F32 w) -{ - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = w; - - //RN: don't normalize this case as its used mainly for temporaries during calculations - //normalize(); - /* - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - mag -= 1.f; - mag = fabs(mag); - llassert(mag < 10.f*FP_MAG_THRESHOLD); - */ -} - -inline LLQuaternion::LLQuaternion(const F32 *q) -{ - mQ[VX] = q[VX]; - mQ[VY] = q[VY]; - mQ[VZ] = q[VZ]; - mQ[VS] = q[VW]; - - normalize(); - /* - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - mag -= 1.f; - mag = fabs(mag); - llassert(mag < FP_MAG_THRESHOLD); - */ -} - - -inline void LLQuaternion::loadIdentity() -{ - mQ[VX] = 0.0f; - mQ[VY] = 0.0f; - mQ[VZ] = 0.0f; - mQ[VW] = 1.0f; -} - - -inline const LLQuaternion& LLQuaternion::set(F32 x, F32 y, F32 z, F32 w) -{ - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = w; - normalize(); - return (*this); -} - -inline const LLQuaternion& LLQuaternion::set(const LLQuaternion &quat) -{ - mQ[VX] = quat.mQ[VX]; - mQ[VY] = quat.mQ[VY]; - mQ[VZ] = quat.mQ[VZ]; - mQ[VW] = quat.mQ[VW]; - normalize(); - return (*this); -} - -inline const LLQuaternion& LLQuaternion::set(const F32 *q) -{ - mQ[VX] = q[VX]; - mQ[VY] = q[VY]; - mQ[VZ] = q[VZ]; - mQ[VS] = q[VW]; - normalize(); - return (*this); -} - - -// deprecated -inline const LLQuaternion& LLQuaternion::setQuatInit(F32 x, F32 y, F32 z, F32 w) -{ - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = w; - normalize(); - return (*this); -} - -// deprecated -inline const LLQuaternion& LLQuaternion::setQuat(const LLQuaternion &quat) -{ - mQ[VX] = quat.mQ[VX]; - mQ[VY] = quat.mQ[VY]; 
- mQ[VZ] = quat.mQ[VZ]; - mQ[VW] = quat.mQ[VW]; - normalize(); - return (*this); -} - -// deprecated -inline const LLQuaternion& LLQuaternion::setQuat(const F32 *q) -{ - mQ[VX] = q[VX]; - mQ[VY] = q[VY]; - mQ[VZ] = q[VZ]; - mQ[VS] = q[VW]; - normalize(); - return (*this); -} - -// There may be a cheaper way that avoids the sqrt. -// Does sin_a = VX*VX + VY*VY + VZ*VZ? -// Copied from Matrix and Quaternion FAQ 1.12 -inline void LLQuaternion::getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const -{ - F32 cos_a = mQ[VW]; - if (cos_a > 1.0f) cos_a = 1.0f; - if (cos_a < -1.0f) cos_a = -1.0f; - - F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); - - if ( fabs( sin_a ) < 0.0005f ) - sin_a = 1.0f; - else - sin_a = 1.f/sin_a; - - F32 temp_angle = 2.0f * (F32) acos( cos_a ); - if (temp_angle > F_PI) - { - // The (angle,axis) pair should never have angles outside [PI, -PI] - // since we want the _shortest_ (angle,axis) solution. - // Since acos is defined for [0, PI], and we multiply by 2.0, we - // can push the angle outside the acceptible range. - // When this happens we set the angle to the other portion of a - // full 2PI rotation, and negate the axis, which reverses the - // direction of the rotation (by the right-hand rule). - *angle = 2.f * F_PI - temp_angle; - *x = - mQ[VX] * sin_a; - *y = - mQ[VY] * sin_a; - *z = - mQ[VZ] * sin_a; - } - else - { - *angle = temp_angle; - *x = mQ[VX] * sin_a; - *y = mQ[VY] * sin_a; - *z = mQ[VZ] * sin_a; - } -} - -inline const LLQuaternion& LLQuaternion::conjugate() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - -inline const LLQuaternion& LLQuaternion::conjQuat() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - -// Transpose -inline const LLQuaternion& LLQuaternion::transpose() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - -// deprecated -inline const LLQuaternion& LLQuaternion::transQuat() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - - -inline LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b) -{ - return LLQuaternion( - a.mQ[VX] + b.mQ[VX], - a.mQ[VY] + b.mQ[VY], - a.mQ[VZ] + b.mQ[VZ], - a.mQ[VW] + b.mQ[VW] ); -} - - -inline LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b) -{ - return LLQuaternion( - a.mQ[VX] - b.mQ[VX], - a.mQ[VY] - b.mQ[VY], - a.mQ[VZ] - b.mQ[VZ], - a.mQ[VW] - b.mQ[VW] ); -} - - -inline LLQuaternion operator-(const LLQuaternion &a) -{ - return LLQuaternion( - -a.mQ[VX], - -a.mQ[VY], - -a.mQ[VZ], - -a.mQ[VW] ); -} - - -inline LLQuaternion operator*(F32 a, const LLQuaternion &q) -{ - return LLQuaternion( - a * q.mQ[VX], - a * q.mQ[VY], - a * q.mQ[VZ], - a * q.mQ[VW] ); -} - - -inline LLQuaternion operator*(const LLQuaternion &q, F32 a) -{ - return LLQuaternion( - a * q.mQ[VX], - a * q.mQ[VY], - a * q.mQ[VZ], - a * q.mQ[VW] ); -} - -inline LLQuaternion operator~(const LLQuaternion &a) -{ - LLQuaternion q(a); - q.conjQuat(); - return q; -} - -inline bool LLQuaternion::operator==(const LLQuaternion &b) const -{ - return ( (mQ[VX] == b.mQ[VX]) - &&(mQ[VY] == b.mQ[VY]) - &&(mQ[VZ] == b.mQ[VZ]) - &&(mQ[VS] == b.mQ[VS])); -} - -inline bool LLQuaternion::operator!=(const LLQuaternion &b) const -{ - return ( (mQ[VX] != b.mQ[VX]) - ||(mQ[VY] != b.mQ[VY]) - ||(mQ[VZ] != b.mQ[VZ]) - ||(mQ[VS] != b.mQ[VS])); -} - -inline const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion &b) -{ -#if 1 - LLQuaternion q( - b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] 
* a.mQ[1], - b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], - b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], - b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] - ); - a = q; -#else - a = a * b; -#endif - return a; -} - -const F32 ONE_PART_IN_A_MILLION = 0.000001f; - -inline F32 LLQuaternion::normalize() -{ - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - - if (mag > FP_MAG_THRESHOLD) - { - // Floating point error can prevent some quaternions from achieving - // exact unity length. When trying to renormalize such quaternions we - // can oscillate between multiple quantized states. To prevent such - // drifts we only renomalize if the length is far enough from unity. - if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) - { - F32 oomag = 1.f/mag; - mQ[VX] *= oomag; - mQ[VY] *= oomag; - mQ[VZ] *= oomag; - mQ[VS] *= oomag; - } - } - else - { - // we were given a very bad quaternion so we set it to identity - mQ[VX] = 0.f; - mQ[VY] = 0.f; - mQ[VZ] = 0.f; - mQ[VS] = 1.f; - } - - return mag; -} - -// deprecated -inline F32 LLQuaternion::normQuat() -{ - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - - if (mag > FP_MAG_THRESHOLD) - { - if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) - { - // only renormalize if length not close enough to 1.0 already - F32 oomag = 1.f/mag; - mQ[VX] *= oomag; - mQ[VY] *= oomag; - mQ[VZ] *= oomag; - mQ[VS] *= oomag; - } - } - else - { - mQ[VX] = 0.f; - mQ[VY] = 0.f; - mQ[VZ] = 0.f; - mQ[VS] = 1.f; - } - - return mag; -} - -LLQuaternion::Order StringToOrder( const char *str ); - -// Some notes about Quaternions - -// What is a Quaternion? -// --------------------- -// A quaternion is a point in 4-dimensional complex space. -// Q = { Qx, Qy, Qz, Qw } -// -// -// Why Quaternions? -// ---------------- -// The set of quaternions that make up the the 4-D unit sphere -// can be mapped to the set of all rotations in 3-D space. Sometimes -// it is easier to describe/manipulate rotations in quaternion space -// than rotation-matrix space. -// -// -// How Quaternions? -// ---------------- -// In order to take advantage of quaternions we need to know how to -// go from rotation-matricies to quaternions and back. We also have -// to agree what variety of rotations we're generating. -// -// Consider the equation... v' = v * R -// -// There are two ways to think about rotations of vectors. -// 1) v' is the same vector in a different reference frame -// 2) v' is a new vector in the same reference frame -// -// bookmark -- which way are we using? -// -// -// Quaternion from Angle-Axis: -// --------------------------- -// Suppose we wanted to represent a rotation of some angle (theta) -// about some axis ({Ax, Ay, Az})... -// -// axis of rotation = {Ax, Ay, Az} -// angle_of_rotation = theta -// -// s = sin(0.5 * theta) -// c = cos(0.5 * theta) -// Q = { s * Ax, s * Ay, s * Az, c } -// -// -// 3x3 Matrix from Quaternion -// -------------------------- -// -// | | -// | 1 - 2 * (y^2 + z^2) 2 * (x * y + z * w) 2 * (y * w - x * z) | -// | | -// M = | 2 * (x * y - z * w) 1 - 2 * (x^2 + z^2) 2 * (y * z + x * w) | -// | | -// | 2 * (x * z + y * w) 2 * (y * z - x * w) 1 - 2 * (x^2 + y^2) | -// | | - -#endif +/** + * @file llquaternion.h + * @brief LLQuaternion class header file. + * + * $LicenseInfo:firstyear=2000&license=viewergpl$ + * + * Copyright (c) 2000-2009, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LLQUATERNION_H +#define LLQUATERNION_H + +#include + +#ifndef LLMATH_H //enforce specific include order to avoid tangling inline dependencies +#error "Please include llmath.h first." +#endif + +class LLVector4; +class LLVector3; +class LLVector3d; +class LLMatrix4; +class LLMatrix3; + +// NOTA BENE: Quaternion code is written assuming Unit Quaternions!!!! +// Moreover, it is written assuming that all vectors and matricies +// passed as arguments are normalized and unitary respectively. +// VERY VERY VERY VERY BAD THINGS will happen if these assumptions fail. + +static const U32 LENGTHOFQUAT = 4; + +class LLQuaternion +{ +public: + F32 mQ[LENGTHOFQUAT]; + + static const LLQuaternion DEFAULT; + + LLQuaternion(); // Initializes Quaternion to (0,0,0,1) + explicit LLQuaternion(const LLMatrix4 &mat); // Initializes Quaternion from Matrix4 + explicit LLQuaternion(const LLMatrix3 &mat); // Initializes Quaternion from Matrix3 + LLQuaternion(F32 x, F32 y, F32 z, F32 w); // Initializes Quaternion to normalize(x, y, z, w) + LLQuaternion(F32 angle, const LLVector4 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) + LLQuaternion(F32 angle, const LLVector3 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) + LLQuaternion(const F32 *q); // Initializes Quaternion to normalize(x, y, z, w) + LLQuaternion(const LLVector3 &x_axis, + const LLVector3 &y_axis, + const LLVector3 &z_axis); // Initializes Quaternion from Matrix3 = [x_axis ; y_axis ; z_axis] + + BOOL isIdentity() const; + BOOL isNotIdentity() const; + BOOL isFinite() const; // checks to see if all values of LLQuaternion are finite + void quantize16(F32 lower, F32 upper); // changes the vector to reflect quatization + void quantize8(F32 lower, F32 upper); // changes the vector to reflect quatization + void loadIdentity(); // Loads the quaternion that represents the identity rotation + + const LLQuaternion& set(F32 x, F32 y, F32 z, F32 w); // Sets Quaternion to normalize(x, y, z, w) + const LLQuaternion& set(const LLQuaternion &quat); // Copies Quaternion + const LLQuaternion& set(const F32 *q); // Sets Quaternion to normalize(quat[VX], quat[VY], quat[VZ], quat[VW]) + const LLQuaternion& set(const LLMatrix3 &mat); // Sets Quaternion to mat2quat(mat) + const LLQuaternion& set(const LLMatrix4 &mat); // Sets Quaternion to mat2quat(mat) + + const 
LLQuaternion& setAngleAxis(F32 angle, F32 x, F32 y, F32 z); // Sets Quaternion to axis_angle2quat(angle, x, y, z) + const LLQuaternion& setAngleAxis(F32 angle, const LLVector3 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) + const LLQuaternion& setAngleAxis(F32 angle, const LLVector4 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) + const LLQuaternion& setEulerAngles(F32 roll, F32 pitch, F32 yaw); // Sets Quaternion to euler2quat(pitch, yaw, roll) + + const LLQuaternion& setQuatInit(F32 x, F32 y, F32 z, F32 w); // deprecated + const LLQuaternion& setQuat(const LLQuaternion &quat); // deprecated + const LLQuaternion& setQuat(const F32 *q); // deprecated + const LLQuaternion& setQuat(const LLMatrix3 &mat); // deprecated + const LLQuaternion& setQuat(const LLMatrix4 &mat); // deprecated + const LLQuaternion& setQuat(F32 angle, F32 x, F32 y, F32 z); // deprecated + const LLQuaternion& setQuat(F32 angle, const LLVector3 &vec); // deprecated + const LLQuaternion& setQuat(F32 angle, const LLVector4 &vec); // deprecated + const LLQuaternion& setQuat(F32 roll, F32 pitch, F32 yaw); // deprecated + + LLMatrix4 getMatrix4(void) const; // Returns the Matrix4 equivalent of Quaternion + LLMatrix3 getMatrix3(void) const; // Returns the Matrix3 equivalent of Quaternion + void getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const; // returns rotation in radians about axis x,y,z + void getAngleAxis(F32* angle, LLVector3 &vec) const; + void getEulerAngles(F32 *roll, F32* pitch, F32 *yaw) const; + + F32 normalize(); // Normalizes Quaternion and returns magnitude + F32 normQuat(); // deprecated + + const LLQuaternion& conjugate(void); // Conjugates Quaternion and returns result + const LLQuaternion& conjQuat(void); // deprecated + + // Other useful methods + const LLQuaternion& transpose(); // transpose (same as conjugate) + const LLQuaternion& transQuat(); // deprecated + + void shortestArc(const LLVector3 &a, const LLVector3 &b); // shortest rotation from a to b + const LLQuaternion& constrain(F32 radians); // constrains rotation to a cone angle specified in radians + + // Standard operators + friend std::ostream& operator<<(std::ostream &s, const LLQuaternion &a); // Prints a + friend LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b); // Addition + friend LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b); // Subtraction + friend LLQuaternion operator-(const LLQuaternion &a); // Negation + friend LLQuaternion operator*(F32 a, const LLQuaternion &q); // Scale + friend LLQuaternion operator*(const LLQuaternion &q, F32 b); // Scale + friend LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b); // Returns a * b + friend LLQuaternion operator~(const LLQuaternion &a); // Returns a* (Conjugate of a) + bool operator==(const LLQuaternion &b) const; // Returns a == b + bool operator!=(const LLQuaternion &b) const; // Returns a != b + + friend const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion &b); // Returns a * b + + friend LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot); // Rotates a by rot + friend LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot); // Rotates a by rot + friend LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot); // Rotates a by rot + + // Non-standard operators + friend F32 dot(const LLQuaternion &a, const LLQuaternion &b); + friend LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from p to q + friend 
LLQuaternion lerp(F32 t, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from identity to q + friend LLQuaternion slerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // spherical linear interpolation from p to q + friend LLQuaternion slerp(F32 t, const LLQuaternion &q); // spherical linear interpolation from identity to q + friend LLQuaternion nlerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // normalized linear interpolation from p to q + friend LLQuaternion nlerp(F32 t, const LLQuaternion &q); // normalized linear interpolation from p to q + + LLVector3 packToVector3() const; // Saves space by using the fact that our quaternions are normalized + void unpackFromVector3(const LLVector3& vec); // Saves space by using the fact that our quaternions are normalized + + enum Order { + XYZ = 0, + YZX = 1, + ZXY = 2, + XZY = 3, + YXZ = 4, + ZYX = 5 + }; + // Creates a quaternions from maya's rotation representation, + // which is 3 rotations (in DEGREES) in the specified order + friend LLQuaternion mayaQ(F32 x, F32 y, F32 z, Order order); + + // Conversions between Order and strings like "xyz" or "ZYX" + friend const char *OrderToString( const Order order ); + friend Order StringToOrder( const char *str ); + + static BOOL parseQuat(const std::string& buf, LLQuaternion* value); + + // For debugging, only + //static U32 mMultCount; +}; + +// checker +inline BOOL LLQuaternion::isFinite() const +{ + return (llfinite(mQ[VX]) && llfinite(mQ[VY]) && llfinite(mQ[VZ]) && llfinite(mQ[VS])); +} + +inline BOOL LLQuaternion::isIdentity() const +{ + return + ( mQ[VX] == 0.f ) && + ( mQ[VY] == 0.f ) && + ( mQ[VZ] == 0.f ) && + ( mQ[VS] == 1.f ); +} + +inline BOOL LLQuaternion::isNotIdentity() const +{ + return + ( mQ[VX] != 0.f ) || + ( mQ[VY] != 0.f ) || + ( mQ[VZ] != 0.f ) || + ( mQ[VS] != 1.f ); +} + + + +inline LLQuaternion::LLQuaternion(void) +{ + mQ[VX] = 0.f; + mQ[VY] = 0.f; + mQ[VZ] = 0.f; + mQ[VS] = 1.f; +} + +inline LLQuaternion::LLQuaternion(F32 x, F32 y, F32 z, F32 w) +{ + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = w; + + //RN: don't normalize this case as its used mainly for temporaries during calculations + //normalize(); + /* + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + mag -= 1.f; + mag = fabs(mag); + llassert(mag < 10.f*FP_MAG_THRESHOLD); + */ +} + +inline LLQuaternion::LLQuaternion(const F32 *q) +{ + mQ[VX] = q[VX]; + mQ[VY] = q[VY]; + mQ[VZ] = q[VZ]; + mQ[VS] = q[VW]; + + normalize(); + /* + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + mag -= 1.f; + mag = fabs(mag); + llassert(mag < FP_MAG_THRESHOLD); + */ +} + + +inline void LLQuaternion::loadIdentity() +{ + mQ[VX] = 0.0f; + mQ[VY] = 0.0f; + mQ[VZ] = 0.0f; + mQ[VW] = 1.0f; +} + + +inline const LLQuaternion& LLQuaternion::set(F32 x, F32 y, F32 z, F32 w) +{ + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = w; + normalize(); + return (*this); +} + +inline const LLQuaternion& LLQuaternion::set(const LLQuaternion &quat) +{ + mQ[VX] = quat.mQ[VX]; + mQ[VY] = quat.mQ[VY]; + mQ[VZ] = quat.mQ[VZ]; + mQ[VW] = quat.mQ[VW]; + normalize(); + return (*this); +} + +inline const LLQuaternion& LLQuaternion::set(const F32 *q) +{ + mQ[VX] = q[VX]; + mQ[VY] = q[VY]; + mQ[VZ] = q[VZ]; + mQ[VS] = q[VW]; + normalize(); + return (*this); +} + + +// deprecated +inline const LLQuaternion& LLQuaternion::setQuatInit(F32 x, F32 y, F32 z, F32 w) +{ + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = w; + normalize(); + return (*this); +} + +// deprecated 
+inline const LLQuaternion& LLQuaternion::setQuat(const LLQuaternion &quat) +{ + mQ[VX] = quat.mQ[VX]; + mQ[VY] = quat.mQ[VY]; + mQ[VZ] = quat.mQ[VZ]; + mQ[VW] = quat.mQ[VW]; + normalize(); + return (*this); +} + +// deprecated +inline const LLQuaternion& LLQuaternion::setQuat(const F32 *q) +{ + mQ[VX] = q[VX]; + mQ[VY] = q[VY]; + mQ[VZ] = q[VZ]; + mQ[VS] = q[VW]; + normalize(); + return (*this); +} + +// There may be a cheaper way that avoids the sqrt. +// Does sin_a = VX*VX + VY*VY + VZ*VZ? +// Copied from Matrix and Quaternion FAQ 1.12 +inline void LLQuaternion::getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const +{ + F32 cos_a = mQ[VW]; + if (cos_a > 1.0f) cos_a = 1.0f; + if (cos_a < -1.0f) cos_a = -1.0f; + + F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); + + if ( fabs( sin_a ) < 0.0005f ) + sin_a = 1.0f; + else + sin_a = 1.f/sin_a; + + F32 temp_angle = 2.0f * (F32) acos( cos_a ); + if (temp_angle > F_PI) + { + // The (angle,axis) pair should never have angles outside [PI, -PI] + // since we want the _shortest_ (angle,axis) solution. + // Since acos is defined for [0, PI], and we multiply by 2.0, we + // can push the angle outside the acceptible range. + // When this happens we set the angle to the other portion of a + // full 2PI rotation, and negate the axis, which reverses the + // direction of the rotation (by the right-hand rule). + *angle = 2.f * F_PI - temp_angle; + *x = - mQ[VX] * sin_a; + *y = - mQ[VY] * sin_a; + *z = - mQ[VZ] * sin_a; + } + else + { + *angle = temp_angle; + *x = mQ[VX] * sin_a; + *y = mQ[VY] * sin_a; + *z = mQ[VZ] * sin_a; + } +} + +inline const LLQuaternion& LLQuaternion::conjugate() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + +inline const LLQuaternion& LLQuaternion::conjQuat() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + +// Transpose +inline const LLQuaternion& LLQuaternion::transpose() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + +// deprecated +inline const LLQuaternion& LLQuaternion::transQuat() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + + +inline LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b) +{ + return LLQuaternion( + a.mQ[VX] + b.mQ[VX], + a.mQ[VY] + b.mQ[VY], + a.mQ[VZ] + b.mQ[VZ], + a.mQ[VW] + b.mQ[VW] ); +} + + +inline LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b) +{ + return LLQuaternion( + a.mQ[VX] - b.mQ[VX], + a.mQ[VY] - b.mQ[VY], + a.mQ[VZ] - b.mQ[VZ], + a.mQ[VW] - b.mQ[VW] ); +} + + +inline LLQuaternion operator-(const LLQuaternion &a) +{ + return LLQuaternion( + -a.mQ[VX], + -a.mQ[VY], + -a.mQ[VZ], + -a.mQ[VW] ); +} + + +inline LLQuaternion operator*(F32 a, const LLQuaternion &q) +{ + return LLQuaternion( + a * q.mQ[VX], + a * q.mQ[VY], + a * q.mQ[VZ], + a * q.mQ[VW] ); +} + + +inline LLQuaternion operator*(const LLQuaternion &q, F32 a) +{ + return LLQuaternion( + a * q.mQ[VX], + a * q.mQ[VY], + a * q.mQ[VZ], + a * q.mQ[VW] ); +} + +inline LLQuaternion operator~(const LLQuaternion &a) +{ + LLQuaternion q(a); + q.conjQuat(); + return q; +} + +inline bool LLQuaternion::operator==(const LLQuaternion &b) const +{ + return ( (mQ[VX] == b.mQ[VX]) + &&(mQ[VY] == b.mQ[VY]) + &&(mQ[VZ] == b.mQ[VZ]) + &&(mQ[VS] == b.mQ[VS])); +} + +inline bool LLQuaternion::operator!=(const LLQuaternion &b) const +{ + return ( (mQ[VX] != b.mQ[VX]) + ||(mQ[VY] != b.mQ[VY]) + ||(mQ[VZ] != b.mQ[VZ]) + ||(mQ[VS] != b.mQ[VS])); +} + +inline const LLQuaternion& 
operator*=(LLQuaternion &a, const LLQuaternion &b) +{ +#if 1 + LLQuaternion q( + b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], + b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], + b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], + b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] + ); + a = q; +#else + a = a * b; +#endif + return a; +} + +const F32 ONE_PART_IN_A_MILLION = 0.000001f; + +inline F32 LLQuaternion::normalize() +{ + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + + if (mag > FP_MAG_THRESHOLD) + { + // Floating point error can prevent some quaternions from achieving + // exact unity length. When trying to renormalize such quaternions we + // can oscillate between multiple quantized states. To prevent such + // drifts we only renomalize if the length is far enough from unity. + if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) + { + F32 oomag = 1.f/mag; + mQ[VX] *= oomag; + mQ[VY] *= oomag; + mQ[VZ] *= oomag; + mQ[VS] *= oomag; + } + } + else + { + // we were given a very bad quaternion so we set it to identity + mQ[VX] = 0.f; + mQ[VY] = 0.f; + mQ[VZ] = 0.f; + mQ[VS] = 1.f; + } + + return mag; +} + +// deprecated +inline F32 LLQuaternion::normQuat() +{ + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + + if (mag > FP_MAG_THRESHOLD) + { + if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) + { + // only renormalize if length not close enough to 1.0 already + F32 oomag = 1.f/mag; + mQ[VX] *= oomag; + mQ[VY] *= oomag; + mQ[VZ] *= oomag; + mQ[VS] *= oomag; + } + } + else + { + mQ[VX] = 0.f; + mQ[VY] = 0.f; + mQ[VZ] = 0.f; + mQ[VS] = 1.f; + } + + return mag; +} + +LLQuaternion::Order StringToOrder( const char *str ); + +// Some notes about Quaternions + +// What is a Quaternion? +// --------------------- +// A quaternion is a point in 4-dimensional complex space. +// Q = { Qx, Qy, Qz, Qw } +// +// +// Why Quaternions? +// ---------------- +// The set of quaternions that make up the the 4-D unit sphere +// can be mapped to the set of all rotations in 3-D space. Sometimes +// it is easier to describe/manipulate rotations in quaternion space +// than rotation-matrix space. +// +// +// How Quaternions? +// ---------------- +// In order to take advantage of quaternions we need to know how to +// go from rotation-matricies to quaternions and back. We also have +// to agree what variety of rotations we're generating. +// +// Consider the equation... v' = v * R +// +// There are two ways to think about rotations of vectors. +// 1) v' is the same vector in a different reference frame +// 2) v' is a new vector in the same reference frame +// +// bookmark -- which way are we using? +// +// +// Quaternion from Angle-Axis: +// --------------------------- +// Suppose we wanted to represent a rotation of some angle (theta) +// about some axis ({Ax, Ay, Az})... 
+// +// axis of rotation = {Ax, Ay, Az} +// angle_of_rotation = theta +// +// s = sin(0.5 * theta) +// c = cos(0.5 * theta) +// Q = { s * Ax, s * Ay, s * Az, c } +// +// +// 3x3 Matrix from Quaternion +// -------------------------- +// +// | | +// | 1 - 2 * (y^2 + z^2) 2 * (x * y + z * w) 2 * (y * w - x * z) | +// | | +// M = | 2 * (x * y - z * w) 1 - 2 * (x^2 + z^2) 2 * (y * z + x * w) | +// | | +// | 2 * (x * z + y * w) 2 * (y * z - x * w) 1 - 2 * (x^2 + y^2) | +// | | + +#endif diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index bba0a6d089..ab9f8c4c24 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -45,7 +45,7 @@ #include "v4math.h" #include "m4math.h" #include "m3math.h" -#include "llmatrix4a.h" +#include "llmatrix3a.h" #include "lloctree.h" #include "lldarray.h" #include "llvolume.h" @@ -53,6 +53,7 @@ #include "llstl.h" #include "llsdserialize.h" #include "llvector4a.h" +#include "llmatrix4a.h" #define DEBUG_SILHOUETTE_BINORMALS 0 #define DEBUG_SILHOUETTE_NORMALS 0 // TomY: Use this to display normals using the silhouette @@ -161,7 +162,7 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co LLVector4a det; det.setAllDot3(edge1, pvec); - if (det.greaterEqual4(LLVector4a::getApproximatelyZero()).getComparisonMask() & 0x7) + if (det.greaterEqual(LLVector4a::getEpsilon()).getGatheredBits() & 0x7) { /* calculate distance from vert0 to ray origin */ LLVector4a tvec; @@ -171,8 +172,8 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co LLVector4a u; u.setAllDot3(tvec,pvec); - if ((u.greaterEqual4(LLVector4a::getZero()).getComparisonMask() & 0x7) && - (u.lessEqual4(det).getComparisonMask() & 0x7)) + if ((u.greaterEqual(LLVector4a::getZero()).getGatheredBits() & 0x7) && + (u.lessEqual(det).getGatheredBits() & 0x7)) { /* prepare to test V parameter */ LLVector4a qvec; @@ -188,8 +189,8 @@ BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, co LLVector4a sum_uv; sum_uv.setAdd(u, v); - S32 v_gequal = v.greaterEqual4(LLVector4a::getZero()).getComparisonMask() & 0x7; - S32 sum_lequal = sum_uv.lessEqual4(det).getComparisonMask() & 0x7; + S32 v_gequal = v.greaterEqual(LLVector4a::getZero()).getGatheredBits() & 0x7; + S32 sum_lequal = sum_uv.lessEqual(det).getGatheredBits() & 0x7; if (v_gequal && sum_lequal) { @@ -230,7 +231,7 @@ BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& v pvec.setCross3(dir, edge2); /* if determinant is near zero, ray lies in plane of triangle */ - F32 det = edge1.dot3(pvec); + F32 det = edge1.dot3(pvec).getF32(); if (det > -F_APPROXIMATELY_ZERO && det < F_APPROXIMATELY_ZERO) @@ -245,7 +246,7 @@ BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& v tvec.setSub(orig, vert0); /* calculate U parameter and test bounds */ - u = (tvec.dot3(pvec)) * inv_det; + u = (tvec.dot3(pvec).getF32()) * inv_det; if (u < 0.f || u > 1.f) { return FALSE; @@ -255,7 +256,7 @@ BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& v tvec.sub(edge1); /* calculate V parameter and test bounds */ - v = (dir.dot3(tvec)) * inv_det; + v = (dir.dot3(tvec).getF32()) * inv_det; if (v < 0.f || u + v > 1.f) { @@ -263,7 +264,7 @@ BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& v } /* calculate t, ray intersects triangle */ - t = (edge2.dot3(tvec)) * inv_det; + t = (edge2.dot3(tvec).getF32()) * inv_det; intersection_a = u; intersection_b = v; @@ -326,20 +327,20 @@ public: 
//stretch by triangles in node tri = *iter; - min.setMin(*tri->mV[0]); - min.setMin(*tri->mV[1]); - min.setMin(*tri->mV[2]); + min.setMin(min, *tri->mV[0]); + min.setMin(min, *tri->mV[1]); + min.setMin(min, *tri->mV[2]); - max.setMax(*tri->mV[0]); - max.setMax(*tri->mV[1]); - max.setMax(*tri->mV[2]); + max.setMax(max, *tri->mV[0]); + max.setMax(max, *tri->mV[1]); + max.setMax(max, *tri->mV[2]); } for (S32 i = 0; i < branch->getChildCount(); ++i) { //stretch by child extents LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); - min.setMin(child->mExtents[0]); - max.setMax(child->mExtents[1]); + min.setMin(min, child->mExtents[0]); + max.setMax(min, child->mExtents[1]); } } else if (branch->getChildCount() != 0) @@ -352,8 +353,8 @@ public: for (S32 i = 1; i < branch->getChildCount(); ++i) { //stretch by child extents child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); - min.setMin(child->mExtents[0]); - max.setMax(child->mExtents[1]); + min.setMin(min, child->mExtents[0]); + max.setMax(max, child->mExtents[1]); } } else @@ -2011,7 +2012,7 @@ const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolu if (this != &rhs) { init(); - LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8); + LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8*sizeof(F32)); mTexCoord = rhs.mTexCoord; } return *this; @@ -2055,8 +2056,8 @@ void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const { - const F32* lp = this->getPosition().getF32(); - const F32* rp = rhs.getPosition().getF32(); + const F32* lp = this->getPosition().getF32ptr(); + const F32* rp = rhs.getPosition().getF32ptr(); if (lp[0] != rp[0]) { @@ -2073,8 +2074,8 @@ bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)con return lp[2] < rp[2]; } - lp = getNormal().getF32(); - rp = rhs.getNormal().getF32(); + lp = getNormal().getF32ptr(); + rp = rhs.getNormal().getF32ptr(); if (lp[0] != rp[0]) { @@ -2101,23 +2102,23 @@ bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)con bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const { - return mData[POSITION].equal3(rhs.getPosition()) && - mData[NORMAL].equal3(rhs.getNormal()) && + return mData[POSITION].equals3(rhs.getPosition()) && + mData[NORMAL].equals3(rhs.getNormal()) && mTexCoord == rhs.mTexCoord; } bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const { bool retval = false; - if (rhs.mData[POSITION].equal3(mData[POSITION]) && rhs.mTexCoord == mTexCoord) + if (rhs.mData[POSITION].equals3(mData[POSITION]) && rhs.mTexCoord == mTexCoord) { if (angle_cutoff > 1.f) { - retval = (mData[NORMAL].equal3(rhs.mData[NORMAL])); + retval = (mData[NORMAL].equals3(rhs.mData[NORMAL])); } else { - F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]); + F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]).getF32(); retval = cur_angle > angle_cutoff; } } @@ -2331,8 +2332,8 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } else { - min.setMin(*pos_out); - max.setMax(*pos_out); + min.setMin(min, *pos_out); + max.setMax(max, *pos_out); } pos_out++; @@ -2944,7 +2945,7 @@ void sculpt_calc_mesh_resolution(U16 width, U16 height, U8 type, F32 detail, S32 ratio = (F32) width / (F32) height; - s = (S32)fsqrtf(((F32)vertices / ratio)); + s = (S32)(F32) sqrt(((F32)vertices / ratio)); s = 
llmax(s, 4); // no degenerate sizes, please t = vertices / s; @@ -5280,16 +5281,15 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) freeData(); - LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 12); + LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 12*sizeof(F32)); resizeVertices(src.mNumVertices); resizeIndices(src.mNumIndices); if (mNumVertices) { - S32 vert_size = mNumVertices*4; + S32 vert_size = mNumVertices*4*sizeof(F32); S32 tc_size = (mNumVertices*8+0xF) & ~0xF; - tc_size /= 4; LLVector4a::memcpyNonAliased16((F32*) mPositions, (F32*) src.mPositions, vert_size); LLVector4a::memcpyNonAliased16((F32*) mNormals, (F32*) src.mNormals, vert_size); @@ -5322,8 +5322,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) if (mNumIndices) { S32 idx_size = (mNumIndices*2+0xF) & ~0xF; - idx_size /= 4; - + LLVector4a::memcpyNonAliased16((F32*) mIndices, (F32*) src.mIndices, idx_size); } @@ -5388,9 +5387,9 @@ void LLVolumeFace::getVertexData(U16 index, LLVolumeFace::VertexData& cv) bool LLVolumeFace::VertexMapData::operator==(const LLVolumeFace::VertexData& rhs) const { - return getPosition().equal3(rhs.getPosition()) && + return getPosition().equals3(rhs.getPosition()) && mTexCoord == rhs.mTexCoord && - getNormal().equal3(rhs.getNormal()); + getNormal().equals3(rhs.getNormal()); } bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a, const LLVector3& b) const @@ -5423,7 +5422,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) getVertexData(index, cv); BOOL found = FALSE; - VertexMapData::PointMap::iterator point_iter = point_map.find(LLVector3(cv.getPosition().getF32())); + VertexMapData::PointMap::iterator point_iter = point_map.find(LLVector3(cv.getPosition().getF32ptr())); if (point_iter != point_map.end()) { //duplicate point might exist for (U32 j = 0; j < point_iter->second.size(); ++j) @@ -5455,7 +5454,7 @@ void LLVolumeFace::optimize(F32 angle_cutoff) } else { - point_map[LLVector3(d.getPosition().getF32())].push_back(d); + point_map[LLVector3(d.getPosition().getF32ptr())].push_back(d); } } } @@ -5491,12 +5490,12 @@ void LLVolumeFace::createOctree() tri->mIndex[2] = mIndices[i+2]; LLVector4a min = v0; - min.setMin(v1); - min.setMin(v2); + min.setMin(min, v1); + min.setMin(min, v2); LLVector4a max = v0; - max.setMax(v1); - max.setMax(v2); + max.setMax(max, v1); + max.setMax(max, v2); LLVector4a center; center.setAdd(min, max); @@ -5507,7 +5506,7 @@ void LLVolumeFace::createOctree() LLVector4a size; size.setSub(max,min); - tri->mRadius = size.length3() * 0.5f; + tri->mRadius = size.getLength3().getF32() * 0.5f; mOctree->insert(tri); } @@ -5655,12 +5654,13 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) if (gx == 0 && gy == 0) { - min = max = newVert.getPosition(); + min = newVert.getPosition(); + max = min; } else { - min.setMin(newVert.getPosition()); - max.setMax(newVert.getPosition()); + min.setMin(min, newVert.getPosition()); + max.setMax(max, newVert.getPosition()); } } } @@ -5795,7 +5795,8 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) if (i == 0) { - min = max = pos[i]; + max = pos[i]; + min = max; min_uv = max_uv = tc[i]; } else @@ -5848,8 +5849,8 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) for (S32 i = 0; i < num_vertices; i++) { - binorm[i].load4a((F32*) &binormal.mQ); - norm[i].load4a((F32*) &normal.mQ); + binorm[i].load4a(binormal.getF32ptr()); + norm[i].load4a(normal.getF32ptr()); } if 
(partial_build) @@ -6186,7 +6187,7 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con LLVector4a* dst = (LLVector4a*) ll_aligned_malloc_16(new_size); if (mPositions) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, old_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, old_size); ll_aligned_free_16(mPositions); } mPositions = dst; @@ -6195,7 +6196,7 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con dst = (LLVector4a*) ll_aligned_malloc_16(new_size); if (mNormals) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, old_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, old_size); ll_aligned_free_16(mNormals); } mNormals = dst; @@ -6209,7 +6210,7 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con LLVector2* dst = (LLVector2*) ll_aligned_malloc_16(new_size); if (mTexCoords) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, old_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, old_size); ll_aligned_free_16(mTexCoords); } } @@ -6268,7 +6269,7 @@ void LLVolumeFace::pushIndex(const U16& idx) U16* dst = (U16*) ll_aligned_malloc_16(new_size); if (mIndices) { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, old_size/4); + LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, old_size); ll_aligned_free_16(mIndices); } mIndices = dst; @@ -6319,9 +6320,9 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat if (mNumVertices > 0) { //copy old buffers - LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, mNumVertices*4); - LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, mNumVertices*4); - LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, mNumVertices*2); + LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, mNumVertices*4*sizeof(F32)); + LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, mNumVertices*4*sizeof(F32)); + LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, mNumVertices*2*sizeof(F32)); } //free old buffer space @@ -6382,7 +6383,7 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat if (mNumIndices > 0) { //copy old index buffer S32 old_size = (mNumIndices*2+0xF) & ~0xF; - LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, old_size/4); + LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, old_size); } //free old index buffer diff --git a/indra/llmath/tests/v2math_test.cpp b/indra/llmath/tests/v2math_test.cpp index 4660fcb955..c745b9989e 100644 --- a/indra/llmath/tests/v2math_test.cpp +++ b/indra/llmath/tests/v2math_test.cpp @@ -91,7 +91,7 @@ namespace tut F32 x = 2.2345f, y = 3.5678f ; LLVector2 vec2(x,y); ensure("magVecSquared:Fail ", is_approx_equal(vec2.magVecSquared(), (x*x + y*y))); - ensure("magVec:Fail ", is_approx_equal(vec2.magVec(), fsqrtf(x*x + y*y))); + ensure("magVec:Fail ", is_approx_equal(vec2.magVec(), (F32) sqrt(x*x + y*y))); } template<> template<> @@ -413,7 +413,7 @@ namespace tut ensure_equals("dist_vec_squared values are not equal",val2, val1); val1 = dist_vec(vec2, vec3); - val2 = fsqrtf((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2)); + val2 = (F32) sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2)); ensure_equals("dist_vec values are not equal",val2, val1); } @@ -437,7 +437,7 @@ namespace tut LLVector2 vec2(x1, y1); F32 vecMag = vec2.normVec(); - F32 mag = fsqrtf(x1*x1 + 
y1*y1); + F32 mag = (F32) sqrt(x1*x1 + y1*y1); F32 oomag = 1.f / mag; val1 = x1 * oomag; diff --git a/indra/llmath/tests/v3color_test.cpp b/indra/llmath/tests/v3color_test.cpp index 316b6e392f..0efba8e9f3 100644 --- a/indra/llmath/tests/v3color_test.cpp +++ b/indra/llmath/tests/v3color_test.cpp @@ -99,7 +99,7 @@ namespace tut F32 r = 2.3436212f, g = 1231.f, b = 4.7849321232f; LLColor3 llcolor3(r,g,b); ensure("magVecSquared:Fail ", is_approx_equal(llcolor3.magVecSquared(), (r*r + g*g + b*b))); - ensure("magVec:Fail ", is_approx_equal(llcolor3.magVec(), fsqrtf(r*r + g*g + b*b))); + ensure("magVec:Fail ", is_approx_equal(llcolor3.magVec(), (F32) sqrt(r*r + g*g + b*b))); } template<> template<> @@ -109,7 +109,7 @@ namespace tut F32 val1, val2,val3; LLColor3 llcolor3(r,g,b); F32 vecMag = llcolor3.normVec(); - F32 mag = fsqrtf(r*r + g*g + b*b); + F32 mag = (F32) sqrt(r*r + g*g + b*b); F32 oomag = 1.f / mag; val1 = r * oomag; val2 = g * oomag; @@ -292,7 +292,7 @@ namespace tut F32 r1 =1.f, g1 = 2.f,b1 = 1.2f, r2 = -2.3f, g2 = 1.11f, b2 = 1234.234f; LLColor3 llcolor3(r1,g1,b1),llcolor3a(r2,g2,b2); F32 val = distVec(llcolor3,llcolor3a); - ensure("distVec failed ", is_approx_equal(fsqrtf((r1-r2)*(r1-r2) + (g1-g2)*(g1-g2) + (b1-b2)*(b1-b2)) ,val)); + ensure("distVec failed ", is_approx_equal((F32) sqrt((r1-r2)*(r1-r2) + (g1-g2)*(g1-g2) + (b1-b2)*(b1-b2)) ,val)); F32 val1 = distVec_squared(llcolor3,llcolor3a); ensure("distVec_squared failed ", is_approx_equal(((r1-r2)*(r1-r2) + (g1-g2)*(g1-g2) + (b1-b2)*(b1-b2)) ,val1)); diff --git a/indra/llmath/tests/v3dmath_test.cpp b/indra/llmath/tests/v3dmath_test.cpp index e7c949186c..894b6200f5 100644 --- a/indra/llmath/tests/v3dmath_test.cpp +++ b/indra/llmath/tests/v3dmath_test.cpp @@ -409,7 +409,7 @@ namespace tut LLVector3d vec3D(x,y,z); F64 res = (x*x + y*y + z*z) - vec3D.magVecSquared(); ensure("1:magVecSquared:Fail ", ((-F_APPROXIMATELY_ZERO <= res)&& (res <=F_APPROXIMATELY_ZERO))); - res = fsqrtf(x*x + y*y + z*z) - vec3D.magVec(); + res = (F32) sqrt(x*x + y*y + z*z) - vec3D.magVec(); ensure("2:magVec: Fail ", ((-F_APPROXIMATELY_ZERO <= res)&& (res <=F_APPROXIMATELY_ZERO))); } diff --git a/indra/llmath/tests/v3math_test.cpp b/indra/llmath/tests/v3math_test.cpp index 7faf076243..d5c8dd2f9c 100644 --- a/indra/llmath/tests/v3math_test.cpp +++ b/indra/llmath/tests/v3math_test.cpp @@ -155,7 +155,7 @@ namespace tut F32 x = 2.32f, y = 1.212f, z = -.12f; LLVector3 vec3(x,y,z); ensure("1:magVecSquared:Fail ", is_approx_equal(vec3.magVecSquared(), (x*x + y*y + z*z))); - ensure("2:magVec:Fail ", is_approx_equal(vec3.magVec(), fsqrtf(x*x + y*y + z*z))); + ensure("2:magVec:Fail ", is_approx_equal(vec3.magVec(), (F32) sqrt(x*x + y*y + z*z))); } template<> template<> @@ -515,7 +515,7 @@ namespace tut F32 val1,val2; LLVector3 vec3(x1,y1,z1),vec3a(x2,y2,z2); val1 = dist_vec(vec3,vec3a); - val2 = fsqrtf((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2) + (z1 - z2)* (z1 -z2)); + val2 = (F32) sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2) + (z1 - z2)* (z1 -z2)); ensure_equals("1:dist_vec: Fail ",val2, val1); val1 = dist_vec_squared(vec3,vec3a); val2 =((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2) + (z1 - z2)* (z1 -z2)); diff --git a/indra/llmath/tests/v4color_test.cpp b/indra/llmath/tests/v4color_test.cpp index 33921e0f0f..636446027a 100644 --- a/indra/llmath/tests/v4color_test.cpp +++ b/indra/llmath/tests/v4color_test.cpp @@ -161,7 +161,7 @@ namespace tut F32 r = 0x20, g = 0xFFFF, b = 0xFF; LLColor4 llcolor4(r,g,b); ensure("magVecSquared:Fail ", 
is_approx_equal(llcolor4.magVecSquared(), (r*r + g*g + b*b))); - ensure("magVec:Fail ", is_approx_equal(llcolor4.magVec(), fsqrtf(r*r + g*g + b*b))); + ensure("magVec:Fail ", is_approx_equal(llcolor4.magVec(), (F32) sqrt(r*r + g*g + b*b))); } template<> template<> @@ -170,7 +170,7 @@ namespace tut F32 r = 0x20, g = 0xFFFF, b = 0xFF; LLColor4 llcolor4(r,g,b); F32 vecMag = llcolor4.normVec(); - F32 mag = fsqrtf(r*r + g*g + b*b); + F32 mag = (F32) sqrt(r*r + g*g + b*b); F32 oomag = 1.f / mag; F32 val1 = r * oomag, val2 = g * oomag, val3 = b * oomag; ensure("1:normVec failed ", (is_approx_equal(val1, llcolor4.mV[0]) && is_approx_equal(val2, llcolor4.mV[1]) && is_approx_equal(val3, llcolor4.mV[2]) && is_approx_equal(vecMag, mag))); diff --git a/indra/llmath/tests/v4coloru_test.cpp b/indra/llmath/tests/v4coloru_test.cpp index 9f71cfc8cc..b3dbfece34 100644 --- a/indra/llmath/tests/v4coloru_test.cpp +++ b/indra/llmath/tests/v4coloru_test.cpp @@ -141,7 +141,7 @@ namespace tut U8 r = 0x12, g = 0xFF, b = 0xAF; LLColor4U llcolor4u(r,g,b); ensure("magVecSquared:Fail ", is_approx_equal(llcolor4u.magVecSquared(), (F32)(r*r + g*g + b*b))); - ensure("magVec:Fail ", is_approx_equal(llcolor4u.magVec(), fsqrtf(r*r + g*g + b*b))); + ensure("magVec:Fail ", is_approx_equal(llcolor4u.magVec(), (F32) sqrt((F32) (r*r + g*g + b*b)))); } template<> template<> diff --git a/indra/llmath/tests/v4math_test.cpp b/indra/llmath/tests/v4math_test.cpp index fe051c27e9..e919c90efa 100644 --- a/indra/llmath/tests/v4math_test.cpp +++ b/indra/llmath/tests/v4math_test.cpp @@ -102,7 +102,7 @@ namespace tut { F32 x = 10.f, y = -2.3f, z = -.023f; LLVector4 vec4(x,y,z); - ensure("magVec:Fail ", is_approx_equal(vec4.magVec(), fsqrtf(x*x + y*y + z*z))); + ensure("magVec:Fail ", is_approx_equal(vec4.magVec(), (F32) sqrt(x*x + y*y + z*z))); ensure("magVecSquared:Fail ", is_approx_equal(vec4.magVecSquared(), (x*x + y*y + z*z))); } @@ -343,7 +343,7 @@ namespace tut F32 val1,val2; LLVector4 vec4(x1,y1,z1),vec4a(x2,y2,z2); val1 = dist_vec(vec4,vec4a); - val2 = fsqrtf((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2) + (z1 - z2)* (z1 -z2)); + val2 = (F32) sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2) + (z1 - z2)* (z1 -z2)); ensure_equals("dist_vec: Fail ",val2, val1); val1 = dist_vec_squared(vec4,vec4a); val2 =((x1 - x2)*(x1 - x2) + (y1 - y2)* (y1 - y2) + (z1 - z2)* (z1 -z2)); diff --git a/indra/llmath/v2math.cpp b/indra/llmath/v2math.cpp index 220336e0c2..2603127f75 100644 --- a/indra/llmath/v2math.cpp +++ b/indra/llmath/v2math.cpp @@ -92,7 +92,7 @@ F32 dist_vec(const LLVector2 &a, const LLVector2 &b) { F32 x = a.mV[0] - b.mV[0]; F32 y = a.mV[1] - b.mV[1]; - return fsqrtf( x*x + y*y ); + return (F32) sqrt( x*x + y*y ); } F32 dist_vec_squared(const LLVector2 &a, const LLVector2 &b) diff --git a/indra/llmath/v2math.h b/indra/llmath/v2math.h index ae26c85ce4..35fd1b6048 100644 --- a/indra/llmath/v2math.h +++ b/indra/llmath/v2math.h @@ -225,7 +225,7 @@ inline void LLVector2::setVec(const F32 *vec) inline F32 LLVector2::length(void) const { - return fsqrtf(mV[0]*mV[0] + mV[1]*mV[1]); + return (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1]); } inline F32 LLVector2::lengthSquared(void) const @@ -235,7 +235,7 @@ inline F32 LLVector2::lengthSquared(void) const inline F32 LLVector2::normalize(void) { - F32 mag = fsqrtf(mV[0]*mV[0] + mV[1]*mV[1]); + F32 mag = (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1]); F32 oomag; if (mag > FP_MAG_THRESHOLD) @@ -262,7 +262,7 @@ inline bool LLVector2::isFinite() const // deprecated inline F32 LLVector2::magVec(void) const { - return 
fsqrtf(mV[0]*mV[0] + mV[1]*mV[1]); + return (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1]); } // deprecated @@ -274,7 +274,7 @@ inline F32 LLVector2::magVecSquared(void) const // deprecated inline F32 LLVector2::normVec(void) { - F32 mag = fsqrtf(mV[0]*mV[0] + mV[1]*mV[1]); + F32 mag = (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1]); F32 oomag; if (mag > FP_MAG_THRESHOLD) diff --git a/indra/llmath/v3color.h b/indra/llmath/v3color.h index 1915d80502..95a3de8b62 100644 --- a/indra/llmath/v3color.h +++ b/indra/llmath/v3color.h @@ -284,7 +284,7 @@ inline F32 LLColor3::brightness(void) const inline F32 LLColor3::length(void) const { - return fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + return (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); } inline F32 LLColor3::lengthSquared(void) const @@ -294,7 +294,7 @@ inline F32 LLColor3::lengthSquared(void) const inline F32 LLColor3::normalize(void) { - F32 mag = fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + F32 mag = (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); F32 oomag; if (mag) @@ -310,7 +310,7 @@ inline F32 LLColor3::normalize(void) // deprecated inline F32 LLColor3::magVec(void) const { - return fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + return (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); } // deprecated @@ -322,7 +322,7 @@ inline F32 LLColor3::magVecSquared(void) const // deprecated inline F32 LLColor3::normVec(void) { - F32 mag = fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + F32 mag = (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); F32 oomag; if (mag) @@ -444,7 +444,7 @@ inline F32 distVec(const LLColor3 &a, const LLColor3 &b) F32 x = a.mV[0] - b.mV[0]; F32 y = a.mV[1] - b.mV[1]; F32 z = a.mV[2] - b.mV[2]; - return fsqrtf( x*x + y*y + z*z ); + return (F32) sqrt( x*x + y*y + z*z ); } inline F32 distVec_squared(const LLColor3 &a, const LLColor3 &b) diff --git a/indra/llmath/v3dmath.h b/indra/llmath/v3dmath.h index 6ab31e8a41..ab253de064 100644 --- a/indra/llmath/v3dmath.h +++ b/indra/llmath/v3dmath.h @@ -240,7 +240,7 @@ inline const LLVector3d& LLVector3d::setVec(const F64 *vec) inline F64 LLVector3d::normVec(void) { - F64 mag = fsqrtf(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); + F64 mag = (F32) sqrt(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); F64 oomag; if (mag > FP_MAG_THRESHOLD) @@ -262,7 +262,7 @@ inline F64 LLVector3d::normVec(void) inline F64 LLVector3d::normalize(void) { - F64 mag = fsqrtf(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); + F64 mag = (F32) sqrt(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); F64 oomag; if (mag > FP_MAG_THRESHOLD) @@ -286,7 +286,7 @@ inline F64 LLVector3d::normalize(void) inline F64 LLVector3d::magVec(void) const { - return fsqrtf(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); + return (F32) sqrt(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); } inline F64 LLVector3d::magVecSquared(void) const @@ -296,7 +296,7 @@ inline F64 LLVector3d::magVecSquared(void) const inline F64 LLVector3d::length(void) const { - return fsqrtf(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); + return (F32) sqrt(mdV[0]*mdV[0] + mdV[1]*mdV[1] + mdV[2]*mdV[2]); } inline F64 LLVector3d::lengthSquared(void) const @@ -406,7 +406,7 @@ inline F64 dist_vec(const LLVector3d &a, const LLVector3d &b) F64 x = a.mdV[0] - b.mdV[0]; F64 y = a.mdV[1] - b.mdV[1]; F64 z = a.mdV[2] - b.mdV[2]; - return fsqrtf( x*x + y*y + z*z ); + return (F32) sqrt( x*x + y*y + z*z ); } inline F64 dist_vec_squared(const LLVector3d &a, const LLVector3d &b) diff --git a/indra/llmath/v3math.h b/indra/llmath/v3math.h index 
75c860a91e..5d483a8753 100644 --- a/indra/llmath/v3math.h +++ b/indra/llmath/v3math.h @@ -282,7 +282,7 @@ inline void LLVector3::setVec(const F32 *vec) inline F32 LLVector3::normalize(void) { - F32 mag = fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + F32 mag = (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); F32 oomag; if (mag > FP_MAG_THRESHOLD) @@ -305,7 +305,7 @@ inline F32 LLVector3::normalize(void) // deprecated inline F32 LLVector3::normVec(void) { - F32 mag = fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + F32 mag = (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); F32 oomag; if (mag > FP_MAG_THRESHOLD) @@ -329,7 +329,7 @@ inline F32 LLVector3::normVec(void) inline F32 LLVector3::length(void) const { - return fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + return (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); } inline F32 LLVector3::lengthSquared(void) const @@ -339,7 +339,7 @@ inline F32 LLVector3::lengthSquared(void) const inline F32 LLVector3::magVec(void) const { - return fsqrtf(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); + return (F32) sqrt(mV[0]*mV[0] + mV[1]*mV[1] + mV[2]*mV[2]); } inline F32 LLVector3::magVecSquared(void) const @@ -473,7 +473,7 @@ inline F32 dist_vec(const LLVector3 &a, const LLVector3 &b) F32 x = a.mV[0] - b.mV[0]; F32 y = a.mV[1] - b.mV[1]; F32 z = a.mV[2] - b.mV[2]; - return fsqrtf( x*x + y*y + z*z ); + return (F32) sqrt( x*x + y*y + z*z ); } inline F32 dist_vec_squared(const LLVector3 &a, const LLVector3 &b) diff --git a/indra/llmath/v4color.h b/indra/llmath/v4color.h index 6b63b976b0..dd92e1cc63 100644 --- a/indra/llmath/v4color.h +++ b/indra/llmath/v4color.h @@ -392,7 +392,7 @@ inline const LLColor4& LLColor4::setAlpha(F32 a) inline F32 LLColor4::length(void) const { - return fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + return (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); } inline F32 LLColor4::lengthSquared(void) const @@ -402,7 +402,7 @@ inline F32 LLColor4::lengthSquared(void) const inline F32 LLColor4::normalize(void) { - F32 mag = fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + F32 mag = (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); F32 oomag; if (mag) @@ -418,7 +418,7 @@ inline F32 LLColor4::normalize(void) // deprecated inline F32 LLColor4::magVec(void) const { - return fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + return (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); } // deprecated @@ -430,7 +430,7 @@ inline F32 LLColor4::magVecSquared(void) const // deprecated inline F32 LLColor4::normVec(void) { - F32 mag = fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + F32 mag = (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); F32 oomag; if (mag) diff --git a/indra/llmath/v4coloru.h b/indra/llmath/v4coloru.h index 4ec5a345eb..08245403a1 100644 --- a/indra/llmath/v4coloru.h +++ b/indra/llmath/v4coloru.h @@ -300,7 +300,7 @@ inline const LLColor4U& LLColor4U::setAlpha(U8 a) inline F32 LLColor4U::length(void) const { - return fsqrtf( ((F32)mV[VX]) * mV[VX] + ((F32)mV[VY]) * mV[VY] + ((F32)mV[VZ]) * mV[VZ] ); + return (F32) sqrt( ((F32)mV[VX]) * mV[VX] + ((F32)mV[VY]) * mV[VY] + ((F32)mV[VZ]) * mV[VZ] ); } inline F32 LLColor4U::lengthSquared(void) const @@ -311,7 +311,7 @@ inline F32 LLColor4U::lengthSquared(void) const // deprecated inline F32 LLColor4U::magVec(void) const { - return fsqrtf( ((F32)mV[VX]) * mV[VX] + ((F32)mV[VY]) * mV[VY] + ((F32)mV[VZ]) * mV[VZ] ); + return (F32) sqrt( ((F32)mV[VX]) * mV[VX] + ((F32)mV[VY]) * mV[VY] + ((F32)mV[VZ]) * 
mV[VZ] ); } // deprecated diff --git a/indra/llmath/v4math.h b/indra/llmath/v4math.h index 4c82e6b629..72a477ed20 100644 --- a/indra/llmath/v4math.h +++ b/indra/llmath/v4math.h @@ -321,7 +321,7 @@ inline void LLVector4::setVec(const F32 *vec) inline F32 LLVector4::length(void) const { - return fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + return (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); } inline F32 LLVector4::lengthSquared(void) const @@ -331,7 +331,7 @@ inline F32 LLVector4::lengthSquared(void) const inline F32 LLVector4::magVec(void) const { - return fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + return (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); } inline F32 LLVector4::magVecSquared(void) const @@ -463,7 +463,7 @@ inline LLVector4 lerp(const LLVector4 &a, const LLVector4 &b, F32 u) inline F32 LLVector4::normalize(void) { - F32 mag = fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + F32 mag = (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); F32 oomag; if (mag > FP_MAG_THRESHOLD) @@ -486,7 +486,7 @@ inline F32 LLVector4::normalize(void) // deprecated inline F32 LLVector4::normVec(void) { - F32 mag = fsqrtf(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); + F32 mag = (F32) sqrt(mV[VX]*mV[VX] + mV[VY]*mV[VY] + mV[VZ]*mV[VZ]); F32 oomag; if (mag > FP_MAG_THRESHOLD) -- cgit v1.2.3 From 45df2d70f018512055ed15fa52c9efd3d8e833e8 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 19 Aug 2010 13:08:57 -0500 Subject: More line endings. --- indra/llmath/CMakeLists.txt | 256 +++--- indra/llmath/llmath.h | 1018 +++++++++++----------- indra/llmath/llquantize.h | 316 +++---- indra/llmath/llquaternion.cpp | 1922 ++++++++++++++++++++--------------------- indra/llmath/llquaternion.h | 1188 ++++++++++++------------- 5 files changed, 2350 insertions(+), 2350 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/CMakeLists.txt b/indra/llmath/CMakeLists.txt index 8d85765eb8..9dadad7dd3 100644 --- a/indra/llmath/CMakeLists.txt +++ b/indra/llmath/CMakeLists.txt @@ -1,128 +1,128 @@ -# -*- cmake -*- - -project(llmath) - -include(00-Common) -include(LLCommon) - -include_directories( - ${LLCOMMON_INCLUDE_DIRS} - ) - -set(llmath_SOURCE_FILES - llbbox.cpp - llbboxlocal.cpp - llcamera.cpp - llcoordframe.cpp - llline.cpp - llmatrix3a.cpp - llmodularmath.cpp - llperlin.cpp - llquaternion.cpp - llrect.cpp - llsphere.cpp - llvector4a.cpp - llvolume.cpp - llvolumemgr.cpp - llvolumeoctree.cpp - llsdutil_math.cpp - m3math.cpp - m4math.cpp - raytrace.cpp - v2math.cpp - v3color.cpp - v3dmath.cpp - v3math.cpp - v4color.cpp - v4coloru.cpp - v4math.cpp - xform.cpp - ) - -set(llmath_HEADER_FILES - CMakeLists.txt - - camera.h - coordframe.h - llbbox.h - llbboxlocal.h - llcamera.h - llcoord.h - llcoordframe.h - llinterp.h - llline.h - llmath.h - llmatrix3a.h - llmatrix3a.inl - llmodularmath.h - lloctree.h - llperlin.h - llplane.h - llquantize.h - llquaternion.h - llquaternion2.h - llquaternion2.inl - llrect.h - llsimdmath.h - llsimdtypes.h - llsimdtypes.inl - llsphere.h - lltreenode.h - llvector4a.h - llvector4a.inl - llvector4logical.h - llv4math.h - llv4matrix3.h - llv4matrix4.h - llv4vector3.h - llvolume.h - llvolumemgr.h - llvolumeoctree.h - llsdutil_math.h - m3math.h - m4math.h - raytrace.h - v2math.h - v3color.h - v3dmath.h - v3math.h - v4color.h - v4coloru.h - v4math.h - xform.h - ) - -set_source_files_properties(${llmath_HEADER_FILES} - PROPERTIES HEADER_FILE_ONLY TRUE) - -list(APPEND llmath_SOURCE_FILES ${llmath_HEADER_FILES}) - -add_library 
(llmath ${llmath_SOURCE_FILES}) - -# Add tests -if (LL_TESTS) - include(LLAddBuildTest) - # UNIT TESTS - SET(llmath_TEST_SOURCE_FILES - llbboxlocal.cpp - llmodularmath.cpp - llrect.cpp - v2math.cpp - v3color.cpp - v4color.cpp - v4coloru.cpp - ) - LL_ADD_PROJECT_UNIT_TESTS(llmath "${llmath_TEST_SOURCE_FILES}") - - # INTEGRATION TESTS - set(test_libs llmath llcommon ${LLCOMMON_LIBRARIES} ${WINDOWS_LIBRARIES}) - # TODO: Some of these need refactoring to be proper Unit tests rather than Integration tests. - LL_ADD_INTEGRATION_TEST(llbbox llbbox.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(llquaternion llquaternion.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(mathmisc "" "${test_libs}") - LL_ADD_INTEGRATION_TEST(m3math "" "${test_libs}") - LL_ADD_INTEGRATION_TEST(v3dmath v3dmath.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(v3math v3math.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(v4math v4math.cpp "${test_libs}") - LL_ADD_INTEGRATION_TEST(xform xform.cpp "${test_libs}") -endif (LL_TESTS) +# -*- cmake -*- + +project(llmath) + +include(00-Common) +include(LLCommon) + +include_directories( + ${LLCOMMON_INCLUDE_DIRS} + ) + +set(llmath_SOURCE_FILES + llbbox.cpp + llbboxlocal.cpp + llcamera.cpp + llcoordframe.cpp + llline.cpp + llmatrix3a.cpp + llmodularmath.cpp + llperlin.cpp + llquaternion.cpp + llrect.cpp + llsphere.cpp + llvector4a.cpp + llvolume.cpp + llvolumemgr.cpp + llvolumeoctree.cpp + llsdutil_math.cpp + m3math.cpp + m4math.cpp + raytrace.cpp + v2math.cpp + v3color.cpp + v3dmath.cpp + v3math.cpp + v4color.cpp + v4coloru.cpp + v4math.cpp + xform.cpp + ) + +set(llmath_HEADER_FILES + CMakeLists.txt + + camera.h + coordframe.h + llbbox.h + llbboxlocal.h + llcamera.h + llcoord.h + llcoordframe.h + llinterp.h + llline.h + llmath.h + llmatrix3a.h + llmatrix3a.inl + llmodularmath.h + lloctree.h + llperlin.h + llplane.h + llquantize.h + llquaternion.h + llquaternion2.h + llquaternion2.inl + llrect.h + llsimdmath.h + llsimdtypes.h + llsimdtypes.inl + llsphere.h + lltreenode.h + llvector4a.h + llvector4a.inl + llvector4logical.h + llv4math.h + llv4matrix3.h + llv4matrix4.h + llv4vector3.h + llvolume.h + llvolumemgr.h + llvolumeoctree.h + llsdutil_math.h + m3math.h + m4math.h + raytrace.h + v2math.h + v3color.h + v3dmath.h + v3math.h + v4color.h + v4coloru.h + v4math.h + xform.h + ) + +set_source_files_properties(${llmath_HEADER_FILES} + PROPERTIES HEADER_FILE_ONLY TRUE) + +list(APPEND llmath_SOURCE_FILES ${llmath_HEADER_FILES}) + +add_library (llmath ${llmath_SOURCE_FILES}) + +# Add tests +if (LL_TESTS) + include(LLAddBuildTest) + # UNIT TESTS + SET(llmath_TEST_SOURCE_FILES + llbboxlocal.cpp + llmodularmath.cpp + llrect.cpp + v2math.cpp + v3color.cpp + v4color.cpp + v4coloru.cpp + ) + LL_ADD_PROJECT_UNIT_TESTS(llmath "${llmath_TEST_SOURCE_FILES}") + + # INTEGRATION TESTS + set(test_libs llmath llcommon ${LLCOMMON_LIBRARIES} ${WINDOWS_LIBRARIES}) + # TODO: Some of these need refactoring to be proper Unit tests rather than Integration tests. 
+ LL_ADD_INTEGRATION_TEST(llbbox llbbox.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(llquaternion llquaternion.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(mathmisc "" "${test_libs}") + LL_ADD_INTEGRATION_TEST(m3math "" "${test_libs}") + LL_ADD_INTEGRATION_TEST(v3dmath v3dmath.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(v3math v3math.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(v4math v4math.cpp "${test_libs}") + LL_ADD_INTEGRATION_TEST(xform xform.cpp "${test_libs}") +endif (LL_TESTS) diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h index 742bbc4751..e572381b1a 100644 --- a/indra/llmath/llmath.h +++ b/indra/llmath/llmath.h @@ -1,509 +1,509 @@ -/** - * @file llmath.h - * @brief Useful math constants and macros. - * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#ifndef LLMATH_H -#define LLMATH_H - -#include -#include -#include "lldefs.h" -//#include "llstl.h" // *TODO: Remove when LLString is gone -//#include "llstring.h" // *TODO: Remove when LLString is gone -// lltut.h uses is_approx_equal_fraction(). This was moved to its own header -// file in llcommon so we can use lltut.h for llcommon tests without making -// llcommon depend on llmath. -#include "is_approx_equal_fraction.h" - -// work around for Windows & older gcc non-standard function names. -#if LL_WINDOWS -#include -#define llisnan(val) _isnan(val) -#define llfinite(val) _finite(val) -#elif (LL_LINUX && __GNUC__ <= 2) -#define llisnan(val) isnan(val) -#define llfinite(val) isfinite(val) -#elif LL_SOLARIS -#define llisnan(val) isnan(val) -#define llfinite(val) (val <= std::numeric_limits::max()) -#else -#define llisnan(val) std::isnan(val) -#define llfinite(val) std::isfinite(val) -#endif - -// Single Precision Floating Point Routines -// (There used to be more defined here, but they appeared to be redundant and -// were breaking some other includes. 
Removed by Falcon, reviewed by Andrew, 11/25/09) -/*#ifndef tanf -#define tanf(x) ((F32)tan((F64)(x))) -#endif*/ - -const F32 GRAVITY = -9.8f; - -// mathematical constants -const F32 F_PI = 3.1415926535897932384626433832795f; -const F32 F_TWO_PI = 6.283185307179586476925286766559f; -const F32 F_PI_BY_TWO = 1.5707963267948966192313216916398f; -const F32 F_SQRT_TWO_PI = 2.506628274631000502415765284811f; -const F32 F_E = 2.71828182845904523536f; -const F32 F_SQRT2 = 1.4142135623730950488016887242097f; -const F32 F_SQRT3 = 1.73205080756888288657986402541f; -const F32 OO_SQRT2 = 0.7071067811865475244008443621049f; -const F32 DEG_TO_RAD = 0.017453292519943295769236907684886f; -const F32 RAD_TO_DEG = 57.295779513082320876798154814105f; -const F32 F_APPROXIMATELY_ZERO = 0.00001f; -const F32 F_LN2 = 0.69314718056f; -const F32 OO_LN2 = 1.4426950408889634073599246810019f; - -const F32 F_ALMOST_ZERO = 0.0001f; -const F32 F_ALMOST_ONE = 1.0f - F_ALMOST_ZERO; - -// BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above? -const F32 FP_MAG_THRESHOLD = 0.0000001f; - -// TODO: Replace with logic like is_approx_equal -inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); } - -// These functions work by interpreting sign+exp+mantissa as an unsigned -// integer. -// For example: -// x = 1 00000010 00000000000000000000000 -// y = 1 00000001 11111111111111111111111 -// -// interpreted as ints = -// x = 10000001000000000000000000000000 -// y = 10000000111111111111111111111111 -// which is clearly a different of 1 in the least significant bit -// Values with the same exponent can be trivially shown to work. -// -// WARNING: Denormals of opposite sign do not work -// x = 1 00000000 00000000000000000000001 -// y = 0 00000000 00000000000000000000001 -// Although these values differ by 2 in the LSB, the sign bit makes -// the int comparison fail. -// -// WARNING: NaNs can compare equal -// There is no special treatment of exceptional values like NaNs -// -// WARNING: Infinity is comparable with F32_MAX and negative -// infinity is comparable with F32_MIN - -inline BOOL is_approx_equal(F32 x, F32 y) -{ - const S32 COMPARE_MANTISSA_UP_TO_BIT = 0x02; - return (std::abs((S32) ((U32&)x - (U32&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); -} - -inline BOOL is_approx_equal(F64 x, F64 y) -{ - const S64 COMPARE_MANTISSA_UP_TO_BIT = 0x02; - return (std::abs((S32) ((U64&)x - (U64&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); -} - -inline S32 llabs(const S32 a) -{ - return S32(std::labs(a)); -} - -inline F32 llabs(const F32 a) -{ - return F32(std::fabs(a)); -} - -inline F64 llabs(const F64 a) -{ - return F64(std::fabs(a)); -} - -inline S32 lltrunc( F32 f ) -{ -#if LL_WINDOWS && !defined( __INTEL_COMPILER ) - // Avoids changing the floating point control word. - // Add or subtract 0.5 - epsilon and then round - const static U32 zpfp[] = { 0xBEFFFFFF, 0x3EFFFFFF }; - S32 result; - __asm { - fld f - mov eax, f - shr eax, 29 - and eax, 4 - fadd dword ptr [zpfp + eax] - fistp result - } - return result; -#else - return (S32)f; -#endif -} - -inline S32 lltrunc( F64 f ) -{ - return (S32)f; -} - -inline S32 llfloor( F32 f ) -{ -#if LL_WINDOWS && !defined( __INTEL_COMPILER ) - // Avoids changing the floating point control word. - // Accurate (unlike Stereopsis version) for all values between S32_MIN and S32_MAX and slightly faster than Stereopsis version. 
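// [Editor's note, not part of the original patch] Making the "epsilon" in the
// comment below concrete: 0xBEFFFFFF is the bit pattern of the negative of the
// largest F32 strictly below 0.5f (about -0.49999997f). With the FPU left in
// its default round-to-nearest mode, the "add the constant, then fistp"
// sequence used here reproduces floor(), for example:
//
//    2.5f + (-0.49999997f) =  2.00000003f  -> fistp ->  2  == floor( 2.5f)
//   -2.5f + (-0.49999997f) = -2.99999997f  -> fistp -> -3  == floor(-2.5f)
//    0.5f + (-0.49999997f) =  0.00000003f  -> fistp ->  0  == floor( 0.5f)
//
// The traced values are illustrative only; they assume nothing beyond the
// constant and the rounding mode already relied on by this code.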
- // Add -(0.5 - epsilon) and then round - const U32 zpfp = 0xBEFFFFFF; - S32 result; - __asm { - fld f - fadd dword ptr [zpfp] - fistp result - } - return result; -#else - return (S32)floor(f); -#endif -} - - -inline S32 llceil( F32 f ) -{ - // This could probably be optimized, but this works. - return (S32)ceil(f); -} - - -#ifndef BOGUS_ROUND -// Use this round. Does an arithmetic round (0.5 always rounds up) -inline S32 llround(const F32 val) -{ - return llfloor(val + 0.5f); -} - -#else // BOGUS_ROUND -// Old llround implementation - does banker's round (toward nearest even in the case of a 0.5. -// Not using this because we don't have a consistent implementation on both platforms, use -// llfloor(val + 0.5f), which is consistent on all platforms. -inline S32 llround(const F32 val) -{ - #if LL_WINDOWS - // Note: assumes that the floating point control word is set to rounding mode (the default) - S32 ret_val; - _asm fld val - _asm fistp ret_val; - return ret_val; - #elif LL_LINUX - // Note: assumes that the floating point control word is set - // to rounding mode (the default) - S32 ret_val; - __asm__ __volatile__( "flds %1 \n\t" - "fistpl %0 \n\t" - : "=m" (ret_val) - : "m" (val) ); - return ret_val; - #else - return llfloor(val + 0.5f); - #endif -} - -// A fast arithmentic round on intel, from Laurent de Soras http://ldesoras.free.fr -inline int round_int(double x) -{ - const float round_to_nearest = 0.5f; - int i; - __asm - { - fld x - fadd st, st (0) - fadd round_to_nearest - fistp i - sar i, 1 - } - return (i); -} -#endif // BOGUS_ROUND - -inline F32 llround( F32 val, F32 nearest ) -{ - return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest; -} - -inline F64 llround( F64 val, F64 nearest ) -{ - return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest; -} - -// these provide minimum peak error -// -// avg error = -0.013049 -// peak error = -31.4 dB -// RMS error = -28.1 dB - -const F32 FAST_MAG_ALPHA = 0.960433870103f; -const F32 FAST_MAG_BETA = 0.397824734759f; - -// these provide minimum RMS error -// -// avg error = 0.000003 -// peak error = -32.6 dB -// RMS error = -25.7 dB -// -//const F32 FAST_MAG_ALPHA = 0.948059448969f; -//const F32 FAST_MAG_BETA = 0.392699081699f; - -inline F32 fastMagnitude(F32 a, F32 b) -{ - a = (a > 0) ? a : -a; - b = (b > 0) ? 
b : -b; - return(FAST_MAG_ALPHA * llmax(a,b) + FAST_MAG_BETA * llmin(a,b)); -} - - - -//////////////////// -// -// Fast F32/S32 conversions -// -// Culled from www.stereopsis.com/FPU.html - -const F64 LL_DOUBLE_TO_FIX_MAGIC = 68719476736.0*1.5; //2^36 * 1.5, (52-_shiftamt=36) uses limited precisicion to floor -const S32 LL_SHIFT_AMOUNT = 16; //16.16 fixed point representation, - -// Endian dependent code -#ifdef LL_LITTLE_ENDIAN - #define LL_EXP_INDEX 1 - #define LL_MAN_INDEX 0 -#else - #define LL_EXP_INDEX 0 - #define LL_MAN_INDEX 1 -#endif - -/* Deprecated: use llround(), lltrunc(), or llfloor() instead -// ================================================================================================ -// Real2Int -// ================================================================================================ -inline S32 F64toS32(F64 val) -{ - val = val + LL_DOUBLE_TO_FIX_MAGIC; - return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT; -} - -// ================================================================================================ -// Real2Int -// ================================================================================================ -inline S32 F32toS32(F32 val) -{ - return F64toS32 ((F64)val); -} -*/ - -//////////////////////////////////////////////// -// -// Fast exp and log -// - -// Implementation of fast exp() approximation (from a paper by Nicol N. Schraudolph -// http://www.inf.ethz.ch/~schraudo/pubs/exp.pdf -static union -{ - double d; - struct - { -#ifdef LL_LITTLE_ENDIAN - S32 j, i; -#else - S32 i, j; -#endif - } n; -} LLECO; // not sure what the name means - -#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer -#define LL_EXP_C (60801) // this value of C good for -4 < y < 4 - -#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d) - - - -inline F32 llfastpow(const F32 x, const F32 y) -{ - return (F32)(LL_FAST_EXP(y * log(x))); -} - - -inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs) -{ - // compute the power of ten - F32 bar = 1.f; - for (S32 i = 0; i < sig_figs; i++) - { - bar *= 10.f; - } - - //F32 new_foo = (F32)llround(foo * bar); - // the llround() implementation sucks. Don't us it. - - F32 sign = (foo > 0.f) ? 1.f : -1.f; - F32 new_foo = F32( S64(foo * bar + sign * 0.5f)); - new_foo /= bar; - - return new_foo; -} - -inline F32 lerp(F32 a, F32 b, F32 u) -{ - return a + ((b - a) * u); -} - -inline F32 lerp2d(F32 x00, F32 x01, F32 x10, F32 x11, F32 u, F32 v) -{ - F32 a = x00 + (x01-x00)*u; - F32 b = x10 + (x11-x10)*u; - F32 r = a + (b-a)*v; - return r; -} - -inline F32 ramp(F32 x, F32 a, F32 b) -{ - return (a == b) ? 
0.0f : ((a - x) / (a - b)); -} - -inline F32 rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) -{ - return lerp(y1, y2, ramp(x, x1, x2)); -} - -inline F32 clamp_rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) -{ - if (y1 < y2) - { - return llclamp(rescale(x,x1,x2,y1,y2),y1,y2); - } - else - { - return llclamp(rescale(x,x1,x2,y1,y2),y2,y1); - } -} - - -inline F32 cubic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) -{ - if (x <= x0) - return s0; - - if (x >= x1) - return s1; - - F32 f = (x - x0) / (x1 - x0); - - return s0 + (s1 - s0) * (f * f) * (3.0f - 2.0f * f); -} - -inline F32 cubic_step( F32 x ) -{ - x = llclampf(x); - - return (x * x) * (3.0f - 2.0f * x); -} - -inline F32 quadratic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) -{ - if (x <= x0) - return s0; - - if (x >= x1) - return s1; - - F32 f = (x - x0) / (x1 - x0); - F32 f_squared = f * f; - - return (s0 * (1.f - f_squared)) + ((s1 - s0) * f_squared); -} - -inline F32 llsimple_angle(F32 angle) -{ - while(angle <= -F_PI) - angle += F_TWO_PI; - while(angle > F_PI) - angle -= F_TWO_PI; - return angle; -} - -//SDK - Renamed this to get_lower_power_two, since this is what this actually does. -inline U32 get_lower_power_two(U32 val, U32 max_power_two) -{ - if(!max_power_two) - { - max_power_two = 1 << 31 ; - } - if(max_power_two & (max_power_two - 1)) - { - return 0 ; - } - - for(; val < max_power_two ; max_power_two >>= 1) ; - - return max_power_two ; -} - -// calculate next highest power of two, limited by max_power_two -// This is taken from a brilliant little code snipped on http://acius2.blogspot.com/2007/11/calculating-next-power-of-2.html -// Basically we convert the binary to a solid string of 1's with the same -// number of digits, then add one. We subtract 1 initially to handle -// the case where the number passed in is actually a power of two. -// WARNING: this only works with 32 bit ints. -inline U32 get_next_power_two(U32 val, U32 max_power_two) -{ - if(!max_power_two) - { - max_power_two = 1 << 31 ; - } - - if(val >= max_power_two) - { - return max_power_two; - } - - val--; - val = (val >> 1) | val; - val = (val >> 2) | val; - val = (val >> 4) | val; - val = (val >> 8) | val; - val = (val >> 16) | val; - val++; - - return val; -} - -//get the gaussian value given the linear distance from axis x and guassian value o -inline F32 llgaussian(F32 x, F32 o) -{ - return 1.f/(F_SQRT_TWO_PI*o)*powf(F_E, -(x*x)/(2*o*o)); -} - -// Include simd math header -#include "llsimdmath.h" - -#endif +/** + * @file llmath.h + * @brief Useful math constants and macros. + * + * $LicenseInfo:firstyear=2000&license=viewergpl$ + * + * Copyright (c) 2000-2009, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. 
View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LLMATH_H +#define LLMATH_H + +#include +#include +#include "lldefs.h" +//#include "llstl.h" // *TODO: Remove when LLString is gone +//#include "llstring.h" // *TODO: Remove when LLString is gone +// lltut.h uses is_approx_equal_fraction(). This was moved to its own header +// file in llcommon so we can use lltut.h for llcommon tests without making +// llcommon depend on llmath. +#include "is_approx_equal_fraction.h" + +// work around for Windows & older gcc non-standard function names. +#if LL_WINDOWS +#include +#define llisnan(val) _isnan(val) +#define llfinite(val) _finite(val) +#elif (LL_LINUX && __GNUC__ <= 2) +#define llisnan(val) isnan(val) +#define llfinite(val) isfinite(val) +#elif LL_SOLARIS +#define llisnan(val) isnan(val) +#define llfinite(val) (val <= std::numeric_limits::max()) +#else +#define llisnan(val) std::isnan(val) +#define llfinite(val) std::isfinite(val) +#endif + +// Single Precision Floating Point Routines +// (There used to be more defined here, but they appeared to be redundant and +// were breaking some other includes. Removed by Falcon, reviewed by Andrew, 11/25/09) +/*#ifndef tanf +#define tanf(x) ((F32)tan((F64)(x))) +#endif*/ + +const F32 GRAVITY = -9.8f; + +// mathematical constants +const F32 F_PI = 3.1415926535897932384626433832795f; +const F32 F_TWO_PI = 6.283185307179586476925286766559f; +const F32 F_PI_BY_TWO = 1.5707963267948966192313216916398f; +const F32 F_SQRT_TWO_PI = 2.506628274631000502415765284811f; +const F32 F_E = 2.71828182845904523536f; +const F32 F_SQRT2 = 1.4142135623730950488016887242097f; +const F32 F_SQRT3 = 1.73205080756888288657986402541f; +const F32 OO_SQRT2 = 0.7071067811865475244008443621049f; +const F32 DEG_TO_RAD = 0.017453292519943295769236907684886f; +const F32 RAD_TO_DEG = 57.295779513082320876798154814105f; +const F32 F_APPROXIMATELY_ZERO = 0.00001f; +const F32 F_LN2 = 0.69314718056f; +const F32 OO_LN2 = 1.4426950408889634073599246810019f; + +const F32 F_ALMOST_ZERO = 0.0001f; +const F32 F_ALMOST_ONE = 1.0f - F_ALMOST_ZERO; + +// BUG: Eliminate in favor of F_APPROXIMATELY_ZERO above? +const F32 FP_MAG_THRESHOLD = 0.0000001f; + +// TODO: Replace with logic like is_approx_equal +inline BOOL is_approx_zero( F32 f ) { return (-F_APPROXIMATELY_ZERO < f) && (f < F_APPROXIMATELY_ZERO); } + +// These functions work by interpreting sign+exp+mantissa as an unsigned +// integer. +// For example: +// x = 1 00000010 00000000000000000000000 +// y = 1 00000001 11111111111111111111111 +// +// interpreted as ints = +// x = 10000001000000000000000000000000 +// y = 10000000111111111111111111111111 +// which is clearly a different of 1 in the least significant bit +// Values with the same exponent can be trivially shown to work. 
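// [Editor's note, not part of the original patch] A concrete illustration of
// the bit-pattern comparison described above, using the 0x02 threshold
// (COMPARE_MANTISSA_UP_TO_BIT) from the implementation that follows; the
// variable names are ours:
//
//   F32 a = 1.0f;
//   F32 b = a;  (U32&)b += 1;    // one representable step above 1.0f
//   F32 c = a;  (U32&)c += 4;    // four representable steps above 1.0f
//
//   is_approx_equal(a, b);       // TRUE:  |bits(a) - bits(b)| == 1, which is < 2
//   is_approx_equal(a, c);       // FALSE: |bits(a) - bits(c)| == 4, which is >= 2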
+// +// WARNING: Denormals of opposite sign do not work +// x = 1 00000000 00000000000000000000001 +// y = 0 00000000 00000000000000000000001 +// Although these values differ by 2 in the LSB, the sign bit makes +// the int comparison fail. +// +// WARNING: NaNs can compare equal +// There is no special treatment of exceptional values like NaNs +// +// WARNING: Infinity is comparable with F32_MAX and negative +// infinity is comparable with F32_MIN + +inline BOOL is_approx_equal(F32 x, F32 y) +{ + const S32 COMPARE_MANTISSA_UP_TO_BIT = 0x02; + return (std::abs((S32) ((U32&)x - (U32&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); +} + +inline BOOL is_approx_equal(F64 x, F64 y) +{ + const S64 COMPARE_MANTISSA_UP_TO_BIT = 0x02; + return (std::abs((S32) ((U64&)x - (U64&)y) ) < COMPARE_MANTISSA_UP_TO_BIT); +} + +inline S32 llabs(const S32 a) +{ + return S32(std::labs(a)); +} + +inline F32 llabs(const F32 a) +{ + return F32(std::fabs(a)); +} + +inline F64 llabs(const F64 a) +{ + return F64(std::fabs(a)); +} + +inline S32 lltrunc( F32 f ) +{ +#if LL_WINDOWS && !defined( __INTEL_COMPILER ) + // Avoids changing the floating point control word. + // Add or subtract 0.5 - epsilon and then round + const static U32 zpfp[] = { 0xBEFFFFFF, 0x3EFFFFFF }; + S32 result; + __asm { + fld f + mov eax, f + shr eax, 29 + and eax, 4 + fadd dword ptr [zpfp + eax] + fistp result + } + return result; +#else + return (S32)f; +#endif +} + +inline S32 lltrunc( F64 f ) +{ + return (S32)f; +} + +inline S32 llfloor( F32 f ) +{ +#if LL_WINDOWS && !defined( __INTEL_COMPILER ) + // Avoids changing the floating point control word. + // Accurate (unlike Stereopsis version) for all values between S32_MIN and S32_MAX and slightly faster than Stereopsis version. + // Add -(0.5 - epsilon) and then round + const U32 zpfp = 0xBEFFFFFF; + S32 result; + __asm { + fld f + fadd dword ptr [zpfp] + fistp result + } + return result; +#else + return (S32)floor(f); +#endif +} + + +inline S32 llceil( F32 f ) +{ + // This could probably be optimized, but this works. + return (S32)ceil(f); +} + + +#ifndef BOGUS_ROUND +// Use this round. Does an arithmetic round (0.5 always rounds up) +inline S32 llround(const F32 val) +{ + return llfloor(val + 0.5f); +} + +#else // BOGUS_ROUND +// Old llround implementation - does banker's round (toward nearest even in the case of a 0.5. +// Not using this because we don't have a consistent implementation on both platforms, use +// llfloor(val + 0.5f), which is consistent on all platforms. 
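// [Editor's note, not part of the original patch] The two schemes only
// disagree on exact halves; a quick worked comparison, assuming the FPU
// default of round-to-nearest-even for the fistp path:
//
//   value    arithmetic round: llfloor(val + 0.5f)    banker's round: fistp
//    2.5f    llfloor( 3.0f) ==  3                       2  (nearest even)
//    3.5f    llfloor( 4.0f) ==  4                       4
//   -2.5f    llfloor(-2.0f) == -2                      -2  (nearest even)
//
// which is why the cross-platform llfloor(val + 0.5f) form above is preferred
// over the banker's-rounding implementation kept below for reference.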
+inline S32 llround(const F32 val) +{ + #if LL_WINDOWS + // Note: assumes that the floating point control word is set to rounding mode (the default) + S32 ret_val; + _asm fld val + _asm fistp ret_val; + return ret_val; + #elif LL_LINUX + // Note: assumes that the floating point control word is set + // to rounding mode (the default) + S32 ret_val; + __asm__ __volatile__( "flds %1 \n\t" + "fistpl %0 \n\t" + : "=m" (ret_val) + : "m" (val) ); + return ret_val; + #else + return llfloor(val + 0.5f); + #endif +} + +// A fast arithmentic round on intel, from Laurent de Soras http://ldesoras.free.fr +inline int round_int(double x) +{ + const float round_to_nearest = 0.5f; + int i; + __asm + { + fld x + fadd st, st (0) + fadd round_to_nearest + fistp i + sar i, 1 + } + return (i); +} +#endif // BOGUS_ROUND + +inline F32 llround( F32 val, F32 nearest ) +{ + return F32(floor(val * (1.0f / nearest) + 0.5f)) * nearest; +} + +inline F64 llround( F64 val, F64 nearest ) +{ + return F64(floor(val * (1.0 / nearest) + 0.5)) * nearest; +} + +// these provide minimum peak error +// +// avg error = -0.013049 +// peak error = -31.4 dB +// RMS error = -28.1 dB + +const F32 FAST_MAG_ALPHA = 0.960433870103f; +const F32 FAST_MAG_BETA = 0.397824734759f; + +// these provide minimum RMS error +// +// avg error = 0.000003 +// peak error = -32.6 dB +// RMS error = -25.7 dB +// +//const F32 FAST_MAG_ALPHA = 0.948059448969f; +//const F32 FAST_MAG_BETA = 0.392699081699f; + +inline F32 fastMagnitude(F32 a, F32 b) +{ + a = (a > 0) ? a : -a; + b = (b > 0) ? b : -b; + return(FAST_MAG_ALPHA * llmax(a,b) + FAST_MAG_BETA * llmin(a,b)); +} + + + +//////////////////// +// +// Fast F32/S32 conversions +// +// Culled from www.stereopsis.com/FPU.html + +const F64 LL_DOUBLE_TO_FIX_MAGIC = 68719476736.0*1.5; //2^36 * 1.5, (52-_shiftamt=36) uses limited precisicion to floor +const S32 LL_SHIFT_AMOUNT = 16; //16.16 fixed point representation, + +// Endian dependent code +#ifdef LL_LITTLE_ENDIAN + #define LL_EXP_INDEX 1 + #define LL_MAN_INDEX 0 +#else + #define LL_EXP_INDEX 0 + #define LL_MAN_INDEX 1 +#endif + +/* Deprecated: use llround(), lltrunc(), or llfloor() instead +// ================================================================================================ +// Real2Int +// ================================================================================================ +inline S32 F64toS32(F64 val) +{ + val = val + LL_DOUBLE_TO_FIX_MAGIC; + return ((S32*)&val)[LL_MAN_INDEX] >> LL_SHIFT_AMOUNT; +} + +// ================================================================================================ +// Real2Int +// ================================================================================================ +inline S32 F32toS32(F32 val) +{ + return F64toS32 ((F64)val); +} +*/ + +//////////////////////////////////////////////// +// +// Fast exp and log +// + +// Implementation of fast exp() approximation (from a paper by Nicol N. 
Schraudolph +// http://www.inf.ethz.ch/~schraudo/pubs/exp.pdf +static union +{ + double d; + struct + { +#ifdef LL_LITTLE_ENDIAN + S32 j, i; +#else + S32 i, j; +#endif + } n; +} LLECO; // not sure what the name means + +#define LL_EXP_A (1048576 * OO_LN2) // use 1512775 for integer +#define LL_EXP_C (60801) // this value of C good for -4 < y < 4 + +#define LL_FAST_EXP(y) (LLECO.n.i = llround(F32(LL_EXP_A*(y))) + (1072693248 - LL_EXP_C), LLECO.d) + + + +inline F32 llfastpow(const F32 x, const F32 y) +{ + return (F32)(LL_FAST_EXP(y * log(x))); +} + + +inline F32 snap_to_sig_figs(F32 foo, S32 sig_figs) +{ + // compute the power of ten + F32 bar = 1.f; + for (S32 i = 0; i < sig_figs; i++) + { + bar *= 10.f; + } + + //F32 new_foo = (F32)llround(foo * bar); + // the llround() implementation sucks. Don't us it. + + F32 sign = (foo > 0.f) ? 1.f : -1.f; + F32 new_foo = F32( S64(foo * bar + sign * 0.5f)); + new_foo /= bar; + + return new_foo; +} + +inline F32 lerp(F32 a, F32 b, F32 u) +{ + return a + ((b - a) * u); +} + +inline F32 lerp2d(F32 x00, F32 x01, F32 x10, F32 x11, F32 u, F32 v) +{ + F32 a = x00 + (x01-x00)*u; + F32 b = x10 + (x11-x10)*u; + F32 r = a + (b-a)*v; + return r; +} + +inline F32 ramp(F32 x, F32 a, F32 b) +{ + return (a == b) ? 0.0f : ((a - x) / (a - b)); +} + +inline F32 rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) +{ + return lerp(y1, y2, ramp(x, x1, x2)); +} + +inline F32 clamp_rescale(F32 x, F32 x1, F32 x2, F32 y1, F32 y2) +{ + if (y1 < y2) + { + return llclamp(rescale(x,x1,x2,y1,y2),y1,y2); + } + else + { + return llclamp(rescale(x,x1,x2,y1,y2),y2,y1); + } +} + + +inline F32 cubic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) +{ + if (x <= x0) + return s0; + + if (x >= x1) + return s1; + + F32 f = (x - x0) / (x1 - x0); + + return s0 + (s1 - s0) * (f * f) * (3.0f - 2.0f * f); +} + +inline F32 cubic_step( F32 x ) +{ + x = llclampf(x); + + return (x * x) * (3.0f - 2.0f * x); +} + +inline F32 quadratic_step( F32 x, F32 x0, F32 x1, F32 s0, F32 s1 ) +{ + if (x <= x0) + return s0; + + if (x >= x1) + return s1; + + F32 f = (x - x0) / (x1 - x0); + F32 f_squared = f * f; + + return (s0 * (1.f - f_squared)) + ((s1 - s0) * f_squared); +} + +inline F32 llsimple_angle(F32 angle) +{ + while(angle <= -F_PI) + angle += F_TWO_PI; + while(angle > F_PI) + angle -= F_TWO_PI; + return angle; +} + +//SDK - Renamed this to get_lower_power_two, since this is what this actually does. +inline U32 get_lower_power_two(U32 val, U32 max_power_two) +{ + if(!max_power_two) + { + max_power_two = 1 << 31 ; + } + if(max_power_two & (max_power_two - 1)) + { + return 0 ; + } + + for(; val < max_power_two ; max_power_two >>= 1) ; + + return max_power_two ; +} + +// calculate next highest power of two, limited by max_power_two +// This is taken from a brilliant little code snipped on http://acius2.blogspot.com/2007/11/calculating-next-power-of-2.html +// Basically we convert the binary to a solid string of 1's with the same +// number of digits, then add one. We subtract 1 initially to handle +// the case where the number passed in is actually a power of two. +// WARNING: this only works with 32 bit ints. 
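+// Worked example: get_next_power_two(37, 256)
+//   37 - 1 = 36            = 0b100100
+//   OR in shifted copies   -> 0b111111 = 63   (the top set bit is smeared downward)
+//   63 + 1                 = 64, the next power of two
+// An exact power of two (e.g. 64) returns itself, thanks to the initial decrement.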
+inline U32 get_next_power_two(U32 val, U32 max_power_two) +{ + if(!max_power_two) + { + max_power_two = 1 << 31 ; + } + + if(val >= max_power_two) + { + return max_power_two; + } + + val--; + val = (val >> 1) | val; + val = (val >> 2) | val; + val = (val >> 4) | val; + val = (val >> 8) | val; + val = (val >> 16) | val; + val++; + + return val; +} + +//get the gaussian value given the linear distance from axis x and guassian value o +inline F32 llgaussian(F32 x, F32 o) +{ + return 1.f/(F_SQRT_TWO_PI*o)*powf(F_E, -(x*x)/(2*o*o)); +} + +// Include simd math header +#include "llsimdmath.h" + +#endif diff --git a/indra/llmath/llquantize.h b/indra/llmath/llquantize.h index 000d8a060f..c043f7f752 100644 --- a/indra/llmath/llquantize.h +++ b/indra/llmath/llquantize.h @@ -1,158 +1,158 @@ -/** - * @file llquantize.h - * @brief useful routines for quantizing floats to various length ints - * and back out again - * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#ifndef LL_LLQUANTIZE_H -#define LL_LLQUANTIZE_H - -const U16 U16MAX = 65535; -LL_ALIGN_16( const F32 F_U16MAX_4A[4] ) = { 65535.f, 65535.f, 65535.f, 65535.f }; - -const F32 OOU16MAX = 1.f/(F32)(U16MAX); -LL_ALIGN_16( const F32 F_OOU16MAX_4A[4] ) = { OOU16MAX, OOU16MAX, OOU16MAX, OOU16MAX }; - -const U8 U8MAX = 255; -LL_ALIGN_16( const F32 F_U8MAX_4A[4] ) = { 255.f, 255.f, 255.f, 255.f }; - -const F32 OOU8MAX = 1.f/(F32)(U8MAX); -LL_ALIGN_16( const F32 F_OOU8MAX_4A[4] ) = { OOU8MAX, OOU8MAX, OOU8MAX, OOU8MAX }; - -const U8 FIRSTVALIDCHAR = 54; -const U8 MAXSTRINGVAL = U8MAX - FIRSTVALIDCHAR; //we don't allow newline or null - - -inline U16 F32_to_U16_ROUND(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // round the value. 
Sreturn the U16 - return (U16)(llround(val*U16MAX)); -} - - -inline U16 F32_to_U16(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // return the U16 - return (U16)(llfloor(val*U16MAX)); -} - -inline F32 U16_to_F32(U16 ival, F32 lower, F32 upper) -{ - F32 val = ival*OOU16MAX; - F32 delta = (upper - lower); - val *= delta; - val += lower; - - F32 max_error = delta*OOU16MAX; - - // make sure that zero's come through as zero - if (fabsf(val) < max_error) - val = 0.f; - - return val; -} - - -inline U8 F32_to_U8_ROUND(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // return the rounded U8 - return (U8)(llround(val*U8MAX)); -} - - -inline U8 F32_to_U8(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); - // make sure that the value is positive and normalized to <0, 1> - val -= lower; - val /= (upper - lower); - - // return the U8 - return (U8)(llfloor(val*U8MAX)); -} - -inline F32 U8_to_F32(U8 ival, F32 lower, F32 upper) -{ - F32 val = ival*OOU8MAX; - F32 delta = (upper - lower); - val *= delta; - val += lower; - - F32 max_error = delta*OOU8MAX; - - // make sure that zero's come through as zero - if (fabsf(val) < max_error) - val = 0.f; - - return val; -} - -inline U8 F32_TO_STRING(F32 val, F32 lower, F32 upper) -{ - val = llclamp(val, lower, upper); //[lower, upper] - // make sure that the value is positive and normalized to <0, 1> - val -= lower; //[0, upper-lower] - val /= (upper - lower); //[0,1] - val = val * MAXSTRINGVAL; //[0, MAXSTRINGVAL] - val = floor(val + 0.5f); //[0, MAXSTRINGVAL] - - U8 stringVal = (U8)(val) + FIRSTVALIDCHAR; //[FIRSTVALIDCHAR, MAXSTRINGVAL + FIRSTVALIDCHAR] - return stringVal; -} - -inline F32 STRING_TO_F32(U8 ival, F32 lower, F32 upper) -{ - // remove empty space left for NULL, newline, etc. - ival -= FIRSTVALIDCHAR; //[0, MAXSTRINGVAL] - - F32 val = (F32)ival * (1.f / (F32)MAXSTRINGVAL); //[0, 1] - F32 delta = (upper - lower); - val *= delta; //[0, upper - lower] - val += lower; //[lower, upper] - - return val; -} - -#endif +/** + * @file llquantize.h + * @brief useful routines for quantizing floats to various length ints + * and back out again + * + * $LicenseInfo:firstyear=2001&license=viewergpl$ + * + * Copyright (c) 2001-2009, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." 
LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_LLQUANTIZE_H +#define LL_LLQUANTIZE_H + +const U16 U16MAX = 65535; +LL_ALIGN_16( const F32 F_U16MAX_4A[4] ) = { 65535.f, 65535.f, 65535.f, 65535.f }; + +const F32 OOU16MAX = 1.f/(F32)(U16MAX); +LL_ALIGN_16( const F32 F_OOU16MAX_4A[4] ) = { OOU16MAX, OOU16MAX, OOU16MAX, OOU16MAX }; + +const U8 U8MAX = 255; +LL_ALIGN_16( const F32 F_U8MAX_4A[4] ) = { 255.f, 255.f, 255.f, 255.f }; + +const F32 OOU8MAX = 1.f/(F32)(U8MAX); +LL_ALIGN_16( const F32 F_OOU8MAX_4A[4] ) = { OOU8MAX, OOU8MAX, OOU8MAX, OOU8MAX }; + +const U8 FIRSTVALIDCHAR = 54; +const U8 MAXSTRINGVAL = U8MAX - FIRSTVALIDCHAR; //we don't allow newline or null + + +inline U16 F32_to_U16_ROUND(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // round the value. Sreturn the U16 + return (U16)(llround(val*U16MAX)); +} + + +inline U16 F32_to_U16(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // return the U16 + return (U16)(llfloor(val*U16MAX)); +} + +inline F32 U16_to_F32(U16 ival, F32 lower, F32 upper) +{ + F32 val = ival*OOU16MAX; + F32 delta = (upper - lower); + val *= delta; + val += lower; + + F32 max_error = delta*OOU16MAX; + + // make sure that zero's come through as zero + if (fabsf(val) < max_error) + val = 0.f; + + return val; +} + + +inline U8 F32_to_U8_ROUND(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // return the rounded U8 + return (U8)(llround(val*U8MAX)); +} + + +inline U8 F32_to_U8(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); + // make sure that the value is positive and normalized to <0, 1> + val -= lower; + val /= (upper - lower); + + // return the U8 + return (U8)(llfloor(val*U8MAX)); +} + +inline F32 U8_to_F32(U8 ival, F32 lower, F32 upper) +{ + F32 val = ival*OOU8MAX; + F32 delta = (upper - lower); + val *= delta; + val += lower; + + F32 max_error = delta*OOU8MAX; + + // make sure that zero's come through as zero + if (fabsf(val) < max_error) + val = 0.f; + + return val; +} + +inline U8 F32_TO_STRING(F32 val, F32 lower, F32 upper) +{ + val = llclamp(val, lower, upper); //[lower, upper] + // make sure that the value is positive and normalized to <0, 1> + val -= lower; //[0, upper-lower] + val /= (upper - lower); //[0,1] + val = val * MAXSTRINGVAL; //[0, MAXSTRINGVAL] + val = floor(val + 0.5f); //[0, MAXSTRINGVAL] + + U8 stringVal = (U8)(val) + FIRSTVALIDCHAR; //[FIRSTVALIDCHAR, MAXSTRINGVAL + FIRSTVALIDCHAR] + return stringVal; +} + +inline F32 STRING_TO_F32(U8 ival, F32 lower, F32 upper) +{ + // remove empty space left for NULL, newline, etc. 
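+	// (This reverses F32_TO_STRING above: values map onto the printable bytes
+	// [FIRSTVALIDCHAR, U8MAX] = [54, 255], i.e. MAXSTRINGVAL = 201 steps, so the
+	// round trip reproduces the input to within (upper - lower) / MAXSTRINGVAL.)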
+ ival -= FIRSTVALIDCHAR; //[0, MAXSTRINGVAL] + + F32 val = (F32)ival * (1.f / (F32)MAXSTRINGVAL); //[0, 1] + F32 delta = (upper - lower); + val *= delta; //[0, upper - lower] + val += lower; //[lower, upper] + + return val; +} + +#endif diff --git a/indra/llmath/llquaternion.cpp b/indra/llmath/llquaternion.cpp index efdc10e2c6..73c5f4505e 100644 --- a/indra/llmath/llquaternion.cpp +++ b/indra/llmath/llquaternion.cpp @@ -1,961 +1,961 @@ -/** - * @file llquaternion.cpp - * @brief LLQuaternion class implementation. - * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#include "linden_common.h" - -#include "llmath.h" // for F_PI - -#include "llquaternion.h" - -//#include "vmath.h" -#include "v3math.h" -#include "v3dmath.h" -#include "v4math.h" -#include "m4math.h" -#include "m3math.h" -#include "llquantize.h" - -// WARNING: Don't use this for global const definitions! using this -// at the top of a *.cpp file might not give you what you think. 
-const LLQuaternion LLQuaternion::DEFAULT; - -// Constructors - -LLQuaternion::LLQuaternion(const LLMatrix4 &mat) -{ - *this = mat.quaternion(); - normalize(); -} - -LLQuaternion::LLQuaternion(const LLMatrix3 &mat) -{ - *this = mat.quaternion(); - normalize(); -} - -LLQuaternion::LLQuaternion(F32 angle, const LLVector4 &vec) -{ - LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX] * s; - mQ[VY] = v.mV[VY] * s; - mQ[VZ] = v.mV[VZ] * s; - mQ[VW] = c; - normalize(); -} - -LLQuaternion::LLQuaternion(F32 angle, const LLVector3 &vec) -{ - LLVector3 v(vec); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX] * s; - mQ[VY] = v.mV[VY] * s; - mQ[VZ] = v.mV[VZ] * s; - mQ[VW] = c; - normalize(); -} - -LLQuaternion::LLQuaternion(const LLVector3 &x_axis, - const LLVector3 &y_axis, - const LLVector3 &z_axis) -{ - LLMatrix3 mat; - mat.setRows(x_axis, y_axis, z_axis); - *this = mat.quaternion(); - normalize(); -} - -// Quatizations -void LLQuaternion::quantize16(F32 lower, F32 upper) -{ - F32 x = mQ[VX]; - F32 y = mQ[VY]; - F32 z = mQ[VZ]; - F32 s = mQ[VS]; - - x = U16_to_F32(F32_to_U16_ROUND(x, lower, upper), lower, upper); - y = U16_to_F32(F32_to_U16_ROUND(y, lower, upper), lower, upper); - z = U16_to_F32(F32_to_U16_ROUND(z, lower, upper), lower, upper); - s = U16_to_F32(F32_to_U16_ROUND(s, lower, upper), lower, upper); - - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = s; - - normalize(); -} - -void LLQuaternion::quantize8(F32 lower, F32 upper) -{ - mQ[VX] = U8_to_F32(F32_to_U8_ROUND(mQ[VX], lower, upper), lower, upper); - mQ[VY] = U8_to_F32(F32_to_U8_ROUND(mQ[VY], lower, upper), lower, upper); - mQ[VZ] = U8_to_F32(F32_to_U8_ROUND(mQ[VZ], lower, upper), lower, upper); - mQ[VS] = U8_to_F32(F32_to_U8_ROUND(mQ[VS], lower, upper), lower, upper); - - normalize(); -} - -// LLVector3 Magnitude and Normalization Functions - - -// Set LLQuaternion routines - -const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, F32 x, F32 y, F32 z) -{ - LLVector3 vec(x, y, z); - vec.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = vec.mV[VX]*s; - mQ[VY] = vec.mV[VY]*s; - mQ[VZ] = vec.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector3 &vec) -{ - LLVector3 v(vec); - v.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector4 &vec) -{ - LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setEulerAngles(F32 roll, F32 pitch, F32 yaw) -{ - LLMatrix3 rot_mat(roll, pitch, yaw); - rot_mat.orthogonalize(); - *this = rot_mat.quaternion(); - - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::set(const LLMatrix3 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::set(const LLMatrix4 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& 
LLQuaternion::setQuat(F32 angle, F32 x, F32 y, F32 z) -{ - LLVector3 vec(x, y, z); - vec.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = vec.mV[VX]*s; - mQ[VY] = vec.mV[VY]*s; - mQ[VZ] = vec.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -// deprecated -const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector3 &vec) -{ - LLVector3 v(vec); - v.normalize(); - - angle *= 0.5f; - F32 c, s; - c = cosf(angle); - s = sinf(angle); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector4 &vec) -{ - LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); - v.normalize(); - - F32 c, s; - c = cosf(angle*0.5f); - s = sinf(angle*0.5f); - - mQ[VX] = v.mV[VX]*s; - mQ[VY] = v.mV[VY]*s; - mQ[VZ] = v.mV[VZ]*s; - mQ[VW] = c; - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(F32 roll, F32 pitch, F32 yaw) -{ - LLMatrix3 rot_mat(roll, pitch, yaw); - rot_mat.orthogonalize(); - *this = rot_mat.quaternion(); - - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(const LLMatrix3 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -} - -const LLQuaternion& LLQuaternion::setQuat(const LLMatrix4 &mat) -{ - *this = mat.quaternion(); - normalize(); - return (*this); -//#if 1 -// // NOTE: LLQuaternion's are actually inverted with respect to -// // the matrices, so this code also assumes inverted quaternions -// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied -// // in reverse order (yaw,pitch,roll). -// F64 cosX = cos(roll); -// F64 cosY = cos(pitch); -// F64 cosZ = cos(yaw); -// -// F64 sinX = sin(roll); -// F64 sinY = sin(pitch); -// F64 sinZ = sin(yaw); -// -// mQ[VW] = (F32)sqrt(cosY*cosZ - sinX*sinY*sinZ + cosX*cosZ + cosX*cosY + 1.0)*.5; -// if (fabs(mQ[VW]) < F_APPROXIMATELY_ZERO) -// { -// // null rotation, any axis will do -// mQ[VX] = 0.0f; -// mQ[VY] = 1.0f; -// mQ[VZ] = 0.0f; -// } -// else -// { -// F32 inv_s = 1.0f / (4.0f * mQ[VW]); -// mQ[VX] = (F32)-(-sinX*cosY - cosX*sinY*sinZ - sinX*cosZ) * inv_s; -// mQ[VY] = (F32)-(-cosX*sinY*cosZ + sinX*sinZ - sinY) * inv_s; -// mQ[VZ] = (F32)-(-cosY*sinZ - sinX*sinY*cosZ - cosX*sinZ) * inv_s; -// } -// -//#else // This only works on a certain subset of roll/pitch/yaw -// -// F64 cosX = cosf(roll/2.0); -// F64 cosY = cosf(pitch/2.0); -// F64 cosZ = cosf(yaw/2.0); -// -// F64 sinX = sinf(roll/2.0); -// F64 sinY = sinf(pitch/2.0); -// F64 sinZ = sinf(yaw/2.0); -// -// mQ[VW] = (F32)(cosX*cosY*cosZ + sinX*sinY*sinZ); -// mQ[VX] = (F32)(sinX*cosY*cosZ - cosX*sinY*sinZ); -// mQ[VY] = (F32)(cosX*sinY*cosZ + sinX*cosY*sinZ); -// mQ[VZ] = (F32)(cosX*cosY*sinZ - sinX*sinY*cosZ); -//#endif -// -// normalize(); -// return (*this); -} - -// SJB: This code is correct for a logicly stored (non-transposed) matrix; -// Our matrices are stored transposed, OpenGL style, so this generates the -// INVERSE matrix, or the CORRECT matrix form an INVERSE quaternion. 
-// Because we use similar logic in LLMatrix3::quaternion(), -// we are internally consistant so everything works OK :) -LLMatrix3 LLQuaternion::getMatrix3(void) const -{ - LLMatrix3 mat; - F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; - - xx = mQ[VX] * mQ[VX]; - xy = mQ[VX] * mQ[VY]; - xz = mQ[VX] * mQ[VZ]; - xw = mQ[VX] * mQ[VW]; - - yy = mQ[VY] * mQ[VY]; - yz = mQ[VY] * mQ[VZ]; - yw = mQ[VY] * mQ[VW]; - - zz = mQ[VZ] * mQ[VZ]; - zw = mQ[VZ] * mQ[VW]; - - mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); - mat.mMatrix[0][1] = 2.f * ( xy + zw ); - mat.mMatrix[0][2] = 2.f * ( xz - yw ); - - mat.mMatrix[1][0] = 2.f * ( xy - zw ); - mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); - mat.mMatrix[1][2] = 2.f * ( yz + xw ); - - mat.mMatrix[2][0] = 2.f * ( xz + yw ); - mat.mMatrix[2][1] = 2.f * ( yz - xw ); - mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); - - return mat; -} - -LLMatrix4 LLQuaternion::getMatrix4(void) const -{ - LLMatrix4 mat; - F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; - - xx = mQ[VX] * mQ[VX]; - xy = mQ[VX] * mQ[VY]; - xz = mQ[VX] * mQ[VZ]; - xw = mQ[VX] * mQ[VW]; - - yy = mQ[VY] * mQ[VY]; - yz = mQ[VY] * mQ[VZ]; - yw = mQ[VY] * mQ[VW]; - - zz = mQ[VZ] * mQ[VZ]; - zw = mQ[VZ] * mQ[VW]; - - mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); - mat.mMatrix[0][1] = 2.f * ( xy + zw ); - mat.mMatrix[0][2] = 2.f * ( xz - yw ); - - mat.mMatrix[1][0] = 2.f * ( xy - zw ); - mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); - mat.mMatrix[1][2] = 2.f * ( yz + xw ); - - mat.mMatrix[2][0] = 2.f * ( xz + yw ); - mat.mMatrix[2][1] = 2.f * ( yz - xw ); - mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); - - // TODO -- should we set the translation portion to zero? - - return mat; -} - - - - -// Other useful methods - - -// calculate the shortest rotation from a to b -void LLQuaternion::shortestArc(const LLVector3 &a, const LLVector3 &b) -{ - // Make a local copy of both vectors. - LLVector3 vec_a = a; - LLVector3 vec_b = b; - - // Make sure neither vector is zero length. Also normalize - // the vectors while we are at it. - F32 vec_a_mag = vec_a.normalize(); - F32 vec_b_mag = vec_b.normalize(); - if (vec_a_mag < F_APPROXIMATELY_ZERO || - vec_b_mag < F_APPROXIMATELY_ZERO) - { - // Can't calculate a rotation from this. - // Just return ZERO_ROTATION instead. - loadIdentity(); - return; - } - - // Create an axis to rotate around, and the cos of the angle to rotate. - LLVector3 axis = vec_a % vec_b; - F32 cos_theta = vec_a * vec_b; - - // Check the angle between the vectors to see if they are parallel or anti-parallel. - if (cos_theta > 1.0 - F_APPROXIMATELY_ZERO) - { - // a and b are parallel. No rotation is necessary. - loadIdentity(); - } - else if (cos_theta < -1.0 + F_APPROXIMATELY_ZERO) - { - // a and b are anti-parallel. - // Rotate 180 degrees around some orthogonal axis. - // Find the projection of the x-axis onto a, and try - // using the vector between the projection and the x-axis - // as the orthogonal axis. - LLVector3 proj = vec_a.mV[VX] / (vec_a * vec_a) * vec_a; - LLVector3 ortho_axis(1.f, 0.f, 0.f); - ortho_axis -= proj; - - // Turn this into an orthonormal axis. - F32 ortho_length = ortho_axis.normalize(); - // If the axis' length is 0, then our guess at an orthogonal axis - // was wrong (a is parallel to the x-axis). - if (ortho_length < F_APPROXIMATELY_ZERO) - { - // Use the z-axis instead. - ortho_axis.setVec(0.f, 0.f, 1.f); - } - - // Construct a quaternion from this orthonormal axis. 
- mQ[VX] = ortho_axis.mV[VX]; - mQ[VY] = ortho_axis.mV[VY]; - mQ[VZ] = ortho_axis.mV[VZ]; - mQ[VW] = 0.f; - } - else - { - // a and b are NOT parallel or anti-parallel. - // Return the rotation between these vectors. - F32 theta = (F32)acos(cos_theta); - - setAngleAxis(theta, axis); - } -} - -// constrains rotation to a cone angle specified in radians -const LLQuaternion &LLQuaternion::constrain(F32 radians) -{ - const F32 cos_angle_lim = cosf( radians/2 ); // mQ[VW] limit - const F32 sin_angle_lim = sinf( radians/2 ); // rotation axis length limit - - if (mQ[VW] < 0.f) - { - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - mQ[VW] *= -1.f; - } - - // if rotation angle is greater than limit (cos is less than limit) - if( mQ[VW] < cos_angle_lim ) - { - mQ[VW] = cos_angle_lim; - F32 axis_len = sqrtf( mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] ); // sin(theta/2) - F32 axis_mult_fact = sin_angle_lim / axis_len; - mQ[VX] *= axis_mult_fact; - mQ[VY] *= axis_mult_fact; - mQ[VZ] *= axis_mult_fact; - } - - return *this; -} - -// Operators - -std::ostream& operator<<(std::ostream &s, const LLQuaternion &a) -{ - s << "{ " - << a.mQ[VX] << ", " << a.mQ[VY] << ", " << a.mQ[VZ] << ", " << a.mQ[VW] - << " }"; - return s; -} - - -// Does NOT renormalize the result -LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b) -{ -// LLQuaternion::mMultCount++; - - LLQuaternion q( - b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], - b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], - b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], - b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] - ); - return q; -} - -/* -LLMatrix4 operator*(const LLMatrix4 &m, const LLQuaternion &q) -{ - LLMatrix4 qmat(q); - return (m*qmat); -} -*/ - - - -LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot) -{ - F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; - F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; - F32 ry = rot.mQ[VW] * a.mV[VY] + rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; - F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; - - F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; - F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; - F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; - - return LLVector4(nx, ny, nz, a.mV[VW]); -} - -LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot) -{ - F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; - F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; - F32 ry = rot.mQ[VW] * a.mV[VY] + rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; - F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; - - F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; - F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; - F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; - - return LLVector3(nx, ny, nz); -} - -LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot) -{ - F64 rw = - rot.mQ[VX] * a.mdV[VX] - rot.mQ[VY] * a.mdV[VY] - rot.mQ[VZ] * a.mdV[VZ]; - F64 rx = rot.mQ[VW] * a.mdV[VX] + rot.mQ[VY] * a.mdV[VZ] - rot.mQ[VZ] * a.mdV[VY]; - F64 ry = rot.mQ[VW] * a.mdV[VY] + rot.mQ[VZ] 
* a.mdV[VX] - rot.mQ[VX] * a.mdV[VZ]; - F64 rz = rot.mQ[VW] * a.mdV[VZ] + rot.mQ[VX] * a.mdV[VY] - rot.mQ[VY] * a.mdV[VX]; - - F64 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; - F64 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; - F64 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; - - return LLVector3d(nx, ny, nz); -} - -F32 dot(const LLQuaternion &a, const LLQuaternion &b) -{ - return a.mQ[VX] * b.mQ[VX] + - a.mQ[VY] * b.mQ[VY] + - a.mQ[VZ] * b.mQ[VZ] + - a.mQ[VW] * b.mQ[VW]; -} - -// DEMO HACK: This lerp is probably inocrrect now due intermediate normalization -// it should look more like the lerp below -#if 0 -// linear interpolation -LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) -{ - LLQuaternion r; - r = t * (q - p) + p; - r.normalize(); - return r; -} -#endif - -// lerp from identity to q -LLQuaternion lerp(F32 t, const LLQuaternion &q) -{ - LLQuaternion r; - r.mQ[VX] = t * q.mQ[VX]; - r.mQ[VY] = t * q.mQ[VY]; - r.mQ[VZ] = t * q.mQ[VZ]; - r.mQ[VW] = t * (q.mQ[VZ] - 1.f) + 1.f; - r.normalize(); - return r; -} - -LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) -{ - LLQuaternion r; - F32 inv_t; - - inv_t = 1.f - t; - - r.mQ[VX] = t * q.mQ[VX] + (inv_t * p.mQ[VX]); - r.mQ[VY] = t * q.mQ[VY] + (inv_t * p.mQ[VY]); - r.mQ[VZ] = t * q.mQ[VZ] + (inv_t * p.mQ[VZ]); - r.mQ[VW] = t * q.mQ[VW] + (inv_t * p.mQ[VW]); - r.normalize(); - return r; -} - - -// spherical linear interpolation -LLQuaternion slerp( F32 u, const LLQuaternion &a, const LLQuaternion &b ) -{ - // cosine theta = dot product of a and b - F32 cos_t = a.mQ[0]*b.mQ[0] + a.mQ[1]*b.mQ[1] + a.mQ[2]*b.mQ[2] + a.mQ[3]*b.mQ[3]; - - // if b is on opposite hemisphere from a, use -a instead - int bflip; - if (cos_t < 0.0f) - { - cos_t = -cos_t; - bflip = TRUE; - } - else - bflip = FALSE; - - // if B is (within precision limits) the same as A, - // just linear interpolate between A and B. 
- F32 alpha; // interpolant - F32 beta; // 1 - interpolant - if (1.0f - cos_t < 0.00001f) - { - beta = 1.0f - u; - alpha = u; - } - else - { - F32 theta = acosf(cos_t); - F32 sin_t = sinf(theta); - beta = sinf(theta - u*theta) / sin_t; - alpha = sinf(u*theta) / sin_t; - } - - if (bflip) - beta = -beta; - - // interpolate - LLQuaternion ret; - ret.mQ[0] = beta*a.mQ[0] + alpha*b.mQ[0]; - ret.mQ[1] = beta*a.mQ[1] + alpha*b.mQ[1]; - ret.mQ[2] = beta*a.mQ[2] + alpha*b.mQ[2]; - ret.mQ[3] = beta*a.mQ[3] + alpha*b.mQ[3]; - - return ret; -} - -// lerp whenever possible -LLQuaternion nlerp(F32 t, const LLQuaternion &a, const LLQuaternion &b) -{ - if (dot(a, b) < 0.f) - { - return slerp(t, a, b); - } - else - { - return lerp(t, a, b); - } -} - -LLQuaternion nlerp(F32 t, const LLQuaternion &q) -{ - if (q.mQ[VW] < 0.f) - { - return slerp(t, q); - } - else - { - return lerp(t, q); - } -} - -// slerp from identity quaternion to another quaternion -LLQuaternion slerp(F32 t, const LLQuaternion &q) -{ - F32 c = q.mQ[VW]; - if (1.0f == t || 1.0f == c) - { - // the trivial cases - return q; - } - - LLQuaternion r; - F32 s, angle, stq, stp; - - s = (F32) sqrt(1.f - c*c); - - if (c < 0.0f) - { - // when c < 0.0 then theta > PI/2 - // since quat and -quat are the same rotation we invert one of - // p or q to reduce unecessary spins - // A equivalent way to do it is to convert acos(c) as if it had - // been negative, and to negate stp - angle = (F32) acos(-c); - stp = -(F32) sin(angle * (1.f - t)); - stq = (F32) sin(angle * t); - } - else - { - angle = (F32) acos(c); - stp = (F32) sin(angle * (1.f - t)); - stq = (F32) sin(angle * t); - } - - r.mQ[VX] = (q.mQ[VX] * stq) / s; - r.mQ[VY] = (q.mQ[VY] * stq) / s; - r.mQ[VZ] = (q.mQ[VZ] * stq) / s; - r.mQ[VW] = (stp + q.mQ[VW] * stq) / s; - - return r; -} - -LLQuaternion mayaQ(F32 xRot, F32 yRot, F32 zRot, LLQuaternion::Order order) -{ - LLQuaternion xQ( xRot*DEG_TO_RAD, LLVector3(1.0f, 0.0f, 0.0f) ); - LLQuaternion yQ( yRot*DEG_TO_RAD, LLVector3(0.0f, 1.0f, 0.0f) ); - LLQuaternion zQ( zRot*DEG_TO_RAD, LLVector3(0.0f, 0.0f, 1.0f) ); - LLQuaternion ret; - switch( order ) - { - case LLQuaternion::XYZ: - ret = xQ * yQ * zQ; - break; - case LLQuaternion::YZX: - ret = yQ * zQ * xQ; - break; - case LLQuaternion::ZXY: - ret = zQ * xQ * yQ; - break; - case LLQuaternion::XZY: - ret = xQ * zQ * yQ; - break; - case LLQuaternion::YXZ: - ret = yQ * xQ * zQ; - break; - case LLQuaternion::ZYX: - ret = zQ * yQ * xQ; - break; - } - return ret; -} - -const char *OrderToString( const LLQuaternion::Order order ) -{ - const char *p = NULL; - switch( order ) - { - default: - case LLQuaternion::XYZ: - p = "XYZ"; - break; - case LLQuaternion::YZX: - p = "YZX"; - break; - case LLQuaternion::ZXY: - p = "ZXY"; - break; - case LLQuaternion::XZY: - p = "XZY"; - break; - case LLQuaternion::YXZ: - p = "YXZ"; - break; - case LLQuaternion::ZYX: - p = "ZYX"; - break; - } - return p; -} - -LLQuaternion::Order StringToOrder( const char *str ) -{ - if (strncmp(str, "XYZ", 3)==0 || strncmp(str, "xyz", 3)==0) - return LLQuaternion::XYZ; - - if (strncmp(str, "YZX", 3)==0 || strncmp(str, "yzx", 3)==0) - return LLQuaternion::YZX; - - if (strncmp(str, "ZXY", 3)==0 || strncmp(str, "zxy", 3)==0) - return LLQuaternion::ZXY; - - if (strncmp(str, "XZY", 3)==0 || strncmp(str, "xzy", 3)==0) - return LLQuaternion::XZY; - - if (strncmp(str, "YXZ", 3)==0 || strncmp(str, "yxz", 3)==0) - return LLQuaternion::YXZ; - - if (strncmp(str, "ZYX", 3)==0 || strncmp(str, "zyx", 3)==0) - return LLQuaternion::ZYX; - - return 
LLQuaternion::XYZ; -} - -void LLQuaternion::getAngleAxis(F32* angle, LLVector3 &vec) const -{ - F32 cos_a = mQ[VW]; - if (cos_a > 1.0f) cos_a = 1.0f; - if (cos_a < -1.0f) cos_a = -1.0f; - - F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); - - if ( fabs( sin_a ) < 0.0005f ) - sin_a = 1.0f; - else - sin_a = 1.f/sin_a; - - F32 temp_angle = 2.0f * (F32) acos( cos_a ); - if (temp_angle > F_PI) - { - // The (angle,axis) pair should never have angles outside [PI, -PI] - // since we want the _shortest_ (angle,axis) solution. - // Since acos is defined for [0, PI], and we multiply by 2.0, we - // can push the angle outside the acceptible range. - // When this happens we set the angle to the other portion of a - // full 2PI rotation, and negate the axis, which reverses the - // direction of the rotation (by the right-hand rule). - *angle = 2.f * F_PI - temp_angle; - vec.mV[VX] = - mQ[VX] * sin_a; - vec.mV[VY] = - mQ[VY] * sin_a; - vec.mV[VZ] = - mQ[VZ] * sin_a; - } - else - { - *angle = temp_angle; - vec.mV[VX] = mQ[VX] * sin_a; - vec.mV[VY] = mQ[VY] * sin_a; - vec.mV[VZ] = mQ[VZ] * sin_a; - } -} - - -// quaternion does not need to be normalized -void LLQuaternion::getEulerAngles(F32 *roll, F32 *pitch, F32 *yaw) const -{ - LLMatrix3 rot_mat(*this); - rot_mat.orthogonalize(); - rot_mat.getEulerAngles(roll, pitch, yaw); - -// // NOTE: LLQuaternion's are actually inverted with respect to -// // the matrices, so this code also assumes inverted quaternions -// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied -// // in reverse order (yaw,pitch,roll). -// F32 x = -mQ[VX], y = -mQ[VY], z = -mQ[VZ], w = mQ[VW]; -// F64 m20 = 2.0*(x*z-y*w); -// if (1.0f - fabsf(m20) < F_APPROXIMATELY_ZERO) -// { -// *roll = 0.0f; -// *pitch = (F32)asin(m20); -// *yaw = (F32)atan2(2.0*(x*y-z*w), 1.0 - 2.0*(x*x+z*z)); -// } -// else -// { -// *roll = (F32)atan2(-2.0*(y*z+x*w), 1.0-2.0*(x*x+y*y)); -// *pitch = (F32)asin(m20); -// *yaw = (F32)atan2(-2.0*(x*y+z*w), 1.0-2.0*(y*y+z*z)); -// } -} - -// Saves space by using the fact that our quaternions are normalized -LLVector3 LLQuaternion::packToVector3() const -{ - if( mQ[VW] >= 0 ) - { - return LLVector3( mQ[VX], mQ[VY], mQ[VZ] ); - } - else - { - return LLVector3( -mQ[VX], -mQ[VY], -mQ[VZ] ); - } -} - -// Saves space by using the fact that our quaternions are normalized -void LLQuaternion::unpackFromVector3( const LLVector3& vec ) -{ - mQ[VX] = vec.mV[VX]; - mQ[VY] = vec.mV[VY]; - mQ[VZ] = vec.mV[VZ]; - F32 t = 1.f - vec.magVecSquared(); - if( t > 0 ) - { - mQ[VW] = sqrt( t ); - } - else - { - // Need this to avoid trying to find the square root of a negative number due - // to floating point error. - mQ[VW] = 0; - } -} - -BOOL LLQuaternion::parseQuat(const std::string& buf, LLQuaternion* value) -{ - if( buf.empty() || value == NULL) - { - return FALSE; - } - - LLQuaternion quat; - S32 count = sscanf( buf.c_str(), "%f %f %f %f", quat.mQ + 0, quat.mQ + 1, quat.mQ + 2, quat.mQ + 3 ); - if( 4 == count ) - { - value->set( quat ); - return TRUE; - } - - return FALSE; -} - - -// End +/** + * @file llquaternion.cpp + * @brief LLQuaternion class implementation. + * + * $LicenseInfo:firstyear=2000&license=viewergpl$ + * + * Copyright (c) 2000-2009, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#include "linden_common.h" + +#include "llmath.h" // for F_PI + +#include "llquaternion.h" + +//#include "vmath.h" +#include "v3math.h" +#include "v3dmath.h" +#include "v4math.h" +#include "m4math.h" +#include "m3math.h" +#include "llquantize.h" + +// WARNING: Don't use this for global const definitions! using this +// at the top of a *.cpp file might not give you what you think. +const LLQuaternion LLQuaternion::DEFAULT; + +// Constructors + +LLQuaternion::LLQuaternion(const LLMatrix4 &mat) +{ + *this = mat.quaternion(); + normalize(); +} + +LLQuaternion::LLQuaternion(const LLMatrix3 &mat) +{ + *this = mat.quaternion(); + normalize(); +} + +LLQuaternion::LLQuaternion(F32 angle, const LLVector4 &vec) +{ + LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX] * s; + mQ[VY] = v.mV[VY] * s; + mQ[VZ] = v.mV[VZ] * s; + mQ[VW] = c; + normalize(); +} + +LLQuaternion::LLQuaternion(F32 angle, const LLVector3 &vec) +{ + LLVector3 v(vec); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX] * s; + mQ[VY] = v.mV[VY] * s; + mQ[VZ] = v.mV[VZ] * s; + mQ[VW] = c; + normalize(); +} + +LLQuaternion::LLQuaternion(const LLVector3 &x_axis, + const LLVector3 &y_axis, + const LLVector3 &z_axis) +{ + LLMatrix3 mat; + mat.setRows(x_axis, y_axis, z_axis); + *this = mat.quaternion(); + normalize(); +} + +// Quatizations +void LLQuaternion::quantize16(F32 lower, F32 upper) +{ + F32 x = mQ[VX]; + F32 y = mQ[VY]; + F32 z = mQ[VZ]; + F32 s = mQ[VS]; + + x = U16_to_F32(F32_to_U16_ROUND(x, lower, upper), lower, upper); + y = U16_to_F32(F32_to_U16_ROUND(y, lower, upper), lower, upper); + z = U16_to_F32(F32_to_U16_ROUND(z, lower, upper), lower, upper); + s = U16_to_F32(F32_to_U16_ROUND(s, lower, upper), lower, upper); + + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = s; + + normalize(); +} + +void LLQuaternion::quantize8(F32 lower, F32 upper) +{ + mQ[VX] = U8_to_F32(F32_to_U8_ROUND(mQ[VX], lower, upper), lower, upper); + mQ[VY] = U8_to_F32(F32_to_U8_ROUND(mQ[VY], lower, upper), lower, upper); + mQ[VZ] = U8_to_F32(F32_to_U8_ROUND(mQ[VZ], lower, upper), lower, upper); + mQ[VS] = U8_to_F32(F32_to_U8_ROUND(mQ[VS], lower, upper), lower, upper); + + normalize(); +} + +// LLVector3 Magnitude and Normalization 
Functions + + +// Set LLQuaternion routines + +const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, F32 x, F32 y, F32 z) +{ + LLVector3 vec(x, y, z); + vec.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = vec.mV[VX]*s; + mQ[VY] = vec.mV[VY]*s; + mQ[VZ] = vec.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector3 &vec) +{ + LLVector3 v(vec); + v.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setAngleAxis(F32 angle, const LLVector4 &vec) +{ + LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setEulerAngles(F32 roll, F32 pitch, F32 yaw) +{ + LLMatrix3 rot_mat(roll, pitch, yaw); + rot_mat.orthogonalize(); + *this = rot_mat.quaternion(); + + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::set(const LLMatrix3 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::set(const LLMatrix4 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::setQuat(F32 angle, F32 x, F32 y, F32 z) +{ + LLVector3 vec(x, y, z); + vec.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = vec.mV[VX]*s; + mQ[VY] = vec.mV[VY]*s; + mQ[VZ] = vec.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +// deprecated +const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector3 &vec) +{ + LLVector3 v(vec); + v.normalize(); + + angle *= 0.5f; + F32 c, s; + c = cosf(angle); + s = sinf(angle); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(F32 angle, const LLVector4 &vec) +{ + LLVector3 v(vec.mV[VX], vec.mV[VY], vec.mV[VZ]); + v.normalize(); + + F32 c, s; + c = cosf(angle*0.5f); + s = sinf(angle*0.5f); + + mQ[VX] = v.mV[VX]*s; + mQ[VY] = v.mV[VY]*s; + mQ[VZ] = v.mV[VZ]*s; + mQ[VW] = c; + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(F32 roll, F32 pitch, F32 yaw) +{ + LLMatrix3 rot_mat(roll, pitch, yaw); + rot_mat.orthogonalize(); + *this = rot_mat.quaternion(); + + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(const LLMatrix3 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +} + +const LLQuaternion& LLQuaternion::setQuat(const LLMatrix4 &mat) +{ + *this = mat.quaternion(); + normalize(); + return (*this); +//#if 1 +// // NOTE: LLQuaternion's are actually inverted with respect to +// // the matrices, so this code also assumes inverted quaternions +// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied +// // in reverse order (yaw,pitch,roll). 
+// F64 cosX = cos(roll); +// F64 cosY = cos(pitch); +// F64 cosZ = cos(yaw); +// +// F64 sinX = sin(roll); +// F64 sinY = sin(pitch); +// F64 sinZ = sin(yaw); +// +// mQ[VW] = (F32)sqrt(cosY*cosZ - sinX*sinY*sinZ + cosX*cosZ + cosX*cosY + 1.0)*.5; +// if (fabs(mQ[VW]) < F_APPROXIMATELY_ZERO) +// { +// // null rotation, any axis will do +// mQ[VX] = 0.0f; +// mQ[VY] = 1.0f; +// mQ[VZ] = 0.0f; +// } +// else +// { +// F32 inv_s = 1.0f / (4.0f * mQ[VW]); +// mQ[VX] = (F32)-(-sinX*cosY - cosX*sinY*sinZ - sinX*cosZ) * inv_s; +// mQ[VY] = (F32)-(-cosX*sinY*cosZ + sinX*sinZ - sinY) * inv_s; +// mQ[VZ] = (F32)-(-cosY*sinZ - sinX*sinY*cosZ - cosX*sinZ) * inv_s; +// } +// +//#else // This only works on a certain subset of roll/pitch/yaw +// +// F64 cosX = cosf(roll/2.0); +// F64 cosY = cosf(pitch/2.0); +// F64 cosZ = cosf(yaw/2.0); +// +// F64 sinX = sinf(roll/2.0); +// F64 sinY = sinf(pitch/2.0); +// F64 sinZ = sinf(yaw/2.0); +// +// mQ[VW] = (F32)(cosX*cosY*cosZ + sinX*sinY*sinZ); +// mQ[VX] = (F32)(sinX*cosY*cosZ - cosX*sinY*sinZ); +// mQ[VY] = (F32)(cosX*sinY*cosZ + sinX*cosY*sinZ); +// mQ[VZ] = (F32)(cosX*cosY*sinZ - sinX*sinY*cosZ); +//#endif +// +// normalize(); +// return (*this); +} + +// SJB: This code is correct for a logicly stored (non-transposed) matrix; +// Our matrices are stored transposed, OpenGL style, so this generates the +// INVERSE matrix, or the CORRECT matrix form an INVERSE quaternion. +// Because we use similar logic in LLMatrix3::quaternion(), +// we are internally consistant so everything works OK :) +LLMatrix3 LLQuaternion::getMatrix3(void) const +{ + LLMatrix3 mat; + F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; + + xx = mQ[VX] * mQ[VX]; + xy = mQ[VX] * mQ[VY]; + xz = mQ[VX] * mQ[VZ]; + xw = mQ[VX] * mQ[VW]; + + yy = mQ[VY] * mQ[VY]; + yz = mQ[VY] * mQ[VZ]; + yw = mQ[VY] * mQ[VW]; + + zz = mQ[VZ] * mQ[VZ]; + zw = mQ[VZ] * mQ[VW]; + + mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); + mat.mMatrix[0][1] = 2.f * ( xy + zw ); + mat.mMatrix[0][2] = 2.f * ( xz - yw ); + + mat.mMatrix[1][0] = 2.f * ( xy - zw ); + mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); + mat.mMatrix[1][2] = 2.f * ( yz + xw ); + + mat.mMatrix[2][0] = 2.f * ( xz + yw ); + mat.mMatrix[2][1] = 2.f * ( yz - xw ); + mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); + + return mat; +} + +LLMatrix4 LLQuaternion::getMatrix4(void) const +{ + LLMatrix4 mat; + F32 xx, xy, xz, xw, yy, yz, yw, zz, zw; + + xx = mQ[VX] * mQ[VX]; + xy = mQ[VX] * mQ[VY]; + xz = mQ[VX] * mQ[VZ]; + xw = mQ[VX] * mQ[VW]; + + yy = mQ[VY] * mQ[VY]; + yz = mQ[VY] * mQ[VZ]; + yw = mQ[VY] * mQ[VW]; + + zz = mQ[VZ] * mQ[VZ]; + zw = mQ[VZ] * mQ[VW]; + + mat.mMatrix[0][0] = 1.f - 2.f * ( yy + zz ); + mat.mMatrix[0][1] = 2.f * ( xy + zw ); + mat.mMatrix[0][2] = 2.f * ( xz - yw ); + + mat.mMatrix[1][0] = 2.f * ( xy - zw ); + mat.mMatrix[1][1] = 1.f - 2.f * ( xx + zz ); + mat.mMatrix[1][2] = 2.f * ( yz + xw ); + + mat.mMatrix[2][0] = 2.f * ( xz + yw ); + mat.mMatrix[2][1] = 2.f * ( yz - xw ); + mat.mMatrix[2][2] = 1.f - 2.f * ( xx + yy ); + + // TODO -- should we set the translation portion to zero? + + return mat; +} + + + + +// Other useful methods + + +// calculate the shortest rotation from a to b +void LLQuaternion::shortestArc(const LLVector3 &a, const LLVector3 &b) +{ + // Make a local copy of both vectors. + LLVector3 vec_a = a; + LLVector3 vec_b = b; + + // Make sure neither vector is zero length. Also normalize + // the vectors while we are at it. 
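+	// (For example, shortestArc(+X, +Y) yields a 90 degree rotation about +Z:
+	// axis = X % Y = +Z and cos_theta = X * Y = 0, so theta = PI/2 below.)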
+ F32 vec_a_mag = vec_a.normalize(); + F32 vec_b_mag = vec_b.normalize(); + if (vec_a_mag < F_APPROXIMATELY_ZERO || + vec_b_mag < F_APPROXIMATELY_ZERO) + { + // Can't calculate a rotation from this. + // Just return ZERO_ROTATION instead. + loadIdentity(); + return; + } + + // Create an axis to rotate around, and the cos of the angle to rotate. + LLVector3 axis = vec_a % vec_b; + F32 cos_theta = vec_a * vec_b; + + // Check the angle between the vectors to see if they are parallel or anti-parallel. + if (cos_theta > 1.0 - F_APPROXIMATELY_ZERO) + { + // a and b are parallel. No rotation is necessary. + loadIdentity(); + } + else if (cos_theta < -1.0 + F_APPROXIMATELY_ZERO) + { + // a and b are anti-parallel. + // Rotate 180 degrees around some orthogonal axis. + // Find the projection of the x-axis onto a, and try + // using the vector between the projection and the x-axis + // as the orthogonal axis. + LLVector3 proj = vec_a.mV[VX] / (vec_a * vec_a) * vec_a; + LLVector3 ortho_axis(1.f, 0.f, 0.f); + ortho_axis -= proj; + + // Turn this into an orthonormal axis. + F32 ortho_length = ortho_axis.normalize(); + // If the axis' length is 0, then our guess at an orthogonal axis + // was wrong (a is parallel to the x-axis). + if (ortho_length < F_APPROXIMATELY_ZERO) + { + // Use the z-axis instead. + ortho_axis.setVec(0.f, 0.f, 1.f); + } + + // Construct a quaternion from this orthonormal axis. + mQ[VX] = ortho_axis.mV[VX]; + mQ[VY] = ortho_axis.mV[VY]; + mQ[VZ] = ortho_axis.mV[VZ]; + mQ[VW] = 0.f; + } + else + { + // a and b are NOT parallel or anti-parallel. + // Return the rotation between these vectors. + F32 theta = (F32)acos(cos_theta); + + setAngleAxis(theta, axis); + } +} + +// constrains rotation to a cone angle specified in radians +const LLQuaternion &LLQuaternion::constrain(F32 radians) +{ + const F32 cos_angle_lim = cosf( radians/2 ); // mQ[VW] limit + const F32 sin_angle_lim = sinf( radians/2 ); // rotation axis length limit + + if (mQ[VW] < 0.f) + { + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + mQ[VW] *= -1.f; + } + + // if rotation angle is greater than limit (cos is less than limit) + if( mQ[VW] < cos_angle_lim ) + { + mQ[VW] = cos_angle_lim; + F32 axis_len = sqrtf( mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] ); // sin(theta/2) + F32 axis_mult_fact = sin_angle_lim / axis_len; + mQ[VX] *= axis_mult_fact; + mQ[VY] *= axis_mult_fact; + mQ[VZ] *= axis_mult_fact; + } + + return *this; +} + +// Operators + +std::ostream& operator<<(std::ostream &s, const LLQuaternion &a) +{ + s << "{ " + << a.mQ[VX] << ", " << a.mQ[VY] << ", " << a.mQ[VZ] << ", " << a.mQ[VW] + << " }"; + return s; +} + + +// Does NOT renormalize the result +LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b) +{ +// LLQuaternion::mMultCount++; + + LLQuaternion q( + b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], + b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], + b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], + b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] + ); + return q; +} + +/* +LLMatrix4 operator*(const LLMatrix4 &m, const LLQuaternion &q) +{ + LLMatrix4 qmat(q); + return (m*qmat); +} +*/ + + + +LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot) +{ + F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; + F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; + F32 ry = rot.mQ[VW] * a.mV[VY] + 
rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; + F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; + + F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; + F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; + F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; + + return LLVector4(nx, ny, nz, a.mV[VW]); +} + +LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot) +{ + F32 rw = - rot.mQ[VX] * a.mV[VX] - rot.mQ[VY] * a.mV[VY] - rot.mQ[VZ] * a.mV[VZ]; + F32 rx = rot.mQ[VW] * a.mV[VX] + rot.mQ[VY] * a.mV[VZ] - rot.mQ[VZ] * a.mV[VY]; + F32 ry = rot.mQ[VW] * a.mV[VY] + rot.mQ[VZ] * a.mV[VX] - rot.mQ[VX] * a.mV[VZ]; + F32 rz = rot.mQ[VW] * a.mV[VZ] + rot.mQ[VX] * a.mV[VY] - rot.mQ[VY] * a.mV[VX]; + + F32 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; + F32 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; + F32 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; + + return LLVector3(nx, ny, nz); +} + +LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot) +{ + F64 rw = - rot.mQ[VX] * a.mdV[VX] - rot.mQ[VY] * a.mdV[VY] - rot.mQ[VZ] * a.mdV[VZ]; + F64 rx = rot.mQ[VW] * a.mdV[VX] + rot.mQ[VY] * a.mdV[VZ] - rot.mQ[VZ] * a.mdV[VY]; + F64 ry = rot.mQ[VW] * a.mdV[VY] + rot.mQ[VZ] * a.mdV[VX] - rot.mQ[VX] * a.mdV[VZ]; + F64 rz = rot.mQ[VW] * a.mdV[VZ] + rot.mQ[VX] * a.mdV[VY] - rot.mQ[VY] * a.mdV[VX]; + + F64 nx = - rw * rot.mQ[VX] + rx * rot.mQ[VW] - ry * rot.mQ[VZ] + rz * rot.mQ[VY]; + F64 ny = - rw * rot.mQ[VY] + ry * rot.mQ[VW] - rz * rot.mQ[VX] + rx * rot.mQ[VZ]; + F64 nz = - rw * rot.mQ[VZ] + rz * rot.mQ[VW] - rx * rot.mQ[VY] + ry * rot.mQ[VX]; + + return LLVector3d(nx, ny, nz); +} + +F32 dot(const LLQuaternion &a, const LLQuaternion &b) +{ + return a.mQ[VX] * b.mQ[VX] + + a.mQ[VY] * b.mQ[VY] + + a.mQ[VZ] * b.mQ[VZ] + + a.mQ[VW] * b.mQ[VW]; +} + +// DEMO HACK: This lerp is probably inocrrect now due intermediate normalization +// it should look more like the lerp below +#if 0 +// linear interpolation +LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) +{ + LLQuaternion r; + r = t * (q - p) + p; + r.normalize(); + return r; +} +#endif + +// lerp from identity to q +LLQuaternion lerp(F32 t, const LLQuaternion &q) +{ + LLQuaternion r; + r.mQ[VX] = t * q.mQ[VX]; + r.mQ[VY] = t * q.mQ[VY]; + r.mQ[VZ] = t * q.mQ[VZ]; + r.mQ[VW] = t * (q.mQ[VZ] - 1.f) + 1.f; + r.normalize(); + return r; +} + +LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q) +{ + LLQuaternion r; + F32 inv_t; + + inv_t = 1.f - t; + + r.mQ[VX] = t * q.mQ[VX] + (inv_t * p.mQ[VX]); + r.mQ[VY] = t * q.mQ[VY] + (inv_t * p.mQ[VY]); + r.mQ[VZ] = t * q.mQ[VZ] + (inv_t * p.mQ[VZ]); + r.mQ[VW] = t * q.mQ[VW] + (inv_t * p.mQ[VW]); + r.normalize(); + return r; +} + + +// spherical linear interpolation +LLQuaternion slerp( F32 u, const LLQuaternion &a, const LLQuaternion &b ) +{ + // cosine theta = dot product of a and b + F32 cos_t = a.mQ[0]*b.mQ[0] + a.mQ[1]*b.mQ[1] + a.mQ[2]*b.mQ[2] + a.mQ[3]*b.mQ[3]; + + // if b is on opposite hemisphere from a, use -a instead + int bflip; + if (cos_t < 0.0f) + { + cos_t = -cos_t; + bflip = TRUE; + } + else + bflip = FALSE; + + // if B is (within precision limits) the same as A, + // just linear interpolate between A and B. 
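+	// (As the angle between a and b approaches zero, sin(u*theta)/sin(theta) -> u
+	// and sin((1-u)*theta)/sin(theta) -> 1-u, so plain lerp is the correct limit
+	// and avoids dividing by a near-zero sin_t.)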
+ F32 alpha; // interpolant + F32 beta; // 1 - interpolant + if (1.0f - cos_t < 0.00001f) + { + beta = 1.0f - u; + alpha = u; + } + else + { + F32 theta = acosf(cos_t); + F32 sin_t = sinf(theta); + beta = sinf(theta - u*theta) / sin_t; + alpha = sinf(u*theta) / sin_t; + } + + if (bflip) + beta = -beta; + + // interpolate + LLQuaternion ret; + ret.mQ[0] = beta*a.mQ[0] + alpha*b.mQ[0]; + ret.mQ[1] = beta*a.mQ[1] + alpha*b.mQ[1]; + ret.mQ[2] = beta*a.mQ[2] + alpha*b.mQ[2]; + ret.mQ[3] = beta*a.mQ[3] + alpha*b.mQ[3]; + + return ret; +} + +// lerp whenever possible +LLQuaternion nlerp(F32 t, const LLQuaternion &a, const LLQuaternion &b) +{ + if (dot(a, b) < 0.f) + { + return slerp(t, a, b); + } + else + { + return lerp(t, a, b); + } +} + +LLQuaternion nlerp(F32 t, const LLQuaternion &q) +{ + if (q.mQ[VW] < 0.f) + { + return slerp(t, q); + } + else + { + return lerp(t, q); + } +} + +// slerp from identity quaternion to another quaternion +LLQuaternion slerp(F32 t, const LLQuaternion &q) +{ + F32 c = q.mQ[VW]; + if (1.0f == t || 1.0f == c) + { + // the trivial cases + return q; + } + + LLQuaternion r; + F32 s, angle, stq, stp; + + s = (F32) sqrt(1.f - c*c); + + if (c < 0.0f) + { + // when c < 0.0 then theta > PI/2 + // since quat and -quat are the same rotation we invert one of + // p or q to reduce unecessary spins + // A equivalent way to do it is to convert acos(c) as if it had + // been negative, and to negate stp + angle = (F32) acos(-c); + stp = -(F32) sin(angle * (1.f - t)); + stq = (F32) sin(angle * t); + } + else + { + angle = (F32) acos(c); + stp = (F32) sin(angle * (1.f - t)); + stq = (F32) sin(angle * t); + } + + r.mQ[VX] = (q.mQ[VX] * stq) / s; + r.mQ[VY] = (q.mQ[VY] * stq) / s; + r.mQ[VZ] = (q.mQ[VZ] * stq) / s; + r.mQ[VW] = (stp + q.mQ[VW] * stq) / s; + + return r; +} + +LLQuaternion mayaQ(F32 xRot, F32 yRot, F32 zRot, LLQuaternion::Order order) +{ + LLQuaternion xQ( xRot*DEG_TO_RAD, LLVector3(1.0f, 0.0f, 0.0f) ); + LLQuaternion yQ( yRot*DEG_TO_RAD, LLVector3(0.0f, 1.0f, 0.0f) ); + LLQuaternion zQ( zRot*DEG_TO_RAD, LLVector3(0.0f, 0.0f, 1.0f) ); + LLQuaternion ret; + switch( order ) + { + case LLQuaternion::XYZ: + ret = xQ * yQ * zQ; + break; + case LLQuaternion::YZX: + ret = yQ * zQ * xQ; + break; + case LLQuaternion::ZXY: + ret = zQ * xQ * yQ; + break; + case LLQuaternion::XZY: + ret = xQ * zQ * yQ; + break; + case LLQuaternion::YXZ: + ret = yQ * xQ * zQ; + break; + case LLQuaternion::ZYX: + ret = zQ * yQ * xQ; + break; + } + return ret; +} + +const char *OrderToString( const LLQuaternion::Order order ) +{ + const char *p = NULL; + switch( order ) + { + default: + case LLQuaternion::XYZ: + p = "XYZ"; + break; + case LLQuaternion::YZX: + p = "YZX"; + break; + case LLQuaternion::ZXY: + p = "ZXY"; + break; + case LLQuaternion::XZY: + p = "XZY"; + break; + case LLQuaternion::YXZ: + p = "YXZ"; + break; + case LLQuaternion::ZYX: + p = "ZYX"; + break; + } + return p; +} + +LLQuaternion::Order StringToOrder( const char *str ) +{ + if (strncmp(str, "XYZ", 3)==0 || strncmp(str, "xyz", 3)==0) + return LLQuaternion::XYZ; + + if (strncmp(str, "YZX", 3)==0 || strncmp(str, "yzx", 3)==0) + return LLQuaternion::YZX; + + if (strncmp(str, "ZXY", 3)==0 || strncmp(str, "zxy", 3)==0) + return LLQuaternion::ZXY; + + if (strncmp(str, "XZY", 3)==0 || strncmp(str, "xzy", 3)==0) + return LLQuaternion::XZY; + + if (strncmp(str, "YXZ", 3)==0 || strncmp(str, "yxz", 3)==0) + return LLQuaternion::YXZ; + + if (strncmp(str, "ZYX", 3)==0 || strncmp(str, "zyx", 3)==0) + return LLQuaternion::ZYX; + + return 
LLQuaternion::XYZ; +} + +void LLQuaternion::getAngleAxis(F32* angle, LLVector3 &vec) const +{ + F32 cos_a = mQ[VW]; + if (cos_a > 1.0f) cos_a = 1.0f; + if (cos_a < -1.0f) cos_a = -1.0f; + + F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); + + if ( fabs( sin_a ) < 0.0005f ) + sin_a = 1.0f; + else + sin_a = 1.f/sin_a; + + F32 temp_angle = 2.0f * (F32) acos( cos_a ); + if (temp_angle > F_PI) + { + // The (angle,axis) pair should never have angles outside [PI, -PI] + // since we want the _shortest_ (angle,axis) solution. + // Since acos is defined for [0, PI], and we multiply by 2.0, we + // can push the angle outside the acceptible range. + // When this happens we set the angle to the other portion of a + // full 2PI rotation, and negate the axis, which reverses the + // direction of the rotation (by the right-hand rule). + *angle = 2.f * F_PI - temp_angle; + vec.mV[VX] = - mQ[VX] * sin_a; + vec.mV[VY] = - mQ[VY] * sin_a; + vec.mV[VZ] = - mQ[VZ] * sin_a; + } + else + { + *angle = temp_angle; + vec.mV[VX] = mQ[VX] * sin_a; + vec.mV[VY] = mQ[VY] * sin_a; + vec.mV[VZ] = mQ[VZ] * sin_a; + } +} + + +// quaternion does not need to be normalized +void LLQuaternion::getEulerAngles(F32 *roll, F32 *pitch, F32 *yaw) const +{ + LLMatrix3 rot_mat(*this); + rot_mat.orthogonalize(); + rot_mat.getEulerAngles(roll, pitch, yaw); + +// // NOTE: LLQuaternion's are actually inverted with respect to +// // the matrices, so this code also assumes inverted quaternions +// // (-x, -y, -z, w). The result is that roll,pitch,yaw are applied +// // in reverse order (yaw,pitch,roll). +// F32 x = -mQ[VX], y = -mQ[VY], z = -mQ[VZ], w = mQ[VW]; +// F64 m20 = 2.0*(x*z-y*w); +// if (1.0f - fabsf(m20) < F_APPROXIMATELY_ZERO) +// { +// *roll = 0.0f; +// *pitch = (F32)asin(m20); +// *yaw = (F32)atan2(2.0*(x*y-z*w), 1.0 - 2.0*(x*x+z*z)); +// } +// else +// { +// *roll = (F32)atan2(-2.0*(y*z+x*w), 1.0-2.0*(x*x+y*y)); +// *pitch = (F32)asin(m20); +// *yaw = (F32)atan2(-2.0*(x*y+z*w), 1.0-2.0*(y*y+z*z)); +// } +} + +// Saves space by using the fact that our quaternions are normalized +LLVector3 LLQuaternion::packToVector3() const +{ + if( mQ[VW] >= 0 ) + { + return LLVector3( mQ[VX], mQ[VY], mQ[VZ] ); + } + else + { + return LLVector3( -mQ[VX], -mQ[VY], -mQ[VZ] ); + } +} + +// Saves space by using the fact that our quaternions are normalized +void LLQuaternion::unpackFromVector3( const LLVector3& vec ) +{ + mQ[VX] = vec.mV[VX]; + mQ[VY] = vec.mV[VY]; + mQ[VZ] = vec.mV[VZ]; + F32 t = 1.f - vec.magVecSquared(); + if( t > 0 ) + { + mQ[VW] = sqrt( t ); + } + else + { + // Need this to avoid trying to find the square root of a negative number due + // to floating point error. + mQ[VW] = 0; + } +} + +BOOL LLQuaternion::parseQuat(const std::string& buf, LLQuaternion* value) +{ + if( buf.empty() || value == NULL) + { + return FALSE; + } + + LLQuaternion quat; + S32 count = sscanf( buf.c_str(), "%f %f %f %f", quat.mQ + 0, quat.mQ + 1, quat.mQ + 2, quat.mQ + 3 ); + if( 4 == count ) + { + value->set( quat ); + return TRUE; + } + + return FALSE; +} + + +// End diff --git a/indra/llmath/llquaternion.h b/indra/llmath/llquaternion.h index bbd4326483..a7bb09fae3 100644 --- a/indra/llmath/llquaternion.h +++ b/indra/llmath/llquaternion.h @@ -1,594 +1,594 @@ -/** - * @file llquaternion.h - * @brief LLQuaternion class header file. - * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. - * $/LicenseInfo$ - */ - -#ifndef LLQUATERNION_H -#define LLQUATERNION_H - -#include - -#ifndef LLMATH_H //enforce specific include order to avoid tangling inline dependencies -#error "Please include llmath.h first." -#endif - -class LLVector4; -class LLVector3; -class LLVector3d; -class LLMatrix4; -class LLMatrix3; - -// NOTA BENE: Quaternion code is written assuming Unit Quaternions!!!! -// Moreover, it is written assuming that all vectors and matricies -// passed as arguments are normalized and unitary respectively. -// VERY VERY VERY VERY BAD THINGS will happen if these assumptions fail. - -static const U32 LENGTHOFQUAT = 4; - -class LLQuaternion -{ -public: - F32 mQ[LENGTHOFQUAT]; - - static const LLQuaternion DEFAULT; - - LLQuaternion(); // Initializes Quaternion to (0,0,0,1) - explicit LLQuaternion(const LLMatrix4 &mat); // Initializes Quaternion from Matrix4 - explicit LLQuaternion(const LLMatrix3 &mat); // Initializes Quaternion from Matrix3 - LLQuaternion(F32 x, F32 y, F32 z, F32 w); // Initializes Quaternion to normalize(x, y, z, w) - LLQuaternion(F32 angle, const LLVector4 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) - LLQuaternion(F32 angle, const LLVector3 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) - LLQuaternion(const F32 *q); // Initializes Quaternion to normalize(x, y, z, w) - LLQuaternion(const LLVector3 &x_axis, - const LLVector3 &y_axis, - const LLVector3 &z_axis); // Initializes Quaternion from Matrix3 = [x_axis ; y_axis ; z_axis] - - BOOL isIdentity() const; - BOOL isNotIdentity() const; - BOOL isFinite() const; // checks to see if all values of LLQuaternion are finite - void quantize16(F32 lower, F32 upper); // changes the vector to reflect quatization - void quantize8(F32 lower, F32 upper); // changes the vector to reflect quatization - void loadIdentity(); // Loads the quaternion that represents the identity rotation - - const LLQuaternion& set(F32 x, F32 y, F32 z, F32 w); // Sets Quaternion to normalize(x, y, z, w) - const LLQuaternion& set(const LLQuaternion &quat); // Copies Quaternion - const LLQuaternion& set(const F32 *q); // Sets Quaternion to normalize(quat[VX], quat[VY], quat[VZ], quat[VW]) - const LLQuaternion& set(const LLMatrix3 &mat); // Sets Quaternion to mat2quat(mat) - const LLQuaternion& set(const LLMatrix4 &mat); // Sets Quaternion to mat2quat(mat) - - const 
LLQuaternion& setAngleAxis(F32 angle, F32 x, F32 y, F32 z); // Sets Quaternion to axis_angle2quat(angle, x, y, z) - const LLQuaternion& setAngleAxis(F32 angle, const LLVector3 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) - const LLQuaternion& setAngleAxis(F32 angle, const LLVector4 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) - const LLQuaternion& setEulerAngles(F32 roll, F32 pitch, F32 yaw); // Sets Quaternion to euler2quat(pitch, yaw, roll) - - const LLQuaternion& setQuatInit(F32 x, F32 y, F32 z, F32 w); // deprecated - const LLQuaternion& setQuat(const LLQuaternion &quat); // deprecated - const LLQuaternion& setQuat(const F32 *q); // deprecated - const LLQuaternion& setQuat(const LLMatrix3 &mat); // deprecated - const LLQuaternion& setQuat(const LLMatrix4 &mat); // deprecated - const LLQuaternion& setQuat(F32 angle, F32 x, F32 y, F32 z); // deprecated - const LLQuaternion& setQuat(F32 angle, const LLVector3 &vec); // deprecated - const LLQuaternion& setQuat(F32 angle, const LLVector4 &vec); // deprecated - const LLQuaternion& setQuat(F32 roll, F32 pitch, F32 yaw); // deprecated - - LLMatrix4 getMatrix4(void) const; // Returns the Matrix4 equivalent of Quaternion - LLMatrix3 getMatrix3(void) const; // Returns the Matrix3 equivalent of Quaternion - void getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const; // returns rotation in radians about axis x,y,z - void getAngleAxis(F32* angle, LLVector3 &vec) const; - void getEulerAngles(F32 *roll, F32* pitch, F32 *yaw) const; - - F32 normalize(); // Normalizes Quaternion and returns magnitude - F32 normQuat(); // deprecated - - const LLQuaternion& conjugate(void); // Conjugates Quaternion and returns result - const LLQuaternion& conjQuat(void); // deprecated - - // Other useful methods - const LLQuaternion& transpose(); // transpose (same as conjugate) - const LLQuaternion& transQuat(); // deprecated - - void shortestArc(const LLVector3 &a, const LLVector3 &b); // shortest rotation from a to b - const LLQuaternion& constrain(F32 radians); // constrains rotation to a cone angle specified in radians - - // Standard operators - friend std::ostream& operator<<(std::ostream &s, const LLQuaternion &a); // Prints a - friend LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b); // Addition - friend LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b); // Subtraction - friend LLQuaternion operator-(const LLQuaternion &a); // Negation - friend LLQuaternion operator*(F32 a, const LLQuaternion &q); // Scale - friend LLQuaternion operator*(const LLQuaternion &q, F32 b); // Scale - friend LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b); // Returns a * b - friend LLQuaternion operator~(const LLQuaternion &a); // Returns a* (Conjugate of a) - bool operator==(const LLQuaternion &b) const; // Returns a == b - bool operator!=(const LLQuaternion &b) const; // Returns a != b - - friend const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion &b); // Returns a * b - - friend LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot); // Rotates a by rot - friend LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot); // Rotates a by rot - friend LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot); // Rotates a by rot - - // Non-standard operators - friend F32 dot(const LLQuaternion &a, const LLQuaternion &b); - friend LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from p to q - friend 
LLQuaternion lerp(F32 t, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from identity to q - friend LLQuaternion slerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // spherical linear interpolation from p to q - friend LLQuaternion slerp(F32 t, const LLQuaternion &q); // spherical linear interpolation from identity to q - friend LLQuaternion nlerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // normalized linear interpolation from p to q - friend LLQuaternion nlerp(F32 t, const LLQuaternion &q); // normalized linear interpolation from p to q - - LLVector3 packToVector3() const; // Saves space by using the fact that our quaternions are normalized - void unpackFromVector3(const LLVector3& vec); // Saves space by using the fact that our quaternions are normalized - - enum Order { - XYZ = 0, - YZX = 1, - ZXY = 2, - XZY = 3, - YXZ = 4, - ZYX = 5 - }; - // Creates a quaternions from maya's rotation representation, - // which is 3 rotations (in DEGREES) in the specified order - friend LLQuaternion mayaQ(F32 x, F32 y, F32 z, Order order); - - // Conversions between Order and strings like "xyz" or "ZYX" - friend const char *OrderToString( const Order order ); - friend Order StringToOrder( const char *str ); - - static BOOL parseQuat(const std::string& buf, LLQuaternion* value); - - // For debugging, only - //static U32 mMultCount; -}; - -// checker -inline BOOL LLQuaternion::isFinite() const -{ - return (llfinite(mQ[VX]) && llfinite(mQ[VY]) && llfinite(mQ[VZ]) && llfinite(mQ[VS])); -} - -inline BOOL LLQuaternion::isIdentity() const -{ - return - ( mQ[VX] == 0.f ) && - ( mQ[VY] == 0.f ) && - ( mQ[VZ] == 0.f ) && - ( mQ[VS] == 1.f ); -} - -inline BOOL LLQuaternion::isNotIdentity() const -{ - return - ( mQ[VX] != 0.f ) || - ( mQ[VY] != 0.f ) || - ( mQ[VZ] != 0.f ) || - ( mQ[VS] != 1.f ); -} - - - -inline LLQuaternion::LLQuaternion(void) -{ - mQ[VX] = 0.f; - mQ[VY] = 0.f; - mQ[VZ] = 0.f; - mQ[VS] = 1.f; -} - -inline LLQuaternion::LLQuaternion(F32 x, F32 y, F32 z, F32 w) -{ - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = w; - - //RN: don't normalize this case as its used mainly for temporaries during calculations - //normalize(); - /* - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - mag -= 1.f; - mag = fabs(mag); - llassert(mag < 10.f*FP_MAG_THRESHOLD); - */ -} - -inline LLQuaternion::LLQuaternion(const F32 *q) -{ - mQ[VX] = q[VX]; - mQ[VY] = q[VY]; - mQ[VZ] = q[VZ]; - mQ[VS] = q[VW]; - - normalize(); - /* - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - mag -= 1.f; - mag = fabs(mag); - llassert(mag < FP_MAG_THRESHOLD); - */ -} - - -inline void LLQuaternion::loadIdentity() -{ - mQ[VX] = 0.0f; - mQ[VY] = 0.0f; - mQ[VZ] = 0.0f; - mQ[VW] = 1.0f; -} - - -inline const LLQuaternion& LLQuaternion::set(F32 x, F32 y, F32 z, F32 w) -{ - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = w; - normalize(); - return (*this); -} - -inline const LLQuaternion& LLQuaternion::set(const LLQuaternion &quat) -{ - mQ[VX] = quat.mQ[VX]; - mQ[VY] = quat.mQ[VY]; - mQ[VZ] = quat.mQ[VZ]; - mQ[VW] = quat.mQ[VW]; - normalize(); - return (*this); -} - -inline const LLQuaternion& LLQuaternion::set(const F32 *q) -{ - mQ[VX] = q[VX]; - mQ[VY] = q[VY]; - mQ[VZ] = q[VZ]; - mQ[VS] = q[VW]; - normalize(); - return (*this); -} - - -// deprecated -inline const LLQuaternion& LLQuaternion::setQuatInit(F32 x, F32 y, F32 z, F32 w) -{ - mQ[VX] = x; - mQ[VY] = y; - mQ[VZ] = z; - mQ[VS] = w; - normalize(); - return (*this); -} - -// deprecated 
-inline const LLQuaternion& LLQuaternion::setQuat(const LLQuaternion &quat) -{ - mQ[VX] = quat.mQ[VX]; - mQ[VY] = quat.mQ[VY]; - mQ[VZ] = quat.mQ[VZ]; - mQ[VW] = quat.mQ[VW]; - normalize(); - return (*this); -} - -// deprecated -inline const LLQuaternion& LLQuaternion::setQuat(const F32 *q) -{ - mQ[VX] = q[VX]; - mQ[VY] = q[VY]; - mQ[VZ] = q[VZ]; - mQ[VS] = q[VW]; - normalize(); - return (*this); -} - -// There may be a cheaper way that avoids the sqrt. -// Does sin_a = VX*VX + VY*VY + VZ*VZ? -// Copied from Matrix and Quaternion FAQ 1.12 -inline void LLQuaternion::getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const -{ - F32 cos_a = mQ[VW]; - if (cos_a > 1.0f) cos_a = 1.0f; - if (cos_a < -1.0f) cos_a = -1.0f; - - F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); - - if ( fabs( sin_a ) < 0.0005f ) - sin_a = 1.0f; - else - sin_a = 1.f/sin_a; - - F32 temp_angle = 2.0f * (F32) acos( cos_a ); - if (temp_angle > F_PI) - { - // The (angle,axis) pair should never have angles outside [PI, -PI] - // since we want the _shortest_ (angle,axis) solution. - // Since acos is defined for [0, PI], and we multiply by 2.0, we - // can push the angle outside the acceptible range. - // When this happens we set the angle to the other portion of a - // full 2PI rotation, and negate the axis, which reverses the - // direction of the rotation (by the right-hand rule). - *angle = 2.f * F_PI - temp_angle; - *x = - mQ[VX] * sin_a; - *y = - mQ[VY] * sin_a; - *z = - mQ[VZ] * sin_a; - } - else - { - *angle = temp_angle; - *x = mQ[VX] * sin_a; - *y = mQ[VY] * sin_a; - *z = mQ[VZ] * sin_a; - } -} - -inline const LLQuaternion& LLQuaternion::conjugate() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - -inline const LLQuaternion& LLQuaternion::conjQuat() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - -// Transpose -inline const LLQuaternion& LLQuaternion::transpose() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - -// deprecated -inline const LLQuaternion& LLQuaternion::transQuat() -{ - mQ[VX] *= -1.f; - mQ[VY] *= -1.f; - mQ[VZ] *= -1.f; - return (*this); -} - - -inline LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b) -{ - return LLQuaternion( - a.mQ[VX] + b.mQ[VX], - a.mQ[VY] + b.mQ[VY], - a.mQ[VZ] + b.mQ[VZ], - a.mQ[VW] + b.mQ[VW] ); -} - - -inline LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b) -{ - return LLQuaternion( - a.mQ[VX] - b.mQ[VX], - a.mQ[VY] - b.mQ[VY], - a.mQ[VZ] - b.mQ[VZ], - a.mQ[VW] - b.mQ[VW] ); -} - - -inline LLQuaternion operator-(const LLQuaternion &a) -{ - return LLQuaternion( - -a.mQ[VX], - -a.mQ[VY], - -a.mQ[VZ], - -a.mQ[VW] ); -} - - -inline LLQuaternion operator*(F32 a, const LLQuaternion &q) -{ - return LLQuaternion( - a * q.mQ[VX], - a * q.mQ[VY], - a * q.mQ[VZ], - a * q.mQ[VW] ); -} - - -inline LLQuaternion operator*(const LLQuaternion &q, F32 a) -{ - return LLQuaternion( - a * q.mQ[VX], - a * q.mQ[VY], - a * q.mQ[VZ], - a * q.mQ[VW] ); -} - -inline LLQuaternion operator~(const LLQuaternion &a) -{ - LLQuaternion q(a); - q.conjQuat(); - return q; -} - -inline bool LLQuaternion::operator==(const LLQuaternion &b) const -{ - return ( (mQ[VX] == b.mQ[VX]) - &&(mQ[VY] == b.mQ[VY]) - &&(mQ[VZ] == b.mQ[VZ]) - &&(mQ[VS] == b.mQ[VS])); -} - -inline bool LLQuaternion::operator!=(const LLQuaternion &b) const -{ - return ( (mQ[VX] != b.mQ[VX]) - ||(mQ[VY] != b.mQ[VY]) - ||(mQ[VZ] != b.mQ[VZ]) - ||(mQ[VS] != b.mQ[VS])); -} - -inline const LLQuaternion& 
operator*=(LLQuaternion &a, const LLQuaternion &b) -{ -#if 1 - LLQuaternion q( - b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], - b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], - b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], - b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] - ); - a = q; -#else - a = a * b; -#endif - return a; -} - -const F32 ONE_PART_IN_A_MILLION = 0.000001f; - -inline F32 LLQuaternion::normalize() -{ - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - - if (mag > FP_MAG_THRESHOLD) - { - // Floating point error can prevent some quaternions from achieving - // exact unity length. When trying to renormalize such quaternions we - // can oscillate between multiple quantized states. To prevent such - // drifts we only renomalize if the length is far enough from unity. - if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) - { - F32 oomag = 1.f/mag; - mQ[VX] *= oomag; - mQ[VY] *= oomag; - mQ[VZ] *= oomag; - mQ[VS] *= oomag; - } - } - else - { - // we were given a very bad quaternion so we set it to identity - mQ[VX] = 0.f; - mQ[VY] = 0.f; - mQ[VZ] = 0.f; - mQ[VS] = 1.f; - } - - return mag; -} - -// deprecated -inline F32 LLQuaternion::normQuat() -{ - F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); - - if (mag > FP_MAG_THRESHOLD) - { - if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) - { - // only renormalize if length not close enough to 1.0 already - F32 oomag = 1.f/mag; - mQ[VX] *= oomag; - mQ[VY] *= oomag; - mQ[VZ] *= oomag; - mQ[VS] *= oomag; - } - } - else - { - mQ[VX] = 0.f; - mQ[VY] = 0.f; - mQ[VZ] = 0.f; - mQ[VS] = 1.f; - } - - return mag; -} - -LLQuaternion::Order StringToOrder( const char *str ); - -// Some notes about Quaternions - -// What is a Quaternion? -// --------------------- -// A quaternion is a point in 4-dimensional complex space. -// Q = { Qx, Qy, Qz, Qw } -// -// -// Why Quaternions? -// ---------------- -// The set of quaternions that make up the the 4-D unit sphere -// can be mapped to the set of all rotations in 3-D space. Sometimes -// it is easier to describe/manipulate rotations in quaternion space -// than rotation-matrix space. -// -// -// How Quaternions? -// ---------------- -// In order to take advantage of quaternions we need to know how to -// go from rotation-matricies to quaternions and back. We also have -// to agree what variety of rotations we're generating. -// -// Consider the equation... v' = v * R -// -// There are two ways to think about rotations of vectors. -// 1) v' is the same vector in a different reference frame -// 2) v' is a new vector in the same reference frame -// -// bookmark -- which way are we using? -// -// -// Quaternion from Angle-Axis: -// --------------------------- -// Suppose we wanted to represent a rotation of some angle (theta) -// about some axis ({Ax, Ay, Az})... -// -// axis of rotation = {Ax, Ay, Az} -// angle_of_rotation = theta -// -// s = sin(0.5 * theta) -// c = cos(0.5 * theta) -// Q = { s * Ax, s * Ay, s * Az, c } -// -// -// 3x3 Matrix from Quaternion -// -------------------------- -// -// | | -// | 1 - 2 * (y^2 + z^2) 2 * (x * y + z * w) 2 * (y * w - x * z) | -// | | -// M = | 2 * (x * y - z * w) 1 - 2 * (x^2 + z^2) 2 * (y * z + x * w) | -// | | -// | 2 * (x * z + y * w) 2 * (y * z - x * w) 1 - 2 * (x^2 + y^2) | -// | | - -#endif +/** + * @file llquaternion.h + * @brief LLQuaternion class header file. 
+ * + * $LicenseInfo:firstyear=2000&license=viewergpl$ + * + * Copyright (c) 2000-2009, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LLQUATERNION_H +#define LLQUATERNION_H + +#include + +#ifndef LLMATH_H //enforce specific include order to avoid tangling inline dependencies +#error "Please include llmath.h first." +#endif + +class LLVector4; +class LLVector3; +class LLVector3d; +class LLMatrix4; +class LLMatrix3; + +// NOTA BENE: Quaternion code is written assuming Unit Quaternions!!!! +// Moreover, it is written assuming that all vectors and matricies +// passed as arguments are normalized and unitary respectively. +// VERY VERY VERY VERY BAD THINGS will happen if these assumptions fail. 
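+// A unit quaternion is one where x*x + y*y + z*z + w*w == 1; when in doubt,
+// call normalize() before passing a quaternion into this code.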
+ +static const U32 LENGTHOFQUAT = 4; + +class LLQuaternion +{ +public: + F32 mQ[LENGTHOFQUAT]; + + static const LLQuaternion DEFAULT; + + LLQuaternion(); // Initializes Quaternion to (0,0,0,1) + explicit LLQuaternion(const LLMatrix4 &mat); // Initializes Quaternion from Matrix4 + explicit LLQuaternion(const LLMatrix3 &mat); // Initializes Quaternion from Matrix3 + LLQuaternion(F32 x, F32 y, F32 z, F32 w); // Initializes Quaternion to normalize(x, y, z, w) + LLQuaternion(F32 angle, const LLVector4 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) + LLQuaternion(F32 angle, const LLVector3 &vec); // Initializes Quaternion to axis_angle2quat(angle, vec) + LLQuaternion(const F32 *q); // Initializes Quaternion to normalize(x, y, z, w) + LLQuaternion(const LLVector3 &x_axis, + const LLVector3 &y_axis, + const LLVector3 &z_axis); // Initializes Quaternion from Matrix3 = [x_axis ; y_axis ; z_axis] + + BOOL isIdentity() const; + BOOL isNotIdentity() const; + BOOL isFinite() const; // checks to see if all values of LLQuaternion are finite + void quantize16(F32 lower, F32 upper); // changes the vector to reflect quatization + void quantize8(F32 lower, F32 upper); // changes the vector to reflect quatization + void loadIdentity(); // Loads the quaternion that represents the identity rotation + + const LLQuaternion& set(F32 x, F32 y, F32 z, F32 w); // Sets Quaternion to normalize(x, y, z, w) + const LLQuaternion& set(const LLQuaternion &quat); // Copies Quaternion + const LLQuaternion& set(const F32 *q); // Sets Quaternion to normalize(quat[VX], quat[VY], quat[VZ], quat[VW]) + const LLQuaternion& set(const LLMatrix3 &mat); // Sets Quaternion to mat2quat(mat) + const LLQuaternion& set(const LLMatrix4 &mat); // Sets Quaternion to mat2quat(mat) + + const LLQuaternion& setAngleAxis(F32 angle, F32 x, F32 y, F32 z); // Sets Quaternion to axis_angle2quat(angle, x, y, z) + const LLQuaternion& setAngleAxis(F32 angle, const LLVector3 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) + const LLQuaternion& setAngleAxis(F32 angle, const LLVector4 &vec); // Sets Quaternion to axis_angle2quat(angle, vec) + const LLQuaternion& setEulerAngles(F32 roll, F32 pitch, F32 yaw); // Sets Quaternion to euler2quat(pitch, yaw, roll) + + const LLQuaternion& setQuatInit(F32 x, F32 y, F32 z, F32 w); // deprecated + const LLQuaternion& setQuat(const LLQuaternion &quat); // deprecated + const LLQuaternion& setQuat(const F32 *q); // deprecated + const LLQuaternion& setQuat(const LLMatrix3 &mat); // deprecated + const LLQuaternion& setQuat(const LLMatrix4 &mat); // deprecated + const LLQuaternion& setQuat(F32 angle, F32 x, F32 y, F32 z); // deprecated + const LLQuaternion& setQuat(F32 angle, const LLVector3 &vec); // deprecated + const LLQuaternion& setQuat(F32 angle, const LLVector4 &vec); // deprecated + const LLQuaternion& setQuat(F32 roll, F32 pitch, F32 yaw); // deprecated + + LLMatrix4 getMatrix4(void) const; // Returns the Matrix4 equivalent of Quaternion + LLMatrix3 getMatrix3(void) const; // Returns the Matrix3 equivalent of Quaternion + void getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const; // returns rotation in radians about axis x,y,z + void getAngleAxis(F32* angle, LLVector3 &vec) const; + void getEulerAngles(F32 *roll, F32* pitch, F32 *yaw) const; + + F32 normalize(); // Normalizes Quaternion and returns magnitude + F32 normQuat(); // deprecated + + const LLQuaternion& conjugate(void); // Conjugates Quaternion and returns result + const LLQuaternion& conjQuat(void); // deprecated + + // Other 
useful methods + const LLQuaternion& transpose(); // transpose (same as conjugate) + const LLQuaternion& transQuat(); // deprecated + + void shortestArc(const LLVector3 &a, const LLVector3 &b); // shortest rotation from a to b + const LLQuaternion& constrain(F32 radians); // constrains rotation to a cone angle specified in radians + + // Standard operators + friend std::ostream& operator<<(std::ostream &s, const LLQuaternion &a); // Prints a + friend LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b); // Addition + friend LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b); // Subtraction + friend LLQuaternion operator-(const LLQuaternion &a); // Negation + friend LLQuaternion operator*(F32 a, const LLQuaternion &q); // Scale + friend LLQuaternion operator*(const LLQuaternion &q, F32 b); // Scale + friend LLQuaternion operator*(const LLQuaternion &a, const LLQuaternion &b); // Returns a * b + friend LLQuaternion operator~(const LLQuaternion &a); // Returns a* (Conjugate of a) + bool operator==(const LLQuaternion &b) const; // Returns a == b + bool operator!=(const LLQuaternion &b) const; // Returns a != b + + friend const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion &b); // Returns a * b + + friend LLVector4 operator*(const LLVector4 &a, const LLQuaternion &rot); // Rotates a by rot + friend LLVector3 operator*(const LLVector3 &a, const LLQuaternion &rot); // Rotates a by rot + friend LLVector3d operator*(const LLVector3d &a, const LLQuaternion &rot); // Rotates a by rot + + // Non-standard operators + friend F32 dot(const LLQuaternion &a, const LLQuaternion &b); + friend LLQuaternion lerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from p to q + friend LLQuaternion lerp(F32 t, const LLQuaternion &q); // linear interpolation (t = 0 to 1) from identity to q + friend LLQuaternion slerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // spherical linear interpolation from p to q + friend LLQuaternion slerp(F32 t, const LLQuaternion &q); // spherical linear interpolation from identity to q + friend LLQuaternion nlerp(F32 t, const LLQuaternion &p, const LLQuaternion &q); // normalized linear interpolation from p to q + friend LLQuaternion nlerp(F32 t, const LLQuaternion &q); // normalized linear interpolation from p to q + + LLVector3 packToVector3() const; // Saves space by using the fact that our quaternions are normalized + void unpackFromVector3(const LLVector3& vec); // Saves space by using the fact that our quaternions are normalized + + enum Order { + XYZ = 0, + YZX = 1, + ZXY = 2, + XZY = 3, + YXZ = 4, + ZYX = 5 + }; + // Creates a quaternions from maya's rotation representation, + // which is 3 rotations (in DEGREES) in the specified order + friend LLQuaternion mayaQ(F32 x, F32 y, F32 z, Order order); + + // Conversions between Order and strings like "xyz" or "ZYX" + friend const char *OrderToString( const Order order ); + friend Order StringToOrder( const char *str ); + + static BOOL parseQuat(const std::string& buf, LLQuaternion* value); + + // For debugging, only + //static U32 mMultCount; +}; + +// checker +inline BOOL LLQuaternion::isFinite() const +{ + return (llfinite(mQ[VX]) && llfinite(mQ[VY]) && llfinite(mQ[VZ]) && llfinite(mQ[VS])); +} + +inline BOOL LLQuaternion::isIdentity() const +{ + return + ( mQ[VX] == 0.f ) && + ( mQ[VY] == 0.f ) && + ( mQ[VZ] == 0.f ) && + ( mQ[VS] == 1.f ); +} + +inline BOOL LLQuaternion::isNotIdentity() const +{ + return + ( mQ[VX] != 0.f ) || + 
( mQ[VY] != 0.f ) || + ( mQ[VZ] != 0.f ) || + ( mQ[VS] != 1.f ); +} + + + +inline LLQuaternion::LLQuaternion(void) +{ + mQ[VX] = 0.f; + mQ[VY] = 0.f; + mQ[VZ] = 0.f; + mQ[VS] = 1.f; +} + +inline LLQuaternion::LLQuaternion(F32 x, F32 y, F32 z, F32 w) +{ + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = w; + + //RN: don't normalize this case as its used mainly for temporaries during calculations + //normalize(); + /* + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + mag -= 1.f; + mag = fabs(mag); + llassert(mag < 10.f*FP_MAG_THRESHOLD); + */ +} + +inline LLQuaternion::LLQuaternion(const F32 *q) +{ + mQ[VX] = q[VX]; + mQ[VY] = q[VY]; + mQ[VZ] = q[VZ]; + mQ[VS] = q[VW]; + + normalize(); + /* + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + mag -= 1.f; + mag = fabs(mag); + llassert(mag < FP_MAG_THRESHOLD); + */ +} + + +inline void LLQuaternion::loadIdentity() +{ + mQ[VX] = 0.0f; + mQ[VY] = 0.0f; + mQ[VZ] = 0.0f; + mQ[VW] = 1.0f; +} + + +inline const LLQuaternion& LLQuaternion::set(F32 x, F32 y, F32 z, F32 w) +{ + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = w; + normalize(); + return (*this); +} + +inline const LLQuaternion& LLQuaternion::set(const LLQuaternion &quat) +{ + mQ[VX] = quat.mQ[VX]; + mQ[VY] = quat.mQ[VY]; + mQ[VZ] = quat.mQ[VZ]; + mQ[VW] = quat.mQ[VW]; + normalize(); + return (*this); +} + +inline const LLQuaternion& LLQuaternion::set(const F32 *q) +{ + mQ[VX] = q[VX]; + mQ[VY] = q[VY]; + mQ[VZ] = q[VZ]; + mQ[VS] = q[VW]; + normalize(); + return (*this); +} + + +// deprecated +inline const LLQuaternion& LLQuaternion::setQuatInit(F32 x, F32 y, F32 z, F32 w) +{ + mQ[VX] = x; + mQ[VY] = y; + mQ[VZ] = z; + mQ[VS] = w; + normalize(); + return (*this); +} + +// deprecated +inline const LLQuaternion& LLQuaternion::setQuat(const LLQuaternion &quat) +{ + mQ[VX] = quat.mQ[VX]; + mQ[VY] = quat.mQ[VY]; + mQ[VZ] = quat.mQ[VZ]; + mQ[VW] = quat.mQ[VW]; + normalize(); + return (*this); +} + +// deprecated +inline const LLQuaternion& LLQuaternion::setQuat(const F32 *q) +{ + mQ[VX] = q[VX]; + mQ[VY] = q[VY]; + mQ[VZ] = q[VZ]; + mQ[VS] = q[VW]; + normalize(); + return (*this); +} + +// There may be a cheaper way that avoids the sqrt. +// Does sin_a = VX*VX + VY*VY + VZ*VZ? +// Copied from Matrix and Quaternion FAQ 1.12 +inline void LLQuaternion::getAngleAxis(F32* angle, F32* x, F32* y, F32* z) const +{ + F32 cos_a = mQ[VW]; + if (cos_a > 1.0f) cos_a = 1.0f; + if (cos_a < -1.0f) cos_a = -1.0f; + + F32 sin_a = (F32) sqrt( 1.0f - cos_a * cos_a ); + + if ( fabs( sin_a ) < 0.0005f ) + sin_a = 1.0f; + else + sin_a = 1.f/sin_a; + + F32 temp_angle = 2.0f * (F32) acos( cos_a ); + if (temp_angle > F_PI) + { + // The (angle,axis) pair should never have angles outside [PI, -PI] + // since we want the _shortest_ (angle,axis) solution. + // Since acos is defined for [0, PI], and we multiply by 2.0, we + // can push the angle outside the acceptible range. + // When this happens we set the angle to the other portion of a + // full 2PI rotation, and negate the axis, which reverses the + // direction of the rotation (by the right-hand rule). 
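+		// For example, a rotation of 3*PI/2 about +Z comes out of this branch
+		// as a rotation of PI/2 about -Z.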
+ *angle = 2.f * F_PI - temp_angle; + *x = - mQ[VX] * sin_a; + *y = - mQ[VY] * sin_a; + *z = - mQ[VZ] * sin_a; + } + else + { + *angle = temp_angle; + *x = mQ[VX] * sin_a; + *y = mQ[VY] * sin_a; + *z = mQ[VZ] * sin_a; + } +} + +inline const LLQuaternion& LLQuaternion::conjugate() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + +inline const LLQuaternion& LLQuaternion::conjQuat() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + +// Transpose +inline const LLQuaternion& LLQuaternion::transpose() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + +// deprecated +inline const LLQuaternion& LLQuaternion::transQuat() +{ + mQ[VX] *= -1.f; + mQ[VY] *= -1.f; + mQ[VZ] *= -1.f; + return (*this); +} + + +inline LLQuaternion operator+(const LLQuaternion &a, const LLQuaternion &b) +{ + return LLQuaternion( + a.mQ[VX] + b.mQ[VX], + a.mQ[VY] + b.mQ[VY], + a.mQ[VZ] + b.mQ[VZ], + a.mQ[VW] + b.mQ[VW] ); +} + + +inline LLQuaternion operator-(const LLQuaternion &a, const LLQuaternion &b) +{ + return LLQuaternion( + a.mQ[VX] - b.mQ[VX], + a.mQ[VY] - b.mQ[VY], + a.mQ[VZ] - b.mQ[VZ], + a.mQ[VW] - b.mQ[VW] ); +} + + +inline LLQuaternion operator-(const LLQuaternion &a) +{ + return LLQuaternion( + -a.mQ[VX], + -a.mQ[VY], + -a.mQ[VZ], + -a.mQ[VW] ); +} + + +inline LLQuaternion operator*(F32 a, const LLQuaternion &q) +{ + return LLQuaternion( + a * q.mQ[VX], + a * q.mQ[VY], + a * q.mQ[VZ], + a * q.mQ[VW] ); +} + + +inline LLQuaternion operator*(const LLQuaternion &q, F32 a) +{ + return LLQuaternion( + a * q.mQ[VX], + a * q.mQ[VY], + a * q.mQ[VZ], + a * q.mQ[VW] ); +} + +inline LLQuaternion operator~(const LLQuaternion &a) +{ + LLQuaternion q(a); + q.conjQuat(); + return q; +} + +inline bool LLQuaternion::operator==(const LLQuaternion &b) const +{ + return ( (mQ[VX] == b.mQ[VX]) + &&(mQ[VY] == b.mQ[VY]) + &&(mQ[VZ] == b.mQ[VZ]) + &&(mQ[VS] == b.mQ[VS])); +} + +inline bool LLQuaternion::operator!=(const LLQuaternion &b) const +{ + return ( (mQ[VX] != b.mQ[VX]) + ||(mQ[VY] != b.mQ[VY]) + ||(mQ[VZ] != b.mQ[VZ]) + ||(mQ[VS] != b.mQ[VS])); +} + +inline const LLQuaternion& operator*=(LLQuaternion &a, const LLQuaternion &b) +{ +#if 1 + LLQuaternion q( + b.mQ[3] * a.mQ[0] + b.mQ[0] * a.mQ[3] + b.mQ[1] * a.mQ[2] - b.mQ[2] * a.mQ[1], + b.mQ[3] * a.mQ[1] + b.mQ[1] * a.mQ[3] + b.mQ[2] * a.mQ[0] - b.mQ[0] * a.mQ[2], + b.mQ[3] * a.mQ[2] + b.mQ[2] * a.mQ[3] + b.mQ[0] * a.mQ[1] - b.mQ[1] * a.mQ[0], + b.mQ[3] * a.mQ[3] - b.mQ[0] * a.mQ[0] - b.mQ[1] * a.mQ[1] - b.mQ[2] * a.mQ[2] + ); + a = q; +#else + a = a * b; +#endif + return a; +} + +const F32 ONE_PART_IN_A_MILLION = 0.000001f; + +inline F32 LLQuaternion::normalize() +{ + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + + if (mag > FP_MAG_THRESHOLD) + { + // Floating point error can prevent some quaternions from achieving + // exact unity length. When trying to renormalize such quaternions we + // can oscillate between multiple quantized states. To prevent such + // drifts we only renomalize if the length is far enough from unity. 
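+		// "Far enough" here means more than ONE_PART_IN_A_MILLION away from 1.0.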
+ if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) + { + F32 oomag = 1.f/mag; + mQ[VX] *= oomag; + mQ[VY] *= oomag; + mQ[VZ] *= oomag; + mQ[VS] *= oomag; + } + } + else + { + // we were given a very bad quaternion so we set it to identity + mQ[VX] = 0.f; + mQ[VY] = 0.f; + mQ[VZ] = 0.f; + mQ[VS] = 1.f; + } + + return mag; +} + +// deprecated +inline F32 LLQuaternion::normQuat() +{ + F32 mag = sqrtf(mQ[VX]*mQ[VX] + mQ[VY]*mQ[VY] + mQ[VZ]*mQ[VZ] + mQ[VS]*mQ[VS]); + + if (mag > FP_MAG_THRESHOLD) + { + if (fabs(1.f - mag) > ONE_PART_IN_A_MILLION) + { + // only renormalize if length not close enough to 1.0 already + F32 oomag = 1.f/mag; + mQ[VX] *= oomag; + mQ[VY] *= oomag; + mQ[VZ] *= oomag; + mQ[VS] *= oomag; + } + } + else + { + mQ[VX] = 0.f; + mQ[VY] = 0.f; + mQ[VZ] = 0.f; + mQ[VS] = 1.f; + } + + return mag; +} + +LLQuaternion::Order StringToOrder( const char *str ); + +// Some notes about Quaternions + +// What is a Quaternion? +// --------------------- +// A quaternion is a point in 4-dimensional complex space. +// Q = { Qx, Qy, Qz, Qw } +// +// +// Why Quaternions? +// ---------------- +// The set of quaternions that make up the the 4-D unit sphere +// can be mapped to the set of all rotations in 3-D space. Sometimes +// it is easier to describe/manipulate rotations in quaternion space +// than rotation-matrix space. +// +// +// How Quaternions? +// ---------------- +// In order to take advantage of quaternions we need to know how to +// go from rotation-matricies to quaternions and back. We also have +// to agree what variety of rotations we're generating. +// +// Consider the equation... v' = v * R +// +// There are two ways to think about rotations of vectors. +// 1) v' is the same vector in a different reference frame +// 2) v' is a new vector in the same reference frame +// +// bookmark -- which way are we using? +// +// +// Quaternion from Angle-Axis: +// --------------------------- +// Suppose we wanted to represent a rotation of some angle (theta) +// about some axis ({Ax, Ay, Az})... 
+// +// axis of rotation = {Ax, Ay, Az} +// angle_of_rotation = theta +// +// s = sin(0.5 * theta) +// c = cos(0.5 * theta) +// Q = { s * Ax, s * Ay, s * Az, c } +// +// +// 3x3 Matrix from Quaternion +// -------------------------- +// +// | | +// | 1 - 2 * (y^2 + z^2) 2 * (x * y + z * w) 2 * (y * w - x * z) | +// | | +// M = | 2 * (x * y - z * w) 1 - 2 * (x^2 + z^2) 2 * (y * z + x * w) | +// | | +// | 2 * (x * z + y * w) 2 * (y * z - x * w) 1 - 2 * (x^2 + y^2) | +// | | + +#endif -- cgit v1.2.3 From 71de5f622a7917f78823a7e7840194e1b0f8f070 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 26 Aug 2010 14:23:12 -0500 Subject: Add missing files from viewer-experimental --- indra/llmath/llmatrix3a.cpp | 140 ++++++++++ indra/llmath/llmatrix3a.h | 134 +++++++++ indra/llmath/llmatrix3a.inl | 125 +++++++++ indra/llmath/llmatrix4a.h | 149 ++++++++++ indra/llmath/llquaternion2.h | 111 ++++++++ indra/llmath/llquaternion2.inl | 108 ++++++++ indra/llmath/llsimdmath.h | 95 +++++++ indra/llmath/llsimdtypes.h | 130 +++++++++ indra/llmath/llsimdtypes.inl | 163 +++++++++++ indra/llmath/llvector4a.cpp | 228 +++++++++++++++ indra/llmath/llvector4a.h | 331 ++++++++++++++++++++++ indra/llmath/llvector4a.inl | 599 ++++++++++++++++++++++++++++++++++++++++ indra/llmath/llvector4logical.h | 130 +++++++++ indra/llmath/llvolumeoctree.cpp | 208 ++++++++++++++ indra/llmath/llvolumeoctree.h | 138 +++++++++ 15 files changed, 2789 insertions(+) create mode 100644 indra/llmath/llmatrix3a.cpp create mode 100644 indra/llmath/llmatrix3a.h create mode 100644 indra/llmath/llmatrix3a.inl create mode 100644 indra/llmath/llmatrix4a.h create mode 100644 indra/llmath/llquaternion2.h create mode 100644 indra/llmath/llquaternion2.inl create mode 100644 indra/llmath/llsimdmath.h create mode 100644 indra/llmath/llsimdtypes.h create mode 100644 indra/llmath/llsimdtypes.inl create mode 100644 indra/llmath/llvector4a.cpp create mode 100644 indra/llmath/llvector4a.h create mode 100644 indra/llmath/llvector4a.inl create mode 100644 indra/llmath/llvector4logical.h create mode 100644 indra/llmath/llvolumeoctree.cpp create mode 100644 indra/llmath/llvolumeoctree.h (limited to 'indra/llmath') diff --git a/indra/llmath/llmatrix3a.cpp b/indra/llmath/llmatrix3a.cpp new file mode 100644 index 0000000000..b7468f4914 --- /dev/null +++ b/indra/llmath/llmatrix3a.cpp @@ -0,0 +1,140 @@ +/** + * @file llvector4a.cpp + * @brief SIMD vector implementation + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. 
+ * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#include "llmath.h" + +static LL_ALIGN_16(const F32 M_IDENT_3A[12]) = + { 1.f, 0.f, 0.f, 0.f, // Column 1 + 0.f, 1.f, 0.f, 0.f, // Column 2 + 0.f, 0.f, 1.f, 0.f }; // Column 3 + +extern const LLMatrix3a LL_M3A_IDENTITY = *reinterpret_cast (M_IDENT_3A); + +void LLMatrix3a::setMul( const LLMatrix3a& lhs, const LLMatrix3a& rhs ) +{ + const LLVector4a col0 = lhs.getColumn(0); + const LLVector4a col1 = lhs.getColumn(1); + const LLVector4a col2 = lhs.getColumn(2); + + for ( int i = 0; i < 3; i++ ) + { + LLVector4a xxxx = _mm_load_ss( rhs.mColumns[i].getF32ptr() ); + xxxx.splat<0>( xxxx ); + xxxx.mul( col0 ); + + { + LLVector4a yyyy = _mm_load_ss( rhs.mColumns[i].getF32ptr() + 1 ); + yyyy.splat<0>( yyyy ); + yyyy.mul( col1 ); + xxxx.add( yyyy ); + } + + { + LLVector4a zzzz = _mm_load_ss( rhs.mColumns[i].getF32ptr() + 2 ); + zzzz.splat<0>( zzzz ); + zzzz.mul( col2 ); + xxxx.add( zzzz ); + } + + xxxx.store4a( mColumns[i].getF32ptr() ); + } + +} + +/*static */void LLMatrix3a::batchTransform( const LLMatrix3a& xform, const LLVector4a* src, int numVectors, LLVector4a* dst ) +{ + const LLVector4a col0 = xform.getColumn(0); + const LLVector4a col1 = xform.getColumn(1); + const LLVector4a col2 = xform.getColumn(2); + const LLVector4a* maxAddr = src + numVectors; + + if ( numVectors & 0x1 ) + { + LLVector4a xxxx = _mm_load_ss( (const F32*)src ); + LLVector4a yyyy = _mm_load_ss( (const F32*)src + 1 ); + LLVector4a zzzz = _mm_load_ss( (const F32*)src + 2 ); + xxxx.splat<0>( xxxx ); + yyyy.splat<0>( yyyy ); + zzzz.splat<0>( zzzz ); + xxxx.mul( col0 ); + yyyy.mul( col1 ); + zzzz.mul( col2 ); + xxxx.add( yyyy ); + xxxx.add( zzzz ); + xxxx.store4a( (F32*)dst ); + src++; + dst++; + } + + + numVectors >>= 1; + while ( src < maxAddr ) + { + _mm_prefetch( (const char*)(src + 32 ), _MM_HINT_NTA ); + _mm_prefetch( (const char*)(dst + 32), _MM_HINT_NTA ); + LLVector4a xxxx = _mm_load_ss( (const F32*)src ); + LLVector4a xxxx1= _mm_load_ss( (const F32*)(src + 1) ); + + xxxx.splat<0>( xxxx ); + xxxx1.splat<0>( xxxx1 ); + xxxx.mul( col0 ); + xxxx1.mul( col0 ); + + { + LLVector4a yyyy = _mm_load_ss( (const F32*)src + 1 ); + LLVector4a yyyy1 = _mm_load_ss( (const F32*)(src + 1) + 1); + yyyy.splat<0>( yyyy ); + yyyy1.splat<0>( yyyy1 ); + yyyy.mul( col1 ); + yyyy1.mul( col1 ); + xxxx.add( yyyy ); + xxxx1.add( yyyy1 ); + } + + { + LLVector4a zzzz = _mm_load_ss( (const F32*)(src) + 2 ); + LLVector4a zzzz1 = _mm_load_ss( (const F32*)(++src) + 2 ); + zzzz.splat<0>( zzzz ); + zzzz1.splat<0>( zzzz1 ); + zzzz.mul( col2 ); + zzzz1.mul( col2 ); + xxxx.add( zzzz ); + xxxx1.add( zzzz1 ); + } + + xxxx.store4a(dst->getF32ptr()); + src++; + dst++; + + xxxx1.store4a((F32*)dst++); + } +} diff --git a/indra/llmath/llmatrix3a.h b/indra/llmath/llmatrix3a.h new file mode 100644 index 0000000000..56327f9f6d --- /dev/null +++ b/indra/llmath/llmatrix3a.h @@ -0,0 +1,134 @@ +/** + * @file llmatrix3a.h + * @brief LLMatrix3a class header file - memory aligned and vectorized 3x3 matrix + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2010, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_LLMATRIX3A_H +#define LL_LLMATRIX3A_H + +///////////////////////////// +// LLMatrix3a, LLRotation +///////////////////////////// +// This class stores a 3x3 (technically 4x3) matrix in column-major order +///////////////////////////// +///////////////////////////// +// These classes are intentionally minimal right now. If you need additional +// functionality, please contact someone with SSE experience (e.g., Falcon or +// Huseby). +///////////////////////////// + +// LLMatrix3a is the base class for LLRotation, which should be used instead any time you're dealing with a +// rotation matrix. +class LLMatrix3a +{ +public: + + // Utility function for quickly transforming an array of LLVector4a's + // For transforming a single LLVector4a, see LLVector4a::setRotated + static void batchTransform( const LLMatrix3a& xform, const LLVector4a* src, int numVectors, LLVector4a* dst ); + + // Utility function to obtain the identity matrix + static inline const LLMatrix3a& getIdentity(); + + ////////////////////////// + // Ctors + ////////////////////////// + + // Ctor + LLMatrix3a() {} + + // Ctor for setting by columns + inline LLMatrix3a( const LLVector4a& c0, const LLVector4a& c1, const LLVector4a& c2 ); + + ////////////////////////// + // Get/Set + ////////////////////////// + + // Loads from an LLMatrix3 + inline void loadu(const LLMatrix3& src); + + // Set rows + inline void setRows(const LLVector4a& r0, const LLVector4a& r1, const LLVector4a& r2); + + // Set columns + inline void setColumns(const LLVector4a& c0, const LLVector4a& c1, const LLVector4a& c2); + + // Get the read-only access to a specified column. Valid columns are 0-2, but the + // function is unchecked. You've been warned. 
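+	// (The .inl implementation does llassert(column < 3), so debug builds will
+	// catch an out-of-range column.)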
+ inline const LLVector4a& getColumn(const U32 column) const; + + ///////////////////////// + // Matrix modification + ///////////////////////// + + // Set this matrix to the product of lhs and rhs ( this = lhs * rhs ) + void setMul( const LLMatrix3a& lhs, const LLMatrix3a& rhs ); + + // Set this matrix to the transpose of src + inline void setTranspose(const LLMatrix3a& src); + + // Set this matrix to a*w + b*(1-w) + inline void setLerp(const LLMatrix3a& a, const LLMatrix3a& b, F32 w); + + ///////////////////////// + // Matrix inspection + ///////////////////////// + + // Sets all 4 elements in 'dest' to the determinant of this matrix. + // If you will be using the determinant in subsequent ops with LLVector4a, use this version + inline void getDeterminant( LLVector4a& dest ) const; + + // Returns the determinant as an LLSimdScalar. Use this if you will be using the determinant + // primary for scalar operations. + inline LLSimdScalar getDeterminant() const; + + // Returns nonzero if rows 0-2 and colums 0-2 contain no NaN or INF values. Row 3 is ignored + inline LLBool32 isFinite() const; + + // Returns true if this matrix is equal to 'rhs' up to 'tolerance' + inline bool isApproximatelyEqual( const LLMatrix3a& rhs, F32 tolerance = F_APPROXIMATELY_ZERO ) const; + +protected: + + LLVector4a mColumns[3]; + +}; + +class LLRotation : public LLMatrix3a +{ +public: + + LLRotation() {} + + // Returns true if this rotation is orthonormal with det ~= 1 + inline bool isOkRotation() const; +}; + +#endif diff --git a/indra/llmath/llmatrix3a.inl b/indra/llmath/llmatrix3a.inl new file mode 100644 index 0000000000..65fd949f78 --- /dev/null +++ b/indra/llmath/llmatrix3a.inl @@ -0,0 +1,125 @@ +/** + * @file llmatrix3a.inl + * @brief LLMatrix3a inline definitions + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. 
+ * $/LicenseInfo$ + */ + +#include "llmatrix3a.h" +#include "m3math.h" + +inline LLMatrix3a::LLMatrix3a( const LLVector4a& c0, const LLVector4a& c1, const LLVector4a& c2 ) +{ + setColumns( c0, c1, c2 ); +} + +inline void LLMatrix3a::loadu(const LLMatrix3& src) +{ + mColumns[0].load3(src.mMatrix[0]); + mColumns[1].load3(src.mMatrix[1]); + mColumns[2].load3(src.mMatrix[2]); +} + +inline void LLMatrix3a::setRows(const LLVector4a& r0, const LLVector4a& r1, const LLVector4a& r2) +{ + mColumns[0] = r0; + mColumns[1] = r1; + mColumns[2] = r2; + setTranspose( *this ); +} + +inline void LLMatrix3a::setColumns(const LLVector4a& c0, const LLVector4a& c1, const LLVector4a& c2) +{ + mColumns[0] = c0; + mColumns[1] = c1; + mColumns[2] = c2; +} + +inline void LLMatrix3a::setTranspose(const LLMatrix3a& src) +{ + const LLQuad srcCol0 = src.mColumns[0]; + const LLQuad srcCol1 = src.mColumns[1]; + const LLQuad unpacklo = _mm_unpacklo_ps( srcCol0, srcCol1 ); + mColumns[0] = _mm_movelh_ps( unpacklo, src.mColumns[2] ); + mColumns[1] = _mm_shuffle_ps( _mm_movehl_ps( srcCol0, unpacklo ), src.mColumns[2], _MM_SHUFFLE(0, 1, 1, 0) ); + mColumns[2] = _mm_shuffle_ps( _mm_unpackhi_ps( srcCol0, srcCol1 ), src.mColumns[2], _MM_SHUFFLE(0, 2, 1, 0) ); +} + +inline const LLVector4a& LLMatrix3a::getColumn(const U32 column) const +{ + llassert( column < 3 ); + return mColumns[column]; +} + +inline void LLMatrix3a::setLerp(const LLMatrix3a& a, const LLMatrix3a& b, F32 w) +{ + mColumns[0].setLerp( a.mColumns[0], b.mColumns[0], w ); + mColumns[1].setLerp( a.mColumns[1], b.mColumns[1], w ); + mColumns[2].setLerp( a.mColumns[2], b.mColumns[2], w ); +} + +inline LLBool32 LLMatrix3a::isFinite() const +{ + return mColumns[0].isFinite3() && mColumns[1].isFinite3() && mColumns[2].isFinite3(); +} + +inline void LLMatrix3a::getDeterminant( LLVector4a& dest ) const +{ + LLVector4a col1xcol2; col1xcol2.setCross3( mColumns[1], mColumns[2] ); + dest.setAllDot3( col1xcol2, mColumns[0] ); +} + +inline LLSimdScalar LLMatrix3a::getDeterminant() const +{ + LLVector4a col1xcol2; col1xcol2.setCross3( mColumns[1], mColumns[2] ); + return col1xcol2.dot3( mColumns[0] ); +} + +inline bool LLMatrix3a::isApproximatelyEqual( const LLMatrix3a& rhs, F32 tolerance /*= F_APPROXIMATELY_ZERO*/ ) const +{ + return rhs.getColumn(0).equals3(mColumns[0], tolerance) + && rhs.getColumn(1).equals3(mColumns[1], tolerance) + && rhs.getColumn(2).equals3(mColumns[2], tolerance); +} + +inline const LLMatrix3a& LLMatrix3a::getIdentity() +{ + extern const LLMatrix3a LL_M3A_IDENTITY; + return LL_M3A_IDENTITY; +} + +inline bool LLRotation::isOkRotation() const +{ + LLMatrix3a transpose; transpose.setTranspose( *this ); + LLMatrix3a product; product.setMul( *this, transpose ); + + LLSimdScalar detMinusOne = getDeterminant() - 1.f; + + return product.isApproximatelyEqual( LLMatrix3a::getIdentity() ) && (detMinusOne.getAbs() < F_APPROXIMATELY_ZERO); +} + diff --git a/indra/llmath/llmatrix4a.h b/indra/llmath/llmatrix4a.h new file mode 100644 index 0000000000..0ead045d04 --- /dev/null +++ b/indra/llmath/llmatrix4a.h @@ -0,0 +1,149 @@ +/** + * @file llmatrix4a.h + * @brief LLMatrix4a class header file - memory aligned and vectorized 4x4 matrix + * + * $LicenseInfo:firstyear=2007&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_LLMATRIX4A_H +#define LL_LLMATRIX4A_H + +#include "llvector4a.h" +#include "m4math.h" +#include "m3math.h" + +class LLMatrix4a +{ +public: + LLVector4a mMatrix[4]; + + inline void clear() + { + mMatrix[0].clear(); + mMatrix[1].clear(); + mMatrix[2].clear(); + mMatrix[3].clear(); + } + + inline void loadu(const LLMatrix4& src) + { + mMatrix[0] = _mm_loadu_ps(src.mMatrix[0]); + mMatrix[1] = _mm_loadu_ps(src.mMatrix[1]); + mMatrix[2] = _mm_loadu_ps(src.mMatrix[2]); + mMatrix[3] = _mm_loadu_ps(src.mMatrix[3]); + + } + + inline void loadu(const LLMatrix3& src) + { + mMatrix[0].load3(src.mMatrix[0]); + mMatrix[1].load3(src.mMatrix[1]); + mMatrix[2].load3(src.mMatrix[2]); + mMatrix[3].set(0,0,0,1.f); + } + + inline void add(const LLMatrix4a& rhs) + { + mMatrix[0].add(rhs.mMatrix[0]); + mMatrix[1].add(rhs.mMatrix[1]); + mMatrix[2].add(rhs.mMatrix[2]); + mMatrix[3].add(rhs.mMatrix[3]); + } + + inline void setRows(const LLVector4a& r0, const LLVector4a& r1, const LLVector4a& r2) + { + mMatrix[0] = r0; + mMatrix[1] = r1; + mMatrix[2] = r2; + } + + inline void setMul(const LLMatrix4a& m, const F32 s) + { + mMatrix[0].setMul(m.mMatrix[0], s); + mMatrix[1].setMul(m.mMatrix[1], s); + mMatrix[2].setMul(m.mMatrix[2], s); + mMatrix[3].setMul(m.mMatrix[3], s); + } + + inline void setLerp(const LLMatrix4a& a, const LLMatrix4a& b, F32 w) + { + LLVector4a d0,d1,d2,d3; + d0.setSub(b.mMatrix[0], a.mMatrix[0]); + d1.setSub(b.mMatrix[1], a.mMatrix[1]); + d2.setSub(b.mMatrix[2], a.mMatrix[2]); + d3.setSub(b.mMatrix[3], a.mMatrix[3]); + + // this = a + d*w + + d0.mul(w); + d1.mul(w); + d2.mul(w); + d3.mul(w); + + mMatrix[0].setAdd(a.mMatrix[0],d0); + mMatrix[1].setAdd(a.mMatrix[1],d1); + mMatrix[2].setAdd(a.mMatrix[2],d2); + mMatrix[3].setAdd(a.mMatrix[3],d3); + } + + inline void rotate(const LLVector4a& v, LLVector4a& res) + { + res = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); + res.mul(mMatrix[0]); + + LLVector4a y; + y = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); + y.mul(mMatrix[1]); + + LLVector4a z; + z = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); + z.mul(mMatrix[2]); + + res.add(y); + res.add(z); + } + + inline void affineTransform(const LLVector4a& v, LLVector4a& res) + { + LLVector4a x,y,z; + + x = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); + y = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); + z = 
_mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); + + x.mul(mMatrix[0]); + y.mul(mMatrix[1]); + z.mul(mMatrix[2]); + + x.add(y); + z.add(mMatrix[3]); + res.setAdd(x,z); + } +}; + +#endif diff --git a/indra/llmath/llquaternion2.h b/indra/llmath/llquaternion2.h new file mode 100644 index 0000000000..dbb4afe312 --- /dev/null +++ b/indra/llmath/llquaternion2.h @@ -0,0 +1,111 @@ +/** + * @file llquaternion2.h + * @brief LLQuaternion2 class header file - SIMD-enabled quaternion class + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_QUATERNION2_H +#define LL_QUATERNION2_H + +///////////////////////////// +// LLQuaternion2 +///////////////////////////// +// This class stores a quaternion x*i + y*j + z*k + w in order +// (i.e., w in high order element of vector) +///////////////////////////// +///////////////////////////// +// These classes are intentionally minimal right now. If you need additional +// functionality, please contact someone with SSE experience (e.g., Falcon or +// Huseby). +///////////////////////////// +#include "llquaternion.h" + +class LLQuaternion2 +{ +public: + + ////////////////////////// + // Ctors + ////////////////////////// + + // Ctor + LLQuaternion2() {} + + // Ctor from LLQuaternion + explicit LLQuaternion2( const class LLQuaternion& quat ); + + ////////////////////////// + // Get/Set + ////////////////////////// + + // Load from an LLQuaternion + inline void operator=( const LLQuaternion& quat ) + { + mQ.loadua( quat.mQ ); + } + + // Return the internal LLVector4a representation of the quaternion + inline const LLVector4a& getVector4a() const; + inline LLVector4a& getVector4aRw(); + + ///////////////////////// + // Quaternion modification + ///////////////////////// + + // Set this quaternion to the conjugate of src + inline void setConjugate(const LLQuaternion2& src); + + // Renormalizes the quaternion. Assumes it has nonzero length. + inline void normalize(); + + // Quantize this quaternion to 8 bit precision + inline void quantize8(); + + // Quantize this quaternion to 16 bit precision + inline void quantize16(); + + ///////////////////////// + // Quaternion inspection + ///////////////////////// + + // Return true if this quaternion is equal to 'rhs'. + // Note! 
Quaternions exhibit "double-cover", so any rotation has two equally valid + // quaternion representations and they will NOT compare equal. + inline bool equals(const LLQuaternion2& rhs, F32 tolerance = F_APPROXIMATELY_ZERO ) const; + + // Return true if all components are finite and the quaternion is normalized + inline bool isOkRotation() const; + +protected: + + LLVector4a mQ; + +}; + +#endif diff --git a/indra/llmath/llquaternion2.inl b/indra/llmath/llquaternion2.inl new file mode 100644 index 0000000000..9a4274d6a4 --- /dev/null +++ b/indra/llmath/llquaternion2.inl @@ -0,0 +1,108 @@ +/** + * @file llquaternion2.inl + * @brief LLQuaternion2 inline definitions + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#include "llquaternion2.h" + +static const LLQuad LL_V4A_PLUS_ONE = {1.f, 1.f, 1.f, 1.f}; +static const LLQuad LL_V4A_MINUS_ONE = {-1.f, -1.f, -1.f, -1.f}; + +// Ctor from LLQuaternion +inline LLQuaternion2::LLQuaternion2( const LLQuaternion& quat ) +{ + mQ.set(quat.mQ[VX], quat.mQ[VY], quat.mQ[VZ], quat.mQ[VW]); +} + +////////////////////////// +// Get/Set +////////////////////////// + +// Return the internal LLVector4a representation of the quaternion +inline const LLVector4a& LLQuaternion2::getVector4a() const +{ + return mQ; +} + +inline LLVector4a& LLQuaternion2::getVector4aRw() +{ + return mQ; +} + +///////////////////////// +// Quaternion modification +///////////////////////// + +// Set this quaternion to the conjugate of src +inline void LLQuaternion2::setConjugate(const LLQuaternion2& src) +{ + static LL_ALIGN_16( const U32 F_QUAT_INV_MASK_4A[4] ) = { 0x80000000, 0x80000000, 0x80000000, 0x00000000 }; + mQ = _mm_xor_ps(src.mQ, *reinterpret_cast(&F_QUAT_INV_MASK_4A)); +} + +// Renormalizes the quaternion. Assumes it has nonzero length. 
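+// (Editorial note, hedged: accumulated floating-point error from repeated quaternion
+// operations can push the length away from 1, after which the value no longer encodes a
+// pure rotation. A minimal sketch, assuming 'q' is an LLQuaternion2 that has been
+// updated incrementally:
+//
+//     q.normalize();                // restore |q| == 1
+//     llassert(q.isOkRotation());   // finite and normalized, per the inspection helpers
+// )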
+inline void LLQuaternion2::normalize() +{ + mQ.normalize4(); +} + +// Quantize this quaternion to 8 bit precision +inline void LLQuaternion2::quantize8() +{ + mQ.quantize8( LL_V4A_MINUS_ONE, LL_V4A_PLUS_ONE ); + normalize(); +} + +// Quantize this quaternion to 16 bit precision +inline void LLQuaternion2::quantize16() +{ + mQ.quantize16( LL_V4A_MINUS_ONE, LL_V4A_PLUS_ONE ); + normalize(); +} + + +///////////////////////// +// Quaternion inspection +///////////////////////// + +// Return true if this quaternion is equal to 'rhs'. +// Note! Quaternions exhibit "double-cover", so any rotation has two equally valid +// quaternion representations and they will NOT compare equal. +inline bool LLQuaternion2::equals(const LLQuaternion2 &rhs, F32 tolerance/* = F_APPROXIMATELY_ZERO*/) const +{ + return mQ.equals4(rhs.mQ, tolerance); +} + +// Return true if all components are finite and the quaternion is normalized +inline bool LLQuaternion2::isOkRotation() const +{ + return mQ.isFinite4() && mQ.isNormalized4(); +} + diff --git a/indra/llmath/llsimdmath.h b/indra/llmath/llsimdmath.h new file mode 100644 index 0000000000..9377bfdb53 --- /dev/null +++ b/indra/llmath/llsimdmath.h @@ -0,0 +1,95 @@ +/** + * @file llsimdmath.h + * @brief Common header for SIMD-based math library (llvector4a, llmatrix3a, etc.) + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_SIMD_MATH_H +#define LL_SIMD_MATH_H + +#ifndef LLMATH_H +#error "Please include llmath.h before this file." +#endif + +#if ( ( LL_DARWIN || LL_LINUX ) && !(__SSE2__) ) || ( LL_WINDOWS && ( _M_IX86_FP < 2 ) ) +#error SSE2 not enabled. LLVector4a and related class will not compile. 
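+// Editorial note: the guard above fires when the compiler is not generating SSE2 code,
+// which in practice means a 32-bit build without the matching flag. Enabling it is a
+// build-system change, roughly (exact flags depend on the toolchain and are given here
+// only as a hint):
+//
+//     g++ / clang++ : -msse2        (defines __SSE2__; implied for x86-64 targets)
+//     MSVC, 32-bit  : /arch:SSE2    (makes _M_IX86_FP evaluate to 2)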
+#endif + +template T* LL_NEXT_ALIGNED_ADDRESS(T* address) +{ + return reinterpret_cast( + (reinterpret_cast(address) + 0xF) & ~0xF); +} + +template T* LL_NEXT_ALIGNED_ADDRESS_64(T* address) +{ + return reinterpret_cast( + (reinterpret_cast(address) + 0x3F) & ~0x3F); +} + +#if LL_LINUX || LL_DARWIN + +#define LL_ALIGN_PREFIX(x) +#define LL_ALIGN_POSTFIX(x) __attribute__((aligned(x))) + +#elif LL_WINDOWS + +#define LL_ALIGN_PREFIX(x) __declspec(align(x)) +#define LL_ALIGN_POSTFIX(x) + +#else +#error "LL_ALIGN_PREFIX and LL_ALIGN_POSTFIX undefined" +#endif + +#define LL_ALIGN_16(var) LL_ALIGN_PREFIX(16) var LL_ALIGN_POSTFIX(16) + + + +#include +#include + +#include "llsimdtypes.h" +#include "llsimdtypes.inl" + +class LLMatrix3a; +class LLRotation; +class LLMatrix3; + +#include "llquaternion.h" + +#include "llvector4logical.h" +#include "llvector4a.h" +#include "llmatrix3a.h" +#include "llquaternion2.h" +#include "llvector4a.inl" +#include "llmatrix3a.inl" +#include "llquaternion2.inl" + + +#endif //LL_SIMD_MATH_H diff --git a/indra/llmath/llsimdtypes.h b/indra/llmath/llsimdtypes.h new file mode 100644 index 0000000000..82e318c8bf --- /dev/null +++ b/indra/llmath/llsimdtypes.h @@ -0,0 +1,130 @@ +/** + * @file llsimdtypes.h + * @brief Declaration of basic SIMD math related types + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_SIMD_TYPES_H +#define LL_SIMD_TYPES_H + +#ifndef LL_SIMD_MATH_H +#error "Please include llmath.h before this file." +#endif + +typedef __m128 LLQuad; + + +#if LL_WINDOWS +#pragma warning(push) +#pragma warning( disable : 4800 3 ) // Disable warning about casting int to bool for this class. +#if defined(_MSC_VER) && (_MSC_VER < 1500) +// VC++ 2005 is missing these intrinsics +// __forceinline is MSVC specific and attempts to override compiler inlining judgment. This is so +// even in debug builds this call is a NOP. 
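+// Editorial note: the two shims below only reinterpret the same 128-bit register between
+// its integer (__m128i) and float (__m128) views; like the later compiler-provided
+// intrinsics they imitate, they are meant to compile away to nothing.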
+__forceinline const __m128 _mm_castsi128_ps( const __m128i a ) { return reinterpret_cast(a); } +__forceinline const __m128i _mm_castps_si128( const __m128 a ) { return reinterpret_cast(a); } +#endif // _MSC_VER + +#endif // LL_WINDOWS + +class LLBool32 +{ +public: + inline LLBool32() {} + inline LLBool32(int rhs) : m_bool(rhs) {} + inline LLBool32(unsigned int rhs) : m_bool(rhs) {} + inline LLBool32(bool rhs) { m_bool = static_cast(rhs); } + inline LLBool32& operator= (bool rhs) { m_bool = (int)rhs; return *this; } + inline bool operator== (bool rhs) const { return static_cast(m_bool) == rhs; } + inline bool operator!= (bool rhs) const { return !operator==(rhs); } + inline operator bool() const { return static_cast(m_bool); } + +private: + int m_bool; +}; + +#if LL_WINDOWS +#pragma warning(pop) +#endif + +class LLSimdScalar +{ +public: + inline LLSimdScalar() {} + inline LLSimdScalar(LLQuad q) + { + mQ = q; + } + + inline LLSimdScalar(F32 f) + { + mQ = _mm_set_ss(f); + } + + static inline const LLSimdScalar& getZero() + { + extern const LLQuad F_ZERO_4A; + return reinterpret_cast(F_ZERO_4A); + } + + inline F32 getF32() const; + + inline LLBool32 isApproximatelyEqual(const LLSimdScalar& rhs, F32 tolerance = F_APPROXIMATELY_ZERO) const; + + inline LLSimdScalar getAbs() const; + + inline void setMax( const LLSimdScalar& a, const LLSimdScalar& b ); + + inline void setMin( const LLSimdScalar& a, const LLSimdScalar& b ); + + inline LLSimdScalar& operator=(F32 rhs); + + inline LLSimdScalar& operator+=(const LLSimdScalar& rhs); + + inline LLSimdScalar& operator-=(const LLSimdScalar& rhs); + + inline LLSimdScalar& operator*=(const LLSimdScalar& rhs); + + inline LLSimdScalar& operator/=(const LLSimdScalar& rhs); + + inline operator LLQuad() const + { + return mQ; + } + + inline const LLQuad& getQuad() const + { + return mQ; + } + +private: + LLQuad mQ; +}; + +#endif //LL_SIMD_TYPES_H diff --git a/indra/llmath/llsimdtypes.inl b/indra/llmath/llsimdtypes.inl new file mode 100644 index 0000000000..69c858e310 --- /dev/null +++ b/indra/llmath/llsimdtypes.inl @@ -0,0 +1,163 @@ +/** + * @file llsimdtypes.inl + * @brief Inlined definitions of basic SIMD math related types + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. 
+ * $/LicenseInfo$ + */ + + + + +////////////////// +// LLSimdScalar +////////////////// + +inline LLSimdScalar operator+(const LLSimdScalar& a, const LLSimdScalar& b) +{ + LLSimdScalar t(a); + t += b; + return t; +} + +inline LLSimdScalar operator-(const LLSimdScalar& a, const LLSimdScalar& b) +{ + LLSimdScalar t(a); + t -= b; + return t; +} + +inline LLSimdScalar operator*(const LLSimdScalar& a, const LLSimdScalar& b) +{ + LLSimdScalar t(a); + t *= b; + return t; +} + +inline LLSimdScalar operator/(const LLSimdScalar& a, const LLSimdScalar& b) +{ + LLSimdScalar t(a); + t /= b; + return t; +} + +inline LLSimdScalar operator-(const LLSimdScalar& a) +{ + static LL_ALIGN_16(const U32 signMask[4]) = {0x80000000, 0x80000000, 0x80000000, 0x80000000 }; + return _mm_xor_ps(*reinterpret_cast(signMask), a); +} + +inline LLBool32 operator==(const LLSimdScalar& a, const LLSimdScalar& b) +{ + return _mm_comieq_ss(a, b); +} + +inline LLBool32 operator!=(const LLSimdScalar& a, const LLSimdScalar& b) +{ + return _mm_comineq_ss(a, b); +} + +inline LLBool32 operator<(const LLSimdScalar& a, const LLSimdScalar& b) +{ + return _mm_comilt_ss(a, b); +} + +inline LLBool32 operator<=(const LLSimdScalar& a, const LLSimdScalar& b) +{ + return _mm_comile_ss(a, b); +} + +inline LLBool32 operator>(const LLSimdScalar& a, const LLSimdScalar& b) +{ + return _mm_comigt_ss(a, b); +} + +inline LLBool32 operator>=(const LLSimdScalar& a, const LLSimdScalar& b) +{ + return _mm_comige_ss(a, b); +} + +inline LLBool32 LLSimdScalar::isApproximatelyEqual(const LLSimdScalar& rhs, F32 tolerance /* = F_APPROXIMATELY_ZERO */) const +{ + const LLSimdScalar tol( tolerance ); + const LLSimdScalar diff = _mm_sub_ss( mQ, rhs.mQ ); + const LLSimdScalar absDiff = diff.getAbs(); + return absDiff <= tol; +} + +inline void LLSimdScalar::setMax( const LLSimdScalar& a, const LLSimdScalar& b ) +{ + mQ = _mm_max_ss( a, b ); +} + +inline void LLSimdScalar::setMin( const LLSimdScalar& a, const LLSimdScalar& b ) +{ + mQ = _mm_min_ss( a, b ); +} + +inline LLSimdScalar& LLSimdScalar::operator=(F32 rhs) +{ + mQ = _mm_set_ss(rhs); + return *this; +} + +inline LLSimdScalar& LLSimdScalar::operator+=(const LLSimdScalar& rhs) +{ + mQ = _mm_add_ss( mQ, rhs ); + return *this; +} + +inline LLSimdScalar& LLSimdScalar::operator-=(const LLSimdScalar& rhs) +{ + mQ = _mm_sub_ss( mQ, rhs ); + return *this; +} + +inline LLSimdScalar& LLSimdScalar::operator*=(const LLSimdScalar& rhs) +{ + mQ = _mm_mul_ss( mQ, rhs ); + return *this; +} + +inline LLSimdScalar& LLSimdScalar::operator/=(const LLSimdScalar& rhs) +{ + mQ = _mm_div_ss( mQ, rhs ); + return *this; +} + +inline LLSimdScalar LLSimdScalar::getAbs() const +{ + static const LL_ALIGN_16(U32 F_ABS_MASK_4A[4]) = { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF }; + return _mm_and_ps( mQ, *reinterpret_cast(F_ABS_MASK_4A)); +} + +inline F32 LLSimdScalar::getF32() const +{ + F32 ret; + _mm_store_ss(&ret, mQ); + return ret; +} diff --git a/indra/llmath/llvector4a.cpp b/indra/llmath/llvector4a.cpp new file mode 100644 index 0000000000..b62c17302f --- /dev/null +++ b/indra/llmath/llvector4a.cpp @@ -0,0 +1,228 @@ +/** + * @file llvector4a.cpp + * @brief SIMD vector implementation + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#include "llmath.h" +#include "llquantize.h" + +extern const LLQuad F_ZERO_4A = { 0, 0, 0, 0 }; +extern const LLQuad F_APPROXIMATELY_ZERO_4A = { + F_APPROXIMATELY_ZERO, + F_APPROXIMATELY_ZERO, + F_APPROXIMATELY_ZERO, + F_APPROXIMATELY_ZERO +}; + +extern const LLVector4a LL_V4A_ZERO = reinterpret_cast ( F_ZERO_4A ); +extern const LLVector4a LL_V4A_EPSILON = reinterpret_cast ( F_APPROXIMATELY_ZERO_4A ); + +/*static */void LLVector4a::memcpyNonAliased16(F32* __restrict dst, const F32* __restrict src, size_t bytes) +{ + assert(src != NULL); + assert(dst != NULL); + assert(bytes > 0); + assert((bytes % sizeof(F32))== 0); + + F32* end = dst + (bytes / sizeof(F32) ); + + if (bytes > 64) + { + F32* begin_64 = LL_NEXT_ALIGNED_ADDRESS_64(dst); + + //at least 64 (16*4) bytes before the end of the destination, switch to 16 byte copies + F32* end_64 = end-16; + + _mm_prefetch((char*)begin_64, _MM_HINT_NTA); + _mm_prefetch((char*)begin_64 + 64, _MM_HINT_NTA); + _mm_prefetch((char*)begin_64 + 128, _MM_HINT_NTA); + _mm_prefetch((char*)begin_64 + 192, _MM_HINT_NTA); + + while (dst < begin_64) + { + copy4a(dst, src); + dst += 4; + src += 4; + } + + while (dst < end_64) + { + _mm_prefetch((char*)src + 512, _MM_HINT_NTA); + _mm_prefetch((char*)dst + 512, _MM_HINT_NTA); + copy4a(dst, src); + copy4a(dst+4, src+4); + copy4a(dst+8, src+8); + copy4a(dst+12, src+12); + + dst += 16; + src += 16; + } + } + + while (dst < end) + { + copy4a(dst, src); + dst += 4; + src += 4; + } +} + +void LLVector4a::setRotated( const LLRotation& rot, const LLVector4a& vec ) +{ + const LLVector4a col0 = rot.getColumn(0); + const LLVector4a col1 = rot.getColumn(1); + const LLVector4a col2 = rot.getColumn(2); + + LLVector4a result = _mm_load_ss( vec.getF32ptr() ); + result.splat<0>( result ); + result.mul( col0 ); + + { + LLVector4a yyyy = _mm_load_ss( vec.getF32ptr() + 1 ); + yyyy.splat<0>( yyyy ); + yyyy.mul( col1 ); + result.add( yyyy ); + } + + { + LLVector4a zzzz = _mm_load_ss( vec.getF32ptr() + 2 ); + zzzz.splat<0>( zzzz ); + zzzz.mul( col2 ); + result.add( zzzz ); + } + + *this = result; +} + +void LLVector4a::setRotated( const LLQuaternion2& quat, const LLVector4a& vec ) +{ + const LLVector4a& quatVec = quat.getVector4a(); + LLVector4a temp; temp.setCross3(quatVec, vec); + temp.add( temp ); + + const LLVector4a realPart( quatVec.getScalarAt<3>() ); + 
LLVector4a tempTimesReal; tempTimesReal.setMul( temp, realPart ); + + mQ = vec; + add( tempTimesReal ); + + LLVector4a imagCrossTemp; imagCrossTemp.setCross3( quatVec, temp ); + add(imagCrossTemp); +} + +void LLVector4a::quantize8( const LLVector4a& low, const LLVector4a& high ) +{ + LLVector4a val(mQ); + LLVector4a delta; delta.setSub( high, low ); + + { + val.clamp(low, high); + val.sub(low); + + // 8-bit quantization means we can do with just 12 bits of reciprocal accuracy + const LLVector4a oneOverDelta = _mm_rcp_ps(delta.mQ); +// { +// static LL_ALIGN_16( const F32 F_TWO_4A[4] ) = { 2.f, 2.f, 2.f, 2.f }; +// LLVector4a two; two.load4a( F_TWO_4A ); +// +// // Here we use _mm_rcp_ps plus one round of newton-raphson +// // We wish to find 'x' such that x = 1/delta +// // As a first approximation, we take x0 = _mm_rcp_ps(delta) +// // Then x1 = 2 * x0 - a * x0^2 or x1 = x0 * ( 2 - a * x0 ) +// // See Intel AP-803 http://ompf.org/!/Intel_application_note_AP-803.pdf +// const LLVector4a recipApprox = _mm_rcp_ps(delta.mQ); +// oneOverDelta.setMul( delta, recipApprox ); +// oneOverDelta.setSub( two, oneOverDelta ); +// oneOverDelta.mul( recipApprox ); +// } + + val.mul(oneOverDelta); + val.mul(*reinterpret_cast(F_U8MAX_4A)); + } + + val = _mm_cvtepi32_ps(_mm_cvtps_epi32( val.mQ )); + + { + val.mul(*reinterpret_cast(F_OOU8MAX_4A)); + val.mul(delta); + val.add(low); + } + + { + LLVector4a maxError; maxError.setMul(delta, *reinterpret_cast(F_OOU8MAX_4A)); + LLVector4a absVal; absVal.setAbs( val ); + setSelectWithMask( absVal.lessThan( maxError ), F_ZERO_4A, val ); + } +} + +void LLVector4a::quantize16( const LLVector4a& low, const LLVector4a& high ) +{ + LLVector4a val(mQ); + LLVector4a delta; delta.setSub( high, low ); + + { + val.clamp(low, high); + val.sub(low); + + // 16-bit quantization means we need a round of Newton-Raphson + LLVector4a oneOverDelta; + { + static LL_ALIGN_16( const F32 F_TWO_4A[4] ) = { 2.f, 2.f, 2.f, 2.f }; + LLVector4a two; two.load4a( F_TWO_4A ); + + // Here we use _mm_rcp_ps plus one round of newton-raphson + // We wish to find 'x' such that x = 1/delta + // As a first approximation, we take x0 = _mm_rcp_ps(delta) + // Then x1 = 2 * x0 - a * x0^2 or x1 = x0 * ( 2 - a * x0 ) + // See Intel AP-803 http://ompf.org/!/Intel_application_note_AP-803.pdf + const LLVector4a recipApprox = _mm_rcp_ps(delta.mQ); + oneOverDelta.setMul( delta, recipApprox ); + oneOverDelta.setSub( two, oneOverDelta ); + oneOverDelta.mul( recipApprox ); + } + + val.mul(oneOverDelta); + val.mul(*reinterpret_cast(F_U16MAX_4A)); + } + + val = _mm_cvtepi32_ps(_mm_cvtps_epi32( val.mQ )); + + { + val.mul(*reinterpret_cast(F_OOU16MAX_4A)); + val.mul(delta); + val.add(low); + } + + { + LLVector4a maxError; maxError.setMul(delta, *reinterpret_cast(F_OOU16MAX_4A)); + LLVector4a absVal; absVal.setAbs( val ); + setSelectWithMask( absVal.lessThan( maxError ), F_ZERO_4A, val ); + } +} diff --git a/indra/llmath/llvector4a.h b/indra/llmath/llvector4a.h new file mode 100644 index 0000000000..76a3e999ce --- /dev/null +++ b/indra/llmath/llvector4a.h @@ -0,0 +1,331 @@ +/** + * @file llvector4a.h + * @brief LLVector4a class header file - memory aligned and vectorized 4 component vector + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. 
+ * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_LLVECTOR4A_H +#define LL_LLVECTOR4A_H + + +class LLRotation; + +#include +#include "llpreprocessor.h" + +/////////////////////////////////// +// FIRST TIME USERS PLEASE READ +////////////////////////////////// +// This is just the beginning of LLVector4a. There are many more useful functions +// yet to be implemented. For example, setNeg to negate a vector, rotate() to apply +// a matrix rotation, various functions to manipulate only the X, Y, and Z elements +// and many others (including a whole variety of accessors). So if you don't see a +// function here that you need, please contact Falcon or someone else with SSE +// experience (Richard, I think, has some and davep has a little as of the time +// of this writing, July 08, 2010) about getting it implemented before you resort to +// LLVector3/LLVector4. +///////////////////////////////// + +class LLVector4a +{ +public: + + /////////////////////////////////// + // STATIC METHODS + /////////////////////////////////// + + // Call initClass() at startup to avoid 15,000+ cycle penalties from denormalized numbers + static void initClass() + { + _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); + _MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST); + } + + // Return a vector of all zeros + static inline const LLVector4a& getZero() + { + extern const LLVector4a LL_V4A_ZERO; + return LL_V4A_ZERO; + } + + // Return a vector of all epsilon, where epsilon is a small float suitable for approximate equality checks + static inline const LLVector4a& getEpsilon() + { + extern const LLVector4a LL_V4A_EPSILON; + return LL_V4A_EPSILON; + } + + // Copy 16 bytes from src to dst. Source and destination must be 16-byte aligned + static inline void copy4a(F32* dst, const F32* src) + { + _mm_store_ps(dst, _mm_load_ps(src)); + } + + // Copy words 16-byte blocks from src to dst. Source and destination must not overlap. 
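+	// (Editorial clarification and hedged sketch: 'bytes' is the total byte count to copy
+	// and, given the four-float copy stride, is assumed here to be a multiple of 16.
+	// Hypothetical aligned buffers:
+	//
+	//     LL_ALIGN_16(F32 src[64]);
+	//     LL_ALIGN_16(F32 dst[64]);
+	//     // ... fill src ...
+	//     LLVector4a::memcpyNonAliased16(dst, src, sizeof(src));  // 256 bytes
+	// )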
+ static void memcpyNonAliased16(F32* __restrict dst, const F32* __restrict src, size_t bytes); + + //////////////////////////////////// + // CONSTRUCTORS + //////////////////////////////////// + + LLVector4a() + { //DO NOT INITIALIZE -- The overhead is completely unnecessary + } + + LLVector4a(F32 x, F32 y, F32 z, F32 w = 0.f) + { + set(x,y,z,w); + } + + LLVector4a(F32 x) + { + splat(x); + } + + LLVector4a(const LLSimdScalar& x) + { + splat(x); + } + + LLVector4a(LLQuad q) + { + mQ = q; + } + + //////////////////////////////////// + // LOAD/STORE + //////////////////////////////////// + + // Load from 16-byte aligned src array (preferred method of loading) + inline void load4a(const F32* src); + + // Load from unaligned src array (NB: Significantly slower than load4a) + inline void loadua(const F32* src); + + // Load only three floats beginning at address 'src'. Slowest method. + inline void load3(const F32* src); + + // Store to a 16-byte aligned memory address + inline void store4a(F32* dst) const; + + //////////////////////////////////// + // BASIC GET/SET + //////////////////////////////////// + + // Return a "this" as an F32 pointer. Do not use unless you have a very good reason. (Not sure? Ask Falcon) + inline F32* getF32ptr(); + + // Return a "this" as a const F32 pointer. Do not use unless you have a very good reason. (Not sure? Ask Falcon) + inline const F32* const getF32ptr() const; + + // Read-only access a single float in this vector. Do not use in proximity to any function call that manipulates + // the data at the whole vector level or you will incur a substantial penalty. Consider using the splat functions instead + inline F32 operator[](const S32 idx) const; + + // Prefer this method for read-only access to a single element. Prefer the templated version if the elem is known at compile time. + inline LLSimdScalar getScalarAt(const S32 idx) const; + + // Prefer this method for read-only access to a single element. Prefer the templated version if the elem is known at compile time. + template LL_FORCE_INLINE LLSimdScalar getScalarAt() const; + template <> LL_FORCE_INLINE LLSimdScalar getScalarAt<0>() const; + + // Set to an x, y, z and optional w provided + inline void set(F32 x, F32 y, F32 z, F32 w = 0.f); + + // Set to all zeros. 
This is preferred to using ::getZero() + inline void clear(); + + // Set all elements to 'x' + inline void splat(const F32 x); + + // Set all elements to 'x' + inline void splat(const LLSimdScalar& x); + + // Set all 4 elements to element N of src, with N known at compile time + template void splat(const LLVector4a& src); + + // Set all 4 elements to element i of v, with i NOT known at compile time + inline void splat(const LLVector4a& v, U32 i); + + // Select bits from sourceIfTrue and sourceIfFalse according to bits in mask + inline void setSelectWithMask( const LLVector4Logical& mask, const LLVector4a& sourceIfTrue, const LLVector4a& sourceIfFalse ); + + //////////////////////////////////// + // ALGEBRAIC + //////////////////////////////////// + + // Set this to the element-wise (a + b) + inline void setAdd(const LLVector4a& a, const LLVector4a& b); + + // Set this to element-wise (a - b) + inline void setSub(const LLVector4a& a, const LLVector4a& b); + + // Set this to element-wise multiply (a * b) + inline void setMul(const LLVector4a& a, const LLVector4a& b); + + // Set this to element-wise quotient (a / b) + inline void setDiv(const LLVector4a& a, const LLVector4a& b); + + // Set this to the element-wise absolute value of src + inline void setAbs(const LLVector4a& src); + + // Add to each component in this vector the corresponding component in rhs + inline void add(const LLVector4a& rhs); + + // Subtract from each component in this vector the corresponding component in rhs + inline void sub(const LLVector4a& rhs); + + // Multiply each component in this vector by the corresponding component in rhs + inline void mul(const LLVector4a& rhs); + + // Divide each component in this vector by the corresponding component in rhs + inline void div(const LLVector4a& rhs); + + // Multiply this vector by x in a scalar fashion + inline void mul(const F32 x); + + // Set this to (a x b) (geometric cross-product) + inline void setCross3(const LLVector4a& a, const LLVector4a& b); + + // Set all elements to the dot product of the x, y, and z elements in a and b + inline void setAllDot3(const LLVector4a& a, const LLVector4a& b); + + // Set all elements to the dot product of the x, y, z, and w elements in a and b + inline void setAllDot4(const LLVector4a& a, const LLVector4a& b); + + // Return the 3D dot product of this vector and b + inline LLSimdScalar dot3(const LLVector4a& b) const; + + // Return the 4D dot product of this vector and b + inline LLSimdScalar dot4(const LLVector4a& b) const; + + // Normalize this vector with respect to the x, y, and z components only. Accurate to 22 bites of precision. W component is destroyed + // Note that this does not consider zero length vectors! + inline void normalize3(); + + // Same as normalize3() but with respect to all 4 components + inline void normalize4(); + + // Same as normalize3(), but returns length as a SIMD scalar + inline LLSimdScalar normalize3withLength(); + + // Normalize this vector with respect to the x, y, and z components only. Accurate only to 10-12 bits of precision. W component is destroyed + // Note that this does not consider zero length vectors! 
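+	// (Editorial note, hedged: the fast variant stops at the raw reciprocal-sqrt estimate
+	// and skips the Newton-Raphson refinement that normalize3() performs, trading roughly
+	// half the accurate bits for speed. A sketch, with 'eye' and 'target' as hypothetical
+	// LLVector4a values:
+	//
+	//     LLVector4a dir; dir.setSub(target, eye);
+	//     dir.normalize3fast();   // adequate for a throwaway direction vector
+	// )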
+ inline void normalize3fast(); + + // Return true if this vector is normalized with respect to x,y,z up to tolerance + inline LLBool32 isNormalized3( F32 tolerance = 1e-3 ) const; + + // Return true if this vector is normalized with respect to all components up to tolerance + inline LLBool32 isNormalized4( F32 tolerance = 1e-3 ) const; + + // Set all elements to the length of vector 'v' + inline void setAllLength3( const LLVector4a& v ); + + // Get this vector's length + inline LLSimdScalar getLength3() const; + + // Set the components of this vector to the minimum of the corresponding components of lhs and rhs + inline void setMin(const LLVector4a& lhs, const LLVector4a& rhs); + + // Set the components of this vector to the maximum of the corresponding components of lhs and rhs + inline void setMax(const LLVector4a& lhs, const LLVector4a& rhs); + + // Clamps this vector to be within the component-wise range low to high (inclusive) + inline void clamp( const LLVector4a& low, const LLVector4a& high ); + + // Set this to (c * lhs) + rhs * ( 1 - c) + inline void setLerp(const LLVector4a& lhs, const LLVector4a& rhs, F32 c); + + // Return true (nonzero) if x, y, z (and w for Finite4) are all finite floats + inline LLBool32 isFinite3() const; + inline LLBool32 isFinite4() const; + + // Set this vector to 'vec' rotated by the LLRotation or LLQuaternion2 provided + void setRotated( const LLRotation& rot, const LLVector4a& vec ); + void setRotated( const class LLQuaternion2& quat, const LLVector4a& vec ); + + // Set this vector to 'vec' rotated by the INVERSE of the LLRotation or LLQuaternion2 provided + inline void setRotatedInv( const LLRotation& rot, const LLVector4a& vec ); + inline void setRotatedInv( const class LLQuaternion2& quat, const LLVector4a& vec ); + + // Quantize this vector to 8 or 16 bit precision + void quantize8( const LLVector4a& low, const LLVector4a& high ); + void quantize16( const LLVector4a& low, const LLVector4a& high ); + + //////////////////////////////////// + // LOGICAL + //////////////////////////////////// + // The functions in this section will compare the elements in this vector + // to those in rhs and return an LLVector4Logical with all bits set in elements + // where the comparison was true and all bits unset in elements where the comparison + // was false. See llvector4logica.h + //////////////////////////////////// + // WARNING: Other than equals3 and equals4, these functions do NOT account + // for floating point tolerance. You should include the appropriate tolerance + // in the inputs. 
+ //////////////////////////////////// + + inline LLVector4Logical greaterThan(const LLVector4a& rhs) const; + + inline LLVector4Logical lessThan(const LLVector4a& rhs) const; + + inline LLVector4Logical greaterEqual(const LLVector4a& rhs) const; + + inline LLVector4Logical lessEqual(const LLVector4a& rhs) const; + + inline LLVector4Logical equal(const LLVector4a& rhs) const; + + // Returns true if this and rhs are componentwise equal up to the specified absolute tolerance + inline bool equals4(const LLVector4a& rhs, F32 tolerance = F_APPROXIMATELY_ZERO ) const; + + inline bool equals3(const LLVector4a& rhs, F32 tolerance = F_APPROXIMATELY_ZERO ) const; + + //////////////////////////////////// + // OPERATORS + //////////////////////////////////// + + // Do NOT add aditional operators without consulting someone with SSE experience + inline const LLVector4a& operator= ( const LLVector4a& rhs ); + + inline const LLVector4a& operator= ( const LLQuad& rhs ); + + inline operator LLQuad() const; + +private: + LLQuad mQ; +}; + +inline void update_min_max(LLVector4a& min, LLVector4a& max, const LLVector4a& p) +{ + min.setMin(min, p); + max.setMax(max, p); +} + +#endif diff --git a/indra/llmath/llvector4a.inl b/indra/llmath/llvector4a.inl new file mode 100644 index 0000000000..e52b550883 --- /dev/null +++ b/indra/llmath/llvector4a.inl @@ -0,0 +1,599 @@ +/** + * @file llvector4a.inl + * @brief LLVector4a inline function implementations + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +//////////////////////////////////// +// LOAD/STORE +//////////////////////////////////// + +// Load from 16-byte aligned src array (preferred method of loading) +inline void LLVector4a::load4a(const F32* src) +{ + mQ = _mm_load_ps(src); +} + +// Load from unaligned src array (NB: Significantly slower than load4a) +inline void LLVector4a::loadua(const F32* src) +{ + mQ = _mm_loadu_ps(src); +} + +// Load only three floats beginning at address 'src'. Slowest method. 
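+// (Editorial sketch, hedged: choosing between the three load paths; the buffer names are
+// hypothetical.
+//
+//     LL_ALIGN_16(F32 aligned[4]); LLVector4a a; a.load4a(aligned);   // fastest
+//     F32 anywhere[4];             LLVector4a b; b.loadua(anywhere);  // slower
+//     F32 xyz[3];                  LLVector4a c; c.load3(xyz);        // slowest; w becomes 0
+// )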
+inline void LLVector4a::load3(const F32* src) +{ + // mQ = { 0.f, src[2], src[1], src[0] } = { W, Z, Y, X } + // NB: This differs from the convention of { Z, Y, X, W } + mQ = _mm_set_ps(0.f, src[2], src[1], src[0]); +} + +// Store to a 16-byte aligned memory address +inline void LLVector4a::store4a(F32* dst) const +{ + _mm_store_ps(dst, mQ); +} + +//////////////////////////////////// +// BASIC GET/SET +//////////////////////////////////// + +// Return a "this" as an F32 pointer. Do not use unless you have a very good reason. (Not sure? Ask Falcon) +F32* LLVector4a::getF32ptr() +{ + return (F32*) &mQ; +} + +// Return a "this" as a const F32 pointer. Do not use unless you have a very good reason. (Not sure? Ask Falcon) +const F32* const LLVector4a::getF32ptr() const +{ + return (const F32* const) &mQ; +} + +// Read-only access a single float in this vector. Do not use in proximity to any function call that manipulates +// the data at the whole vector level or you will incur a substantial penalty. Consider using the splat functions instead +inline F32 LLVector4a::operator[](const S32 idx) const +{ + return ((F32*)&mQ)[idx]; +} + +// Prefer this method for read-only access to a single element. Prefer the templated version if the elem is known at compile time. +inline LLSimdScalar LLVector4a::getScalarAt(const S32 idx) const +{ + // Return appropriate LLQuad. It will be cast to LLSimdScalar automatically (should be effectively a nop) + switch (idx) + { + case 0: + return mQ; + case 1: + return _mm_shuffle_ps(mQ, mQ, _MM_SHUFFLE(1, 1, 1, 1)); + case 2: + return _mm_shuffle_ps(mQ, mQ, _MM_SHUFFLE(2, 2, 2, 2)); + case 3: + default: + return _mm_shuffle_ps(mQ, mQ, _MM_SHUFFLE(3, 3, 3, 3)); + } +} + +// Prefer this method for read-only access to a single element. Prefer the templated version if the elem is known at compile time. 
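+// (Editorial sketch, hedged: pulling a single lane out for scalar work, assuming 'v' is
+// an LLVector4a already holding data.
+//
+//     const LLSimdScalar y = v.getScalarAt<1>();  // compile-time index, preferred form
+//     const F32 yf = y.getF32();                  // convert to F32 only at the boundary
+// )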
+template LL_FORCE_INLINE LLSimdScalar LLVector4a::getScalarAt() const +{ + return _mm_shuffle_ps(mQ, mQ, _MM_SHUFFLE(N, N, N, N)); +} + +template<> LL_FORCE_INLINE LLSimdScalar LLVector4a::getScalarAt<0>() const +{ + return mQ; +} + +// Set to an x, y, z and optional w provided +inline void LLVector4a::set(F32 x, F32 y, F32 z, F32 w) +{ + mQ = _mm_set_ps(w, z, y, x); +} + +// Set to all zeros +inline void LLVector4a::clear() +{ + mQ = LLVector4a::getZero().mQ; +} + +inline void LLVector4a::splat(const F32 x) +{ + mQ = _mm_set1_ps(x); +} + +inline void LLVector4a::splat(const LLSimdScalar& x) +{ + mQ = _mm_shuffle_ps( x.getQuad(), x.getQuad(), _MM_SHUFFLE(0,0,0,0) ); +} + +// Set all 4 elements to element N of src, with N known at compile time +template void LLVector4a::splat(const LLVector4a& src) +{ + mQ = _mm_shuffle_ps(src.mQ, src.mQ, _MM_SHUFFLE(N, N, N, N) ); +} + +// Set all 4 elements to element i of v, with i NOT known at compile time +inline void LLVector4a::splat(const LLVector4a& v, U32 i) +{ + switch (i) + { + case 0: + mQ = _mm_shuffle_ps(v.mQ, v.mQ, _MM_SHUFFLE(0, 0, 0, 0)); + break; + case 1: + mQ = _mm_shuffle_ps(v.mQ, v.mQ, _MM_SHUFFLE(1, 1, 1, 1)); + break; + case 2: + mQ = _mm_shuffle_ps(v.mQ, v.mQ, _MM_SHUFFLE(2, 2, 2, 2)); + break; + case 3: + mQ = _mm_shuffle_ps(v.mQ, v.mQ, _MM_SHUFFLE(3, 3, 3, 3)); + break; + } +} + +// Select bits from sourceIfTrue and sourceIfFalse according to bits in mask +inline void LLVector4a::setSelectWithMask( const LLVector4Logical& mask, const LLVector4a& sourceIfTrue, const LLVector4a& sourceIfFalse ) +{ + // ((( sourceIfTrue ^ sourceIfFalse ) & mask) ^ sourceIfFalse ) + // E.g., sourceIfFalse = 1010b, sourceIfTrue = 0101b, mask = 1100b + // (sourceIfTrue ^ sourceIfFalse) = 1111b --> & mask = 1100b --> ^ sourceIfFalse = 0110b, + // as expected (01 from sourceIfTrue, 10 from sourceIfFalse) + // Courtesy of Mark++, http://markplusplus.wordpress.com/2007/03/14/fast-sse-select-operation/ + mQ = _mm_xor_ps( sourceIfFalse, _mm_and_ps( mask, _mm_xor_ps( sourceIfTrue, sourceIfFalse ) ) ); +} + +//////////////////////////////////// +// ALGEBRAIC +//////////////////////////////////// + +// Set this to the element-wise (a + b) +inline void LLVector4a::setAdd(const LLVector4a& a, const LLVector4a& b) +{ + mQ = _mm_add_ps(a.mQ, b.mQ); +} + +// Set this to element-wise (a - b) +inline void LLVector4a::setSub(const LLVector4a& a, const LLVector4a& b) +{ + mQ = _mm_sub_ps(a.mQ, b.mQ); +} + +// Set this to element-wise multiply (a * b) +inline void LLVector4a::setMul(const LLVector4a& a, const LLVector4a& b) +{ + mQ = _mm_mul_ps(a.mQ, b.mQ); +} + +// Set this to element-wise quotient (a / b) +inline void LLVector4a::setDiv(const LLVector4a& a, const LLVector4a& b) +{ + mQ = _mm_div_ps( a.mQ, b.mQ ); +} + +// Set this to the element-wise absolute value of src +inline void LLVector4a::setAbs(const LLVector4a& src) +{ + static const LL_ALIGN_16(U32 F_ABS_MASK_4A[4]) = { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF }; + mQ = _mm_and_ps(src.mQ, *reinterpret_cast(F_ABS_MASK_4A)); +} + +// Add to each component in this vector the corresponding component in rhs +inline void LLVector4a::add(const LLVector4a& rhs) +{ + mQ = _mm_add_ps(mQ, rhs.mQ); +} + +// Subtract from each component in this vector the corresponding component in rhs +inline void LLVector4a::sub(const LLVector4a& rhs) +{ + mQ = _mm_sub_ps(mQ, rhs.mQ); +} + +// Multiply each component in this vector by the corresponding component in rhs +inline void LLVector4a::mul(const LLVector4a& rhs) +{ + mQ 
= _mm_mul_ps(mQ, rhs.mQ); +} + +// Divide each component in this vector by the corresponding component in rhs +inline void LLVector4a::div(const LLVector4a& rhs) +{ + // TODO: Check accuracy, maybe add divFast + mQ = _mm_div_ps(mQ, rhs.mQ); +} + +// Multiply this vector by x in a scalar fashion +inline void LLVector4a::mul(const F32 x) +{ + LLVector4a t; + t.splat(x); + + mQ = _mm_mul_ps(mQ, t.mQ); +} + +// Set this to (a x b) (geometric cross-product) +inline void LLVector4a::setCross3(const LLVector4a& a, const LLVector4a& b) +{ + // Vectors are stored in memory in w, z, y, x order from high to low + // Set vector1 = { a[W], a[X], a[Z], a[Y] } + const LLQuad vector1 = _mm_shuffle_ps( a.mQ, a.mQ, _MM_SHUFFLE( 3, 0, 2, 1 )); + // Set vector2 = { b[W], b[Y], b[X], b[Z] } + const LLQuad vector2 = _mm_shuffle_ps( b.mQ, b.mQ, _MM_SHUFFLE( 3, 1, 0, 2 )); + // mQ = { a[W]*b[W], a[X]*b[Y], a[Z]*b[X], a[Y]*b[Z] } + mQ = _mm_mul_ps( vector1, vector2 ); + // vector3 = { a[W], a[Y], a[X], a[Z] } + const LLQuad vector3 = _mm_shuffle_ps( a.mQ, a.mQ, _MM_SHUFFLE( 3, 1, 0, 2 )); + // vector4 = { b[W], b[X], b[Z], b[Y] } + const LLQuad vector4 = _mm_shuffle_ps( b.mQ, b.mQ, _MM_SHUFFLE( 3, 0, 2, 1 )); + // mQ = { 0, a[X]*b[Y] - a[Y]*b[X], a[Z]*b[X] - a[X]*b[Z], a[Y]*b[Z] - a[Z]*b[Y] } + mQ = _mm_sub_ps( mQ, _mm_mul_ps( vector3, vector4 )); +} + +/* This function works, but may be slightly slower than the one below on older machines + inline void LLVector4a::setAllDot3(const LLVector4a& a, const LLVector4a& b) + { + // ab = { a[W]*b[W], a[Z]*b[Z], a[Y]*b[Y], a[X]*b[X] } + const LLQuad ab = _mm_mul_ps( a.mQ, b.mQ ); + // yzxw = { a[W]*b[W], a[Z]*b[Z], a[X]*b[X], a[Y]*b[Y] } + const LLQuad wzxy = _mm_shuffle_ps( ab, ab, _MM_SHUFFLE(3, 2, 0, 1 )); + // xPlusY = { 2*a[W]*b[W], 2 * a[Z] * b[Z], a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y] } + const LLQuad xPlusY = _mm_add_ps(ab, wzxy); + // xPlusYSplat = { a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y], a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y] } + const LLQuad xPlusYSplat = _mm_movelh_ps(xPlusY, xPlusY); + // zSplat = { a[Z]*b[Z], a[Z]*b[Z], a[Z]*b[Z], a[Z]*b[Z] } + const LLQuad zSplat = _mm_shuffle_ps( ab, ab, _MM_SHUFFLE( 2, 2, 2, 2 )); + // mQ = { a[Z] * b[Z] + a[Y] * b[Y] + a[X] * b[X], same, same, same } + mQ = _mm_add_ps(zSplat, xPlusYSplat); + }*/ + +// Set all elements to the dot product of the x, y, and z elements in a and b +inline void LLVector4a::setAllDot3(const LLVector4a& a, const LLVector4a& b) +{ + // ab = { a[W]*b[W], a[Z]*b[Z], a[Y]*b[Y], a[X]*b[X] } + const LLQuad ab = _mm_mul_ps( a.mQ, b.mQ ); + // yzxw = { a[W]*b[W], a[Z]*b[Z], a[X]*b[X], a[Y]*b[Y] } + const __m128i wzxy = _mm_shuffle_epi32(_mm_castps_si128(ab), _MM_SHUFFLE(3, 2, 0, 1 )); + // xPlusY = { 2*a[W]*b[W], 2 * a[Z] * b[Z], a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y] } + const LLQuad xPlusY = _mm_add_ps(ab, _mm_castsi128_ps(wzxy)); + // xPlusYSplat = { a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y], a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y] } + const LLQuad xPlusYSplat = _mm_movelh_ps(xPlusY, xPlusY); + // zSplat = { a[Z]*b[Z], a[Z]*b[Z], a[Z]*b[Z], a[Z]*b[Z] } + const __m128i zSplat = _mm_shuffle_epi32(_mm_castps_si128(ab), _MM_SHUFFLE( 2, 2, 2, 2 )); + // mQ = { a[Z] * b[Z] + a[Y] * b[Y] + a[X] * b[X], same, same, same } + mQ = _mm_add_ps(_mm_castsi128_ps(zSplat), xPlusYSplat); +} + +// Set all elements to the dot product of the x, y, z, and w elements in a and b +inline void LLVector4a::setAllDot4(const LLVector4a& a, const LLVector4a& b) 
+{ + // ab = { a[W]*b[W], a[Z]*b[Z], a[Y]*b[Y], a[X]*b[X] } + const LLQuad ab = _mm_mul_ps( a.mQ, b.mQ ); + // yzxw = { a[W]*b[W], a[Z]*b[Z], a[X]*b[X], a[Y]*b[Y] } + const __m128i zwxy = _mm_shuffle_epi32(_mm_castps_si128(ab), _MM_SHUFFLE(2, 3, 0, 1 )); + // zPlusWandXplusY = { a[W]*b[W] + a[Z]*b[Z], a[Z] * b[Z] + a[W]*b[W], a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y] } + const LLQuad zPlusWandXplusY = _mm_add_ps(ab, _mm_castsi128_ps(zwxy)); + // xPlusYSplat = { a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y], a[Y]*b[Y] + a[X] * b[X], a[X] * b[X] + a[Y] * b[Y] } + const LLQuad xPlusYSplat = _mm_movelh_ps(zPlusWandXplusY, zPlusWandXplusY); + const LLQuad zPlusWSplat = _mm_movehl_ps(zPlusWandXplusY, zPlusWandXplusY); + + // mQ = { a[W]*b[W] + a[Z] * b[Z] + a[Y] * b[Y] + a[X] * b[X], same, same, same } + mQ = _mm_add_ps(xPlusYSplat, zPlusWSplat); +} + +// Return the 3D dot product of this vector and b +inline LLSimdScalar LLVector4a::dot3(const LLVector4a& b) const +{ + const LLQuad ab = _mm_mul_ps( mQ, b.mQ ); + const LLQuad splatY = _mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128(ab), _MM_SHUFFLE(1, 1, 1, 1) ) ); + const LLQuad splatZ = _mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128(ab), _MM_SHUFFLE(2, 2, 2, 2) ) ); + const LLQuad xPlusY = _mm_add_ps( ab, splatY ); + return _mm_add_ps( xPlusY, splatZ ); +} + +// Return the 4D dot product of this vector and b +inline LLSimdScalar LLVector4a::dot4(const LLVector4a& b) const +{ + // ab = { w, z, y, x } + const LLQuad ab = _mm_mul_ps( mQ, b.mQ ); + // upperProdsInLowerElems = { y, x, y, x } + const LLQuad upperProdsInLowerElems = _mm_movehl_ps( ab, ab ); + // sumOfPairs = { w+y, z+x, 2y, 2x } + const LLQuad sumOfPairs = _mm_add_ps( upperProdsInLowerElems, ab ); + // shuffled = { z+x, z+x, z+x, z+x } + const LLQuad shuffled = _mm_castsi128_ps( _mm_shuffle_epi32( _mm_castps_si128( sumOfPairs ), _MM_SHUFFLE(1, 1, 1, 1) ) ); + return _mm_add_ss( sumOfPairs, shuffled ); +} + +// Normalize this vector with respect to the x, y, and z components only. Accurate to 22 bites of precision. W component is destroyed +// Note that this does not consider zero length vectors! +inline void LLVector4a::normalize3() +{ + // lenSqrd = a dot a + LLVector4a lenSqrd; lenSqrd.setAllDot3( *this, *this ); + // rsqrt = approximate reciprocal square (i.e., { ~1/len(a)^2, ~1/len(a)^2, ~1/len(a)^2, ~1/len(a)^2 } + const LLQuad rsqrt = _mm_rsqrt_ps(lenSqrd.mQ); + static const LLQuad half = { 0.5f, 0.5f, 0.5f, 0.5f }; + static const LLQuad three = {3.f, 3.f, 3.f, 3.f }; + // Now we do one round of Newton-Raphson approximation to get full accuracy + // According to the Newton-Raphson method, given a first 'w' for the root of f(x) = 1/x^2 - a (i.e., x = 1/sqrt(a)) + // the next better approximation w[i+1] = w - f(w)/f'(w) = w - (1/w^2 - a)/(-2*w^(-3)) + // w[i+1] = w + 0.5 * (1/w^2 - a) * w^3 = w + 0.5 * (w - a*w^3) = 1.5 * w - 0.5 * a * w^3 + // = 0.5 * w * (3 - a*w^2) + // Our first approx is w = rsqrt. We need out = a * w[i+1] (this is the input vector 'a', not the 'a' from the above formula + // which is actually lenSqrd). 
So out = a * [0.5*rsqrt * (3 - lenSqrd*rsqrt*rsqrt)] + const LLQuad AtimesRsqrt = _mm_mul_ps( lenSqrd.mQ, rsqrt ); + const LLQuad AtimesRsqrtTimesRsqrt = _mm_mul_ps( AtimesRsqrt, rsqrt ); + const LLQuad threeMinusAtimesRsqrtTimesRsqrt = _mm_sub_ps(three, AtimesRsqrtTimesRsqrt ); + const LLQuad nrApprox = _mm_mul_ps(half, _mm_mul_ps(rsqrt, threeMinusAtimesRsqrtTimesRsqrt)); + mQ = _mm_mul_ps( mQ, nrApprox ); +} + +// Normalize this vector with respect to all components. Accurate to 22 bites of precision. +// Note that this does not consider zero length vectors! +inline void LLVector4a::normalize4() +{ + // lenSqrd = a dot a + LLVector4a lenSqrd; lenSqrd.setAllDot4( *this, *this ); + // rsqrt = approximate reciprocal square (i.e., { ~1/len(a)^2, ~1/len(a)^2, ~1/len(a)^2, ~1/len(a)^2 } + const LLQuad rsqrt = _mm_rsqrt_ps(lenSqrd.mQ); + static const LLQuad half = { 0.5f, 0.5f, 0.5f, 0.5f }; + static const LLQuad three = {3.f, 3.f, 3.f, 3.f }; + // Now we do one round of Newton-Raphson approximation to get full accuracy + // According to the Newton-Raphson method, given a first 'w' for the root of f(x) = 1/x^2 - a (i.e., x = 1/sqrt(a)) + // the next better approximation w[i+1] = w - f(w)/f'(w) = w - (1/w^2 - a)/(-2*w^(-3)) + // w[i+1] = w + 0.5 * (1/w^2 - a) * w^3 = w + 0.5 * (w - a*w^3) = 1.5 * w - 0.5 * a * w^3 + // = 0.5 * w * (3 - a*w^2) + // Our first approx is w = rsqrt. We need out = a * w[i+1] (this is the input vector 'a', not the 'a' from the above formula + // which is actually lenSqrd). So out = a * [0.5*rsqrt * (3 - lenSqrd*rsqrt*rsqrt)] + const LLQuad AtimesRsqrt = _mm_mul_ps( lenSqrd.mQ, rsqrt ); + const LLQuad AtimesRsqrtTimesRsqrt = _mm_mul_ps( AtimesRsqrt, rsqrt ); + const LLQuad threeMinusAtimesRsqrtTimesRsqrt = _mm_sub_ps(three, AtimesRsqrtTimesRsqrt ); + const LLQuad nrApprox = _mm_mul_ps(half, _mm_mul_ps(rsqrt, threeMinusAtimesRsqrtTimesRsqrt)); + mQ = _mm_mul_ps( mQ, nrApprox ); +} + +// Normalize this vector with respect to the x, y, and z components only. Accurate to 22 bites of precision. W component is destroyed +// Note that this does not consider zero length vectors! +inline LLSimdScalar LLVector4a::normalize3withLength() +{ + // lenSqrd = a dot a + LLVector4a lenSqrd; lenSqrd.setAllDot3( *this, *this ); + // rsqrt = approximate reciprocal square (i.e., { ~1/len(a)^2, ~1/len(a)^2, ~1/len(a)^2, ~1/len(a)^2 } + const LLQuad rsqrt = _mm_rsqrt_ps(lenSqrd.mQ); + static const LLQuad half = { 0.5f, 0.5f, 0.5f, 0.5f }; + static const LLQuad three = {3.f, 3.f, 3.f, 3.f }; + // Now we do one round of Newton-Raphson approximation to get full accuracy + // According to the Newton-Raphson method, given a first 'w' for the root of f(x) = 1/x^2 - a (i.e., x = 1/sqrt(a)) + // the next better approximation w[i+1] = w - f(w)/f'(w) = w - (1/w^2 - a)/(-2*w^(-3)) + // w[i+1] = w + 0.5 * (1/w^2 - a) * w^3 = w + 0.5 * (w - a*w^3) = 1.5 * w - 0.5 * a * w^3 + // = 0.5 * w * (3 - a*w^2) + // Our first approx is w = rsqrt. We need out = a * w[i+1] (this is the input vector 'a', not the 'a' from the above formula + // which is actually lenSqrd). 
So out = a * [0.5*rsqrt * (3 - lenSqrd*rsqrt*rsqrt)] + const LLQuad AtimesRsqrt = _mm_mul_ps( lenSqrd.mQ, rsqrt ); + const LLQuad AtimesRsqrtTimesRsqrt = _mm_mul_ps( AtimesRsqrt, rsqrt ); + const LLQuad threeMinusAtimesRsqrtTimesRsqrt = _mm_sub_ps(three, AtimesRsqrtTimesRsqrt ); + const LLQuad nrApprox = _mm_mul_ps(half, _mm_mul_ps(rsqrt, threeMinusAtimesRsqrtTimesRsqrt)); + mQ = _mm_mul_ps( mQ, nrApprox ); + return _mm_sqrt_ss(lenSqrd); +} + +// Normalize this vector with respect to the x, y, and z components only. Accurate only to 10-12 bits of precision. W component is destroyed +// Note that this does not consider zero length vectors! +inline void LLVector4a::normalize3fast() +{ + LLVector4a lenSqrd; lenSqrd.setAllDot3( *this, *this ); + const LLQuad approxRsqrt = _mm_rsqrt_ps(lenSqrd.mQ); + mQ = _mm_mul_ps( mQ, approxRsqrt ); +} + +// Return true if this vector is normalized with respect to x,y,z up to tolerance +inline LLBool32 LLVector4a::isNormalized3( F32 tolerance ) const +{ + static LL_ALIGN_16(const U32 ones[4]) = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; + LLSimdScalar tol = _mm_load_ss( &tolerance ); + tol = _mm_mul_ss( tol, tol ); + LLVector4a lenSquared; lenSquared.setAllDot3( *this, *this ); + lenSquared.sub( *reinterpret_cast(ones) ); + lenSquared.setAbs(lenSquared); + return _mm_comile_ss( lenSquared, tol ); +} + +// Return true if this vector is normalized with respect to all components up to tolerance +inline LLBool32 LLVector4a::isNormalized4( F32 tolerance ) const +{ + static LL_ALIGN_16(const U32 ones[4]) = { 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000 }; + LLSimdScalar tol = _mm_load_ss( &tolerance ); + tol = _mm_mul_ss( tol, tol ); + LLVector4a lenSquared; lenSquared.setAllDot4( *this, *this ); + lenSquared.sub( *reinterpret_cast(ones) ); + lenSquared.setAbs(lenSquared); + return _mm_comile_ss( lenSquared, tol ); +} + +// Set all elements to the length of vector 'v' +inline void LLVector4a::setAllLength3( const LLVector4a& v ) +{ + LLVector4a lenSqrd; + lenSqrd.setAllDot3(v, v); + + mQ = _mm_sqrt_ps(lenSqrd.mQ); +} + +// Get this vector's length +inline LLSimdScalar LLVector4a::getLength3() const +{ + return _mm_sqrt_ss( dot3( (const LLVector4a)mQ ) ); +} + +// Set the components of this vector to the minimum of the corresponding components of lhs and rhs +inline void LLVector4a::setMin(const LLVector4a& lhs, const LLVector4a& rhs) +{ + mQ = _mm_min_ps(lhs.mQ, rhs.mQ); +} + +// Set the components of this vector to the maximum of the corresponding components of lhs and rhs +inline void LLVector4a::setMax(const LLVector4a& lhs, const LLVector4a& rhs) +{ + mQ = _mm_max_ps(lhs.mQ, rhs.mQ); +} + +// Set this to (c * lhs) + rhs * ( 1 - c) +inline void LLVector4a::setLerp(const LLVector4a& lhs, const LLVector4a& rhs, F32 c) +{ + LLVector4a a = lhs; + a.mul(c); + + LLVector4a b = rhs; + b.mul(1.f-c); + + setAdd(a, b); +} + +inline LLBool32 LLVector4a::isFinite3() const +{ + static LL_ALIGN_16(const U32 nanOrInfMask[4]) = { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 }; + const __m128i nanOrInfMaskV = *reinterpret_cast (nanOrInfMask); + const __m128i maskResult = _mm_and_si128( _mm_castps_si128(mQ), nanOrInfMaskV ); + const LLVector4Logical equalityCheck = _mm_castsi128_ps(_mm_cmpeq_epi32( maskResult, nanOrInfMaskV )); + return !equalityCheck.areAnySet( LLVector4Logical::MASK_XYZ ); +} + +inline LLBool32 LLVector4a::isFinite4() const +{ + static LL_ALIGN_16(const U32 nanOrInfMask[4]) = { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 }; + const __m128i 
nanOrInfMaskV = *reinterpret_cast<const __m128i*>(nanOrInfMask);
+ const __m128i maskResult = _mm_and_si128( _mm_castps_si128(mQ), nanOrInfMaskV );
+ const LLVector4Logical equalityCheck = _mm_castsi128_ps(_mm_cmpeq_epi32( maskResult, nanOrInfMaskV ));
+ return !equalityCheck.areAnySet( LLVector4Logical::MASK_XYZW );
+}
+
+inline void LLVector4a::setRotatedInv( const LLRotation& rot, const LLVector4a& vec )
+{
+ LLRotation inv; inv.setTranspose( rot );
+ setRotated( inv, vec );
+}
+
+inline void LLVector4a::setRotatedInv( const LLQuaternion2& quat, const LLVector4a& vec )
+{
+ LLQuaternion2 invRot; invRot.setConjugate( quat );
+ setRotated(invRot, vec);
+}
+
+inline void LLVector4a::clamp( const LLVector4a& low, const LLVector4a& high )
+{
+ const LLVector4Logical highMask = greaterThan( high );
+ const LLVector4Logical lowMask = lessThan( low );
+
+ setSelectWithMask( highMask, high, *this );
+ setSelectWithMask( lowMask, low, *this );
+}
+
+
+////////////////////////////////////
+// LOGICAL
+////////////////////////////////////
+// The functions in this section will compare the elements in this vector
+// to those in rhs and return an LLVector4Logical with all bits set in elements
+// where the comparison was true and all bits unset in elements where the comparison
+// was false. See llvector4logical.h
+////////////////////////////////////
+// WARNING: Other than equals3 and equals4, these functions do NOT account
+// for floating point tolerance. You should include the appropriate tolerance
+// in the inputs.
+////////////////////////////////////
+
+inline LLVector4Logical LLVector4a::greaterThan(const LLVector4a& rhs) const
+{
+ return _mm_cmpgt_ps(mQ, rhs.mQ);
+}
+
+inline LLVector4Logical LLVector4a::lessThan(const LLVector4a& rhs) const
+{
+ return _mm_cmplt_ps(mQ, rhs.mQ);
+}
+
+inline LLVector4Logical LLVector4a::greaterEqual(const LLVector4a& rhs) const
+{
+ return _mm_cmpge_ps(mQ, rhs.mQ);
+}
+
+inline LLVector4Logical LLVector4a::lessEqual(const LLVector4a& rhs) const
+{
+ return _mm_cmple_ps(mQ, rhs.mQ);
+}
+
+inline LLVector4Logical LLVector4a::equal(const LLVector4a& rhs) const
+{
+ return _mm_cmpeq_ps(mQ, rhs.mQ);
+}
+
+// Returns true if this and rhs are componentwise equal up to the specified absolute tolerance
+inline bool LLVector4a::equals4(const LLVector4a& rhs, F32 tolerance ) const
+{
+ LLVector4a diff; diff.setSub( *this, rhs );
+ diff.setAbs( diff );
+ const LLQuad tol = _mm_set1_ps( tolerance );
+ const LLQuad cmp = _mm_cmplt_ps( diff, tol );
+ return (_mm_movemask_ps( cmp ) & LLVector4Logical::MASK_XYZW) == LLVector4Logical::MASK_XYZW;
+}
+
+inline bool LLVector4a::equals3(const LLVector4a& rhs, F32 tolerance ) const
+{
+ LLVector4a diff; diff.setSub( *this, rhs );
+ diff.setAbs( diff );
+ const LLQuad tol = _mm_set1_ps( tolerance );
+ const LLQuad t = _mm_cmplt_ps( diff, tol );
+ return (_mm_movemask_ps( t ) & LLVector4Logical::MASK_XYZ) == LLVector4Logical::MASK_XYZ;
+
+}
+
+////////////////////////////////////
+// OPERATORS
+////////////////////////////////////
+
+// Do NOT add additional operators without consulting someone with SSE experience
+inline const LLVector4a& LLVector4a::operator= ( const LLVector4a& rhs )
+{
+ mQ = rhs.mQ;
+ return *this;
+}
+
+inline const LLVector4a& LLVector4a::operator= ( const LLQuad& rhs )
+{
+ mQ = rhs;
+ return *this;
+}
+
+inline LLVector4a::operator LLQuad() const
+{
+ return mQ;
+}
diff --git a/indra/llmath/llvector4logical.h b/indra/llmath/llvector4logical.h
new file mode 100644
index 0000000000..1c7ee1d79f
--- /dev/null
+++ 
b/indra/llmath/llvector4logical.h @@ -0,0 +1,130 @@ +/** + * @file llvector4logical.h + * @brief LLVector4Logical class header file - Companion class to LLVector4a for logical and bit-twiddling operations + * + * $LicenseInfo:firstyear=2010&license=viewergpl$ + * + * Copyright (c) 2007-2010, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_VECTOR4LOGICAL_H +#define LL_VECTOR4LOGICAL_H + + +//////////////////////////// +// LLVector4Logical +//////////////////////////// +// This class is incomplete. If you need additional functionality, +// for example setting/unsetting particular elements or performing +// other boolean operations, feel free to implement. If you need +// assistance in determining the most optimal implementation, +// contact someone with SSE experience (Falcon, Richard, Davep, e.g.) 
+////////////////////////////
+
+static LL_ALIGN_16(const U32 S_V4LOGICAL_MASK_TABLE[4*4]) =
+{
+ 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xFFFFFFFF, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF
+};
+
+class LLVector4Logical
+{
+public:
+
+ enum {
+ MASK_X = 1,
+ MASK_Y = 1 << 1,
+ MASK_Z = 1 << 2,
+ MASK_W = 1 << 3,
+ MASK_XYZ = MASK_X | MASK_Y | MASK_Z,
+ MASK_XYZW = MASK_XYZ | MASK_W
+ };
+
+ // Empty default ctor
+ LLVector4Logical() {}
+
+ LLVector4Logical( const LLQuad& quad )
+ {
+ mQ = quad;
+ }
+
+ // Create and return a mask consisting of the lowest order bit of each element
+ inline U32 getGatheredBits() const
+ {
+ return _mm_movemask_ps(mQ);
+ };
+
+ // Invert this mask
+ inline LLVector4Logical& invert()
+ {
+ static const LL_ALIGN_16(U32 allOnes[4]) = { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+ mQ = _mm_andnot_ps( mQ, *(LLQuad*)(allOnes) );
+ return *this;
+ }
+
+ inline LLBool32 areAllSet( U32 mask ) const
+ {
+ return ( getGatheredBits() & mask) == mask;
+ }
+
+ inline LLBool32 areAllSet() const
+ {
+ return areAllSet( MASK_XYZW );
+ }
+
+ inline LLBool32 areAnySet( U32 mask ) const
+ {
+ return getGatheredBits() & mask;
+ }
+
+ inline LLBool32 areAnySet() const
+ {
+ return areAnySet( MASK_XYZW );
+ }
+
+ inline operator LLQuad() const
+ {
+ return mQ;
+ }
+
+ inline void clear()
+ {
+ mQ = _mm_setzero_ps();
+ }
+
+ template <int N> void setElement()
+ {
+ mQ = _mm_or_ps( mQ, *reinterpret_cast<const LLQuad*>(S_V4LOGICAL_MASK_TABLE + 4*N) );
+ }
+
+private:
+
+ LLQuad mQ;
+};
+
+#endif //LL_VECTOR4LOGICAL_H
diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp
new file mode 100644
index 0000000000..194b1faf81
--- /dev/null
+++ b/indra/llmath/llvolumeoctree.cpp
@@ -0,0 +1,208 @@
+/**
+
+ * @file llvolumeoctree.cpp
+ *
+ * $LicenseInfo:firstyear=2002&license=viewergpl$
+ *
+ * Copyright (c) 2002-2009, Linden Research, Inc.
+ *
+ * Second Life Viewer Source Code
+ * The source code in this file ("Source Code") is provided by Linden Lab
+ * to you under the terms of the GNU General Public License, version 2.0
+ * ("GPL"), unless you have obtained a separate licensing agreement
+ * ("Other License"), formally executed by you and Linden Lab. Terms of
+ * the GPL can be found in doc/GPL-license.txt in this distribution, or
+ * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
+ *
+ * There are special exceptions to the terms and conditions of the GPL as
+ * it is applied to this Source Code. View the full text of the exception
+ * in the file doc/FLOSS-exception.txt in this software distribution, or
+ * online at
+ * http://secondlifegrid.net/programs/open_source/licensing/flossexception
+ *
+ * By copying, modifying or distributing this software, you acknowledge
+ * that you have read and understood your obligations described above,
+ * and agree to abide by those obligations.
+ *
+ * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
+ * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
+ * COMPLETENESS OR PERFORMANCE. 
+ * $/LicenseInfo$ + */ + +#include "llvolumeoctree.h" +#include "llvector4a.h" + +BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size) +{ + LLVector4a fAWdU; + LLVector4a dir; + LLVector4a diff; + + dir.setSub(end, start); + dir.mul(0.5f); + + diff.setAdd(end,start); + diff.mul(0.5f); + diff.sub(center); + fAWdU.setAbs(dir); + + LLVector4a rhs; + rhs.setAdd(size, fAWdU); + + LLVector4a lhs; + lhs.setAbs(diff); + + U32 grt = lhs.greaterThan(rhs).getGatheredBits(); + + if (grt & 0x7) + { + return false; + } + + LLVector4a f; + f.setCross3(dir, diff); + f.setAbs(f); + + LLVector4a v0, v1; + + v0 = _mm_shuffle_ps(size, size,_MM_SHUFFLE(3,0,0,1)); + v1 = _mm_shuffle_ps(fAWdU, fAWdU, _MM_SHUFFLE(3,1,2,2)); + lhs.setMul(v0, v1); + + v0 = _mm_shuffle_ps(size, size, _MM_SHUFFLE(3,1,2,2)); + v1 = _mm_shuffle_ps(fAWdU, fAWdU, _MM_SHUFFLE(3,0,0,1)); + rhs.setMul(v0, v1); + rhs.add(lhs); + + grt = f.greaterThan(rhs).getGatheredBits(); + + return (grt & 0x7) ? false : true; +} + + +LLVolumeOctreeListener::LLVolumeOctreeListener(LLOctreeNode* node) +{ + node->addListener(this); + + mBounds = (LLVector4a*) ll_aligned_malloc_16(sizeof(LLVector4a)*4); + mExtents = mBounds+2; +} + +LLVolumeOctreeListener::~LLVolumeOctreeListener() +{ + ll_aligned_free_16(mBounds); +} + +void LLVolumeOctreeListener::handleChildAddition(const LLOctreeNode* parent, + LLOctreeNode* child) +{ + new LLVolumeOctreeListener(child); +} + + +LLOctreeTriangleRayIntersect::LLOctreeTriangleRayIntersect(const LLVector4a& start, const LLVector4a& dir, + const LLVolumeFace* face, F32* closest_t, + LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) + : mFace(face), + mStart(start), + mDir(dir), + mIntersection(intersection), + mTexCoord(tex_coord), + mNormal(normal), + mBinormal(bi_normal), + mClosestT(closest_t), + mHitFace(false) +{ + mEnd.setAdd(mStart, mDir); +} + +void LLOctreeTriangleRayIntersect::traverse(const LLOctreeNode* node) +{ + LLVolumeOctreeListener* vl = (LLVolumeOctreeListener*) node->getListener(0); + + /*const F32* start = mStart.getF32(); + const F32* end = mEnd.getF32(); + const F32* center = vl->mBounds[0].getF32(); + const F32* size = vl->mBounds[1].getF32();*/ + + //if (LLLineSegmentBoxIntersect(mStart.getF32(), mEnd.getF32(), vl->mBounds[0].getF32(), vl->mBounds[1].getF32())) + if (LLLineSegmentBoxIntersect(mStart, mEnd, vl->mBounds[0], vl->mBounds[1])) + { + node->accept(this); + for (S32 i = 0; i < node->getChildCount(); ++i) + { + traverse(node->getChild(i)); + } + } +} + +void LLOctreeTriangleRayIntersect::visit(const LLOctreeNode* node) +{ + for (LLOctreeNode::const_element_iter iter = + node->getData().begin(); iter != node->getData().end(); ++iter) + { + const LLVolumeTriangle* tri = *iter; + + F32 a, b, t; + + if (LLTriangleRayIntersect(*tri->mV[0], *tri->mV[1], *tri->mV[2], + mStart, mDir, a, b, t)) + { + if ((t >= 0.f) && // if hit is after start + (t <= 1.f) && // and before end + (t < *mClosestT)) // and this hit is closer + { + *mClosestT = t; + mHitFace = true; + + if (mIntersection != NULL) + { + LLVector4a intersect = mDir; + intersect.mul(*mClosestT); + intersect.add(mStart); + mIntersection->set(intersect.getF32ptr()); + } + + + if (mTexCoord != NULL) + { + LLVector2* tc = (LLVector2*) mFace->mTexCoords; + *mTexCoord = ((1.f - a - b) * tc[tri->mIndex[0]] + + a * tc[tri->mIndex[1]] + + b * tc[tri->mIndex[2]]); + + } + + if (mNormal != NULL) + { + LLVector4* norm = (LLVector4*) 
mFace->mNormals; + + *mNormal = ((1.f - a - b) * LLVector3(norm[tri->mIndex[0]]) + + a * LLVector3(norm[tri->mIndex[1]]) + + b * LLVector3(norm[tri->mIndex[2]])); + } + + if (mBinormal != NULL) + { + LLVector4* binormal = (LLVector4*) mFace->mBinormals; + *mBinormal = ((1.f - a - b) * LLVector3(binormal[tri->mIndex[0]]) + + a * LLVector3(binormal[tri->mIndex[1]]) + + b * LLVector3(binormal[tri->mIndex[2]])); + } + } + } + } +} + +const LLVector4a& LLVolumeTriangle::getPositionGroup() const +{ + return *mPositionGroup; +} + +const F32& LLVolumeTriangle::getBinRadius() const +{ + return mRadius; +} + + diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h new file mode 100644 index 0000000000..0031626498 --- /dev/null +++ b/indra/llmath/llvolumeoctree.h @@ -0,0 +1,138 @@ +/** + * @file llvolumeoctree.h + * @brief LLVolume octree classes. + * + * $LicenseInfo:firstyear=2002&license=viewergpl$ + * + * Copyright (c) 2002-2009, Linden Research, Inc. + * + * Second Life Viewer Source Code + * The source code in this file ("Source Code") is provided by Linden Lab + * to you under the terms of the GNU General Public License, version 2.0 + * ("GPL"), unless you have obtained a separate licensing agreement + * ("Other License"), formally executed by you and Linden Lab. Terms of + * the GPL can be found in doc/GPL-license.txt in this distribution, or + * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * + * There are special exceptions to the terms and conditions of the GPL as + * it is applied to this Source Code. View the full text of the exception + * in the file doc/FLOSS-exception.txt in this software distribution, or + * online at + * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * + * By copying, modifying or distributing this software, you acknowledge + * that you have read and understood your obligations described above, + * and agree to abide by those obligations. + * + * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO + * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, + * COMPLETENESS OR PERFORMANCE. + * $/LicenseInfo$ + */ + +#ifndef LL_LLVOLUME_OCTREE_H +#define LL_LLVOLUME_OCTREE_H + +#include "linden_common.h" +#include "llmemory.h" + +#include "lloctree.h" +#include "llvolume.h" +#include "llvector4a.h" + +class LLVolumeOctreeListener : public LLOctreeListener +{ +public: + + LLVolumeOctreeListener(LLOctreeNode* node); + ~LLVolumeOctreeListener(); + + LLVolumeOctreeListener(const LLVolumeOctreeListener& rhs) + { + *this = rhs; + } + + const LLVolumeOctreeListener& operator=(const LLVolumeOctreeListener& rhs) + { + llerrs << "Illegal operation!" 
<< llendl; + return *this; + } + + //LISTENER FUNCTIONS + virtual void handleChildAddition(const LLOctreeNode* parent, + LLOctreeNode* child); + virtual void handleStateChange(const LLTreeNode* node) { } + virtual void handleChildRemoval(const LLOctreeNode* parent, + const LLOctreeNode* child) { } + virtual void handleInsertion(const LLTreeNode* node, LLVolumeTriangle* tri) { } + virtual void handleRemoval(const LLTreeNode* node, LLVolumeTriangle* tri) { } + virtual void handleDestruction(const LLTreeNode* node) { } + + +public: + LLVector4a* mBounds; // bounding box (center, size) of this node and all its children (tight fit to objects) + LLVector4a* mExtents; // extents (min, max) of this node and all its children +}; + +class LLOctreeTriangleRayIntersect : public LLOctreeTraveler +{ +public: + const LLVolumeFace* mFace; + LLVector4a mStart; + LLVector4a mDir; + LLVector4a mEnd; + LLVector3* mIntersection; + LLVector2* mTexCoord; + LLVector3* mNormal; + LLVector3* mBinormal; + F32* mClosestT; + bool mHitFace; + + LLOctreeTriangleRayIntersect() { }; + + LLOctreeTriangleRayIntersect(const LLVector4a& start, const LLVector4a& dir, + const LLVolumeFace* face, F32* closest_t, + LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal); + + void traverse(const LLOctreeNode* node); + + virtual void visit(const LLOctreeNode* node); +}; + +class LLVolumeTriangle : public LLRefCount +{ +public: + LLVolumeTriangle() + { + mPositionGroup = (LLVector4a*) ll_aligned_malloc_16(16); + } + + LLVolumeTriangle(const LLVolumeTriangle& rhs) + { + *this = rhs; + } + + const LLVolumeTriangle& operator=(const LLVolumeTriangle& rhs) + { + llerrs << "Illegal operation!" << llendl; + return *this; + } + + ~LLVolumeTriangle() + { + ll_aligned_free_16(mPositionGroup); + } + + const LLVector4a* mV[3]; + U16 mIndex[3]; + + LLVector4a* mPositionGroup; + + F32 mRadius; + + virtual const LLVector4a& getPositionGroup() const; + virtual const F32& getBinRadius() const; +}; + + +#endif -- cgit v1.2.3 From 054e8fbde325c03bc8f9a7c10aa8bb9319d501ba Mon Sep 17 00:00:00 2001 From: "Nyx (Neal Orman)" Date: Thu, 26 Aug 2010 16:52:32 -0400 Subject: buildfix: fixing linux build to handle new SSE2 architecture code reviewed by davep and falcon --- indra/llmath/lloctree.h | 2 +- indra/llmath/llvector4a.h | 3 ++- indra/llmath/tests/llquaternion_test.cpp | 2 +- indra/llmath/tests/v3dmath_test.cpp | 2 +- indra/llmath/tests/v3math_test.cpp | 4 ++-- indra/llmath/tests/v4math_test.cpp | 2 +- 6 files changed, 8 insertions(+), 7 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 432e9fbcd8..73910ef98d 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -206,7 +206,7 @@ public: { const LLVector4a& pos = data->getPositionGroup(); - LLVector4a gt = pos.greaterThan(center); + LLVector4Logical gt = pos.greaterThan(center); LLVector4a up; up = _mm_and_ps(size, gt); diff --git a/indra/llmath/llvector4a.h b/indra/llmath/llvector4a.h index 76a3e999ce..a80e9b49dd 100644 --- a/indra/llmath/llvector4a.h +++ b/indra/llmath/llvector4a.h @@ -153,7 +153,6 @@ public: // Prefer this method for read-only access to a single element. Prefer the templated version if the elem is known at compile time. 
template LL_FORCE_INLINE LLSimdScalar getScalarAt() const; - template <> LL_FORCE_INLINE LLSimdScalar getScalarAt<0>() const; // Set to an x, y, z and optional w provided inline void set(F32 x, F32 y, F32 z, F32 w = 0.f); @@ -322,6 +321,8 @@ private: LLQuad mQ; }; +template <> LL_FORCE_INLINE LLSimdScalar LLVector4a::getScalarAt<0>() const; + inline void update_min_max(LLVector4a& min, LLVector4a& max, const LLVector4a& p) { min.setMin(min, p); diff --git a/indra/llmath/tests/llquaternion_test.cpp b/indra/llmath/tests/llquaternion_test.cpp index 2d27d1eb32..451b0ecc60 100644 --- a/indra/llmath/tests/llquaternion_test.cpp +++ b/indra/llmath/tests/llquaternion_test.cpp @@ -35,12 +35,12 @@ #include "linden_common.h" #include "../test/lltut.h" -#include "../llquaternion.h" #include "../v4math.h" #include "../v3math.h" #include "../v3dmath.h" #include "../m4math.h" #include "../m3math.h" +#include "../llquaternion.h" namespace tut { diff --git a/indra/llmath/tests/v3dmath_test.cpp b/indra/llmath/tests/v3dmath_test.cpp index 894b6200f5..734b8f8b80 100644 --- a/indra/llmath/tests/v3dmath_test.cpp +++ b/indra/llmath/tests/v3dmath_test.cpp @@ -36,11 +36,11 @@ #include "llsd.h" #include "../test/lltut.h" -#include "../llquaternion.h" #include "../m3math.h" #include "../v4math.h" #include "../v3dmath.h" #include "../v3dmath.h" +#include "../llquaternion.h" namespace tut { diff --git a/indra/llmath/tests/v3math_test.cpp b/indra/llmath/tests/v3math_test.cpp index d5c8dd2f9c..e6354c2b86 100644 --- a/indra/llmath/tests/v3math_test.cpp +++ b/indra/llmath/tests/v3math_test.cpp @@ -36,12 +36,12 @@ #include "../test/lltut.h" #include "llsd.h" -#include "../llquaternion.h" -#include "../llquantize.h" #include "../v3dmath.h" #include "../m3math.h" #include "../v4math.h" #include "../v3math.h" +#include "../llquaternion.h" +#include "../llquantize.h" namespace tut diff --git a/indra/llmath/tests/v4math_test.cpp b/indra/llmath/tests/v4math_test.cpp index e919c90efa..61fc108488 100644 --- a/indra/llmath/tests/v4math_test.cpp +++ b/indra/llmath/tests/v4math_test.cpp @@ -36,9 +36,9 @@ #include "../test/lltut.h" #include "llsd.h" -#include "../llquaternion.h" #include "../m4math.h" #include "../v4math.h" +#include "../llquaternion.h" namespace tut { -- cgit v1.2.3 From 0d305552003d8378f692cddf87caebb7c47d8ab3 Mon Sep 17 00:00:00 2001 From: "Nyx (Neal Orman)" Date: Thu, 26 Aug 2010 16:57:24 -0400 Subject: buildfix removing unnecessary line in llvector4a.h upon further review, falcon determined this fix was unnecessary. --- indra/llmath/llvector4a.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvector4a.h b/indra/llmath/llvector4a.h index a80e9b49dd..1e0b3bc854 100644 --- a/indra/llmath/llvector4a.h +++ b/indra/llmath/llvector4a.h @@ -321,8 +321,6 @@ private: LLQuad mQ; }; -template <> LL_FORCE_INLINE LLSimdScalar LLVector4a::getScalarAt<0>() const; - inline void update_min_max(LLVector4a& min, LLVector4a& max, const LLVector4a& p) { min.setMin(min, p); -- cgit v1.2.3 From 3cda7606380109beb3f331b8b53d38914f8ba8f5 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 10 Sep 2010 14:08:12 -0500 Subject: Added test code for volume raycast octree and fixed a crash in render cost calculation when selecting trees/grass. Reviewed by jwolk. 
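The reworked rebound pass in this change is a depth-first traversal: a node's extents are seeded from its own triangles (or, when it holds no data, from its first child), stretched by the already-completed extents of every child, and only then converted into the center/half-size pair kept in mBounds. The stand-alone sketch below shows that shape of computation; Vec3, Tri, Node and the vmin/vmax helpers are simplified stand-ins for illustration, not the viewer's actual classes.

    #include <algorithm>
    #include <vector>

    struct Vec3 { float x, y, z; };

    static Vec3 vmin(const Vec3& a, const Vec3& b) { return { std::min(a.x, b.x), std::min(a.y, b.y), std::min(a.z, b.z) }; }
    static Vec3 vmax(const Vec3& a, const Vec3& b) { return { std::max(a.x, b.x), std::max(a.y, b.y), std::max(a.z, b.z) }; }

    struct Tri { Vec3 v[3]; };

    struct Node
    {
        std::vector<Tri>   tris;      // triangles stored at this node
        std::vector<Node*> children;  // occupied child octants
        Vec3 mins, maxs;              // extents (min, max) of this node and all descendants
        Vec3 center, halfSize;        // bounds (center, half-size) derived from the extents
    };

    // Depth-first rebound: every child is finished before its parent is visited,
    // so stretching by child extents never reads incomplete data.
    void rebound(Node* node)
    {
        for (Node* c : node->children)
            rebound(c);

        bool first = true;
        for (const Tri& t : node->tris)
            for (int i = 0; i < 3; ++i)
            {
                node->mins = first ? t.v[i] : vmin(node->mins, t.v[i]);
                node->maxs = first ? t.v[i] : vmax(node->maxs, t.v[i]);
                first = false;
            }

        for (const Node* c : node->children)
        {
            // stretch by the already-computed child extents
            node->mins = first ? c->mins : vmin(node->mins, c->mins);
            node->maxs = first ? c->maxs : vmax(node->maxs, c->maxs);
            first = false;
        }

        if (first)
            return; // no triangles and no children; the viewer treats such a leaf as an error

        node->center   = { (node->mins.x + node->maxs.x) * 0.5f,
                           (node->mins.y + node->maxs.y) * 0.5f,
                           (node->mins.z + node->maxs.z) * 0.5f };
        node->halfSize = { (node->maxs.x - node->mins.x) * 0.5f,
                           (node->maxs.y - node->mins.y) * 0.5f,
                           (node->maxs.z - node->mins.z) * 0.5f };
    }

Because the recursion completes the children before their parent, the validation traveler added by this commit can simply check that every child's extents lie inside its parent's (within a small tolerance).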
--- indra/llmath/llvolume.cpp | 61 +++++++++++++++++++++++++---------------- indra/llmath/llvolumeoctree.cpp | 61 +++++++++++++++++++++++++++++++++++++++-- indra/llmath/llvolumeoctree.h | 6 ++-- 3 files changed, 100 insertions(+), 28 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index ab9f8c4c24..4798197921 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -94,6 +94,8 @@ const F32 SKEW_MAX = 0.95f; const F32 SCULPT_MIN_AREA = 0.002f; const S32 SCULPT_MIN_AREA_DETAIL = 1; +extern BOOL gDebugGL; + BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { LLVector3 test = (pt2-pt1)%(pt3-pt2); @@ -308,22 +310,26 @@ public: } virtual void visit(const LLOctreeNode* branch) - { + { //this is a depth first traversal, so it's safe to assum all children have complete + //bounding data + LLVolumeOctreeListener* node = (LLVolumeOctreeListener*) branch->getListener(0); LLVector4a& min = node->mExtents[0]; LLVector4a& max = node->mExtents[1]; - if (branch->getElementCount() != 0) - { + if (!branch->getData().empty()) + { //node has data, find AABB that binds data set const LLVolumeTriangle* tri = *(branch->getData().begin()); - + + //initialize min/max to first available vertex min = *(tri->mV[0]); max = *(tri->mV[0]); for (LLOctreeNode::const_element_iter iter = branch->getData().begin(); iter != branch->getData().end(); ++iter) - { + { //for each triangle in node + //stretch by triangles in node tri = *iter; @@ -335,33 +341,27 @@ public: max.setMax(max, *tri->mV[1]); max.setMax(max, *tri->mV[2]); } - - for (S32 i = 0; i < branch->getChildCount(); ++i) - { //stretch by child extents - LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); - min.setMin(min, child->mExtents[0]); - max.setMax(min, child->mExtents[1]); - } } - else if (branch->getChildCount() != 0) - { + else if (!branch->getChildren().empty()) + { //no data, but child nodes exist LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(0)->getListener(0); + //initialize min/max to extents of first child min = child->mExtents[0]; max = child->mExtents[1]; - - for (S32 i = 1; i < branch->getChildCount(); ++i) - { //stretch by child extents - child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); - min.setMin(min, child->mExtents[0]); - max.setMax(max, child->mExtents[1]); - } } else { llerrs << "WTF? 
Empty leaf" << llendl; } - + + for (S32 i = 0; i < branch->getChildCount(); ++i) + { //stretch by child extents + LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); + min.setMin(min, child->mExtents[0]); + max.setMax(max, child->mExtents[1]); + } + node->mBounds[0].setAdd(min, max); node->mBounds[0].mul(0.5f); @@ -370,7 +370,6 @@ public: } }; - //------------------------------------------------------------------- // statics //------------------------------------------------------------------- @@ -5474,45 +5473,59 @@ void LLVolumeFace::createOctree() new LLVolumeOctreeListener(mOctree); for (U32 i = 0; i < mNumIndices; i+= 3) - { + { //for each triangle LLPointer tri = new LLVolumeTriangle(); const LLVector4a& v0 = mPositions[mIndices[i]]; const LLVector4a& v1 = mPositions[mIndices[i+1]]; const LLVector4a& v2 = mPositions[mIndices[i+2]]; + //store pointers to vertex data tri->mV[0] = &v0; tri->mV[1] = &v1; tri->mV[2] = &v2; + //store indices tri->mIndex[0] = mIndices[i]; tri->mIndex[1] = mIndices[i+1]; tri->mIndex[2] = mIndices[i+2]; + //get minimum point LLVector4a min = v0; min.setMin(min, v1); min.setMin(min, v2); + //get maximum point LLVector4a max = v0; max.setMax(max, v1); max.setMax(max, v2); + //compute center LLVector4a center; center.setAdd(min, max); center.mul(0.5f); *tri->mPositionGroup = center; + //compute "radius" LLVector4a size; size.setSub(max,min); tri->mRadius = size.getLength3().getF32() * 0.5f; + //insert mOctree->insert(tri); } + //calculate AABB for each node LLVolumeOctreeRebound rebound(this); rebound.traverse(mOctree); + + if (gDebugGL) + { + LLVolumeOctreeValidate validate; + validate.traverse(mOctree); + } } diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp index 194b1faf81..12fe90f35d 100644 --- a/indra/llmath/llvolumeoctree.cpp +++ b/indra/llmath/llvolumeoctree.cpp @@ -126,8 +126,8 @@ void LLOctreeTriangleRayIntersect::traverse(const LLOctreeNode const F32* center = vl->mBounds[0].getF32(); const F32* size = vl->mBounds[1].getF32();*/ - //if (LLLineSegmentBoxIntersect(mStart.getF32(), mEnd.getF32(), vl->mBounds[0].getF32(), vl->mBounds[1].getF32())) - if (LLLineSegmentBoxIntersect(mStart, mEnd, vl->mBounds[0], vl->mBounds[1])) + //if (LLLineSegmentBoxIntersect(mStart, mEnd, vl->mBounds[0], vl->mBounds[1])) + if (LLLineSegmentBoxIntersect(mStart.getF32ptr(), mEnd.getF32ptr(), vl->mBounds[0].getF32ptr(), vl->mBounds[1].getF32ptr())) { node->accept(this); for (S32 i = 0; i < node->getChildCount(); ++i) @@ -206,3 +206,60 @@ const F32& LLVolumeTriangle::getBinRadius() const } +//TEST CODE + +void LLVolumeOctreeValidate::visit(const LLOctreeNode* branch) +{ + LLVolumeOctreeListener* node = (LLVolumeOctreeListener*) branch->getListener(0); + + //make sure bounds matches extents + LLVector4a& min = node->mExtents[0]; + LLVector4a& max = node->mExtents[1]; + + LLVector4a& center = node->mBounds[0]; + LLVector4a& size = node->mBounds[1]; + + LLVector4a test_min, test_max; + test_min.setSub(center, size); + test_max.setAdd(center, size); + + if (!test_min.equals3(min) || + !test_max.equals3(max)) + { + llerrs << "Bad bounding box data found." 
<< llendl; + } + + test_min.sub(LLVector4a::getEpsilon()); + test_max.add(LLVector4a::getEpsilon()); + + for (U32 i = 0; i < branch->getChildCount(); ++i) + { + LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); + + //make sure all children fit inside this node + if (child->mExtents[0].lessThan(test_min).areAnySet(LLVector4Logical::MASK_XYZ) || + child->mExtents[1].greaterThan(test_max).areAnySet(LLVector4Logical::MASK_XYZ)) + { + llerrs << "Child protrudes from bounding box." << llendl; + } + } + + //children fit, check data + for (LLOctreeNode::const_element_iter iter = branch->getData().begin(); + iter != branch->getData().end(); ++iter) + { + const LLVolumeTriangle* tri = *iter; + + //validate triangle + for (U32 i = 0; i < 3; i++) + { + if (tri->mV[i]->greaterThan(test_max).areAnySet(LLVector4Logical::MASK_XYZ) || + tri->mV[i]->lessThan(test_min).areAnySet(LLVector4Logical::MASK_XYZ)) + { + llerrs << "Triangle protrudes from node." << llendl; + } + } + } +} + + diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h index 0031626498..0a02a2c597 100644 --- a/indra/llmath/llvolumeoctree.h +++ b/indra/llmath/llvolumeoctree.h @@ -88,8 +88,6 @@ public: F32* mClosestT; bool mHitFace; - LLOctreeTriangleRayIntersect() { }; - LLOctreeTriangleRayIntersect(const LLVector4a& start, const LLVector4a& dir, const LLVolumeFace* face, F32* closest_t, LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal); @@ -134,5 +132,9 @@ public: virtual const F32& getBinRadius() const; }; +class LLVolumeOctreeValidate : public LLOctreeTraveler +{ + virtual void visit(const LLOctreeNode* branch); +}; #endif -- cgit v1.2.3 From 90da6d6fdc33343be72252101aed1be641e822b5 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sun, 19 Sep 2010 23:07:15 -0500 Subject: Raycasting for rigged attachments now works for your own attachments while in edit mode. 
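On the llmath side, the octree built here feeds a closest-hit query of the shape shown in LLOctreeTriangleRayIntersect::visit above: every candidate triangle is tested against the segment, a hit is accepted only when its parameter t lies in [0,1] and beats the closest t found so far, and the accepted hit's barycentric coordinates interpolate the per-vertex attributes. The sketch below shows that pattern in isolation; the triangle test is a generic Moller-Trumbore formulation used purely for illustration (it is not the viewer's LLTriangleRayIntersect), and all types and names are hypothetical.

    #include <vector>

    struct V3 { float x, y, z; };

    static V3    sub(const V3& a, const V3& b)   { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
    static V3    cross(const V3& a, const V3& b) { return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x }; }
    static float dot(const V3& a, const V3& b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

    // Moller-Trumbore segment/triangle test: on success fills t and the
    // barycentrics a, b so that hit = (1-a-b)*v0 + a*v1 + b*v2.
    static bool segment_vs_triangle(const V3& v0, const V3& v1, const V3& v2,
                                    const V3& start, const V3& dir,
                                    float& t, float& a, float& b)
    {
        const V3 e1 = sub(v1, v0);
        const V3 e2 = sub(v2, v0);
        const V3 p  = cross(dir, e2);
        const float det = dot(e1, p);
        if (det > -1e-7f && det < 1e-7f) return false;   // parallel or degenerate

        const float invDet = 1.f / det;
        const V3 s = sub(start, v0);
        a = dot(s, p) * invDet;
        if (a < 0.f || a > 1.f) return false;

        const V3 q = cross(s, e1);
        b = dot(dir, q) * invDet;
        if (b < 0.f || a + b > 1.f) return false;

        t = dot(e2, q) * invDet;
        return true;
    }

    struct Tri { V3 v[3]; float uv[3][2]; };             // positions plus per-vertex texcoords

    // Keep only the closest hit along the segment, then interpolate attributes.
    // closest_t must be initialized by the caller to something > 1 (e.g. 2.f).
    static bool closest_hit(const std::vector<Tri>& tris, const V3& start, const V3& dir,
                            float& closest_t, float out_uv[2])
    {
        bool hit = false;
        for (const Tri& tri : tris)
        {
            float t, a, b;
            if (segment_vs_triangle(tri.v[0], tri.v[1], tri.v[2], start, dir, t, a, b) &&
                t >= 0.f &&          // after the segment start
                t <= 1.f &&          // before the segment end
                t < closest_t)       // closer than any previous hit
            {
                closest_t = t;
                out_uv[0] = (1.f - a - b) * tri.uv[0][0] + a * tri.uv[1][0] + b * tri.uv[2][0];
                out_uv[1] = (1.f - a - b) * tri.uv[0][1] + a * tri.uv[1][1] + b * tri.uv[2][1];
                hit = true;
            }
        }
        return hit;
    }

The (1-a-b), a, b weighting is the same scheme visit() uses to interpolate the texture coordinate, normal and binormal at the hit point.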
--- indra/llmath/llvolume.cpp | 17 ++++++++++++----- indra/llmath/llvolume.h | 4 ++-- indra/llmath/llvolumeoctree.cpp | 8 ++++---- 3 files changed, 18 insertions(+), 11 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 4798197921..07339f7526 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2569,14 +2569,13 @@ void LLVolume::makeTetrahedron() mIsTetrahedron = TRUE; } -void LLVolume::copyVolumeFaces(LLVolume* volume) +void LLVolume::copyVolumeFaces(const LLVolume* volume) { mVolumeFaces = volume->mVolumeFaces; mSculptLevel = 0; mIsTetrahedron = FALSE; } - S32 LLVolume::getNumFaces() const { #if LL_MESH_ENABLED @@ -5462,12 +5461,17 @@ void LLVolumeFace::optimize(F32 angle_cutoff) } -void LLVolumeFace::createOctree() +void LLVolumeFace::createOctree(F32 scaler) { + if (mOctree) + { + return; + } + LLVector4a center; LLVector4a size; center.splat(0.f); - size.splat(1.f); + size.splat(0.5f); mOctree = new LLOctreeRoot(center, size, NULL); new LLVolumeOctreeListener(mOctree); @@ -5511,12 +5515,15 @@ void LLVolumeFace::createOctree() LLVector4a size; size.setSub(max,min); - tri->mRadius = size.getLength3().getF32() * 0.5f; + tri->mRadius = size.getLength3().getF32() * scaler; //insert mOctree->insert(tri); } + //remove unneeded octree layers + while (!mOctree->balance()) { } + //calculate AABB for each node LLVolumeOctreeRebound rebound(this); rebound.traverse(mOctree); diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index af28337f57..ff5e3d9dfa 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -882,7 +882,7 @@ public: }; void optimize(F32 angle_cutoff = 2.f); - void createOctree(); + void createOctree(F32 scaler = 0.25f); enum { @@ -1044,7 +1044,7 @@ public: LLVector3 mLODScaleBias; // vector for biasing LOD based on scale void sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level); - void copyVolumeFaces(LLVolume* volume); + void copyVolumeFaces(const LLVolume* volume); private: void sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type); diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp index 12fe90f35d..cb6211f63c 100644 --- a/indra/llmath/llvolumeoctree.cpp +++ b/indra/llmath/llvolumeoctree.cpp @@ -223,14 +223,14 @@ void LLVolumeOctreeValidate::visit(const LLOctreeNode* branch) test_min.setSub(center, size); test_max.setAdd(center, size); - if (!test_min.equals3(min) || - !test_max.equals3(max)) + if (!test_min.equals3(min, 0.001f) || + !test_max.equals3(max, 0.001f)) { llerrs << "Bad bounding box data found." << llendl; } - test_min.sub(LLVector4a::getEpsilon()); - test_max.add(LLVector4a::getEpsilon()); + test_min.sub(LLVector4a(0.001f)); + test_max.add(LLVector4a(0.001f)); for (U32 i = 0; i < branch->getChildCount(); ++i) { -- cgit v1.2.3 From 2c4c1c47bce3db7420f0eb27bdaa7598fbbbb65a Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sun, 19 Sep 2010 23:08:12 -0500 Subject: Take advantage of automagical tcmalloc alignment. 
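The lloctree.h rework below replaces the per-node aligned mallocs with plain members and swaps the linear scan over children for a small lookup table: an octant is a 3-bit index whose bits record whether the query position is greater than the node center on each axis (the viewer gathers all three bits at once from a SIMD compare via _mm_movemask_ps), and mChildMap maps that octant directly to a child slot, with 255 marking an empty octant. A scalar stand-in for the same lookup, with hypothetical names:

    #include <cstdint>
    #include <cstring>

    // Octant index: bit 0 set if pos.x > center.x, bit 1 for y, bit 2 for z.
    static uint8_t octant_of(const float pos[3], const float center[3])
    {
        uint8_t oct = 0;
        for (int axis = 0; axis < 3; ++axis)
            if (pos[axis] > center[axis])
                oct |= uint8_t(1u << axis);
        return oct;
    }

    struct OctNode
    {
        float    center[3];
        OctNode* child[8];     // packed array of existing children
        uint8_t  childCount;
        uint8_t  childMap[8];  // octant -> index into child[], 255 if that octant is empty

        OctNode() : childCount(0)
        {
            std::memset(child, 0, sizeof(child));
            std::memset(childMap, 0xFF, sizeof(childMap)); // all octants start empty
        }
    };

    // Descend one level in O(1) instead of scanning every child for a matching octant.
    static OctNode* descend(OctNode* node, const float pos[3])
    {
        const uint8_t slot = node->childMap[octant_of(pos, node->center)];
        return (slot == 255) ? nullptr : node->child[slot];
    }

Keeping the map beside the child array is what lets getNodeAt step down one level per iteration without looping over every child looking for the right octant.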
--- indra/llmath/lloctree.h | 127 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 86 insertions(+), 41 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 73910ef98d..63adfa85b2 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -95,22 +95,30 @@ public: typedef LLOctreeNode oct_node; typedef LLOctreeListener oct_listener; + /*void* operator new(size_t size) + { + return ll_aligned_malloc_16(size); + } + + void operator delete(void* ptr) + { + ll_aligned_free_16(ptr); + }*/ + LLOctreeNode( const LLVector4a& center, const LLVector4a& size, BaseType* parent, - S32 octant = -1) + U8 octant = 255) : mParent((oct_node*)parent), mOctant(octant) { - mD = (LLVector4a*) ll_aligned_malloc_16(sizeof(LLVector4a)*4); - - mD[CENTER] = center; - mD[SIZE] = size; + mCenter = center; + mSize = size; updateMinMax(); - if ((mOctant == -1) && mParent) + if ((mOctant == 255) && mParent) { - mOctant = ((oct_node*) mParent)->getOctant(mD[CENTER]); + mOctant = ((oct_node*) mParent)->getOctant(mCenter); } clearChildren(); @@ -124,30 +132,27 @@ public: { delete getChild(i); } - - ll_aligned_free_16(mD); } inline const BaseType* getParent() const { return mParent; } inline void setParent(BaseType* parent) { mParent = (oct_node*) parent; } - inline const LLVector4a& getCenter() const { return mD[CENTER]; } - inline const LLVector4a& getSize() const { return mD[SIZE]; } - inline void setCenter(const LLVector4a& center) { mD[CENTER] = center; } - inline void setSize(const LLVector4a& size) { mD[SIZE] = size; } + inline const LLVector4a& getCenter() const { return mCenter; } + inline const LLVector4a& getSize() const { return mSize; } + inline void setCenter(const LLVector4a& center) { mCenter = center; } + inline void setSize(const LLVector4a& size) { mSize = size; } inline oct_node* getNodeAt(T* data) { return getNodeAt(data->getPositionGroup(), data->getBinRadius()); } - inline S32 getOctant() const { return mOctant; } - inline void setOctant(S32 octant) { mOctant = octant; } + inline U8 getOctant() const { return mOctant; } inline const oct_node* getOctParent() const { return (const oct_node*) getParent(); } inline oct_node* getOctParent() { return (oct_node*) getParent(); } - S32 getOctant(const LLVector4a& pos) const //get the octant pos is in + U8 getOctant(const LLVector4a& pos) const //get the octant pos is in { - return pos.greaterThan(mD[CENTER]).getGatheredBits() & 0x7; + return (U8) (pos.greaterThan(mCenter).getGatheredBits() & 0x7); } inline bool isInside(const LLVector4a& pos, const F32& rad) const { - return rad <= mD[SIZE][0]*2.f && isInside(pos); + return rad <= mSize[0]*2.f && isInside(pos); } inline bool isInside(T* data) const @@ -157,13 +162,13 @@ public: bool isInside(const LLVector4a& pos) const { - S32 gt = pos.greaterThan(mD[MAX]).getGatheredBits() & 0x7; + S32 gt = pos.greaterThan(mMax).getGatheredBits() & 0x7; if (gt) { return false; } - S32 lt = pos.lessEqual(mD[MIN]).getGatheredBits() & 0x7; + S32 lt = pos.lessEqual(mMin).getGatheredBits() & 0x7; if (lt) { return false; @@ -174,8 +179,8 @@ public: void updateMinMax() { - mD[MAX].setAdd(mD[CENTER], mD[SIZE]); - mD[MIN].setSub(mD[CENTER], mD[SIZE]); + mMax.setAdd(mCenter, mSize); + mMin.setSub(mCenter, mSize); } inline oct_listener* getOctListener(U32 index) @@ -195,7 +200,7 @@ public: return false; } - F32 size = mD[SIZE][0]; + F32 size = mSize[0]; F32 p_size = size * 2.f; return (radius <= 0.001f && size <= 0.001f) || @@ -234,6 +239,29 @@ public: 
void accept(tree_traveler* visitor) const { visitor->visit(this); } void accept(oct_traveler* visitor) const { visitor->visit(this); } + void validateChildMap() + { + for (U32 i = 0; i < 8; i++) + { + U8 idx = mChildMap[i]; + if (idx != 255) + { + LLOctreeNode* child = mChild[idx]; + + if (child->getOctant() != i) + { + llerrs << "Invalid child map, bad octant data." << llendl; + } + + if (getOctant(child->getCenter()) != child->getOctant()) + { + llerrs << "Invalid child octant compared to position data." << llendl; + } + } + } + } + + oct_node* getNodeAt(const LLVector4a& pos, const F32& rad) { LLOctreeNode* node = this; @@ -241,25 +269,19 @@ public: if (node->isInside(pos, rad)) { //do a quick search by octant - S32 octant = node->getOctant(pos); - BOOL keep_going = TRUE; - + U8 octant = node->getOctant(pos); + //traverse the tree until we find a node that has no node //at the appropriate octant or is smaller than the object. //by definition, that node is the smallest node that contains // the data - while (keep_going && node->getSize()[0] >= rad) + U8 next_node = node->mChildMap[octant]; + + while (next_node != 255 && node->getSize()[0] >= rad) { - keep_going = FALSE; - for (U32 i = 0; i < node->getChildCount() && !keep_going; i++) - { - if (node->getChild(i)->getOctant() == octant) - { - node = node->getChild(i); - octant = node->getOctant(pos); - keep_going = TRUE; - } - } + node = node->getChild(next_node); + octant = node->getOctant(pos); + next_node = node->mChildMap[octant]; } } else if (!node->contains(rad) && node->getParent()) @@ -439,6 +461,9 @@ public: void clearChildren() { mChild.clear(); + + U32* foo = (U32*) mChildMap; + foo[0] = foo[1] = 0xFFFFFFFF; } void validate() @@ -496,6 +521,8 @@ public: } #endif + mChildMap[child->getOctant()] = (U8) mChild.size(); + mChild.push_back(child); child->setParent(this); @@ -517,6 +544,8 @@ public: listener->handleChildRemoval(this, getChild(index)); } + + if (destroy) { mChild[index]->destroy(); @@ -524,6 +553,15 @@ public: } mChild.erase(mChild.begin() + index); + //rebuild child map + U32* foo = (U32*) mChildMap; + foo[0] = foo[1] = 0xFFFFFFFF; + + for (U32 i = 0; i < mChild.size(); ++i) + { + mChildMap[mChild[i]->getOctant()] = i; + } + checkAlive(); } @@ -562,15 +600,20 @@ protected: MIN = 3 } eDName; - LLVector4a* mD; + LLVector4a mCenter; + LLVector4a mSize; + LLVector4a mMax; + LLVector4a mMin; oct_node* mParent; - S32 mOctant; + U8 mOctant; child_list mChild; + U8 mChildMap[8]; + element_list mData; -}; +}; //just like a regular node, except it might expand on insert and compress on balance template @@ -613,6 +656,8 @@ public: //destroy child child->clearChildren(); delete child; + + return false; } return true; @@ -639,7 +684,7 @@ public: const LLVector4a& v = data->getPositionGroup(); LLVector4a val; - val.setSub(v, BaseType::mD[BaseType::CENTER]); + val.setSub(v, BaseType::mCenter); val.setAbs(val); S32 lt = val.lessThan(MAX_MAG).getGatheredBits() & 0x7; -- cgit v1.2.3 From c42ed54b0a532cb4e0ad30a1b0b5038cef9938f2 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 20 Sep 2010 18:45:56 -0500 Subject: Stop using ll_aligned_malloc/free in llvolume. Fix for garbage data in vertex weight array crashing software skinning. Proper integration of picking for rigged attachhments. Optimization in LLDrawable::updateDistance (don't call updateRelativeXform, just use spatial group position). 
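Replacing ll_aligned_malloc_16/ll_aligned_free_16 with plain new[]/delete[] in the hunks below is only safe while every allocation holding LLVector4a data still lands on a 16-byte boundary, because the class wraps an SSE register and its aligned loads (movaps) fault on misaligned addresses; the previous commit's message is the hint that the code now relies on tcmalloc handing back 16-byte-aligned blocks for these sizes. The small check below illustrates the requirement with a stand-in type; only C++17 made over-aligned new[] a language guarantee, so code of this era leans on the allocator's behaviour.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for LLVector4a: four floats that must live on a 16-byte boundary
    // so aligned SSE loads/stores on them cannot fault.
    struct alignas(16) Quad
    {
        float v[4];
    };

    int main()
    {
        // The pattern the change above depends on: a plain array allocation
        // whose storage nevertheless comes back 16-byte aligned.
        Quad* verts = new Quad[1024];

        assert((reinterpret_cast<std::uintptr_t>(verts) & 0xF) == 0);
        std::printf("allocation at %p is 16-byte aligned\n", static_cast<void*>(verts));

        delete[] verts;
        return 0;
    }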
--- indra/llmath/llvolume.cpp | 108 ++++++++++++++++++++++------------------------ indra/llmath/llvolume.h | 2 +- 2 files changed, 52 insertions(+), 58 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 07339f7526..24528a8ce9 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1990,7 +1990,7 @@ void LLVolumeFace::VertexData::init() { if (!mData) { - mData = (LLVector4a*) ll_aligned_malloc_16(32); + mData = new LLVector4a[2]; } } @@ -2019,7 +2019,7 @@ const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolu LLVolumeFace::VertexData::~VertexData() { - ll_aligned_free_16(mData); + delete [] mData; } LLVector4a& LLVolumeFace::VertexData::getPosition() @@ -2257,12 +2257,12 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) U32 cur_influence = 0; LLVector4 wght(0,0,0,0); - while (joint != END_INFLUENCES) + while (joint != END_INFLUENCES && idx < weights.size()) { U16 influence = weights[idx++]; influence |= ((U16) weights[idx++] << 8); - F32 w = llmin((F32) influence / 65535.f, 0.99999f); + F32 w = llclamp((F32) influence / 65535.f, 0.f, 0.99999f); wght.mV[cur_influence++] = (F32) joint + w; if (cur_influence >= 4) @@ -5230,7 +5230,7 @@ LLVolumeFace::LLVolumeFace() : mWeights(NULL), mOctree(NULL) { - mExtents = (LLVector4a*) ll_aligned_malloc_16(48); + mExtents = new LLVector4a[3]; mCenter = mExtents+2; } @@ -5251,7 +5251,7 @@ LLVolumeFace::LLVolumeFace(const LLVolumeFace& src) mWeights(NULL), mOctree(NULL) { - mExtents = (LLVector4a*) ll_aligned_malloc_16(48); + mExtents = new LLVector4a[3]; mCenter = mExtents+2; *this = src; } @@ -5286,7 +5286,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) if (mNumVertices) { - S32 vert_size = mNumVertices*4*sizeof(F32); + S32 vert_size = mNumVertices*sizeof(LLVector4a); S32 tc_size = (mNumVertices*8+0xF) & ~0xF; LLVector4a::memcpyNonAliased16((F32*) mPositions, (F32*) src.mPositions, vert_size); @@ -5301,7 +5301,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } else { - ll_aligned_free_16(mBinormals); + delete [] mBinormals; mBinormals = NULL; } @@ -5312,7 +5312,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } else { - ll_aligned_free_16(mWeights); + delete [] mWeights; mWeights = NULL; } } @@ -5330,7 +5330,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) LLVolumeFace::~LLVolumeFace() { - ll_aligned_free_16(mExtents); + delete [] mExtents; mExtents = NULL; freeData(); @@ -5338,17 +5338,17 @@ LLVolumeFace::~LLVolumeFace() void LLVolumeFace::freeData() { - ll_aligned_free_16(mPositions); + delete [] mPositions; mPositions = NULL; - ll_aligned_free_16(mNormals); + delete [] mNormals; mNormals = NULL; - ll_aligned_free_16(mTexCoords); + delete [] mTexCoords; mTexCoords = NULL; - ll_aligned_free_16(mIndices); + delete [] mIndices; mIndices = NULL; - ll_aligned_free_16(mBinormals); + delete [] mBinormals; mBinormals = NULL; - ll_aligned_free_16(mWeights); + delete [] mWeights; mWeights = NULL; delete mOctree; @@ -5461,18 +5461,13 @@ void LLVolumeFace::optimize(F32 angle_cutoff) } -void LLVolumeFace::createOctree(F32 scaler) +void LLVolumeFace::createOctree(F32 scaler, const LLVector4a& center, const LLVector4a& size) { if (mOctree) { return; } - LLVector4a center; - LLVector4a size; - center.splat(0.f); - size.splat(0.5f); - mOctree = new LLOctreeRoot(center, size, NULL); new LLVolumeOctreeListener(mOctree); @@ -6166,21 +6161,21 @@ void 
LLVolumeFace::createBinormals() void LLVolumeFace::resizeVertices(S32 num_verts) { - ll_aligned_free_16(mPositions); - ll_aligned_free_16(mNormals); - ll_aligned_free_16(mBinormals); - ll_aligned_free_16(mTexCoords); + delete [] mPositions; + delete [] mNormals; + delete [] mBinormals; + delete [] mTexCoords; mBinormals = NULL; if (num_verts) { - mPositions = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); - mNormals = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); + mPositions = new LLVector4a[num_verts]; + mNormals = new LLVector4a[num_verts]; //pad texture coordinate block end to allow for QWORD reads S32 size = ((num_verts*8) + 0xF) & ~0xF; - mTexCoords = (LLVector2*) ll_aligned_malloc_16(size); + mTexCoords = new LLVector2[size/8]; } else { @@ -6204,20 +6199,20 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con S32 old_size = mNumVertices*16; //positions - LLVector4a* dst = (LLVector4a*) ll_aligned_malloc_16(new_size); + LLVector4a* dst = new LLVector4a[new_verts]; if (mPositions) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, old_size); - ll_aligned_free_16(mPositions); + delete [] mPositions; } mPositions = dst; //normals - dst = (LLVector4a*) ll_aligned_malloc_16(new_size); + dst = new LLVector4a[new_verts]; if (mNormals) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, old_size); - ll_aligned_free_16(mNormals); + delete [] mNormals; } mNormals = dst; @@ -6225,19 +6220,18 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con new_size = ((new_verts*8)+0xF) & ~0xF; old_size = ((mNumVertices*8)+0xF) & ~0xF; - dst = (LLVector4a*) ll_aligned_malloc_16(new_size); { - LLVector2* dst = (LLVector2*) ll_aligned_malloc_16(new_size); + LLVector2* dst = new LLVector2[new_size/8]; if (mTexCoords) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, old_size); - ll_aligned_free_16(mTexCoords); + delete [] mTexCoords; } + mTexCoords = dst; } - mTexCoords = (LLVector2*) dst; //just clear binormals - ll_aligned_free_16(mBinormals); + delete [] mBinormals; mBinormals = NULL; mPositions[mNumVertices] = pos; @@ -6249,26 +6243,26 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con void LLVolumeFace::allocateBinormals(S32 num_verts) { - ll_aligned_free_16(mBinormals); - mBinormals = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); + delete [] mBinormals; + mBinormals = new LLVector4a[num_verts]; } void LLVolumeFace::allocateWeights(S32 num_verts) { - ll_aligned_free_16(mWeights); - mWeights = (LLVector4a*) ll_aligned_malloc_16(num_verts*16); + delete [] mWeights; + mWeights = new LLVector4a[num_verts]; } void LLVolumeFace::resizeIndices(S32 num_indices) { - ll_aligned_free_16(mIndices); - + delete [] mIndices; + if (num_indices) { //pad index block end to allow for QWORD reads S32 size = ((num_indices*2) + 0xF) & ~0xF; - mIndices = (U16*) ll_aligned_malloc_16(size); + mIndices = new U16[size/2]; } else { @@ -6286,11 +6280,11 @@ void LLVolumeFace::pushIndex(const U16& idx) S32 old_size = ((mNumIndices*2)+0xF) & ~0xF; if (new_size != old_size) { - U16* dst = (U16*) ll_aligned_malloc_16(new_size); + U16* dst = new U16[new_size/2]; if (mIndices) { LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, old_size); - ll_aligned_free_16(mIndices); + delete [] mIndices; } mIndices = dst; } @@ -6333,9 +6327,9 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat } //allocate new buffer space - LLVector4a* new_pos = (LLVector4a*) 
ll_aligned_malloc_16(new_count*16); - LLVector4a* new_norm = (LLVector4a*) ll_aligned_malloc_16(new_count*16); - LLVector2* new_tc = (LLVector2*) ll_aligned_malloc_16((new_count*8+0xF) & ~0xF); + LLVector4a* new_pos = new LLVector4a[new_count]; + LLVector4a* new_norm = new LLVector4a[new_count]; + LLVector2* new_tc = new LLVector2[((new_count*8+0xF) & ~0xF)/8]; if (mNumVertices > 0) @@ -6346,10 +6340,10 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat } //free old buffer space - ll_aligned_free_16(mPositions); - ll_aligned_free_16(mNormals); - ll_aligned_free_16(mTexCoords); - + delete [] mPositions; + delete [] mNormals; + delete [] mTexCoords; + //point to new buffers mPositions = new_pos; mNormals = new_norm; @@ -6399,7 +6393,7 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat new_count = mNumIndices + face.mNumIndices; //allocate new index buffer - U16* new_indices = (U16*) ll_aligned_malloc_16((new_count*2+0xF) & ~0xF); + U16* new_indices = new U16[((new_count*2+0xF) & ~0xF)/2]; if (mNumIndices > 0) { //copy old index buffer S32 old_size = (mNumIndices*2+0xF) & ~0xF; @@ -6407,8 +6401,8 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat } //free old index buffer - ll_aligned_free_16(mIndices); - + delete [] mIndices; + //point to new index buffer mIndices = new_indices; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index ff5e3d9dfa..32364bd4b8 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -882,7 +882,7 @@ public: }; void optimize(F32 angle_cutoff = 2.f); - void createOctree(F32 scaler = 0.25f); + void createOctree(F32 scaler = 0.25f, const LLVector4a& center = LLVector4a(0,0,0), const LLVector4a& size = LLVector4a(0.5f,0.5f,0.5f)); enum { -- cgit v1.2.3 From 90e3d83a5cb35e98a02a3017dd79ebc272bbfe85 Mon Sep 17 00:00:00 2001 From: "Brad Payne (Vir Linden)" Date: Tue, 21 Sep 2010 13:26:52 -0400 Subject: Fix for build failures - disabling tcmalloc for now --- indra/llmath/CMakeLists.txt | 0 indra/llmath/camera.h | 0 indra/llmath/coordframe.h | 0 indra/llmath/llbbox.cpp | 0 indra/llmath/llbbox.h | 0 indra/llmath/llbboxlocal.cpp | 0 indra/llmath/llbboxlocal.h | 0 indra/llmath/llcamera.cpp | 0 indra/llmath/llcamera.h | 0 indra/llmath/llcoord.h | 0 indra/llmath/llcoordframe.cpp | 0 indra/llmath/llcoordframe.h | 0 indra/llmath/llinterp.h | 0 indra/llmath/llline.cpp | 0 indra/llmath/llline.h | 0 indra/llmath/llmath.h | 0 indra/llmath/llmatrix3a.cpp | 0 indra/llmath/llmatrix3a.h | 0 indra/llmath/llmatrix3a.inl | 0 indra/llmath/llmatrix4a.h | 0 indra/llmath/llmodularmath.cpp | 0 indra/llmath/llmodularmath.h | 0 indra/llmath/lloctree.h | 0 indra/llmath/llperlin.cpp | 0 indra/llmath/llperlin.h | 0 indra/llmath/llplane.h | 0 indra/llmath/llquantize.h | 0 indra/llmath/llquaternion.cpp | 0 indra/llmath/llquaternion.h | 0 indra/llmath/llquaternion2.h | 0 indra/llmath/llquaternion2.inl | 0 indra/llmath/llrect.cpp | 0 indra/llmath/llrect.h | 0 indra/llmath/llsdutil_math.cpp | 0 indra/llmath/llsdutil_math.h | 0 indra/llmath/llsimdmath.h | 0 indra/llmath/llsimdtypes.h | 0 indra/llmath/llsimdtypes.inl | 0 indra/llmath/llsphere.cpp | 0 indra/llmath/llsphere.h | 0 indra/llmath/lltreenode.h | 0 indra/llmath/llv4math.h | 0 indra/llmath/llv4matrix3.h | 0 indra/llmath/llv4matrix4.h | 0 indra/llmath/llv4vector3.h | 0 indra/llmath/llvector4a.cpp | 0 indra/llmath/llvector4a.h | 0 indra/llmath/llvector4a.inl | 0 indra/llmath/llvector4logical.h | 0 indra/llmath/llvolume.cpp | 0 
indra/llmath/llvolume.h | 0 indra/llmath/llvolumemgr.cpp | 0 indra/llmath/llvolumemgr.h | 0 indra/llmath/llvolumeoctree.cpp | 0 indra/llmath/llvolumeoctree.h | 0 indra/llmath/m3math.cpp | 0 indra/llmath/m3math.h | 0 indra/llmath/m4math.cpp | 0 indra/llmath/m4math.h | 0 indra/llmath/raytrace.cpp | 0 indra/llmath/raytrace.h | 0 indra/llmath/tests/llbbox_test.cpp | 0 indra/llmath/tests/llbboxlocal_test.cpp | 0 indra/llmath/tests/llmodularmath_test.cpp | 0 indra/llmath/tests/llquaternion_test.cpp | 0 indra/llmath/tests/llrect_test.cpp | 0 indra/llmath/tests/m3math_test.cpp | 0 indra/llmath/tests/mathmisc_test.cpp | 0 indra/llmath/tests/v2math_test.cpp | 0 indra/llmath/tests/v3color_test.cpp | 0 indra/llmath/tests/v3dmath_test.cpp | 0 indra/llmath/tests/v3math_test.cpp | 0 indra/llmath/tests/v4color_test.cpp | 0 indra/llmath/tests/v4coloru_test.cpp | 0 indra/llmath/tests/v4math_test.cpp | 0 indra/llmath/tests/xform_test.cpp | 0 indra/llmath/v2math.cpp | 0 indra/llmath/v2math.h | 0 indra/llmath/v3color.cpp | 0 indra/llmath/v3color.h | 0 indra/llmath/v3dmath.cpp | 0 indra/llmath/v3dmath.h | 0 indra/llmath/v3math.cpp | 0 indra/llmath/v3math.h | 0 indra/llmath/v4color.cpp | 0 indra/llmath/v4color.h | 0 indra/llmath/v4coloru.cpp | 0 indra/llmath/v4coloru.h | 0 indra/llmath/v4math.cpp | 0 indra/llmath/v4math.h | 0 indra/llmath/xform.cpp | 0 indra/llmath/xform.h | 0 92 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 indra/llmath/CMakeLists.txt mode change 100644 => 100755 indra/llmath/camera.h mode change 100644 => 100755 indra/llmath/coordframe.h mode change 100644 => 100755 indra/llmath/llbbox.cpp mode change 100644 => 100755 indra/llmath/llbbox.h mode change 100644 => 100755 indra/llmath/llbboxlocal.cpp mode change 100644 => 100755 indra/llmath/llbboxlocal.h mode change 100644 => 100755 indra/llmath/llcamera.cpp mode change 100644 => 100755 indra/llmath/llcamera.h mode change 100644 => 100755 indra/llmath/llcoord.h mode change 100644 => 100755 indra/llmath/llcoordframe.cpp mode change 100644 => 100755 indra/llmath/llcoordframe.h mode change 100644 => 100755 indra/llmath/llinterp.h mode change 100644 => 100755 indra/llmath/llline.cpp mode change 100644 => 100755 indra/llmath/llline.h mode change 100644 => 100755 indra/llmath/llmath.h mode change 100644 => 100755 indra/llmath/llmatrix3a.cpp mode change 100644 => 100755 indra/llmath/llmatrix3a.h mode change 100644 => 100755 indra/llmath/llmatrix3a.inl mode change 100644 => 100755 indra/llmath/llmatrix4a.h mode change 100644 => 100755 indra/llmath/llmodularmath.cpp mode change 100644 => 100755 indra/llmath/llmodularmath.h mode change 100644 => 100755 indra/llmath/lloctree.h mode change 100644 => 100755 indra/llmath/llperlin.cpp mode change 100644 => 100755 indra/llmath/llperlin.h mode change 100644 => 100755 indra/llmath/llplane.h mode change 100644 => 100755 indra/llmath/llquantize.h mode change 100644 => 100755 indra/llmath/llquaternion.cpp mode change 100644 => 100755 indra/llmath/llquaternion.h mode change 100644 => 100755 indra/llmath/llquaternion2.h mode change 100644 => 100755 indra/llmath/llquaternion2.inl mode change 100644 => 100755 indra/llmath/llrect.cpp mode change 100644 => 100755 indra/llmath/llrect.h mode change 100644 => 100755 indra/llmath/llsdutil_math.cpp mode change 100644 => 100755 indra/llmath/llsdutil_math.h mode change 100644 => 100755 indra/llmath/llsimdmath.h mode change 100644 => 100755 indra/llmath/llsimdtypes.h mode change 100644 => 100755 indra/llmath/llsimdtypes.inl mode change 100644 => 
100755 indra/llmath/llsphere.cpp mode change 100644 => 100755 indra/llmath/llsphere.h mode change 100644 => 100755 indra/llmath/lltreenode.h mode change 100644 => 100755 indra/llmath/llv4math.h mode change 100644 => 100755 indra/llmath/llv4matrix3.h mode change 100644 => 100755 indra/llmath/llv4matrix4.h mode change 100644 => 100755 indra/llmath/llv4vector3.h mode change 100644 => 100755 indra/llmath/llvector4a.cpp mode change 100644 => 100755 indra/llmath/llvector4a.h mode change 100644 => 100755 indra/llmath/llvector4a.inl mode change 100644 => 100755 indra/llmath/llvector4logical.h mode change 100644 => 100755 indra/llmath/llvolume.cpp mode change 100644 => 100755 indra/llmath/llvolume.h mode change 100644 => 100755 indra/llmath/llvolumemgr.cpp mode change 100644 => 100755 indra/llmath/llvolumemgr.h mode change 100644 => 100755 indra/llmath/llvolumeoctree.cpp mode change 100644 => 100755 indra/llmath/llvolumeoctree.h mode change 100644 => 100755 indra/llmath/m3math.cpp mode change 100644 => 100755 indra/llmath/m3math.h mode change 100644 => 100755 indra/llmath/m4math.cpp mode change 100644 => 100755 indra/llmath/m4math.h mode change 100644 => 100755 indra/llmath/raytrace.cpp mode change 100644 => 100755 indra/llmath/raytrace.h mode change 100644 => 100755 indra/llmath/tests/llbbox_test.cpp mode change 100644 => 100755 indra/llmath/tests/llbboxlocal_test.cpp mode change 100644 => 100755 indra/llmath/tests/llmodularmath_test.cpp mode change 100644 => 100755 indra/llmath/tests/llquaternion_test.cpp mode change 100644 => 100755 indra/llmath/tests/llrect_test.cpp mode change 100644 => 100755 indra/llmath/tests/m3math_test.cpp mode change 100644 => 100755 indra/llmath/tests/mathmisc_test.cpp mode change 100644 => 100755 indra/llmath/tests/v2math_test.cpp mode change 100644 => 100755 indra/llmath/tests/v3color_test.cpp mode change 100644 => 100755 indra/llmath/tests/v3dmath_test.cpp mode change 100644 => 100755 indra/llmath/tests/v3math_test.cpp mode change 100644 => 100755 indra/llmath/tests/v4color_test.cpp mode change 100644 => 100755 indra/llmath/tests/v4coloru_test.cpp mode change 100644 => 100755 indra/llmath/tests/v4math_test.cpp mode change 100644 => 100755 indra/llmath/tests/xform_test.cpp mode change 100644 => 100755 indra/llmath/v2math.cpp mode change 100644 => 100755 indra/llmath/v2math.h mode change 100644 => 100755 indra/llmath/v3color.cpp mode change 100644 => 100755 indra/llmath/v3color.h mode change 100644 => 100755 indra/llmath/v3dmath.cpp mode change 100644 => 100755 indra/llmath/v3dmath.h mode change 100644 => 100755 indra/llmath/v3math.cpp mode change 100644 => 100755 indra/llmath/v3math.h mode change 100644 => 100755 indra/llmath/v4color.cpp mode change 100644 => 100755 indra/llmath/v4color.h mode change 100644 => 100755 indra/llmath/v4coloru.cpp mode change 100644 => 100755 indra/llmath/v4coloru.h mode change 100644 => 100755 indra/llmath/v4math.cpp mode change 100644 => 100755 indra/llmath/v4math.h mode change 100644 => 100755 indra/llmath/xform.cpp mode change 100644 => 100755 indra/llmath/xform.h (limited to 'indra/llmath') diff --git a/indra/llmath/CMakeLists.txt b/indra/llmath/CMakeLists.txt old mode 100644 new mode 100755 diff --git a/indra/llmath/camera.h b/indra/llmath/camera.h old mode 100644 new mode 100755 diff --git a/indra/llmath/coordframe.h b/indra/llmath/coordframe.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llbbox.cpp b/indra/llmath/llbbox.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llbbox.h b/indra/llmath/llbbox.h old 
mode 100644 new mode 100755 diff --git a/indra/llmath/llbboxlocal.cpp b/indra/llmath/llbboxlocal.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llbboxlocal.h b/indra/llmath/llbboxlocal.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llcamera.h b/indra/llmath/llcamera.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llcoord.h b/indra/llmath/llcoord.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llcoordframe.cpp b/indra/llmath/llcoordframe.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llcoordframe.h b/indra/llmath/llcoordframe.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llinterp.h b/indra/llmath/llinterp.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llline.cpp b/indra/llmath/llline.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llline.h b/indra/llmath/llline.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llmatrix3a.cpp b/indra/llmath/llmatrix3a.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llmatrix3a.h b/indra/llmath/llmatrix3a.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llmatrix3a.inl b/indra/llmath/llmatrix3a.inl old mode 100644 new mode 100755 diff --git a/indra/llmath/llmatrix4a.h b/indra/llmath/llmatrix4a.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llmodularmath.cpp b/indra/llmath/llmodularmath.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llmodularmath.h b/indra/llmath/llmodularmath.h old mode 100644 new mode 100755 diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llperlin.cpp b/indra/llmath/llperlin.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llperlin.h b/indra/llmath/llperlin.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llplane.h b/indra/llmath/llplane.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llquantize.h b/indra/llmath/llquantize.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llquaternion.cpp b/indra/llmath/llquaternion.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llquaternion.h b/indra/llmath/llquaternion.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llquaternion2.h b/indra/llmath/llquaternion2.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llquaternion2.inl b/indra/llmath/llquaternion2.inl old mode 100644 new mode 100755 diff --git a/indra/llmath/llrect.cpp b/indra/llmath/llrect.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llrect.h b/indra/llmath/llrect.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llsdutil_math.cpp b/indra/llmath/llsdutil_math.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llsdutil_math.h b/indra/llmath/llsdutil_math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llsimdmath.h b/indra/llmath/llsimdmath.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llsimdtypes.h b/indra/llmath/llsimdtypes.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llsimdtypes.inl b/indra/llmath/llsimdtypes.inl old mode 100644 new mode 100755 diff --git a/indra/llmath/llsphere.cpp b/indra/llmath/llsphere.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llsphere.h b/indra/llmath/llsphere.h old mode 100644 new mode 100755 diff --git 
a/indra/llmath/lltreenode.h b/indra/llmath/lltreenode.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llv4math.h b/indra/llmath/llv4math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llv4matrix3.h b/indra/llmath/llv4matrix3.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llv4matrix4.h b/indra/llmath/llv4matrix4.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llv4vector3.h b/indra/llmath/llv4vector3.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llvector4a.cpp b/indra/llmath/llvector4a.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llvector4a.h b/indra/llmath/llvector4a.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llvector4a.inl b/indra/llmath/llvector4a.inl old mode 100644 new mode 100755 diff --git a/indra/llmath/llvector4logical.h b/indra/llmath/llvector4logical.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llvolumemgr.cpp b/indra/llmath/llvolumemgr.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llvolumemgr.h b/indra/llmath/llvolumemgr.h old mode 100644 new mode 100755 diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h old mode 100644 new mode 100755 diff --git a/indra/llmath/m3math.cpp b/indra/llmath/m3math.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/m3math.h b/indra/llmath/m3math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/raytrace.cpp b/indra/llmath/raytrace.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/raytrace.h b/indra/llmath/raytrace.h old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/llbbox_test.cpp b/indra/llmath/tests/llbbox_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/llbboxlocal_test.cpp b/indra/llmath/tests/llbboxlocal_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/llmodularmath_test.cpp b/indra/llmath/tests/llmodularmath_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/llquaternion_test.cpp b/indra/llmath/tests/llquaternion_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/llrect_test.cpp b/indra/llmath/tests/llrect_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/m3math_test.cpp b/indra/llmath/tests/m3math_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/mathmisc_test.cpp b/indra/llmath/tests/mathmisc_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/v2math_test.cpp b/indra/llmath/tests/v2math_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/v3color_test.cpp b/indra/llmath/tests/v3color_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/v3dmath_test.cpp b/indra/llmath/tests/v3dmath_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/v3math_test.cpp b/indra/llmath/tests/v3math_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/v4color_test.cpp b/indra/llmath/tests/v4color_test.cpp old mode 100644 new mode 100755 diff --git 
a/indra/llmath/tests/v4coloru_test.cpp b/indra/llmath/tests/v4coloru_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/v4math_test.cpp b/indra/llmath/tests/v4math_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/tests/xform_test.cpp b/indra/llmath/tests/xform_test.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v2math.cpp b/indra/llmath/v2math.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v2math.h b/indra/llmath/v2math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/v3color.cpp b/indra/llmath/v3color.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v3color.h b/indra/llmath/v3color.h old mode 100644 new mode 100755 diff --git a/indra/llmath/v3dmath.cpp b/indra/llmath/v3dmath.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v3dmath.h b/indra/llmath/v3dmath.h old mode 100644 new mode 100755 diff --git a/indra/llmath/v3math.cpp b/indra/llmath/v3math.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v3math.h b/indra/llmath/v3math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/v4color.cpp b/indra/llmath/v4color.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v4color.h b/indra/llmath/v4color.h old mode 100644 new mode 100755 diff --git a/indra/llmath/v4coloru.cpp b/indra/llmath/v4coloru.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v4coloru.h b/indra/llmath/v4coloru.h old mode 100644 new mode 100755 diff --git a/indra/llmath/v4math.cpp b/indra/llmath/v4math.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/v4math.h b/indra/llmath/v4math.h old mode 100644 new mode 100755 diff --git a/indra/llmath/xform.cpp b/indra/llmath/xform.cpp old mode 100644 new mode 100755 diff --git a/indra/llmath/xform.h b/indra/llmath/xform.h old mode 100644 new mode 100755 -- cgit v1.2.3 From d40db3d8e19fdcc024ca08e901d542bf9c552458 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 22 Sep 2010 01:43:17 -0500 Subject: SH-100, SH-111 Fix for generate normals stalling/crashing. Switch from "new" to "malloc" in llvolume.cpp aligned arrays so "realloc" can be used to avoid copies. --- indra/llmath/llvolume.cpp | 163 ++++++++++++++++++---------------------------- 1 file changed, 62 insertions(+), 101 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 24528a8ce9..c73f0e2755 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -96,6 +96,15 @@ const S32 SCULPT_MIN_AREA_DETAIL = 1; extern BOOL gDebugGL; +void assert_aligned(void* ptr, U32 alignment) +{ + U32 t = (U32) ptr; + if (t%alignment != 0) + { + llerrs << "WTF?" 
<< llendl; + } +} + BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { LLVector3 test = (pt2-pt1)%(pt3-pt2); @@ -1990,7 +1999,7 @@ void LLVolumeFace::VertexData::init() { if (!mData) { - mData = new LLVector4a[2]; + mData = (LLVector4a*) malloc(sizeof(LLVector4a)*2); } } @@ -2011,7 +2020,7 @@ const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolu if (this != &rhs) { init(); - LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 8*sizeof(F32)); + LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 2*sizeof(LLVector4a)); mTexCoord = rhs.mTexCoord; } return *this; @@ -2019,7 +2028,8 @@ const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolu LLVolumeFace::VertexData::~VertexData() { - delete [] mData; + free(mData); + mData = NULL; } LLVector4a& LLVolumeFace::VertexData::getPosition() @@ -5230,7 +5240,7 @@ LLVolumeFace::LLVolumeFace() : mWeights(NULL), mOctree(NULL) { - mExtents = new LLVector4a[3]; + mExtents = (LLVector4a*) malloc(sizeof(LLVector4a)*3); mCenter = mExtents+2; } @@ -5251,7 +5261,7 @@ LLVolumeFace::LLVolumeFace(const LLVolumeFace& src) mWeights(NULL), mOctree(NULL) { - mExtents = new LLVector4a[3]; + mExtents = (LLVector4a*) malloc(sizeof(LLVector4a)*3); mCenter = mExtents+2; *this = src; } @@ -5279,7 +5289,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) freeData(); - LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 12*sizeof(F32)); + LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 3*sizeof(LLVector4a)); resizeVertices(src.mNumVertices); resizeIndices(src.mNumIndices); @@ -5287,7 +5297,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) if (mNumVertices) { S32 vert_size = mNumVertices*sizeof(LLVector4a); - S32 tc_size = (mNumVertices*8+0xF) & ~0xF; + S32 tc_size = (mNumVertices*sizeof(LLVector2)+0xF) & ~0xF; LLVector4a::memcpyNonAliased16((F32*) mPositions, (F32*) src.mPositions, vert_size); LLVector4a::memcpyNonAliased16((F32*) mNormals, (F32*) src.mNormals, vert_size); @@ -5301,7 +5311,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } else { - delete [] mBinormals; + free(mBinormals); mBinormals = NULL; } @@ -5312,14 +5322,14 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) } else { - delete [] mWeights; + free(mWeights); mWeights = NULL; } } if (mNumIndices) { - S32 idx_size = (mNumIndices*2+0xF) & ~0xF; + S32 idx_size = (mNumIndices*sizeof(U16)+0xF) & ~0xF; LLVector4a::memcpyNonAliased16((F32*) mIndices, (F32*) src.mIndices, idx_size); } @@ -5330,7 +5340,7 @@ LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) LLVolumeFace::~LLVolumeFace() { - delete [] mExtents; + free(mExtents); mExtents = NULL; freeData(); @@ -5338,17 +5348,17 @@ LLVolumeFace::~LLVolumeFace() void LLVolumeFace::freeData() { - delete [] mPositions; + free(mPositions); mPositions = NULL; - delete [] mNormals; + free( mNormals); mNormals = NULL; - delete [] mTexCoords; + free(mTexCoords); mTexCoords = NULL; - delete [] mIndices; + free(mIndices); mIndices = NULL; - delete [] mBinormals; + free(mBinormals); mBinormals = NULL; - delete [] mWeights; + free(mWeights); mWeights = NULL; delete mOctree; @@ -5402,13 +5412,14 @@ bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a return a.mV[1] < b.mV[1]; } - return a.mV[2] < b.mV[2]; + return a.mV[2] < b.mV[2]; } void LLVolumeFace::optimize(F32 angle_cutoff) { LLVolumeFace 
new_face; + //map of points to vector of vertices at that point VertexMapData::PointMap point_map; //remove redundant vertices @@ -6161,21 +6172,24 @@ void LLVolumeFace::createBinormals() void LLVolumeFace::resizeVertices(S32 num_verts) { - delete [] mPositions; - delete [] mNormals; - delete [] mBinormals; - delete [] mTexCoords; + free(mPositions); + free(mNormals); + free(mBinormals); + free(mTexCoords); mBinormals = NULL; if (num_verts) { - mPositions = new LLVector4a[num_verts]; - mNormals = new LLVector4a[num_verts]; + mPositions = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + assert_aligned(mPositions, 16); + mNormals = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + assert_aligned(mNormals, 16); //pad texture coordinate block end to allow for QWORD reads - S32 size = ((num_verts*8) + 0xF) & ~0xF; - mTexCoords = new LLVector2[size/8]; + S32 size = ((num_verts*sizeof(LLVector2)) + 0xF) & ~0xF; + mTexCoords = (LLVector2*) malloc(size); + assert_aligned(mTexCoords, 16); } else { @@ -6199,39 +6213,18 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con S32 old_size = mNumVertices*16; //positions - LLVector4a* dst = new LLVector4a[new_verts]; - if (mPositions) - { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mPositions, old_size); - delete [] mPositions; - } - mPositions = dst; - + mPositions = (LLVector4a*) realloc(mPositions, new_size); + //normals - dst = new LLVector4a[new_verts]; - if (mNormals) - { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mNormals, old_size); - delete [] mNormals; - } - mNormals = dst; - + mNormals = (LLVector4a*) realloc(mNormals, new_size); + //tex coords new_size = ((new_verts*8)+0xF) & ~0xF; - old_size = ((mNumVertices*8)+0xF) & ~0xF; - - { - LLVector2* dst = new LLVector2[new_size/8]; - if (mTexCoords) - { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mTexCoords, old_size); - delete [] mTexCoords; - } - mTexCoords = dst; - } + mTexCoords = (LLVector2*) realloc(mTexCoords, new_size); + //just clear binormals - delete [] mBinormals; + free(mBinormals); mBinormals = NULL; mPositions[mNumVertices] = pos; @@ -6243,26 +6236,26 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con void LLVolumeFace::allocateBinormals(S32 num_verts) { - delete [] mBinormals; - mBinormals = new LLVector4a[num_verts]; + free(mBinormals); + mBinormals = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); } void LLVolumeFace::allocateWeights(S32 num_verts) { - delete [] mWeights; - mWeights = new LLVector4a[num_verts]; + free(mWeights); + mWeights = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); } void LLVolumeFace::resizeIndices(S32 num_indices) { - delete [] mIndices; + free(mIndices); if (num_indices) { //pad index block end to allow for QWORD reads - S32 size = ((num_indices*2) + 0xF) & ~0xF; + S32 size = ((num_indices*sizeof(U16)) + 0xF) & ~0xF; - mIndices = new U16[size/2]; + mIndices = (U16*) malloc(size); } else { @@ -6280,13 +6273,7 @@ void LLVolumeFace::pushIndex(const U16& idx) S32 old_size = ((mNumIndices*2)+0xF) & ~0xF; if (new_size != old_size) { - U16* dst = new U16[new_size/2]; - if (mIndices) - { - LLVector4a::memcpyNonAliased16((F32*) dst, (F32*) mIndices, old_size); - delete [] mIndices; - } - mIndices = dst; + mIndices = (U16*) realloc(mIndices, new_size); } mIndices[mNumIndices++] = idx; @@ -6327,28 +6314,13 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat } //allocate new buffer space - LLVector4a* new_pos = new 
LLVector4a[new_count]; - LLVector4a* new_norm = new LLVector4a[new_count]; - LLVector2* new_tc = new LLVector2[((new_count*8+0xF) & ~0xF)/8]; + mPositions = (LLVector4a*) realloc(mPositions, new_count*sizeof(LLVector4a)); + assert_aligned(mPositions, 16); + mNormals = (LLVector4a*) realloc(mNormals, new_count*sizeof(LLVector4a)); + assert_aligned(mNormals, 16); + mTexCoords = (LLVector2*) realloc(mTexCoords, (new_count*sizeof(LLVector2)+0xF) & ~0xF); + assert_aligned(mTexCoords, 16); - - if (mNumVertices > 0) - { //copy old buffers - LLVector4a::memcpyNonAliased16((F32*) new_pos, (F32*) mPositions, mNumVertices*4*sizeof(F32)); - LLVector4a::memcpyNonAliased16((F32*) new_norm, (F32*) mNormals, mNumVertices*4*sizeof(F32)); - LLVector4a::memcpyNonAliased16((F32*) new_tc, (F32*) mTexCoords, mNumVertices*2*sizeof(F32)); - } - - //free old buffer space - delete [] mPositions; - delete [] mNormals; - delete [] mTexCoords; - - //point to new buffers - mPositions = new_pos; - mNormals = new_norm; - mTexCoords = new_tc; - mNumVertices = new_count; //get destination address of appended face @@ -6393,19 +6365,8 @@ void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMat new_count = mNumIndices + face.mNumIndices; //allocate new index buffer - U16* new_indices = new U16[((new_count*2+0xF) & ~0xF)/2]; - if (mNumIndices > 0) - { //copy old index buffer - S32 old_size = (mNumIndices*2+0xF) & ~0xF; - LLVector4a::memcpyNonAliased16((F32*) new_indices, (F32*) mIndices, old_size); - } - - //free old index buffer - delete [] mIndices; + mIndices = (U16*) realloc(mIndices, (new_count*sizeof(U16)+0xF) & ~0xF); - //point to new index buffer - mIndices = new_indices; - //get destination address into new index buffer U16* dst_idx = mIndices+mNumIndices; mNumIndices = new_count; -- cgit v1.2.3 From cf09d6c58a741263cddcf338c2f79836873475b1 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 22 Sep 2010 03:04:21 -0500 Subject: Remove LL_MESH_ENABLED macros (fixes drag and drop). Add mesh stitching type back into tools floater. 
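
The malloc/realloc change above (SH-100, SH-111) boils down to one pattern: keep the vertex arrays in raw allocator blocks instead of new[] arrays, pad the byte size up to a 16-byte multiple for QWORD reads, and let realloc grow the block in place rather than allocate-copy-free by hand. The sketch below is illustrative only, not the viewer's code; the type and helper names are hypothetical, it uses only standard C allocator calls, and it leans on the same assumption the patch makes (and checks with assert_aligned), namely that malloc/realloc hand back 16-byte-aligned blocks on the target platforms. Error handling for a failed realloc is omitted, as it is in the patch.

#include <cassert>
#include <cstdint>
#include <cstdlib>

struct Vec4 { float v[4]; };   // stand-in for a 16-byte LLVector4a-style vertex element

static void assert_aligned16(const void* p)
{
    // uintptr_t avoids truncating the pointer on 64-bit builds;
    // SIMD loads of these elements expect 16-byte alignment.
    assert(reinterpret_cast<std::uintptr_t>(p) % 16 == 0);
}

struct GrowableVertexArray     // hypothetical helper, not a viewer class
{
    Vec4* data  = nullptr;
    int   count = 0;

    void resize(int n)
    {
        // round the allocation up to a 16-byte multiple, mirroring the
        // (bytes + 0xF) & ~0xF padding used for the tex-coord and index blocks
        std::size_t bytes = (n * sizeof(Vec4) + 0xF) & ~static_cast<std::size_t>(0xF);

        // realloc(nullptr, ...) behaves like malloc, and an existing block
        // can often be extended in place, so growing does not force a copy
        data = static_cast<Vec4*>(std::realloc(data, bytes));
        assert_aligned16(data);
        count = n;
    }

    void push(const Vec4& p)
    {
        resize(count + 1);     // no manual allocate/copy/free on each append
        data[count - 1] = p;
    }

    ~GrowableVertexArray() { std::free(data); }
};

In this form a per-vertex push no longer pays the allocate-copy-free cost of the earlier new[] path, which is the stall the commit message attributes to generate-normals.
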
--- indra/llmath/llvolume.cpp | 8 -------- indra/llmath/llvolume.h | 8 -------- 2 files changed, 16 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index c73f0e2755..1a24e0fbe9 100755 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2588,14 +2588,12 @@ void LLVolume::copyVolumeFaces(const LLVolume* volume) S32 LLVolume::getNumFaces() const { -#if LL_MESH_ENABLED U8 sculpt_type = (mParams.getSculptType() & LL_SCULPT_TYPE_MASK); if (sculpt_type == LL_SCULPT_TYPE_MESH) { return LL_SCULPT_MESH_MAX_FACES; } -#endif return (S32)mProfilep->mFaces.size(); } @@ -4176,12 +4174,10 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, normals.clear(); segments.clear(); -#if LL_MESH_ENABLED if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH) { return; } -#endif S32 cur_index = 0; //for each face @@ -6408,14 +6404,10 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) resizeVertices(num_vertices); resizeIndices(num_indices); -#if LL_MESH_ENABLED if ((volume->getParams().getSculptType() & LL_SCULPT_TYPE_MASK) != LL_SCULPT_TYPE_MESH) { mEdge.resize(num_indices); } -#else - mEdge.resize(num_indices); -#endif } LLVector4a* pos = (LLVector4a*) mPositions; diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 32364bd4b8..b4b59fd402 100755 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -41,8 +41,6 @@ class LLVolumeParams; class LLProfile; class LLPath; -#define LL_MESH_ENABLED 1 - template class LLOctreeNode; class LLVector4a; @@ -192,15 +190,9 @@ const U8 LL_SCULPT_TYPE_SPHERE = 1; const U8 LL_SCULPT_TYPE_TORUS = 2; const U8 LL_SCULPT_TYPE_PLANE = 3; const U8 LL_SCULPT_TYPE_CYLINDER = 4; -#if LL_MESH_ENABLED const U8 LL_SCULPT_TYPE_MESH = 5; - const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | LL_SCULPT_TYPE_CYLINDER | LL_SCULPT_TYPE_MESH; -#else -const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | - LL_SCULPT_TYPE_CYLINDER; -#endif const U8 LL_SCULPT_FLAG_INVERT = 64; const U8 LL_SCULPT_FLAG_MIRROR = 128; -- cgit v1.2.3 From 6d8e9cd8bde57bd033beeb9610f7094c19655ed1 Mon Sep 17 00:00:00 2001 From: Jonathan Wolk Date: Wed, 22 Sep 2010 11:22:50 -0700 Subject: Commenting out unused variables to help mac build --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 1a24e0fbe9..1f15d5465c 100755 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -6206,7 +6206,7 @@ void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, con { S32 new_verts = mNumVertices+1; S32 new_size = new_verts*16; - S32 old_size = mNumVertices*16; +// S32 old_size = mNumVertices*16; //positions mPositions = (LLVector4a*) realloc(mPositions, new_size); -- cgit v1.2.3 From 11fce2013426c8472e9dba5dbf552fdd55259b86 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 1 Oct 2010 13:33:14 -0500 Subject: Add llhysicsshapebuilderutil --- indra/llmath/llvolume.cpp | 10 ++++++++++ indra/llmath/llvolume.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 1f15d5465c..b3a6880011 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -3044,6 +3044,16 @@ BOOL LLVolume::isFlat(S32 face) } +bool LLVolumeParams::isSculpt() const 
+{ + return mSculptID.notNull(); +} + +bool LLVolumeParams::isMeshSculpt() const +{ + return isSculpt() && ((mSculptType & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH); +} + bool LLVolumeParams::operator==(const LLVolumeParams ¶ms) const { return ( (getPathParams() == params.getPathParams()) && diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index b4b59fd402..857188ed28 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -646,6 +646,8 @@ public: const F32& getSkew() const { return mPathParams.getSkew(); } const LLUUID& getSculptID() const { return mSculptID; } const U8& getSculptType() const { return mSculptType; } + bool isSculpt() const; + bool isMeshSculpt() const; BOOL isConvex() const; // 'begin' and 'end' should be in range [0, 1] (they will be clamped) -- cgit v1.2.3 From 478e0927c87338e02e75d3791f51ad2b4e7b8c74 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 4 Oct 2010 09:48:05 -0500 Subject: Interface/state for rendering convex hull physics shapes. Still need to implement gMeshRepo.buildHull --- indra/llmath/llvolume.cpp | 9 +++++++++ indra/llmath/llvolume.h | 8 +++++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index b3a6880011..3da9c9ca79 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1828,6 +1828,10 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mSculptLevel = -2; mIsTetrahedron = FALSE; mLODScaleBias.setVec(1,1,1); + mHullPoints = NULL; + mHullIndices = NULL; + mNumHullPoints = 0; + mNumHullIndices = 0; // set defaults if (mParams.getPathParams().getCurveType() == LL_PCODE_PATH_FLEXIBLE) @@ -1879,6 +1883,11 @@ LLVolume::~LLVolume() mPathp = NULL; mProfilep = NULL; mVolumeFaces.clear(); + + free(mHullPoints); + mHullPoints = NULL; + free(mHullIndices); + mHullIndices = NULL; } BOOL LLVolume::generate() diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 857188ed28..6e080f4877 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -1068,10 +1068,16 @@ public: LLPath *mPathp; LLProfile *mProfilep; std::vector mMesh; - + BOOL mGenerateSingleFace; typedef std::vector face_list_t; face_list_t mVolumeFaces; + +public: + LLVector4a* mHullPoints; + U16* mHullIndices; + S32 mNumHullPoints; + S32 mNumHullIndices; }; std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params); -- cgit v1.2.3 From fecf1883efab2df720e4097d66d450dd8011399f Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 7 Oct 2010 15:58:01 -0500 Subject: Increase default scale limit to 64m --- indra/llmath/xform.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/xform.h b/indra/llmath/xform.h index c4edd46279..1595da3a72 100644 --- a/indra/llmath/xform.h +++ b/indra/llmath/xform.h @@ -38,7 +38,7 @@ const F32 MAX_OBJECT_Z = 4096.f; // should match REGION_HEIGHT_METERS, Pre-havok4: 768.f const F32 MIN_OBJECT_Z = -256.f; -const F32 DEFAULT_MAX_PRIM_SCALE = 10.f; +const F32 DEFAULT_MAX_PRIM_SCALE = 64.f; const F32 MIN_PRIM_SCALE = 0.01f; const F32 MAX_PRIM_SCALE = 65536.f; // something very high but not near FLT_MAX -- cgit v1.2.3 From a5619d16f74863168f45b04b37cc6383e1a92263 Mon Sep 17 00:00:00 2001 From: Oz Linden Date: Wed, 13 Oct 2010 07:24:37 -0400 Subject: correct licenses (fix problem with license change merge) --- indra/llmath/camera.h | 36 +++++++++++--------------- indra/llmath/coordframe.h | 36 
+++++++++++--------------- indra/llmath/llbbox.cpp | 36 +++++++++++--------------- indra/llmath/llbbox.h | 36 +++++++++++--------------- indra/llmath/llbboxlocal.cpp | 36 +++++++++++--------------- indra/llmath/llbboxlocal.h | 36 +++++++++++--------------- indra/llmath/llcamera.cpp | 36 +++++++++++--------------- indra/llmath/llcamera.h | 36 +++++++++++--------------- indra/llmath/llcoord.h | 36 +++++++++++--------------- indra/llmath/llcoordframe.cpp | 36 +++++++++++--------------- indra/llmath/llcoordframe.h | 36 +++++++++++--------------- indra/llmath/llinterp.h | 36 +++++++++++--------------- indra/llmath/llline.cpp | 36 +++++++++++--------------- indra/llmath/llline.h | 36 +++++++++++--------------- indra/llmath/llmath.h | 36 +++++++++++--------------- indra/llmath/llmatrix3a.cpp | 36 +++++++++++--------------- indra/llmath/llmatrix3a.h | 36 +++++++++++--------------- indra/llmath/llmatrix3a.inl | 36 +++++++++++--------------- indra/llmath/llmatrix4a.h | 36 +++++++++++--------------- indra/llmath/llmodularmath.cpp | 36 +++++++++++--------------- indra/llmath/llmodularmath.h | 36 +++++++++++--------------- indra/llmath/lloctree.h | 36 +++++++++++--------------- indra/llmath/llperlin.cpp | 36 +++++++++++--------------- indra/llmath/llperlin.h | 36 +++++++++++--------------- indra/llmath/llplane.h | 36 +++++++++++--------------- indra/llmath/llquantize.h | 36 +++++++++++--------------- indra/llmath/llquaternion.cpp | 36 +++++++++++--------------- indra/llmath/llquaternion.h | 36 +++++++++++--------------- indra/llmath/llquaternion2.h | 36 +++++++++++--------------- indra/llmath/llquaternion2.inl | 36 +++++++++++--------------- indra/llmath/llrect.cpp | 36 +++++++++++--------------- indra/llmath/llrect.h | 36 +++++++++++--------------- indra/llmath/llsdutil_math.cpp | 36 +++++++++++--------------- indra/llmath/llsdutil_math.h | 36 +++++++++++--------------- indra/llmath/llsimdmath.h | 36 +++++++++++--------------- indra/llmath/llsimdtypes.h | 36 +++++++++++--------------- indra/llmath/llsimdtypes.inl | 36 +++++++++++--------------- indra/llmath/llsphere.cpp | 36 +++++++++++--------------- indra/llmath/llsphere.h | 36 +++++++++++--------------- indra/llmath/lltreenode.h | 36 +++++++++++--------------- indra/llmath/llv4math.h | 36 +++++++++++--------------- indra/llmath/llv4matrix3.h | 36 +++++++++++--------------- indra/llmath/llv4matrix4.h | 36 +++++++++++--------------- indra/llmath/llv4vector3.h | 36 +++++++++++--------------- indra/llmath/llvector4a.cpp | 36 +++++++++++--------------- indra/llmath/llvector4a.h | 36 +++++++++++--------------- indra/llmath/llvector4a.inl | 36 +++++++++++--------------- indra/llmath/llvector4logical.h | 36 +++++++++++--------------- indra/llmath/llvolume.cpp | 36 +++++++++++--------------- indra/llmath/llvolume.h | 36 +++++++++++--------------- indra/llmath/llvolumemgr.cpp | 36 +++++++++++--------------- indra/llmath/llvolumemgr.h | 36 +++++++++++--------------- indra/llmath/llvolumeoctree.cpp | 36 +++++++++++--------------- indra/llmath/llvolumeoctree.h | 36 +++++++++++--------------- indra/llmath/m3math.cpp | 36 +++++++++++--------------- indra/llmath/m3math.h | 36 +++++++++++--------------- indra/llmath/m4math.cpp | 36 +++++++++++--------------- indra/llmath/m4math.h | 36 +++++++++++--------------- indra/llmath/raytrace.cpp | 36 +++++++++++--------------- indra/llmath/raytrace.h | 36 +++++++++++--------------- indra/llmath/tests/llbbox_test.cpp | 42 +++++++++++++------------------ indra/llmath/tests/llbboxlocal_test.cpp | 42 
+++++++++++++------------------ indra/llmath/tests/llmodularmath_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/llquaternion_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/llrect_test.cpp | 42 +++++++++++++------------------ indra/llmath/tests/m3math_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/mathmisc_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v2math_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v3color_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v3dmath_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v3math_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v4color_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v4coloru_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/v4math_test.cpp | 36 +++++++++++--------------- indra/llmath/tests/xform_test.cpp | 36 +++++++++++--------------- indra/llmath/v2math.cpp | 36 +++++++++++--------------- indra/llmath/v2math.h | 36 +++++++++++--------------- indra/llmath/v3color.cpp | 36 +++++++++++--------------- indra/llmath/v3color.h | 36 +++++++++++--------------- indra/llmath/v3dmath.cpp | 36 +++++++++++--------------- indra/llmath/v3dmath.h | 36 +++++++++++--------------- indra/llmath/v3math.cpp | 36 +++++++++++--------------- indra/llmath/v3math.h | 36 +++++++++++--------------- indra/llmath/v4color.cpp | 36 +++++++++++--------------- indra/llmath/v4color.h | 36 +++++++++++--------------- indra/llmath/v4coloru.cpp | 36 +++++++++++--------------- indra/llmath/v4coloru.h | 36 +++++++++++--------------- indra/llmath/v4math.cpp | 36 +++++++++++--------------- indra/llmath/v4math.h | 36 +++++++++++--------------- indra/llmath/xform.cpp | 36 +++++++++++--------------- indra/llmath/xform.h | 36 +++++++++++--------------- 91 files changed, 1374 insertions(+), 1920 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/camera.h b/indra/llmath/camera.h index ce41f8781d..26f3c3d19f 100644 --- a/indra/llmath/camera.h +++ b/indra/llmath/camera.h @@ -2,31 +2,25 @@ * @file camera.h * @brief Legacy wrapper header. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/coordframe.h b/indra/llmath/coordframe.h index b8a1c14abf..271bcb433c 100644 --- a/indra/llmath/coordframe.h +++ b/indra/llmath/coordframe.h @@ -2,31 +2,25 @@ * @file coordframe.h * @brief Legacy wrapper header. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llbbox.cpp b/indra/llmath/llbbox.cpp index 914cbfdc12..b46a6e03d2 100644 --- a/indra/llmath/llbbox.cpp +++ b/indra/llmath/llbbox.cpp @@ -2,31 +2,25 @@ * @file llbbox.cpp * @brief General purpose bounding box class (Not axis aligned) * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llbbox.h b/indra/llmath/llbbox.h index cd29551b01..5b911793f0 100644 --- a/indra/llmath/llbbox.h +++ b/indra/llmath/llbbox.h @@ -2,31 +2,25 @@ * @file llbbox.h * @brief General purpose bounding box class * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llbboxlocal.cpp b/indra/llmath/llbboxlocal.cpp index 3d0dbb0762..bf0c1a7b93 100644 --- a/indra/llmath/llbboxlocal.cpp +++ b/indra/llmath/llbboxlocal.cpp @@ -2,31 +2,25 @@ * @file llbboxlocal.cpp * @brief General purpose bounding box class (Not axis aligned). * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llbboxlocal.h b/indra/llmath/llbboxlocal.h index 0d1e5a3ae5..defb899248 100644 --- a/indra/llmath/llbboxlocal.h +++ b/indra/llmath/llbboxlocal.h @@ -2,31 +2,25 @@ * @file llbboxlocal.h * @brief General purpose bounding box class. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp index beb5c48624..a442a0edb8 100644 --- a/indra/llmath/llcamera.cpp +++ b/indra/llmath/llcamera.cpp @@ -2,31 +2,25 @@ * @file llcamera.cpp * @brief Implementation of the LLCamera class. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llcamera.h b/indra/llmath/llcamera.h index c40e819dcf..e15bd7ad43 100644 --- a/indra/llmath/llcamera.h +++ b/indra/llmath/llcamera.h @@ -2,31 +2,25 @@ * @file llcamera.h * @brief Header file for the LLCamera class. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llcoord.h b/indra/llmath/llcoord.h index 9e38fddbd7..706ad92787 100644 --- a/indra/llmath/llcoord.h +++ b/indra/llmath/llcoord.h @@ -1,31 +1,25 @@ /** * @file llcoord.h * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llcoordframe.cpp b/indra/llmath/llcoordframe.cpp index 673a8f2b04..7dd8e43185 100644 --- a/indra/llmath/llcoordframe.cpp +++ b/indra/llmath/llcoordframe.cpp @@ -2,31 +2,25 @@ * @file llcoordframe.cpp * @brief LLCoordFrame class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llcoordframe.h b/indra/llmath/llcoordframe.h index 89b5d8beff..909adf260c 100644 --- a/indra/llmath/llcoordframe.h +++ b/indra/llmath/llcoordframe.h @@ -2,31 +2,25 @@ * @file llcoordframe.h * @brief LLCoordFrame class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llinterp.h b/indra/llmath/llinterp.h index 88af004170..5187646179 100644 --- a/indra/llmath/llinterp.h +++ b/indra/llmath/llinterp.h @@ -1,31 +1,25 @@ /** * @file llinterp.h * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llline.cpp b/indra/llmath/llline.cpp index b198573206..ef10d1e7fa 100644 --- a/indra/llmath/llline.cpp +++ b/indra/llmath/llline.cpp @@ -3,31 +3,25 @@ * @author Andrew Meadows * @brief Simple line class that can compute nearest approach between two lines * - * $LicenseInfo:firstyear=2006&license=viewergpl$ - * - * Copyright (c) 2006-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2006&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llline.h b/indra/llmath/llline.h index 9ab41b162b..e1cbc1323e 100644 --- a/indra/llmath/llline.h +++ b/indra/llmath/llline.h @@ -4,31 +4,25 @@ * @author Andrew Meadows * @brief Simple line for computing nearest approach between two infinite lines * - * $LicenseInfo:firstyear=2006&license=viewergpl$ - * - * Copyright (c) 2006-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2006&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h index e572381b1a..25b29ac1a8 100644 --- a/indra/llmath/llmath.h +++ b/indra/llmath/llmath.h @@ -2,31 +2,25 @@ * @file llmath.h * @brief Useful math constants and macros. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmatrix3a.cpp b/indra/llmath/llmatrix3a.cpp index b7468f4914..ab077abcb0 100644 --- a/indra/llmath/llmatrix3a.cpp +++ b/indra/llmath/llmatrix3a.cpp @@ -2,31 +2,25 @@ * @file llvector4a.cpp * @brief SIMD vector implementation * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmatrix3a.h b/indra/llmath/llmatrix3a.h index 56327f9f6d..adb7e3389d 100644 --- a/indra/llmath/llmatrix3a.h +++ b/indra/llmath/llmatrix3a.h @@ -2,31 +2,25 @@ * @file llmatrix3a.h * @brief LLMatrix3a class header file - memory aligned and vectorized 3x3 matrix * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2010, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmatrix3a.inl b/indra/llmath/llmatrix3a.inl index 65fd949f78..37819fea3c 100644 --- a/indra/llmath/llmatrix3a.inl +++ b/indra/llmath/llmatrix3a.inl @@ -2,31 +2,25 @@ * @file llmatrix3a.inl * @brief LLMatrix3a inline definitions * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmatrix4a.h b/indra/llmath/llmatrix4a.h index 0ead045d04..27cf5b79f6 100644 --- a/indra/llmath/llmatrix4a.h +++ b/indra/llmath/llmatrix4a.h @@ -2,31 +2,25 @@ * @file llmatrix4a.h * @brief LLMatrix4a class header file - memory aligned and vectorized 4x4 matrix * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmodularmath.cpp b/indra/llmath/llmodularmath.cpp index 4a6553ae7c..cdc20028bf 100644 --- a/indra/llmath/llmodularmath.cpp +++ b/indra/llmath/llmodularmath.cpp @@ -2,31 +2,25 @@ * @file llmodularmath.cpp * @brief LLModularMath class implementation * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llmodularmath.h b/indra/llmath/llmodularmath.h index 60095293c9..0d4d28fadc 100644 --- a/indra/llmath/llmodularmath.h +++ b/indra/llmath/llmodularmath.h @@ -2,31 +2,25 @@ * @file llmodularmath.h * @brief Useful modular math functions. * - * $LicenseInfo:firstyear=2008&license=viewergpl$ - * - * Copyright (c) 2008-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2008&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 63adfa85b2..ed97ff76ba 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -2,31 +2,25 @@ * @file lloctree.h * @brief Octree declaration. * - * $LicenseInfo:firstyear=2005&license=viewergpl$ - * - * Copyright (c) 2005-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2005&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llperlin.cpp b/indra/llmath/llperlin.cpp index 9293d972a4..e1da2bf92b 100644 --- a/indra/llmath/llperlin.cpp +++ b/indra/llmath/llperlin.cpp @@ -1,31 +1,25 @@ /** * @file llperlin.cpp * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llperlin.h b/indra/llmath/llperlin.h index e8815ece58..40cf19d1ec 100644 --- a/indra/llmath/llperlin.h +++ b/indra/llmath/llperlin.h @@ -1,31 +1,25 @@ /** * @file llperlin.h * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. 
Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llplane.h b/indra/llmath/llplane.h index 89c6a1460f..443f3f46b9 100644 --- a/indra/llmath/llplane.h +++ b/indra/llmath/llplane.h @@ -1,31 +1,25 @@ /** * @file llplane.h * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
* - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llquantize.h b/indra/llmath/llquantize.h index c043f7f752..1595dbecf8 100644 --- a/indra/llmath/llquantize.h +++ b/indra/llmath/llquantize.h @@ -3,31 +3,25 @@ * @brief useful routines for quantizing floats to various length ints * and back out again * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llquaternion.cpp b/indra/llmath/llquaternion.cpp index 73c5f4505e..7381d5eb99 100644 --- a/indra/llmath/llquaternion.cpp +++ b/indra/llmath/llquaternion.cpp @@ -2,31 +2,25 @@ * @file llquaternion.cpp * @brief LLQuaternion class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llquaternion.h b/indra/llmath/llquaternion.h index a7bb09fae3..ca0dfe206b 100644 --- a/indra/llmath/llquaternion.h +++ b/indra/llmath/llquaternion.h @@ -2,31 +2,25 @@ * @file llquaternion.h * @brief LLQuaternion class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llquaternion2.h b/indra/llmath/llquaternion2.h index dbb4afe312..fd9c0cf3ab 100644 --- a/indra/llmath/llquaternion2.h +++ b/indra/llmath/llquaternion2.h @@ -2,31 +2,25 @@ * @file llquaternion2.h * @brief LLQuaternion2 class header file - SIMD-enabled quaternion class * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llquaternion2.inl b/indra/llmath/llquaternion2.inl index 9a4274d6a4..2a6987552d 100644 --- a/indra/llmath/llquaternion2.inl +++ b/indra/llmath/llquaternion2.inl @@ -2,31 +2,25 @@ * @file llquaternion2.inl * @brief LLQuaternion2 inline definitions * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llrect.cpp b/indra/llmath/llrect.cpp index 14e094792d..4083c99768 100644 --- a/indra/llmath/llrect.cpp +++ b/indra/llmath/llrect.cpp @@ -2,31 +2,25 @@ * @file llrect.cpp * @brief LLRect class implementation * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llrect.h b/indra/llmath/llrect.h index 2fa8cb3f16..c51e0e0ae6 100644 --- a/indra/llmath/llrect.h +++ b/indra/llmath/llrect.h @@ -2,31 +2,25 @@ * @file llrect.h * @brief A rectangle in GL coordinates, with bottom,left = 0,0 * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsdutil_math.cpp b/indra/llmath/llsdutil_math.cpp index 1bd12ae513..591f7fde36 100644 --- a/indra/llmath/llsdutil_math.cpp +++ b/indra/llmath/llsdutil_math.cpp @@ -4,31 +4,25 @@ * @date 2006-05-24 * @brief Implementation of classes, functions, etc, for using structured data. * - * $LicenseInfo:firstyear=2006&license=viewergpl$ - * - * Copyright (c) 2006-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2006&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsdutil_math.h b/indra/llmath/llsdutil_math.h index 121f4b746a..0ea78cd231 100644 --- a/indra/llmath/llsdutil_math.h +++ b/indra/llmath/llsdutil_math.h @@ -4,31 +4,25 @@ * @date 2009-05-19 * @brief Utility classes, functions, etc, for using structured data with math classes. * - * $LicenseInfo:firstyear=2009&license=viewergpl$ - * - * Copyright (c) 2009-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2009&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsimdmath.h b/indra/llmath/llsimdmath.h index 9377bfdb53..d6debce2d7 100644 --- a/indra/llmath/llsimdmath.h +++ b/indra/llmath/llsimdmath.h @@ -2,31 +2,25 @@ * @file llsimdmath.h * @brief Common header for SIMD-based math library (llvector4a, llmatrix3a, etc.) * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsimdtypes.h b/indra/llmath/llsimdtypes.h index 82e318c8bf..bd991d0e71 100644 --- a/indra/llmath/llsimdtypes.h +++ b/indra/llmath/llsimdtypes.h @@ -2,31 +2,25 @@ * @file llsimdtypes.h * @brief Declaration of basic SIMD math related types * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsimdtypes.inl b/indra/llmath/llsimdtypes.inl index 69c858e310..712239e425 100644 --- a/indra/llmath/llsimdtypes.inl +++ b/indra/llmath/llsimdtypes.inl @@ -2,31 +2,25 @@ * @file llsimdtypes.inl * @brief Inlined definitions of basic SIMD math related types * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsphere.cpp b/indra/llmath/llsphere.cpp index b260c134a7..740047b93a 100644 --- a/indra/llmath/llsphere.cpp +++ b/indra/llmath/llsphere.cpp @@ -3,31 +3,25 @@ * @author Andrew Meadows * @brief Simple line class that can compute nearest approach between two lines * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llsphere.h b/indra/llmath/llsphere.h index 58df07a567..7c60a11406 100644 --- a/indra/llmath/llsphere.h +++ b/indra/llmath/llsphere.h @@ -4,31 +4,25 @@ * @author Andrew Meadows * @brief Simple sphere implementation for basic geometric operations * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/lltreenode.h b/indra/llmath/lltreenode.h index e6d2521b2a..c66bc26176 100644 --- a/indra/llmath/lltreenode.h +++ b/indra/llmath/lltreenode.h @@ -1,31 +1,25 @@ /** * @file lltreenode.h * - * $LicenseInfo:firstyear=2005&license=viewergpl$ - * - * Copyright (c) 2005-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2005&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llv4math.h b/indra/llmath/llv4math.h index 5b180b3d77..5f403ba526 100644 --- a/indra/llmath/llv4math.h +++ b/indra/llmath/llv4math.h @@ -2,31 +2,25 @@ * @file llv4math.h * @brief LLV4* class header file - vector processor enabled math * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llv4matrix3.h b/indra/llmath/llv4matrix3.h index ed503e9d78..270f5d7dae 100644 --- a/indra/llmath/llv4matrix3.h +++ b/indra/llmath/llv4matrix3.h @@ -2,31 +2,25 @@ * @file llviewerjointmesh.cpp * @brief LLV4* class header file - vector processor enabled math * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llv4matrix4.h b/indra/llmath/llv4matrix4.h index 959fa34572..2eb49d9294 100644 --- a/indra/llmath/llv4matrix4.h +++ b/indra/llmath/llv4matrix4.h @@ -2,31 +2,25 @@ * @file llviewerjointmesh.cpp * @brief LLV4* class header file - vector processor enabled math * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llv4vector3.h b/indra/llmath/llv4vector3.h index a41de47e6d..a340d53f5a 100644 --- a/indra/llmath/llv4vector3.h +++ b/indra/llmath/llv4vector3.h @@ -2,31 +2,25 @@ * @file llviewerjointmesh.cpp * @brief LLV4* class header file - vector processor enabled math * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvector4a.cpp b/indra/llmath/llvector4a.cpp index b62c17302f..b66b7a7076 100644 --- a/indra/llmath/llvector4a.cpp +++ b/indra/llmath/llvector4a.cpp @@ -2,31 +2,25 @@ * @file llvector4a.cpp * @brief SIMD vector implementation * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvector4a.h b/indra/llmath/llvector4a.h index 1e0b3bc854..79022eade3 100644 --- a/indra/llmath/llvector4a.h +++ b/indra/llmath/llvector4a.h @@ -2,31 +2,25 @@ * @file llvector4a.h * @brief LLVector4a class header file - memory aligned and vectorized 4 component vector * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvector4a.inl b/indra/llmath/llvector4a.inl index e52b550883..7ad22a5631 100644 --- a/indra/llmath/llvector4a.inl +++ b/indra/llmath/llvector4a.inl @@ -2,31 +2,25 @@ * @file llvector4a.inl * @brief LLVector4a inline function implementations * - * $LicenseInfo:firstyear=2010&license=viewergpl$ - * - * Copyright (c) 2007-2010, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvector4logical.h b/indra/llmath/llvector4logical.h index 1c7ee1d79f..dd66b09d43 100644 --- a/indra/llmath/llvector4logical.h +++ b/indra/llmath/llvector4logical.h @@ -2,31 +2,25 @@ * @file llvector4logical.h * @brief LLVector4Logical class header file - Companion class to LLVector4a for logical and bit-twiddling operations * - * $LicenseInfo:firstyear=2010&license=viewergpl$ + * $LicenseInfo:firstyear=2010&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2010, Linden Research, Inc. * - * Copyright (c) 2007-2010, Linden Research, Inc. 
+ * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 3da9c9ca79..0fe309ddf3 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2,31 +2,25 @@ * @file llvolume.cpp * - * $LicenseInfo:firstyear=2002&license=viewergpl$ - * - * Copyright (c) 2002-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 6e080f4877..e2d6ce4b69 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -2,31 +2,25 @@ * @file llvolume.h * @brief LLVolume base class. * - * $LicenseInfo:firstyear=2002&license=viewergpl$ - * - * Copyright (c) 2002-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvolumemgr.cpp b/indra/llmath/llvolumemgr.cpp index 419e0015ba..c60b750088 100644 --- a/indra/llmath/llvolumemgr.cpp +++ b/indra/llmath/llvolumemgr.cpp @@ -1,31 +1,25 @@ /** * @file llvolumemgr.cpp * - * $LicenseInfo:firstyear=2002&license=viewergpl$ - * - * Copyright (c) 2002-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvolumemgr.h b/indra/llmath/llvolumemgr.h index f5dc4cd748..c75906f675 100644 --- a/indra/llmath/llvolumemgr.h +++ b/indra/llmath/llvolumemgr.h @@ -2,31 +2,25 @@ * @file llvolumemgr.h * @brief LLVolumeMgr class. * - * $LicenseInfo:firstyear=2002&license=viewergpl$ - * - * Copyright (c) 2002-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp index cb6211f63c..3a7cb4940a 100644 --- a/indra/llmath/llvolumeoctree.cpp +++ b/indra/llmath/llvolumeoctree.cpp @@ -2,31 +2,25 @@ * @file llvolumeoctree.cpp * - * $LicenseInfo:firstyear=2002&license=viewergpl$ - * - * Copyright (c) 2002-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h index 0a02a2c597..1f5a78b137 100644 --- a/indra/llmath/llvolumeoctree.h +++ b/indra/llmath/llvolumeoctree.h @@ -2,31 +2,25 @@ * @file llvolumeoctree.h * @brief LLVolume octree classes. * - * $LicenseInfo:firstyear=2002&license=viewergpl$ - * - * Copyright (c) 2002-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/m3math.cpp b/indra/llmath/m3math.cpp index 1b878c8f4d..802ddb9e57 100644 --- a/indra/llmath/m3math.cpp +++ b/indra/llmath/m3math.cpp @@ -2,31 +2,25 @@ * @file m3math.cpp * @brief LLMatrix3 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/m3math.h b/indra/llmath/m3math.h index 3ac963e5aa..2be5452f8d 100644 --- a/indra/llmath/m3math.h +++ b/indra/llmath/m3math.h @@ -2,31 +2,25 @@ * @file m3math.h * @brief LLMatrix3 class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp index ce5428f0e1..8741400e52 100644 --- a/indra/llmath/m4math.cpp +++ b/indra/llmath/m4math.cpp @@ -2,31 +2,25 @@ * @file m4math.cpp * @brief LLMatrix4 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h index 40599a0886..10c88464e5 100644 --- a/indra/llmath/m4math.h +++ b/indra/llmath/m4math.h @@ -2,31 +2,25 @@ * @file m4math.h * @brief LLMatrix4 class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/raytrace.cpp b/indra/llmath/raytrace.cpp index a5eb0d2682..f38fe49bcb 100644 --- a/indra/llmath/raytrace.cpp +++ b/indra/llmath/raytrace.cpp @@ -2,31 +2,25 @@ * @file raytrace.cpp * @brief Functions called by box object scripts. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/raytrace.h b/indra/llmath/raytrace.h index b433e1769c..2d32af0c86 100644 --- a/indra/llmath/raytrace.h +++ b/indra/llmath/raytrace.h @@ -2,31 +2,25 @@ * @file raytrace.h * @brief Ray intersection tests for primitives. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/llbbox_test.cpp b/indra/llmath/tests/llbbox_test.cpp index 3031310a5d..8064ab217d 100644 --- a/indra/llmath/tests/llbbox_test.cpp +++ b/indra/llmath/tests/llbbox_test.cpp @@ -4,31 +4,25 @@ * @date 2009-06-25 * @brief Test for llbbox.cpp. * - * $LicenseInfo:firstyear=2009&license=viewergpl$ - * - * Copyright (c) 2009-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2009&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/llbboxlocal_test.cpp b/indra/llmath/tests/llbboxlocal_test.cpp index fb51deab4a..f31e4126c4 100644 --- a/indra/llmath/tests/llbboxlocal_test.cpp +++ b/indra/llmath/tests/llbboxlocal_test.cpp @@ -4,31 +4,25 @@ * @date 2009-06-25 * @brief Test for llbboxlocal.cpp. * - * $LicenseInfo:firstyear=2009&license=viewergpl$ - * - * Copyright (c) 2009-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2009&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/llmodularmath_test.cpp b/indra/llmath/tests/llmodularmath_test.cpp index a4acf9bbf5..063d3ef79f 100644 --- a/indra/llmath/tests/llmodularmath_test.cpp +++ b/indra/llmath/tests/llmodularmath_test.cpp @@ -4,31 +4,25 @@ * @date 2008-09 * @brief llmodularmath tests * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/llquaternion_test.cpp b/indra/llmath/tests/llquaternion_test.cpp index 0e3cee2c68..e69010b2d6 100644 --- a/indra/llmath/tests/llquaternion_test.cpp +++ b/indra/llmath/tests/llquaternion_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief Test cases of llquaternion.h * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/llrect_test.cpp b/indra/llmath/tests/llrect_test.cpp index c5e9e425bb..d740173e69 100644 --- a/indra/llmath/tests/llrect_test.cpp +++ b/indra/llmath/tests/llrect_test.cpp @@ -4,31 +4,25 @@ * @date 2009-06-25 * @brief Test for llrect.cpp. * - * $LicenseInfo:firstyear=2009&license=viewergpl$ - * - * Copyright (c) 2009-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2009&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 - * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception - * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. - * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/m3math_test.cpp b/indra/llmath/tests/m3math_test.cpp index b23253ab5e..8abf61b740 100644 --- a/indra/llmath/tests/m3math_test.cpp +++ b/indra/llmath/tests/m3math_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief Test cases of m3math.h * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/mathmisc_test.cpp b/indra/llmath/tests/mathmisc_test.cpp index d9eb92f909..91a2e6c009 100644 --- a/indra/llmath/tests/mathmisc_test.cpp +++ b/indra/llmath/tests/mathmisc_test.cpp @@ -4,31 +4,25 @@ * @date 2005-09-26 * @brief Tests for the llmath library. * - * $LicenseInfo:firstyear=2005&license=viewergpl$ - * - * Copyright (c) 2005-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2005&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v2math_test.cpp b/indra/llmath/tests/v2math_test.cpp index f2f95a43da..4d6a2eca93 100644 --- a/indra/llmath/tests/v2math_test.cpp +++ b/indra/llmath/tests/v2math_test.cpp @@ -4,31 +4,25 @@ * @date 2007-02 * @brief v2math test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v3color_test.cpp b/indra/llmath/tests/v3color_test.cpp index 2ab565cc11..29d1c483ab 100644 --- a/indra/llmath/tests/v3color_test.cpp +++ b/indra/llmath/tests/v3color_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief v3color test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v3dmath_test.cpp b/indra/llmath/tests/v3dmath_test.cpp index fa52930a9c..20b26faa12 100644 --- a/indra/llmath/tests/v3dmath_test.cpp +++ b/indra/llmath/tests/v3dmath_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief v3dmath test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v3math_test.cpp b/indra/llmath/tests/v3math_test.cpp index 5670a3d784..df7a77002f 100644 --- a/indra/llmath/tests/v3math_test.cpp +++ b/indra/llmath/tests/v3math_test.cpp @@ -4,31 +4,25 @@ * @date 2007-02 * @brief v3math test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v4color_test.cpp b/indra/llmath/tests/v4color_test.cpp index fe355a0795..d7eec3c87f 100644 --- a/indra/llmath/tests/v4color_test.cpp +++ b/indra/llmath/tests/v4color_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief v4color test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v4coloru_test.cpp b/indra/llmath/tests/v4coloru_test.cpp index 58a8f6a7b5..128f6f3564 100644 --- a/indra/llmath/tests/v4coloru_test.cpp +++ b/indra/llmath/tests/v4coloru_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief v4coloru test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/v4math_test.cpp b/indra/llmath/tests/v4math_test.cpp index 7842309ef0..191ac864df 100644 --- a/indra/llmath/tests/v4math_test.cpp +++ b/indra/llmath/tests/v4math_test.cpp @@ -4,31 +4,25 @@ * @date 2007-03 * @brief v4math test cases. * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/tests/xform_test.cpp b/indra/llmath/tests/xform_test.cpp index 4a85021ad2..49870eef3c 100644 --- a/indra/llmath/tests/xform_test.cpp +++ b/indra/llmath/tests/xform_test.cpp @@ -4,31 +4,25 @@ * @date March 2007 * @brief Test cases for LLXform * - * $LicenseInfo:firstyear=2007&license=viewergpl$ - * - * Copyright (c) 2007-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2007&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v2math.cpp b/indra/llmath/v2math.cpp index 2603127f75..a0cd642853 100644 --- a/indra/llmath/v2math.cpp +++ b/indra/llmath/v2math.cpp @@ -2,31 +2,25 @@ * @file v2math.cpp * @brief LLVector2 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v2math.h b/indra/llmath/v2math.h index 35fd1b6048..8d5db96f5e 100644 --- a/indra/llmath/v2math.h +++ b/indra/llmath/v2math.h @@ -2,31 +2,25 @@ * @file v2math.h * @brief LLVector2 class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v3color.cpp b/indra/llmath/v3color.cpp index b4cd410076..d38f48b11e 100644 --- a/indra/llmath/v3color.cpp +++ b/indra/llmath/v3color.cpp @@ -2,31 +2,25 @@ * @file v3color.cpp * @brief LLColor3 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v3color.h b/indra/llmath/v3color.h index 95a3de8b62..56cb2ae73e 100644 --- a/indra/llmath/v3color.h +++ b/indra/llmath/v3color.h @@ -2,31 +2,25 @@ * @file v3color.h * @brief LLColor3 class header file. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v3dmath.cpp b/indra/llmath/v3dmath.cpp index 2bcbf632b1..a50cb3c6ca 100644 --- a/indra/llmath/v3dmath.cpp +++ b/indra/llmath/v3dmath.cpp @@ -2,31 +2,25 @@ * @file v3dmath.cpp * @brief LLVector3d class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v3dmath.h b/indra/llmath/v3dmath.h index ab253de064..578dcdc8ea 100644 --- a/indra/llmath/v3dmath.h +++ b/indra/llmath/v3dmath.h @@ -2,31 +2,25 @@ * @file v3dmath.h * @brief High precision 3 dimensional vector. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v3math.cpp b/indra/llmath/v3math.cpp index 82aad6550b..e88b34f8d6 100644 --- a/indra/llmath/v3math.cpp +++ b/indra/llmath/v3math.cpp @@ -2,31 +2,25 @@ * @file v3math.cpp * @brief LLVector3 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v3math.h b/indra/llmath/v3math.h index 5d483a8753..b6425087b3 100644 --- a/indra/llmath/v3math.h +++ b/indra/llmath/v3math.h @@ -2,31 +2,25 @@ * @file v3math.h * @brief LLVector3 class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v4color.cpp b/indra/llmath/v4color.cpp index 219b06ec74..81ac62be56 100644 --- a/indra/llmath/v4color.cpp +++ b/indra/llmath/v4color.cpp @@ -2,31 +2,25 @@ * @file v4color.cpp * @brief LLColor4 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v4color.h b/indra/llmath/v4color.h index dd92e1cc63..b047f86e6e 100644 --- a/indra/llmath/v4color.h +++ b/indra/llmath/v4color.h @@ -2,31 +2,25 @@ * @file v4color.h * @brief LLColor4 class header file. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. 
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v4coloru.cpp b/indra/llmath/v4coloru.cpp index 061b4970f7..f1a2518cf3 100644 --- a/indra/llmath/v4coloru.cpp +++ b/indra/llmath/v4coloru.cpp @@ -2,31 +2,25 @@ * @file v4coloru.cpp * @brief LLColor4U class implementation. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v4coloru.h b/indra/llmath/v4coloru.h index 08245403a1..12da7e2dd7 100644 --- a/indra/llmath/v4coloru.h +++ b/indra/llmath/v4coloru.h @@ -2,31 +2,25 @@ * @file v4coloru.h * @brief The LLColor4U class. * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v4math.cpp b/indra/llmath/v4math.cpp index b938480dd9..2782cf2966 100644 --- a/indra/llmath/v4math.cpp +++ b/indra/llmath/v4math.cpp @@ -2,31 +2,25 @@ * @file v4math.cpp * @brief LLVector4 class implementation. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. 
+ * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/v4math.h b/indra/llmath/v4math.h index 72a477ed20..623c8b2003 100644 --- a/indra/llmath/v4math.h +++ b/indra/llmath/v4math.h @@ -2,31 +2,25 @@ * @file v4math.h * @brief LLVector4 class header file. * - * $LicenseInfo:firstyear=2000&license=viewergpl$ - * - * Copyright (c) 2000-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2000&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/xform.cpp b/indra/llmath/xform.cpp index 7a8b0cf6a3..b75aec6a27 100644 --- a/indra/llmath/xform.cpp +++ b/indra/llmath/xform.cpp @@ -1,31 +1,25 @@ /** * @file xform.cpp * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. 
- * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ diff --git a/indra/llmath/xform.h b/indra/llmath/xform.h index 1595da3a72..7a9f0f62cf 100644 --- a/indra/llmath/xform.h +++ b/indra/llmath/xform.h @@ -1,31 +1,25 @@ /** * @file xform.h * - * $LicenseInfo:firstyear=2001&license=viewergpl$ - * - * Copyright (c) 2001-2009, Linden Research, Inc. - * + * $LicenseInfo:firstyear=2001&license=viewerlgpl$ * Second Life Viewer Source Code - * The source code in this file ("Source Code") is provided by Linden Lab - * to you under the terms of the GNU General Public License, version 2.0 - * ("GPL"), unless you have obtained a separate licensing agreement - * ("Other License"), formally executed by you and Linden Lab. Terms of - * the GPL can be found in doc/GPL-license.txt in this distribution, or - * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2 + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. * - * There are special exceptions to the terms and conditions of the GPL as - * it is applied to this Source Code. 
View the full text of the exception - * in the file doc/FLOSS-exception.txt in this software distribution, or - * online at - * http://secondlifegrid.net/programs/open_source/licensing/flossexception + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. * - * By copying, modifying or distributing this software, you acknowledge - * that you have read and understood your obligations described above, - * and agree to abide by those obligations. + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * - * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO - * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY, - * COMPLETENESS OR PERFORMANCE. + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA * $/LicenseInfo$ */ -- cgit v1.2.3 From e2049c332b9b834e843249c1ae0ba8542491940f Mon Sep 17 00:00:00 2001 From: Boroondas Gupte Date: Sun, 24 Oct 2010 16:01:57 +0200 Subject: CTS-323: (part 1 of 2) Don't cast pointers to U32 --- indra/llmath/llsimdmath.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llsimdmath.h b/indra/llmath/llsimdmath.h index 9377bfdb53..1be1b7a55f 100644 --- a/indra/llmath/llsimdmath.h +++ b/indra/llmath/llsimdmath.h @@ -41,16 +41,18 @@ #error SSE2 not enabled. LLVector4a and related class will not compile. #endif +#include + template T* LL_NEXT_ALIGNED_ADDRESS(T* address) { return reinterpret_cast( - (reinterpret_cast(address) + 0xF) & ~0xF); + (reinterpret_cast(address) + 0xF) & ~0xF); } template T* LL_NEXT_ALIGNED_ADDRESS_64(T* address) { return reinterpret_cast( - (reinterpret_cast(address) + 0x3F) & ~0x3F); + (reinterpret_cast(address) + 0x3F) & ~0x3F); } #if LL_LINUX || LL_DARWIN -- cgit v1.2.3 From da04af47cc6e9f6acfcac0d2d6f1466b6f9baec2 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 26 Oct 2010 16:19:24 -0500 Subject: Fix for bad binormals. --- indra/llmath/llvolume.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 0fe309ddf3..a0874e859c 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -6139,6 +6139,14 @@ void LLVolumeFace::createBinormals() LLVector2* tc = (LLVector2*) mTexCoords; LLVector4a* binorm = (LLVector4a*) mBinormals; + LLVector4a* end = mBinormals+mNumVertices; + while (binorm < end) + { + (*binorm++).clear(); + } + + binorm = mBinormals; + for (U32 i = 0; i < mNumIndices/3; i++) { //for each triangle const U16& i0 = mIndices[i*3+0]; -- cgit v1.2.3 From 78e62d00306f55c4f04366bf8f89fd7b3ccdf489 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 29 Oct 2010 12:06:20 -0500 Subject: Fix for bad normals on some prim faces. --- indra/llmath/llvolume.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index a0874e859c..2411bb7900 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -92,11 +92,13 @@ extern BOOL gDebugGL; void assert_aligned(void* ptr, U32 alignment) { +#if 0 U32 t = (U32) ptr; if (t%alignment != 0) { llerrs << "WTF?" 
<< llendl; } +#endif } BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) @@ -6600,6 +6602,12 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } } + //clear normals + for (U32 i = 0; i < mNumVertices; i++) + { + mNormals[i].clear(); + } + //generate normals for (U32 i = 0; i < mNumIndices/3; i++) //for each triangle { -- cgit v1.2.3 From ed29cac3a18aeaa366b51c9d5ad89e128787b11f Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 29 Oct 2010 16:10:17 -0500 Subject: SH-92 Fix for busted bounding boxes for mirrored meshes. --- indra/llmath/llvolume.cpp | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 2411bb7900..9fe4c622d7 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2320,12 +2320,6 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) pos_range.setSub(max_pos, min_pos); LLVector2 tc_range = max_tc - min_tc; - LLVector4a& min = face.mExtents[0]; - LLVector4a& max = face.mExtents[1]; - - min.clear(); - max.clear(); - LLVector4a* pos_out = face.mPositions; LLVector4a* norm_out = face.mNormals; LLVector2* tc_out = face.mTexCoords; @@ -2339,17 +2333,6 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) pos_out->mul(pos_range); pos_out->add(min_pos); - if (j == 0) - { - min = *pos_out; - max = min; - } - else - { - min.setMin(min, *pos_out); - max.setMax(max, *pos_out); - } - pos_out++; U16* n = (U16*) &(norm[j*3*2]); @@ -2426,6 +2409,19 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } } + //calculate bounding box + LLVector4a& min = face.mExtents[0]; + LLVector4a& max = face.mExtents[1]; + + min.clear(); + max.clear(); + min = max = face.mPositions[0]; + + for (S32 i = 1; i < face.mNumVertices; ++i) + { + min.setMin(min, face.mPositions[i]); + max.setMax(max, face.mPositions[i]); + } } } -- cgit v1.2.3 From 117af7e7e3061a4111678dd2bfd5adfcf71b45df Mon Sep 17 00:00:00 2001 From: JonathanLinden Date: Wed, 3 Nov 2010 13:23:53 -0700 Subject: Fix for SH-391 'Viewer crash when enabling align planar faces on a mesh object'. 
Paired with Runitai --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 9fe4c622d7..a129182f01 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2593,7 +2593,7 @@ S32 LLVolume::getNumFaces() const if (sculpt_type == LL_SCULPT_TYPE_MESH) { - return LL_SCULPT_MESH_MAX_FACES; + return llmax((S32)mVolumeFaces.size(), 1); } return (S32)mProfilep->mFaces.size(); -- cgit v1.2.3 From ffb8b78ae9b0c0f4be8ac262054681ed11726993 Mon Sep 17 00:00:00 2001 From: JonathanLinden Date: Wed, 3 Nov 2010 17:11:08 -0700 Subject: Reverting fix for SH-391 until it works for all use cases --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index a129182f01..9fe4c622d7 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2593,7 +2593,7 @@ S32 LLVolume::getNumFaces() const if (sculpt_type == LL_SCULPT_TYPE_MESH) { - return llmax((S32)mVolumeFaces.size(), 1); + return LL_SCULPT_MESH_MAX_FACES; } return (S32)mProfilep->mFaces.size(); -- cgit v1.2.3 From c149a0020bfc6983e7ce7a2426f324a6b8e75495 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sun, 21 Nov 2010 03:06:47 -0600 Subject: Get rid of pointless redirection and malloc/free. --- indra/llmath/llvolume.cpp | 2 +- indra/llmath/llvolumeoctree.cpp | 2 +- indra/llmath/llvolumeoctree.h | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 9fe4c622d7..c20124076b 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5522,7 +5522,7 @@ void LLVolumeFace::createOctree(F32 scaler, const LLVector4a& center, const LLVe center.setAdd(min, max); center.mul(0.5f); - *tri->mPositionGroup = center; + tri->mPositionGroup = center; //compute "radius" LLVector4a size; diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp index 3a7cb4940a..cc97026ad4 100644 --- a/indra/llmath/llvolumeoctree.cpp +++ b/indra/llmath/llvolumeoctree.cpp @@ -191,7 +191,7 @@ void LLOctreeTriangleRayIntersect::visit(const LLOctreeNode* n const LLVector4a& LLVolumeTriangle::getPositionGroup() const { - return *mPositionGroup; + return mPositionGroup; } const F32& LLVolumeTriangle::getBinRadius() const diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h index 1f5a78b137..63240ba022 100644 --- a/indra/llmath/llvolumeoctree.h +++ b/indra/llmath/llvolumeoctree.h @@ -96,7 +96,7 @@ class LLVolumeTriangle : public LLRefCount public: LLVolumeTriangle() { - mPositionGroup = (LLVector4a*) ll_aligned_malloc_16(16); + } LLVolumeTriangle(const LLVolumeTriangle& rhs) @@ -112,14 +112,14 @@ public: ~LLVolumeTriangle() { - ll_aligned_free_16(mPositionGroup); + } + LLVector4a mPositionGroup; + const LLVector4a* mV[3]; U16 mIndex[3]; - LLVector4a* mPositionGroup; - F32 mRadius; virtual const LLVector4a& getPositionGroup() const; -- cgit v1.2.3 From 81ba8b8bc61addcb77e4f6d7d637a3401de295bd Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Sun, 21 Nov 2010 03:25:22 -0600 Subject: Get rid of more ll_aligned_malloc calls. 
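
The heap blocks dropped in this commit existed only to guarantee 16-byte alignment for SSE loads; with the bounds and extents stored as LLVector4a members, which are already 16-byte aligned (as the SH-655 commit message below also notes), the manual round-up idiom is unnecessary. For reference, a minimal standalone sketch of that idiom, using the same (ptr + 0xF) & ~0xF arithmetic as LL_NEXT_ALIGNED_ADDRESS in the earlier llsimdmath.h hunk; the helper name and include are illustrative, not taken from this patch:

#include <cstdint>

// Round a pointer up to the next 16-byte boundary.
// A pointer-sized integer (rather than U32) keeps the arithmetic correct on 64-bit builds,
// which is the same concern the CTS-323 change above addresses.
template <typename T>
T* next_aligned_16(T* address)
{
    return reinterpret_cast<T*>(
        (reinterpret_cast<std::uintptr_t>(address) + 0xF) & ~std::uintptr_t(0xF));
}
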
--- indra/llmath/llvolumeoctree.cpp | 5 +---- indra/llmath/llvolumeoctree.h | 4 ++-- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolumeoctree.cpp b/indra/llmath/llvolumeoctree.cpp index cc97026ad4..b5a935c2b5 100644 --- a/indra/llmath/llvolumeoctree.cpp +++ b/indra/llmath/llvolumeoctree.cpp @@ -78,14 +78,11 @@ BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, c LLVolumeOctreeListener::LLVolumeOctreeListener(LLOctreeNode* node) { node->addListener(this); - - mBounds = (LLVector4a*) ll_aligned_malloc_16(sizeof(LLVector4a)*4); - mExtents = mBounds+2; } LLVolumeOctreeListener::~LLVolumeOctreeListener() { - ll_aligned_free_16(mBounds); + } void LLVolumeOctreeListener::handleChildAddition(const LLOctreeNode* parent, diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h index 63240ba022..f696cbd976 100644 --- a/indra/llmath/llvolumeoctree.h +++ b/indra/llmath/llvolumeoctree.h @@ -64,8 +64,8 @@ public: public: - LLVector4a* mBounds; // bounding box (center, size) of this node and all its children (tight fit to objects) - LLVector4a* mExtents; // extents (min, max) of this node and all its children + LLVector4a mBounds[2]; // bounding box (center, size) of this node and all its children (tight fit to objects) + LLVector4a mExtents[2]; // extents (min, max) of this node and all its children }; class LLOctreeTriangleRayIntersect : public LLOctreeTraveler -- cgit v1.2.3 From e9d21ba941a52665d7ad2ee3483c6ac7b7ec6486 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 20 Dec 2010 15:09:15 -0600 Subject: Fix for windows build. Reviewed by Nyx. --- indra/llmath/llsimdmath.h | 2 ++ indra/llmath/llvolume.cpp | 2 ++ 2 files changed, 4 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/llsimdmath.h b/indra/llmath/llsimdmath.h index b6ac5190a7..c7cdf7b32c 100644 --- a/indra/llmath/llsimdmath.h +++ b/indra/llmath/llsimdmath.h @@ -35,7 +35,9 @@ #error SSE2 not enabled. LLVector4a and related class will not compile. #endif +#if !LL_WINDOWS #include +#endif template T* LL_NEXT_ALIGNED_ADDRESS(T* address) { diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index c0b50b606e..8a7b19800c 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -29,7 +29,9 @@ #include "llmath.h" #include +#if !LL_WINDOWS #include +#endif #include "llerror.h" #include "llmemtype.h" -- cgit v1.2.3 From 57153cf0f1fffe669b9d8871c33f9c4aaba67a2f Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Thu, 23 Dec 2010 01:48:44 -0800 Subject: SH-655 - Project mesh viewer crashes on exit. A copy constructor was implemented which did a memcpy, which included the vtable pointer which was to another object of another class (same child though). This resulted in the wrong destructor being called. The reason for the memcpy was for alignment purposes. The solution was to move to LLVector4a, which is intrinsicly aligned. Also, did some performance optimizations based on the LLVector4a optimizations. 
The solution was to re-implement the --- indra/llmath/llcamera.cpp | 171 ++++++++++++++++------------------------------ indra/llmath/llcamera.h | 11 +-- indra/llmath/llplane.h | 44 ++++++++++-- indra/llmath/llvector4a.h | 2 +- 4 files changed, 102 insertions(+), 126 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp index a442a0edb8..a30a50ab2e 100644 --- a/indra/llmath/llcamera.cpp +++ b/indra/llmath/llcamera.cpp @@ -42,7 +42,6 @@ LLCamera::LLCamera() : mPlaneCount(6), mFrustumCornerDist(0.f) { - alignPlanes(); calculateFrustumPlanes(); } @@ -53,7 +52,6 @@ LLCamera::LLCamera(F32 vertical_fov_rads, F32 aspect_ratio, S32 view_height_in_p mPlaneCount(6), mFrustumCornerDist(0.f) { - alignPlanes(); mAspect = llclamp(aspect_ratio, MIN_ASPECT_RATIO, MAX_ASPECT_RATIO); mNearPlane = llclamp(near_plane, MIN_NEAR_PLANE, MAX_NEAR_PLANE); if(far_plane < 0) far_plane = DEFAULT_FAR_PLANE; @@ -67,19 +65,6 @@ LLCamera::~LLCamera() } -const LLCamera& LLCamera::operator=(const LLCamera& rhs) -{ - memcpy(this, &rhs, sizeof(LLCamera)); - alignPlanes(); - LLVector4a::memcpyNonAliased16((F32*) mAgentPlanes, (F32*) rhs.mAgentPlanes, 4*7*sizeof(F32)); - return *this; -} - -void LLCamera::alignPlanes() -{ - mAgentPlanes = (LLPlane*) LL_NEXT_ALIGNED_ADDRESS(mAgentPlaneBuffer); -} - // ---------------- LLCamera::getFoo() member functions ---------------- F32 LLCamera::getMinView() const @@ -104,7 +89,7 @@ void LLCamera::setUserClipPlane(LLPlane plane) { mPlaneCount = 7; mAgentPlanes[6] = plane; - mPlaneMask[6] = calcPlaneMask(plane); + mPlaneMask[6] = plane.calcPlaneMask(); } void LLCamera::disableUserClipPlane() @@ -190,38 +175,33 @@ S32 LLCamera::AABBInFrustum(const LLVector4a ¢er, const LLVector4a& radius) }; U8 mask = 0; - S32 result = 2; - + bool result = false; + LLVector4a rscale, maxp, minp; + LLSimdScalar d; for (U32 i = 0; i < mPlaneCount; i++) { mask = mPlaneMask[i]; - if (mask == 0xff) - { - continue; - } - - const LLPlane& p = mAgentPlanes[i]; - const LLVector4a& n = reinterpret_cast(p); - float d = p.mV[3]; - LLVector4a rscale; - rscale.setMul(radius, scaler[mask]); - - LLVector4a minp, maxp; - minp.setSub(center, rscale); - maxp.setAdd(center, rscale); - - if (n.dot3(minp) > -d) + if (mask != 0xff) { - return 0; - } - - if (n.dot3(maxp) > -d) - { - result = 1; + const LLPlane& p(mAgentPlanes[i]); + p.getAt<3>(d); + rscale.setMul(radius, scaler[mask]); + minp.setSub(center, rscale); + d = -d; + if (p.dot3(minp).getF32() > d) + { + return 0; + } + + if(!result) + { + maxp.setAdd(center, rscale); + result = (p.dot3(maxp).getF32() > d); + } } } - return result; + return result?1:2; } @@ -239,43 +219,33 @@ S32 LLCamera::AABBInFrustumNoFarClip(const LLVector4a& center, const LLVector4a& }; U8 mask = 0; - S32 result = 2; - + bool result = false; + LLVector4a rscale, maxp, minp; + LLSimdScalar d; for (U32 i = 0; i < mPlaneCount; i++) { - if (i == 5) - { - continue; - } - mask = mPlaneMask[i]; - if (mask == 0xff) - { - continue; - } - - const LLPlane& p = mAgentPlanes[i]; - const LLVector4a& n = reinterpret_cast(p); - float d = p.mV[3]; - LLVector4a rscale; - rscale.setMul(radius, scaler[mask]); - - LLVector4a minp, maxp; - minp.setSub(center, rscale); - maxp.setAdd(center, rscale); - - if (n.dot3(minp) > -d) - { - return 0; - } - - if (n.dot3(maxp) > -d) + if ((i != 5) && (mask != 0xff)) { - result = 1; + const LLPlane& p(mAgentPlanes[i]); + p.getAt<3>(d); + rscale.setMul(radius, scaler[mask]); + minp.setSub(center, rscale); + d = -d; + if 
(p.dot3(minp).getF32() > d) + { + return 0; + } + + if(!result) + { + maxp.setAdd(center, rscale); + result = (p.dot3(maxp).getF32() > d); + } } } - return result; + return result?1:2; } int LLCamera::sphereInFrustumQuick(const LLVector3 &sphere_center, const F32 radius) @@ -396,28 +366,22 @@ int LLCamera::sphereInFrustumOld(const LLVector3 &sphere_center, const F32 radiu int LLCamera::sphereInFrustum(const LLVector3 &sphere_center, const F32 radius) const { // Returns 1 if sphere is in frustum, 0 if not. - int res = 2; + bool res = false; for (int i = 0; i < 6; i++) { - if (mPlaneMask[i] == 0xff) - { - continue; - } - - float d = mAgentPlanes[i].dist(sphere_center); - - if (d > radius) + if (mPlaneMask[i] != 0xff) { - return 0; - } + float d = mAgentPlanes[i].dist(sphere_center); - if (d > -radius) - { - res = 1; + if (d > radius) + { + return 0; + } + res |= (d > -radius); } } - return res; + return res?1:2; } @@ -569,25 +533,6 @@ LLPlane planeFromPoints(LLVector3 p1, LLVector3 p2, LLVector3 p3) return LLPlane(p1, n); } -U8 LLCamera::calcPlaneMask(const LLPlane& plane) -{ - U8 mask = 0; - - if (plane.mV[0] >= 0) - { - mask |= 1; - } - if (plane.mV[1] >= 0) - { - mask |= 2; - } - if (plane.mV[2] >= 0) - { - mask |= 4; - } - - return mask; -} void LLCamera::ignoreAgentFrustumPlane(S32 idx) { @@ -597,13 +542,12 @@ void LLCamera::ignoreAgentFrustumPlane(S32 idx) } mPlaneMask[idx] = 0xff; - mAgentPlanes[idx].clearVec(); + mAgentPlanes[idx].clear(); } void LLCamera::calcAgentFrustumPlanes(LLVector3* frust) { - alignPlanes(); - + for (int i = 0; i < 8; i++) { mAgentFrustum[i] = frust[i]; @@ -636,7 +580,7 @@ void LLCamera::calcAgentFrustumPlanes(LLVector3* frust) //cache plane octant facing mask for use in AABBInFrustum for (U32 i = 0; i < mPlaneCount; i++) { - mPlaneMask[i] = calcPlaneMask(mAgentPlanes[i]); + mPlaneMask[i] = mAgentPlanes[i].calcPlaneMask(); } } @@ -689,9 +633,10 @@ void LLCamera::calculateWorldFrustumPlanes() F32 d; LLVector3 center = mOrigin - mXAxis*mNearPlane; mWorldPlanePos = center; + LLVector3 pnorm; for (int p=0; p<4; p++) { - LLVector3 pnorm = LLVector3(mLocalPlanes[p]); + mLocalPlanes[p].getVector3(pnorm); LLVector3 norm = rotateToAbsolute(pnorm); norm.normVec(); d = -(center * norm); @@ -701,13 +646,15 @@ void LLCamera::calculateWorldFrustumPlanes() LLVector3 zaxis(0, 0, 1.0f); F32 yaw = getYaw(); { - LLVector3 tnorm = LLVector3(mLocalPlanes[PLANE_LEFT]); + LLVector3 tnorm; + mLocalPlanes[PLANE_LEFT].getVector3(tnorm); tnorm.rotVec(yaw, zaxis); d = -(mOrigin * tnorm); mHorizPlanes[HORIZ_PLANE_LEFT] = LLPlane(tnorm, d); } { - LLVector3 tnorm = LLVector3(mLocalPlanes[PLANE_RIGHT]); + LLVector3 tnorm; + mLocalPlanes[PLANE_RIGHT].getVector3(tnorm); tnorm.rotVec(yaw, zaxis); d = -(mOrigin * tnorm); mHorizPlanes[HORIZ_PLANE_RIGHT] = LLPlane(tnorm, d); diff --git a/indra/llmath/llcamera.h b/indra/llmath/llcamera.h index e15bd7ad43..a346322e0e 100644 --- a/indra/llmath/llcamera.h +++ b/indra/llmath/llcamera.h @@ -79,8 +79,6 @@ public: { *this = rhs; } - - const LLCamera& operator=(const LLCamera& rhs); enum { PLANE_LEFT = 0, @@ -119,6 +117,9 @@ public: }; private: + LLPlane mAgentPlanes[7]; //frustum planes in agent space a la gluUnproject (I'm a bastard, I know) - DaveP + U8 mPlaneMask[8]; // 8 for alignment + F32 mView; // angle between top and bottom frustum planes in radians. 
F32 mAspect; // width/height S32 mViewHeightInPixels; // for ViewHeightInPixels() only @@ -132,10 +133,6 @@ private: LLPlane mWorldPlanes[PLANE_NUM]; LLPlane mHorizPlanes[HORIZ_PLANE_NUM]; - LLPlane* mAgentPlanes; //frustum planes in agent space a la gluUnproject (I'm a bastard, I know) - DaveP - U8 mAgentPlaneBuffer[sizeof(LLPlane)*8]; - U8 mPlaneMask[7]; - U32 mPlaneCount; //defaults to 6, if setUserClipPlane is called, uses user supplied clip plane in LLVector3 mWorldPlanePos; // Position of World Planes (may be offset from camera) @@ -149,11 +146,9 @@ public: LLCamera(F32 vertical_fov_rads, F32 aspect_ratio, S32 view_height_in_pixels, F32 near_plane, F32 far_plane); virtual ~LLCamera(); - void alignPlanes(); void setUserClipPlane(LLPlane plane); void disableUserClipPlane(); - U8 calcPlaneMask(const LLPlane& plane); virtual void setView(F32 vertical_fov_rads); void setViewHeightInPixels(S32 height); void setAspect(F32 new_aspect); diff --git a/indra/llmath/llplane.h b/indra/llmath/llplane.h index 443f3f46b9..a611894721 100644 --- a/indra/llmath/llplane.h +++ b/indra/llmath/llplane.h @@ -36,19 +36,23 @@ // The plane normal = [A, B, C] // The closest approach = D / sqrt(A*A + B*B + C*C) -class LLPlane : public LLVector4 +class LLPlane { public: + + // Constructors LLPlane() {}; // no default constructor LLPlane(const LLVector3 &p0, F32 d) { setVec(p0, d); } LLPlane(const LLVector3 &p0, const LLVector3 &n) { setVec(p0, n); } - void setVec(const LLVector3 &p0, F32 d) { LLVector4::setVec(p0[0], p0[1], p0[2], d); } - void setVec(const LLVector3 &p0, const LLVector3 &n) + inline void setVec(const LLVector3 &p0, F32 d) { mV.set(p0[0], p0[1], p0[2], d); } + + // Set + inline void setVec(const LLVector3 &p0, const LLVector3 &n) { F32 d = -(p0 * n); setVec(n, d); } - void setVec(const LLVector3 &p0, const LLVector3 &p1, const LLVector3 &p2) + inline void setVec(const LLVector3 &p0, const LLVector3 &p1, const LLVector3 &p2) { LLVector3 u, v, w; u = p1 - p0; @@ -58,8 +62,38 @@ public: F32 d = -(w * p0); setVec(w, d); } - LLPlane& operator=(const LLVector4& v2) { LLVector4::setVec(v2[0],v2[1],v2[2],v2[3]); return *this;} + + inline LLPlane& operator=(const LLVector4& v2) { mV.set(v2[0],v2[1],v2[2],v2[3]); return *this;} + + inline LLPlane& operator=(const LLVector4a& v2) { mV.set(v2[0],v2[1],v2[2],v2[3]); return *this;} + + inline void set(const LLPlane& p2) { mV = p2.mV; } + + // F32 dist(const LLVector3 &v2) const { return mV[0]*v2[0] + mV[1]*v2[1] + mV[2]*v2[2] + mV[3]; } + + inline LLSimdScalar dot3(const LLVector4a& b) const { return mV.dot3(b); } + + // Read-only access a single float in this vector. Do not use in proximity to any function call that manipulates + // the data at the whole vector level or you will incur a substantial penalty. Consider using the splat functions instead + inline F32 operator[](const S32 idx) const { return mV[idx]; } + + // preferable when index is known at compile time + template LL_FORCE_INLINE void getAt(LLSimdScalar& v) const { v = mV.getScalarAt(); } + + // reset the vector to 0, 0, 0, 1 + inline void clear() { mV.set(0, 0, 0, 1); } + + inline void getVector3(LLVector3& vec) const { vec.set(mV[0], mV[1], mV[2]); } + + // Retrieve the mask indicating which of the x, y, or z axis are greater or equal to zero. 
+ inline U8 calcPlaneMask() + { + return mV.greaterEqual(LLVector4a::getZero()).getGatheredBits() & LLVector4Logical::MASK_XYZ; + } + +private: + LLVector4a mV; }; diff --git a/indra/llmath/llvector4a.h b/indra/llmath/llvector4a.h index 79022eade3..596082509d 100644 --- a/indra/llmath/llvector4a.h +++ b/indra/llmath/llvector4a.h @@ -67,7 +67,7 @@ public: extern const LLVector4a LL_V4A_ZERO; return LL_V4A_ZERO; } - + // Return a vector of all epsilon, where epsilon is a small float suitable for approximate equality checks static inline const LLVector4a& getEpsilon() { -- cgit v1.2.3 From 0b4c2fa99e50129a6ef5ac6a3d0337ee2b1b1e97 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Thu, 23 Dec 2010 23:31:25 -0800 Subject: Fix windows build break --- indra/llmath/llcamera.cpp | 2 +- indra/llmath/llcamera.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp index a30a50ab2e..c681e00b32 100644 --- a/indra/llmath/llcamera.cpp +++ b/indra/llmath/llcamera.cpp @@ -85,7 +85,7 @@ F32 LLCamera::getMaxView() const // ---------------- LLCamera::setFoo() member functions ---------------- -void LLCamera::setUserClipPlane(LLPlane plane) +void LLCamera::setUserClipPlane(LLPlane& plane) { mPlaneCount = 7; mAgentPlanes[6] = plane; diff --git a/indra/llmath/llcamera.h b/indra/llmath/llcamera.h index a346322e0e..82d80f1057 100644 --- a/indra/llmath/llcamera.h +++ b/indra/llmath/llcamera.h @@ -147,7 +147,7 @@ public: virtual ~LLCamera(); - void setUserClipPlane(LLPlane plane); + void setUserClipPlane(LLPlane& plane); void disableUserClipPlane(); virtual void setView(F32 vertical_fov_rads); void setViewHeightInPixels(S32 height); -- cgit v1.2.3 From fad456e2b13fab83206240d7973dd2529b53de9e Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 29 Dec 2010 16:29:34 -0600 Subject: SH-647 Switch asserts to warnings in octree for development builds (were already warnings in release builds). --- indra/llmath/lloctree.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index ed97ff76ba..276f7b0f06 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -33,11 +33,7 @@ #include #include -#if LL_RELEASE_WITH_DEBUG_INFO || LL_DEBUG -#define OCT_ERRS LL_ERRS("OctreeErrors") -#else #define OCT_ERRS LL_WARNS("OctreeErrors") -#endif #define LL_OCTREE_PARANOIA_CHECK 0 #if LL_DARWIN -- cgit v1.2.3 From ca9df698db9c7a118d29adfe52f49054c333d982 Mon Sep 17 00:00:00 2001 From: Roxie Linden Date: Thu, 30 Dec 2010 22:08:25 -0800 Subject: Code review fix from davep - use booling or operator and not binary one Remove unnecessary state check. Initial dialog work for showing physics cost in upload dialog --- indra/llmath/llcamera.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llcamera.cpp b/indra/llmath/llcamera.cpp index c681e00b32..2ffc56644f 100644 --- a/indra/llmath/llcamera.cpp +++ b/indra/llmath/llcamera.cpp @@ -377,7 +377,7 @@ int LLCamera::sphereInFrustum(const LLVector3 &sphere_center, const F32 radius) { return 0; } - res |= (d > -radius); + res = res || (d > -radius); } } -- cgit v1.2.3 From ce5dc4d42b52fad2bb6d7bdf98b7656418061278 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 7 Jan 2011 13:47:50 -0600 Subject: SH-762 Forsyth style vertex buffer optimization for meshes. 
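
The core of the rebuild below is Tom Forsyth's vertex score (the paper is linked in the comment added to cacheOptimize()). A standalone sketch of just that scoring step follows; the constants mirror the FindVertexScore_* values and MaxSizeVertexCache added in llvolume.cpp, while the function and parameter names are illustrative:

#include <cmath>

// Score one vertex for Forsyth-style vertex cache optimization.
// cache_pos: slot in the simulated post-transform cache, or -1 if not resident.
// active_tris: number of not-yet-emitted triangles still referencing the vertex.
float forsyth_vertex_score(int cache_pos, unsigned active_tris)
{
    const float kCacheDecayPower   = 1.5f;   // FindVertexScore_CacheDecayPower
    const float kLastTriScore      = 0.75f;  // FindVertexScore_LastTriScore
    const float kValenceBoostScale = 2.0f;   // FindVertexScore_ValenceBoostScale
    const float kValenceBoostPower = 0.5f;   // FindVertexScore_ValenceBoostPower
    const int   kCacheSize         = 32;     // MaxSizeVertexCache

    if (active_tris == 0)
    {   // no remaining triangle references this vertex
        return -1.f;
    }

    float score = 0.f;
    if (cache_pos >= 0)
    {
        if (cache_pos < 3)
        {   // vertex was used by the most recently emitted triangle
            score = kLastTriScore;
        }
        else
        {   // score falls off toward the back of the cache
            const float scaler = 1.f / (kCacheSize - 3);
            score = 1.f - (cache_pos - 3) * scaler;
            score = std::pow(score, kCacheDecayPower);
        }
    }

    // bonus for low valence so rarely shared vertices are retired sooner
    score += kValenceBoostScale * std::pow((float)active_tris, -kValenceBoostPower);
    return score;
}

Each pass then emits the active triangle whose three vertex scores sum highest; that is what LLVCacheLRU::updateScores() in the hunk below computes before cacheOptimize() appends the winning triangle to the rebuilt index list.
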
--- indra/llmath/llvolume.cpp | 727 +++++++++++++++++++++++++++++++++++++--------- indra/llmath/llvolume.h | 71 ++--- 2 files changed, 625 insertions(+), 173 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 8a7b19800c..f41cd1b4e8 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2003,145 +2003,145 @@ BOOL LLVolume::generate() return FALSE; } -void LLVolumeFace::VertexData::init() -{ - if (!mData) - { - mData = (LLVector4a*) malloc(sizeof(LLVector4a)*2); - } -} - -LLVolumeFace::VertexData::VertexData() -{ - mData = NULL; - init(); -} - -LLVolumeFace::VertexData::VertexData(const VertexData& rhs) -{ - mData = NULL; - *this = rhs; -} - -const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolumeFace::VertexData& rhs) -{ - if (this != &rhs) - { - init(); - LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 2*sizeof(LLVector4a)); - mTexCoord = rhs.mTexCoord; - } - return *this; -} - -LLVolumeFace::VertexData::~VertexData() -{ - free(mData); - mData = NULL; -} - -LLVector4a& LLVolumeFace::VertexData::getPosition() -{ - return mData[POSITION]; -} - -LLVector4a& LLVolumeFace::VertexData::getNormal() -{ - return mData[NORMAL]; -} - -const LLVector4a& LLVolumeFace::VertexData::getPosition() const -{ - return mData[POSITION]; -} - -const LLVector4a& LLVolumeFace::VertexData::getNormal() const -{ - return mData[NORMAL]; -} - - -void LLVolumeFace::VertexData::setPosition(const LLVector4a& pos) -{ - mData[POSITION] = pos; -} - -void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) -{ - mData[NORMAL] = norm; -} - -bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const -{ - const F32* lp = this->getPosition().getF32ptr(); - const F32* rp = rhs.getPosition().getF32ptr(); - - if (lp[0] != rp[0]) - { - return lp[0] < rp[0]; - } - - if (rp[1] != lp[1]) - { - return lp[1] < rp[1]; - } - - if (rp[2] != lp[2]) - { - return lp[2] < rp[2]; - } - - lp = getNormal().getF32ptr(); - rp = rhs.getNormal().getF32ptr(); - - if (lp[0] != rp[0]) - { - return lp[0] < rp[0]; - } - - if (rp[1] != lp[1]) - { - return lp[1] < rp[1]; - } - - if (rp[2] != lp[2]) - { - return lp[2] < rp[2]; - } - - if (mTexCoord.mV[0] != rhs.mTexCoord.mV[0]) - { - return mTexCoord.mV[0] < rhs.mTexCoord.mV[0]; - } - - return mTexCoord.mV[1] < rhs.mTexCoord.mV[1]; -} - -bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const -{ - return mData[POSITION].equals3(rhs.getPosition()) && - mData[NORMAL].equals3(rhs.getNormal()) && - mTexCoord == rhs.mTexCoord; -} - -bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const -{ - bool retval = false; - if (rhs.mData[POSITION].equals3(mData[POSITION]) && rhs.mTexCoord == mTexCoord) - { - if (angle_cutoff > 1.f) - { - retval = (mData[NORMAL].equals3(rhs.mData[NORMAL])); - } - else - { - F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]).getF32(); - retval = cur_angle > angle_cutoff; - } - } - - return retval; -} +void LLVolumeFace::VertexData::init() +{ + if (!mData) + { + mData = (LLVector4a*) malloc(sizeof(LLVector4a)*2); + } +} + +LLVolumeFace::VertexData::VertexData() +{ + mData = NULL; + init(); +} + +LLVolumeFace::VertexData::VertexData(const VertexData& rhs) +{ + mData = NULL; + *this = rhs; +} + +const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolumeFace::VertexData& rhs) +{ + if (this != &rhs) + { + init(); + 
LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 2*sizeof(LLVector4a)); + mTexCoord = rhs.mTexCoord; + } + return *this; +} + +LLVolumeFace::VertexData::~VertexData() +{ + free(mData); + mData = NULL; +} + +LLVector4a& LLVolumeFace::VertexData::getPosition() +{ + return mData[POSITION]; +} + +LLVector4a& LLVolumeFace::VertexData::getNormal() +{ + return mData[NORMAL]; +} + +const LLVector4a& LLVolumeFace::VertexData::getPosition() const +{ + return mData[POSITION]; +} + +const LLVector4a& LLVolumeFace::VertexData::getNormal() const +{ + return mData[NORMAL]; +} + + +void LLVolumeFace::VertexData::setPosition(const LLVector4a& pos) +{ + mData[POSITION] = pos; +} + +void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) +{ + mData[NORMAL] = norm; +} + +bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const +{ + const F32* lp = this->getPosition().getF32ptr(); + const F32* rp = rhs.getPosition().getF32ptr(); + + if (lp[0] != rp[0]) + { + return lp[0] < rp[0]; + } + + if (rp[1] != lp[1]) + { + return lp[1] < rp[1]; + } + + if (rp[2] != lp[2]) + { + return lp[2] < rp[2]; + } + + lp = getNormal().getF32ptr(); + rp = rhs.getNormal().getF32ptr(); + + if (lp[0] != rp[0]) + { + return lp[0] < rp[0]; + } + + if (rp[1] != lp[1]) + { + return lp[1] < rp[1]; + } + + if (rp[2] != lp[2]) + { + return lp[2] < rp[2]; + } + + if (mTexCoord.mV[0] != rhs.mTexCoord.mV[0]) + { + return mTexCoord.mV[0] < rhs.mTexCoord.mV[0]; + } + + return mTexCoord.mV[1] < rhs.mTexCoord.mV[1]; +} + +bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const +{ + return mData[POSITION].equals3(rhs.getPosition()) && + mData[NORMAL].equals3(rhs.getNormal()) && + mTexCoord == rhs.mTexCoord; +} + +bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const +{ + bool retval = false; + if (rhs.mData[POSITION].equals3(mData[POSITION]) && rhs.mTexCoord == mTexCoord) + { + if (angle_cutoff > 1.f) + { + retval = (mData[NORMAL].equals3(rhs.mData[NORMAL])); + } + else + { + F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]).getF32(); + retval = cur_angle > angle_cutoff; + } + } + + return retval; +} BOOL LLVolume::createVolumeFacesFromFile(const std::string& file_name) { @@ -2429,6 +2429,9 @@ bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) } mSculptLevel = 0; // success! 
+ + cacheOptimize(); + return true; } @@ -2590,6 +2593,15 @@ void LLVolume::copyVolumeFaces(const LLVolume* volume) mIsTetrahedron = FALSE; } +void LLVolume::cacheOptimize() +{ + for (S32 i = 0; i < mVolumeFaces.size(); ++i) + { + mVolumeFaces[i].cacheOptimize(); + } +} + + S32 LLVolume::getNumFaces() const { U8 sculpt_type = (mParams.getSculptType() & LL_SCULPT_TYPE_MASK); @@ -5481,6 +5493,443 @@ void LLVolumeFace::optimize(F32 angle_cutoff) swapData(new_face); } +class LLVCacheTriangleData; + +class LLVCacheVertexData +{ +public: + S32 mIdx; + S32 mCacheTag; + F32 mScore; + U32 mActiveTriangles; + std::vector mTriangles; + + LLVCacheVertexData() + { + mCacheTag = -1; + mScore = 0.f; + mActiveTriangles = 0; + mIdx = -1; + } +}; + +class LLVCacheTriangleData +{ +public: + bool mActive; + F32 mScore; + LLVCacheVertexData* mVertex[3]; + + LLVCacheTriangleData() + { + mActive = true; + mScore = 0.f; + mVertex[0] = mVertex[1] = mVertex[2] = NULL; + } + + void complete() + { + mActive = false; + for (S32 i = 0; i < 3; ++i) + { + if (mVertex[i]) + { + llassert_always(mVertex[i]->mActiveTriangles > 0); + mVertex[i]->mActiveTriangles--; + } + } + } + + bool operator<(const LLVCacheTriangleData& rhs) const + { //highest score first + return rhs.mScore < mScore; + } +}; + +const F32 FindVertexScore_CacheDecayPower = 1.5f; +const F32 FindVertexScore_LastTriScore = 0.75f; +const F32 FindVertexScore_ValenceBoostScale = 2.0f; +const F32 FindVertexScore_ValenceBoostPower = 0.5f; +const U32 MaxSizeVertexCache = 32; + +F32 find_vertex_score(LLVCacheVertexData& data) +{ + if (data.mActiveTriangles == 0) + { //no triangle references this vertex + return -1.f; + } + + F32 score = 0.f; + + S32 cache_idx = data.mCacheTag; + + if (cache_idx < 0) + { + //not in cache + } + else + { + if (cache_idx < 3) + { //vertex was in the last triangle + score = FindVertexScore_LastTriScore; + } + else + { //more points for being higher in the cache + F32 scaler = 1.f/(MaxSizeVertexCache-3); + score = 1.f-((cache_idx-3)*scaler); + score = powf(score, FindVertexScore_CacheDecayPower); + } + } + + //bonus points for having low valence + F32 valence_boost = powf(data.mActiveTriangles, -FindVertexScore_ValenceBoostPower); + score += FindVertexScore_ValenceBoostScale * valence_boost; + + return score; +} + +class LLVCacheFIFO +{ +public: + LLVCacheVertexData* mCache[MaxSizeVertexCache]; + U32 mMisses; + + LLVCacheFIFO() + { + mMisses = 0; + for (U32 i = 0; i < MaxSizeVertexCache; ++i) + { + mCache[i] = NULL; + } + } + + void addVertex(LLVCacheVertexData* data) + { + if (data->mCacheTag == -1) + { + mMisses++; + + S32 end = MaxSizeVertexCache-1; + + if (mCache[end]) + { + mCache[end]->mCacheTag = -1; + } + + for (S32 i = end; i > 0; --i) + { + mCache[i] = mCache[i-1]; + if (mCache[i]) + { + mCache[i]->mCacheTag = i; + } + } + + mCache[0] = data; + data->mCacheTag = 0; + } + } +}; + +class LLVCacheLRU +{ +public: + LLVCacheVertexData* mCache[MaxSizeVertexCache+3]; + + LLVCacheTriangleData* mBestTriangle; + + U32 mMisses; + + LLVCacheLRU() + { + for (U32 i = 0; i < MaxSizeVertexCache+3; ++i) + { + mCache[i] = NULL; + } + + mBestTriangle = NULL; + mMisses = 0; + } + + void addVertex(LLVCacheVertexData* data) + { + S32 end = MaxSizeVertexCache+2; + if (data->mCacheTag != -1) + { //just moving a vertex to the front of the cache + end = data->mCacheTag; + } + else + { + mMisses++; + if (mCache[end]) + { //adding a new vertex, vertex at end of cache falls off + mCache[end]->mCacheTag = -1; + } + } + + for (S32 i = end; i > 0; --i) + { 
//adjust cache pointers and tags + mCache[i] = mCache[i-1]; + + if (mCache[i]) + { + mCache[i]->mCacheTag = i; + } + } + + mCache[0] = data; + mCache[0]->mCacheTag = 0; + } + + void addTriangle(LLVCacheTriangleData* data) + { + addVertex(data->mVertex[0]); + addVertex(data->mVertex[1]); + addVertex(data->mVertex[2]); + } + + void updateScores() + { + for (U32 i = MaxSizeVertexCache; i < MaxSizeVertexCache+3; ++i) + { //trailing 3 vertices aren't actually in the cache for scoring purposes + if (mCache[i]) + { + mCache[i]->mCacheTag = -1; + } + } + + for (U32 i = 0; i < MaxSizeVertexCache; ++i) + { //update scores of vertices in cache + if (mCache[i]) + { + mCache[i]->mScore = find_vertex_score(*(mCache[i])); + llassert_always(mCache[i]->mCacheTag == i); + } + } + + mBestTriangle = NULL; + //update triangle scores + for (U32 i = 0; i < MaxSizeVertexCache+3; ++i) + { + if (mCache[i]) + { + for (U32 j = 0; j < mCache[i]->mTriangles.size(); ++j) + { + LLVCacheTriangleData* tri = mCache[i]->mTriangles[j]; + if (tri->mActive) + { + tri->mScore = tri->mVertex[0]->mScore; + tri->mScore += tri->mVertex[1]->mScore; + tri->mScore += tri->mVertex[2]->mScore; + + if (!mBestTriangle || mBestTriangle->mScore < tri->mScore) + { + mBestTriangle = tri; + } + } + } + } + } + + //knock trailing 3 vertices off the cache + for (U32 i = MaxSizeVertexCache; i < MaxSizeVertexCache+3; ++i) + { + if (mCache[i]) + { + llassert_always(mCache[i]->mCacheTag == -1); + mCache[i] = NULL; + } + } + } +}; + + +void LLVolumeFace::cacheOptimize() +{ //optimize for vertex cache according to Forsyth method: + // http://home.comcast.net/~tom_forsyth/papers/fast_vert_cache_opt.html + + LLVCacheLRU cache; + + //mapping of vertices to triangles and indices + std::vector vertex_data; + + //mapping of triangles do vertices + std::vector triangle_data; + + triangle_data.resize(mNumIndices/3); + vertex_data.resize(mNumVertices); + + for (U32 i = 0; i < mNumIndices; i++) + { //populate vertex data and triangle data arrays + U16 idx = mIndices[i]; + U16 tri_idx = i/3; + + vertex_data[idx].mTriangles.push_back(&(triangle_data[tri_idx])); + vertex_data[idx].mIdx = idx; + triangle_data[tri_idx].mVertex[i%3] = &(vertex_data[idx]); + } + + /*F32 pre_acmr = 1.f; + //measure cache misses from before rebuild + { + LLVCacheFIFO test_cache; + for (U32 i = 0; i < mNumIndices; ++i) + { + test_cache.addVertex(&vertex_data[mIndices[i]]); + } + + for (U32 i = 0; i < mNumVertices; i++) + { + vertex_data[i].mCacheTag = -1; + } + + pre_acmr = (F32) test_cache.mMisses/(mNumIndices/3); + }*/ + + for (U32 i = 0; i < mNumVertices; i++) + { //initialize score values (no cache -- might try a fifo cache here) + vertex_data[i].mScore = find_vertex_score(vertex_data[i]); + vertex_data[i].mActiveTriangles = vertex_data[i].mTriangles.size(); + + for (U32 j = 0; j < vertex_data[i].mTriangles.size(); ++j) + { + vertex_data[i].mTriangles[j]->mScore += vertex_data[i].mScore; + } + } + + //sort triangle data by score + std::sort(triangle_data.begin(), triangle_data.end()); + + std::vector new_indices; + + LLVCacheTriangleData* tri; + + //prime pump by adding first triangle to cache; + tri = &(triangle_data[0]); + cache.addTriangle(tri); + new_indices.push_back(tri->mVertex[0]->mIdx); + new_indices.push_back(tri->mVertex[1]->mIdx); + new_indices.push_back(tri->mVertex[2]->mIdx); + tri->complete(); + + U32 breaks = 0; + for (U32 i = 1; i < mNumIndices/3; ++i) + { + cache.updateScores(); + tri = cache.mBestTriangle; + if (!tri) + { + breaks++; + for (U32 j = 0; j < 
triangle_data.size(); ++j) + { + if (triangle_data[j].mActive) + { + tri = &(triangle_data[j]); + break; + } + } + } + + cache.addTriangle(tri); + new_indices.push_back(tri->mVertex[0]->mIdx); + new_indices.push_back(tri->mVertex[1]->mIdx); + new_indices.push_back(tri->mVertex[2]->mIdx); + tri->complete(); + } + + for (U32 i = 0; i < mNumIndices; ++i) + { + mIndices[i] = new_indices[i]; + } + + /*F32 post_acmr = 1.f; + //measure cache misses from after rebuild + { + LLVCacheFIFO test_cache; + for (U32 i = 0; i < mNumVertices; i++) + { + vertex_data[i].mCacheTag = -1; + } + + for (U32 i = 0; i < mNumIndices; ++i) + { + test_cache.addVertex(&vertex_data[mIndices[i]]); + } + + post_acmr = (F32) test_cache.mMisses/(mNumIndices/3); + }*/ + + //optimize for pre-TnL cache + + //allocate space for new buffer + S32 num_verts = mNumVertices; + LLVector4a* pos = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + LLVector4a* norm = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + S32 size = ((num_verts*sizeof(LLVector2)) + 0xF) & ~0xF; + LLVector2* tc = (LLVector2*) malloc(size); + + LLVector4a* wght = NULL; + if (mWeights) + { + wght = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + } + + LLVector4a* binorm = NULL; + if (mBinormals) + { + binorm = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + } + + //allocate mapping of old indices to new indices + std::vector new_idx; + new_idx.resize(mNumVertices, -1); + + S32 cur_idx = 0; + for (U32 i = 0; i < mNumIndices; ++i) + { + U16 idx = mIndices[i]; + if (new_idx[idx] == -1) + { //this vertex hasn't been added yet + new_idx[idx] = cur_idx; + + //copy vertex data + pos[cur_idx] = mPositions[idx]; + norm[cur_idx] = mNormals[idx]; + tc[cur_idx] = mTexCoords[idx]; + if (mWeights) + { + wght[cur_idx] = mWeights[idx]; + } + if (mBinormals) + { + binorm[cur_idx] = mBinormals[idx]; + } + + cur_idx++; + } + } + + for (U32 i = 0; i < mNumIndices; ++i) + { + mIndices[i] = new_idx[mIndices[i]]; + } + + free(mPositions); + free(mNormals); + free(mTexCoords); + free(mWeights); + free(mBinormals); + + mPositions = pos; + mNormals = norm; + mTexCoords = tc; + mWeights = wght; + mBinormals = binorm; + + //std::string result = llformat("ACMR pre/post: %.3f/%.3f -- %d triangles %d breaks", pre_acmr, post_acmr, mNumIndices/3, breaks); + //llinfos << result << llendl; + +} void LLVolumeFace::createOctree(F32 scaler, const LLVector4a& center, const LLVector4a& size) { diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index e2d6ce4b69..32ac531306 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -792,39 +792,39 @@ public: class LLVolumeFace { public: - class VertexData - { - enum - { - POSITION = 0, - NORMAL = 1 - }; - - private: - void init(); - public: - VertexData(); - VertexData(const VertexData& rhs); - const VertexData& operator=(const VertexData& rhs); - - ~VertexData(); - LLVector4a& getPosition(); - LLVector4a& getNormal(); - const LLVector4a& getPosition() const; - const LLVector4a& getNormal() const; - void setPosition(const LLVector4a& pos); - void setNormal(const LLVector4a& norm); - - - LLVector2 mTexCoord; - - bool operator<(const VertexData& rhs) const; - bool operator==(const VertexData& rhs) const; - bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; - - private: - LLVector4a* mData; - }; + class VertexData + { + enum + { + POSITION = 0, + NORMAL = 1 + }; + + private: + void init(); + public: + VertexData(); + VertexData(const VertexData& rhs); + const VertexData& operator=(const VertexData& 
rhs); + + ~VertexData(); + LLVector4a& getPosition(); + LLVector4a& getNormal(); + const LLVector4a& getPosition() const; + const LLVector4a& getNormal() const; + void setPosition(const LLVector4a& pos); + void setNormal(const LLVector4a& norm); + + + LLVector2 mTexCoord; + + bool operator<(const VertexData& rhs) const; + bool operator==(const VertexData& rhs) const; + bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; + + private: + LLVector4a* mData; + }; LLVolumeFace(); LLVolumeFace(const LLVolumeFace& src); @@ -870,6 +870,8 @@ public: }; void optimize(F32 angle_cutoff = 2.f); + void cacheOptimize(); + void createOctree(F32 scaler = 0.25f, const LLVector4a& center = LLVector4a(0,0,0), const LLVector4a& size = LLVector4a(0.5f,0.5f,0.5f)); enum @@ -1027,12 +1029,13 @@ public: friend std::ostream& operator<<(std::ostream &s, const LLVolume *volumep); // HACK to bypass Windoze confusion over // conversion if *(LLVolume*) to LLVolume& const LLVolumeFace &getVolumeFace(const S32 f) const {return mVolumeFaces[f];} // DO NOT DELETE VOLUME WHILE USING THIS REFERENCE, OR HOLD A POINTER TO THIS VOLUMEFACE - + U32 mFaceMask; // bit array of which faces exist in this volume LLVector3 mLODScaleBias; // vector for biasing LOD based on scale void sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level); void copyVolumeFaces(const LLVolume* volume); + void cacheOptimize(); private: void sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type); -- cgit v1.2.3 From f3493d41655e357374e6a7122e19d463100bb27c Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Wed, 19 Jan 2011 23:14:41 -0600 Subject: SH-822 Fix for crash in cacheOptimize (U16 should have been a U32) --- indra/llmath/llvolume.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index f41cd1b4e8..d6dfb5c7a9 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5762,7 +5762,7 @@ void LLVolumeFace::cacheOptimize() for (U32 i = 0; i < mNumIndices; i++) { //populate vertex data and triangle data arrays U16 idx = mIndices[i]; - U16 tri_idx = i/3; + U32 tri_idx = i/3; vertex_data[idx].mTriangles.push_back(&(triangle_data[tri_idx])); vertex_data[idx].mIdx = idx; -- cgit v1.2.3 From 3c053b3fdba8bcd86179578a5d492847c0b17458 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Fri, 21 Jan 2011 16:23:44 -0600 Subject: SH-534 Fix for various bump map glitches. --- indra/llmath/lloctree.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/lloctree.h b/indra/llmath/lloctree.h index 276f7b0f06..fdfc24f8b7 100644 --- a/indra/llmath/lloctree.h +++ b/indra/llmath/lloctree.h @@ -294,10 +294,8 @@ public: //is it here? 
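A note on the SH-822 fix above, before the lloctree.h hunk resumes: the change looks like a one-character type edit, but the failure mode is easy to miss. mIndices stores U16 vertex indices, so a face can reference at most 65,536 distinct vertices, yet nothing caps the number of triangles at that figure; once i/3 exceeds 65,535, a U16 triangle index wraps around, the wrapped slot of triangle_data gets overwritten and, presumably, the real slot never receives its vertex pointers, which matches the reported crash. A self-contained illustration of the wrap (the counts are hypothetical, not taken from a real asset):

#include <cstdint>
#include <cstdio>

int main()
{
    // A face with 70,000 triangles (210,000 indices) is legal, because only
    // the per-vertex indices are limited to 16 bits, not the index count.
    const uint32_t num_indices = 70000u * 3u;

    const uint32_t i = 199998u;                   // first index of triangle 66,666
    const uint16_t wrapped = (uint16_t)(i / 3u);  // 66,666 mod 65,536 = 1,130
    const uint32_t correct = i / 3u;              // 66,666, as intended after SH-822

    std::printf("triangle %u of %u would have been filed under slot %u\n",
                (unsigned)correct, (unsigned)(num_indices / 3u), (unsigned)wrapped);
    return 0;
}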
if (isInside(data->getPositionGroup())) { - if (getElementCount() < LL_OCTREE_MAX_CAPACITY && - (contains(data->getBinRadius()) || - (data->getBinRadius() > getSize()[0] && - parent && parent->getElementCount() >= LL_OCTREE_MAX_CAPACITY))) + if ((getElementCount() < LL_OCTREE_MAX_CAPACITY && contains(data->getBinRadius()) || + (data->getBinRadius() > getSize()[0] && parent && parent->getElementCount() >= LL_OCTREE_MAX_CAPACITY))) { //it belongs here #if LL_OCTREE_PARANOIA_CHECK //if this is a redundant insertion, error out (should never happen) @@ -359,7 +357,7 @@ public: //make sure no existing node matches this position for (U32 i = 0; i < getChildCount(); i++) { - if (mChild[i]->getCenter().equal3(center)) + if (mChild[i]->getCenter().equals3(center)) { OCT_ERRS << "Octree detected duplicate child center and gave up." << llendl; return false; @@ -488,18 +486,18 @@ public: { #if LL_OCTREE_PARANOIA_CHECK - if (child->getSize().equal3(getSize())) + if (child->getSize().equals3(getSize())) { OCT_ERRS << "Child size is same as parent size!" << llendl; } for (U32 i = 0; i < getChildCount(); i++) { - if(!mChild[i]->getSize().equal3(child->getSize())) + if(!mChild[i]->getSize().equals3(child->getSize())) { OCT_ERRS <<"Invalid octree child size." << llendl; } - if (mChild[i]->getCenter().equal3(child->getCenter())) + if (mChild[i]->getCenter().equals3(child->getCenter())) { OCT_ERRS <<"Duplicate octree child position." << llendl; } -- cgit v1.2.3 From fefc37e92fc5313e88fc7416e78fc2436abfffdb Mon Sep 17 00:00:00 2001 From: leyla_linden Date: Fri, 28 Jan 2011 16:39:02 -0800 Subject: SH-808 making sure the 10m vs. 64m scale limit switch happens for both the spinner and the manipulation tools etc. --- indra/llmath/xform.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'indra/llmath') diff --git a/indra/llmath/xform.h b/indra/llmath/xform.h index 7a9f0f62cf..1b50749b3e 100644 --- a/indra/llmath/xform.h +++ b/indra/llmath/xform.h @@ -33,10 +33,10 @@ const F32 MAX_OBJECT_Z = 4096.f; // should match REGION_HEIGHT_METERS, Pre-havok4: 768.f const F32 MIN_OBJECT_Z = -256.f; const F32 DEFAULT_MAX_PRIM_SCALE = 64.f; +const F32 DEFAULT_MAX_PRIM_SCALE_NO_MESH = 10.f; const F32 MIN_PRIM_SCALE = 0.01f; const F32 MAX_PRIM_SCALE = 65536.f; // something very high but not near FLT_MAX - class LLXform { protected: -- cgit v1.2.3 From c7d0fab7b9279c5f6a57ee3de103b8fb142fb747 Mon Sep 17 00:00:00 2001 From: Loren Shih Date: Tue, 1 Feb 2011 12:33:39 -0500 Subject: Fixes for merge up from viewer-development to mesh-development. Backed out SH-659 since merge was messy; this will be merged in manually later. 
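The xform.h hunk above only introduces the second constant; the code that actually chooses between the 10 m and 64 m limits (spinners, manipulation tools) lives in viewer code outside indra/llmath and is not part of this patch. As a rough sketch of how such a switch is typically consulted (the helper name and the mesh_enabled flag are hypothetical):

#include <algorithm>

// Constants as declared in xform.h above.
const float DEFAULT_MAX_PRIM_SCALE         = 64.f;   // limit when mesh is available
const float DEFAULT_MAX_PRIM_SCALE_NO_MESH = 10.f;   // legacy limit
const float MIN_PRIM_SCALE                 = 0.01f;

// Hypothetical helper: clamp a requested scale against the active limit.
// In the real viewer the mesh_enabled decision would come from region/feature flags.
inline float clamp_prim_scale(float requested, bool mesh_enabled)
{
    const float max_scale = mesh_enabled ? DEFAULT_MAX_PRIM_SCALE
                                         : DEFAULT_MAX_PRIM_SCALE_NO_MESH;
    return std::max(MIN_PRIM_SCALE, std::min(requested, max_scale));
}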
--- indra/llmath/llvolume.cpp | 6557 ++++++++++++++++++++++++++--------------- indra/llmath/llvolume.h | 2212 +++++++------- indra/llmath/llvolumeoctree.h | 70 +- 3 files changed, 5280 insertions(+), 3559 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 316eed679d..617a8b4ca3 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -1,4 +1,5 @@ /** + * @file llvolume.cpp * * $LicenseInfo:firstyear=2002&license=viewerlgpl$ @@ -24,9 +25,13 @@ */ #include "linden_common.h" +#include "llmemory.h" #include "llmath.h" #include +#if !LL_WINDOWS +#include +#endif #include "llerror.h" #include "llmemtype.h" @@ -37,9 +42,15 @@ #include "v4math.h" #include "m4math.h" #include "m3math.h" +#include "llmatrix3a.h" +#include "lloctree.h" #include "lldarray.h" #include "llvolume.h" +#include "llvolumeoctree.h" #include "llstl.h" +#include "llsdserialize.h" +#include "llvector4a.h" +#include "llmatrix4a.h" #define DEBUG_SILHOUETTE_BINORMALS 0 #define DEBUG_SILHOUETTE_NORMALS 0 // TomY: Use this to display normals using the silhouette @@ -80,7 +91,18 @@ const F32 SKEW_MAX = 0.95f; const F32 SCULPT_MIN_AREA = 0.002f; const S32 SCULPT_MIN_AREA_DETAIL = 1; -#define GEN_TRI_STRIP 0 +extern BOOL gDebugGL; + +void assert_aligned(void* ptr, uintptr_t alignment) +{ +#if 0 + uintptr_t t = (uintptr_t) ptr; + if (t%alignment != 0) + { + llerrs << "WTF?" << llendl; + } +#endif +} BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLVector3& pt3, const LLVector3& norm) { @@ -99,128 +121,262 @@ BOOL check_same_clock_dir( const LLVector3& pt1, const LLVector3& pt2, const LLV BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size) { - float fAWdU[3]; - LLVector3 dir; - LLVector3 diff; + return LLLineSegmentBoxIntersect(start.mV, end.mV, center.mV, size.mV); +} + +BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* center, const F32* size) +{ + F32 fAWdU[3]; + F32 dir[3]; + F32 diff[3]; for (U32 i = 0; i < 3; i++) { - dir.mV[i] = 0.5f * (end.mV[i] - start.mV[i]); - diff.mV[i] = (0.5f * (end.mV[i] + start.mV[i])) - center.mV[i]; - fAWdU[i] = fabsf(dir.mV[i]); - if(fabsf(diff.mV[i])>size.mV[i] + fAWdU[i]) return false; + dir[i] = 0.5f * (end[i] - start[i]); + diff[i] = (0.5f * (end[i] + start[i])) - center[i]; + fAWdU[i] = fabsf(dir[i]); + if(fabsf(diff[i])>size[i] + fAWdU[i]) return false; } float f; - f = dir.mV[1] * diff.mV[2] - dir.mV[2] * diff.mV[1]; if(fabsf(f)>size.mV[1]*fAWdU[2] + size.mV[2]*fAWdU[1]) return false; - f = dir.mV[2] * diff.mV[0] - dir.mV[0] * diff.mV[2]; if(fabsf(f)>size.mV[0]*fAWdU[2] + size.mV[2]*fAWdU[0]) return false; - f = dir.mV[0] * diff.mV[1] - dir.mV[1] * diff.mV[0]; if(fabsf(f)>size.mV[0]*fAWdU[1] + size.mV[1]*fAWdU[0]) return false; + f = dir[1] * diff[2] - dir[2] * diff[1]; if(fabsf(f)>size[1]*fAWdU[2] + size[2]*fAWdU[1]) return false; + f = dir[2] * diff[0] - dir[0] * diff[2]; if(fabsf(f)>size[0]*fAWdU[2] + size[2]*fAWdU[0]) return false; + f = dir[0] * diff[1] - dir[1] * diff[0]; if(fabsf(f)>size[0]*fAWdU[1] + size[1]*fAWdU[0]) return false; return true; } + // intersect test between triangle vert0, vert1, vert2 and a ray from orig in direction dir. // returns TRUE if intersecting and returns barycentric coordinates in intersection_a, intersection_b, // and returns the intersection point along dir in intersection_t. 
// Moller-Trumbore algorithm -BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, - F32* intersection_a, F32* intersection_b, F32* intersection_t, BOOL two_sided) +BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t) { - F32 u, v, t; /* find vectors for two edges sharing vert0 */ - LLVector3 edge1 = vert1 - vert0; + LLVector4a edge1; + edge1.setSub(vert1, vert0); - LLVector3 edge2 = vert2 - vert0;; + LLVector4a edge2; + edge2.setSub(vert2, vert0); /* begin calculating determinant - also used to calculate U parameter */ - LLVector3 pvec = dir % edge2; - - /* if determinant is near zero, ray lies in plane of triangle */ - F32 det = edge1 * pvec; + LLVector4a pvec; + pvec.setCross3(dir, edge2); - if (!two_sided) + /* if determinant is near zero, ray lies in plane of triangle */ + LLVector4a det; + det.setAllDot3(edge1, pvec); + + if (det.greaterEqual(LLVector4a::getEpsilon()).getGatheredBits() & 0x7) { - if (det < F_APPROXIMATELY_ZERO) - { - return FALSE; - } - /* calculate distance from vert0 to ray origin */ - LLVector3 tvec = orig - vert0; + LLVector4a tvec; + tvec.setSub(orig, vert0); /* calculate U parameter and test bounds */ - u = tvec * pvec; + LLVector4a u; + u.setAllDot3(tvec,pvec); - if (u < 0.f || u > det) + if ((u.greaterEqual(LLVector4a::getZero()).getGatheredBits() & 0x7) && + (u.lessEqual(det).getGatheredBits() & 0x7)) { - return FALSE; + /* prepare to test V parameter */ + LLVector4a qvec; + qvec.setCross3(tvec, edge1); + + /* calculate V parameter and test bounds */ + LLVector4a v; + v.setAllDot3(dir, qvec); + + + //if (!(v < 0.f || u + v > det)) + + LLVector4a sum_uv; + sum_uv.setAdd(u, v); + + S32 v_gequal = v.greaterEqual(LLVector4a::getZero()).getGatheredBits() & 0x7; + S32 sum_lequal = sum_uv.lessEqual(det).getGatheredBits() & 0x7; + + if (v_gequal && sum_lequal) + { + /* calculate t, scale parameters, ray intersects triangle */ + LLVector4a t; + t.setAllDot3(edge2,qvec); + + t.div(det); + u.div(det); + v.div(det); + + intersection_a = u[0]; + intersection_b = v[0]; + intersection_t = t[0]; + return TRUE; + } } - - /* prepare to test V parameter */ - LLVector3 qvec = tvec % edge1; + } - /* calculate V parameter and test bounds */ - v = dir * qvec; - if (v < 0.f || u + v > det) - { - return FALSE; - } + return FALSE; +} - /* calculate t, scale parameters, ray intersects triangle */ - t = edge2 * qvec; - F32 inv_det = 1.0 / det; - t *= inv_det; - u *= inv_det; - v *= inv_det; - } +BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t) +{ + F32 u, v, t; - else // two sided - { - if (det > -F_APPROXIMATELY_ZERO && det < F_APPROXIMATELY_ZERO) - { - return FALSE; - } - F32 inv_det = 1.0 / det; + /* find vectors for two edges sharing vert0 */ + LLVector4a edge1; + edge1.setSub(vert1, vert0); + + + LLVector4a edge2; + edge2.setSub(vert2, vert0); - /* calculate distance from vert0 to ray origin */ - LLVector3 tvec = orig - vert0; - - /* calculate U parameter and test bounds */ - u = (tvec * pvec) * inv_det; - if (u < 0.f || u > 1.f) - { - return FALSE; - } + /* begin calculating determinant - also used to calculate U parameter */ + LLVector4a pvec; + 
pvec.setCross3(dir, edge2); - /* prepare to test V parameter */ - LLVector3 qvec = tvec - edge1; - - /* calculate V parameter and test bounds */ - v = (dir * qvec) * inv_det; - - if (v < 0.f || u + v > 1.f) - { - return FALSE; - } + /* if determinant is near zero, ray lies in plane of triangle */ + F32 det = edge1.dot3(pvec).getF32(); - /* calculate t, ray intersects triangle */ - t = (edge2 * qvec) * inv_det; + + if (det > -F_APPROXIMATELY_ZERO && det < F_APPROXIMATELY_ZERO) + { + return FALSE; + } + + F32 inv_det = 1.f / det; + + /* calculate distance from vert0 to ray origin */ + LLVector4a tvec; + tvec.setSub(orig, vert0); + + /* calculate U parameter and test bounds */ + u = (tvec.dot3(pvec).getF32()) * inv_det; + if (u < 0.f || u > 1.f) + { + return FALSE; + } + + /* prepare to test V parameter */ + tvec.sub(edge1); + + /* calculate V parameter and test bounds */ + v = (dir.dot3(tvec).getF32()) * inv_det; + + if (v < 0.f || u + v > 1.f) + { + return FALSE; } + + /* calculate t, ray intersects triangle */ + t = (edge2.dot3(tvec).getF32()) * inv_det; - if (intersection_a != NULL) - *intersection_a = u; - if (intersection_b != NULL) - *intersection_b = v; - if (intersection_t != NULL) - *intersection_t = t; + intersection_a = u; + intersection_b = v; + intersection_t = t; return TRUE; } +//helper for non-aligned vectors +BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t, BOOL two_sided) +{ + LLVector4a vert0a, vert1a, vert2a, origa, dira; + vert0a.load3(vert0.mV); + vert1a.load3(vert1.mV); + vert2a.load3(vert2.mV); + origa.load3(orig.mV); + dira.load3(dir.mV); + + if (two_sided) + { + return LLTriangleRayIntersectTwoSided(vert0a, vert1a, vert2a, origa, dira, + intersection_a, intersection_b, intersection_t); + } + else + { + return LLTriangleRayIntersect(vert0a, vert1a, vert2a, origa, dira, + intersection_a, intersection_b, intersection_t); + } +} + +class LLVolumeOctreeRebound : public LLOctreeTravelerDepthFirst +{ +public: + const LLVolumeFace* mFace; + + LLVolumeOctreeRebound(const LLVolumeFace* face) + { + mFace = face; + } + + virtual void visit(const LLOctreeNode* branch) + { //this is a depth first traversal, so it's safe to assum all children have complete + //bounding data + + LLVolumeOctreeListener* node = (LLVolumeOctreeListener*) branch->getListener(0); + + LLVector4a& min = node->mExtents[0]; + LLVector4a& max = node->mExtents[1]; + + if (!branch->getData().empty()) + { //node has data, find AABB that binds data set + const LLVolumeTriangle* tri = *(branch->getData().begin()); + + //initialize min/max to first available vertex + min = *(tri->mV[0]); + max = *(tri->mV[0]); + + for (LLOctreeNode::const_element_iter iter = + branch->getData().begin(); iter != branch->getData().end(); ++iter) + { //for each triangle in node + + //stretch by triangles in node + tri = *iter; + + min.setMin(min, *tri->mV[0]); + min.setMin(min, *tri->mV[1]); + min.setMin(min, *tri->mV[2]); + + max.setMax(max, *tri->mV[0]); + max.setMax(max, *tri->mV[1]); + max.setMax(max, *tri->mV[2]); + } + } + else if (!branch->getChildren().empty()) + { //no data, but child nodes exist + LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(0)->getListener(0); + + //initialize min/max to extents of first child + min = child->mExtents[0]; + max = child->mExtents[1]; + } + else + { + llerrs << "WTF? 
Empty leaf" << llendl; + } + + for (S32 i = 0; i < branch->getChildCount(); ++i) + { //stretch by child extents + LLVolumeOctreeListener* child = (LLVolumeOctreeListener*) branch->getChild(i)->getListener(0); + min.setMin(min, child->mExtents[0]); + max.setMax(max, child->mExtents[1]); + } + + node->mBounds[0].setAdd(min, max); + node->mBounds[0].mul(0.5f); + + node->mBounds[1].setSub(max,min); + node->mBounds[1].mul(0.5f); + } +}; //------------------------------------------------------------------- // statics @@ -1669,7 +1825,13 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mFaceMask = 0x0; mDetail = detail; mSculptLevel = -2; - + mIsTetrahedron = FALSE; + mLODScaleBias.setVec(1,1,1); + mHullPoints = NULL; + mHullIndices = NULL; + mNumHullPoints = 0; + mNumHullIndices = 0; + // set defaults if (mParams.getPathParams().getCurveType() == LL_PCODE_PATH_FLEXIBLE) { @@ -1684,7 +1846,8 @@ LLVolume::LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL ge mGenerateSingleFace = generate_single_face; generate(); - if (mParams.getSculptID().isNull() && params.getSculptType() == LL_SCULPT_TYPE_NONE) + + if (mParams.getSculptID().isNull() && mParams.getSculptType() == LL_SCULPT_TYPE_NONE) { createVolumeFaces(); } @@ -1719,6 +1882,11 @@ LLVolume::~LLVolume() mPathp = NULL; mProfilep = NULL; mVolumeFaces.clear(); + + free(mHullPoints); + mHullPoints = NULL; + free(mHullIndices); + mHullIndices = NULL; } BOOL LLVolume::generate() @@ -1835,815 +2003,1435 @@ BOOL LLVolume::generate() return FALSE; } - -void LLVolume::createVolumeFaces() +void LLVolumeFace::VertexData::init() { - LLMemType m1(LLMemType::MTYPE_VOLUME); - - if (mGenerateSingleFace) + if (!mData) { - // do nothing + mData = (LLVector4a*) malloc(sizeof(LLVector4a)*2); } - else - { - S32 num_faces = getNumFaces(); - BOOL partial_build = TRUE; - if (num_faces != mVolumeFaces.size()) - { - partial_build = FALSE; - mVolumeFaces.resize(num_faces); - } - // Initialize volume faces with parameter data - for (S32 i = 0; i < (S32)mVolumeFaces.size(); i++) - { - LLVolumeFace& vf = mVolumeFaces[i]; - LLProfile::Face& face = mProfilep->mFaces[i]; - vf.mBeginS = face.mIndex; - vf.mNumS = face.mCount; - if (vf.mNumS < 0) - { - llerrs << "Volume face corruption detected." << llendl; - } - - vf.mBeginT = 0; - vf.mNumT= getPath().mPath.size(); - vf.mID = i; +} - // Set the type mask bits correctly - if (mParams.getProfileParams().getHollow() > 0) - { - vf.mTypeMask |= LLVolumeFace::HOLLOW_MASK; - } - if (mProfilep->isOpen()) - { - vf.mTypeMask |= LLVolumeFace::OPEN_MASK; - } - if (face.mCap) - { - vf.mTypeMask |= LLVolumeFace::CAP_MASK; - if (face.mFaceID == LL_FACE_PATH_BEGIN) - { - vf.mTypeMask |= LLVolumeFace::TOP_MASK; - } - else - { - llassert(face.mFaceID == LL_FACE_PATH_END); - vf.mTypeMask |= LLVolumeFace::BOTTOM_MASK; - } - } - else if (face.mFaceID & (LL_FACE_PROFILE_BEGIN | LL_FACE_PROFILE_END)) - { - vf.mTypeMask |= LLVolumeFace::FLAT_MASK | LLVolumeFace::END_MASK; - } - else - { - vf.mTypeMask |= LLVolumeFace::SIDE_MASK; - if (face.mFlat) - { - vf.mTypeMask |= LLVolumeFace::FLAT_MASK; - } - if (face.mFaceID & LL_FACE_INNER_SIDE) - { - vf.mTypeMask |= LLVolumeFace::INNER_MASK; - if (face.mFlat && vf.mNumS > 2) - { //flat inner faces have to copy vert normals - vf.mNumS = vf.mNumS*2; - if (vf.mNumS < 0) - { - llerrs << "Volume face corruption detected." 
<< llendl; - } - } - } - else - { - vf.mTypeMask |= LLVolumeFace::OUTER_MASK; - } - } - } +LLVolumeFace::VertexData::VertexData() +{ + mData = NULL; + init(); +} + +LLVolumeFace::VertexData::VertexData(const VertexData& rhs) +{ + mData = NULL; + *this = rhs; +} - for (face_list_t::iterator iter = mVolumeFaces.begin(); - iter != mVolumeFaces.end(); ++iter) - { - (*iter).create(this, partial_build); - } +const LLVolumeFace::VertexData& LLVolumeFace::VertexData::operator=(const LLVolumeFace::VertexData& rhs) +{ + if (this != &rhs) + { + init(); + LLVector4a::memcpyNonAliased16((F32*) mData, (F32*) rhs.mData, 2*sizeof(LLVector4a)); + mTexCoord = rhs.mTexCoord; } + return *this; } - -inline LLVector3 sculpt_rgb_to_vector(U8 r, U8 g, U8 b) +LLVolumeFace::VertexData::~VertexData() { - // maps RGB values to vector values [0..255] -> [-0.5..0.5] - LLVector3 value; - value.mV[VX] = r / 255.f - 0.5f; - value.mV[VY] = g / 255.f - 0.5f; - value.mV[VZ] = b / 255.f - 0.5f; - - return value; + free(mData); + mData = NULL; } -inline U32 sculpt_xy_to_index(U32 x, U32 y, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components) +LLVector4a& LLVolumeFace::VertexData::getPosition() { - U32 index = (x + y * sculpt_width) * sculpt_components; - return index; + return mData[POSITION]; } +LLVector4a& LLVolumeFace::VertexData::getNormal() +{ + return mData[NORMAL]; +} -inline U32 sculpt_st_to_index(S32 s, S32 t, S32 size_s, S32 size_t, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components) +const LLVector4a& LLVolumeFace::VertexData::getPosition() const { - U32 x = (U32) ((F32)s/(size_s) * (F32) sculpt_width); - U32 y = (U32) ((F32)t/(size_t) * (F32) sculpt_height); + return mData[POSITION]; +} - return sculpt_xy_to_index(x, y, sculpt_width, sculpt_height, sculpt_components); +const LLVector4a& LLVolumeFace::VertexData::getNormal() const +{ + return mData[NORMAL]; } -inline LLVector3 sculpt_index_to_vector(U32 index, const U8* sculpt_data) +void LLVolumeFace::VertexData::setPosition(const LLVector4a& pos) { - LLVector3 v = sculpt_rgb_to_vector(sculpt_data[index], sculpt_data[index+1], sculpt_data[index+2]); - - return v; + mData[POSITION] = pos; } -inline LLVector3 sculpt_st_to_vector(S32 s, S32 t, S32 size_s, S32 size_t, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data) +void LLVolumeFace::VertexData::setNormal(const LLVector4a& norm) { - U32 index = sculpt_st_to_index(s, t, size_s, size_t, sculpt_width, sculpt_height, sculpt_components); - - return sculpt_index_to_vector(index, sculpt_data); + mData[NORMAL] = norm; } -inline LLVector3 sculpt_xy_to_vector(U32 x, U32 y, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data) +bool LLVolumeFace::VertexData::operator<(const LLVolumeFace::VertexData& rhs)const { - U32 index = sculpt_xy_to_index(x, y, sculpt_width, sculpt_height, sculpt_components); + const F32* lp = this->getPosition().getF32ptr(); + const F32* rp = rhs.getPosition().getF32ptr(); - return sculpt_index_to_vector(index, sculpt_data); -} + if (lp[0] != rp[0]) + { + return lp[0] < rp[0]; + } + if (rp[1] != lp[1]) + { + return lp[1] < rp[1]; + } -F32 LLVolume::sculptGetSurfaceArea() -{ - // test to see if image has enough variation to create non-degenerate geometry + if (rp[2] != lp[2]) + { + return lp[2] < rp[2]; + } - F32 area = 0; + lp = getNormal().getF32ptr(); + rp = rhs.getNormal().getF32ptr(); - S32 sizeS = mPathp->mPath.size(); - S32 sizeT = mProfilep->mProfile.size(); - - for (S32 s = 0; s < sizeS-1; s++) + if (lp[0] != rp[0]) { - for 
(S32 t = 0; t < sizeT-1; t++) - { - // get four corners of quad - LLVector3 p1 = mMesh[(s )*sizeT + (t )].mPos; - LLVector3 p2 = mMesh[(s+1)*sizeT + (t )].mPos; - LLVector3 p3 = mMesh[(s )*sizeT + (t+1)].mPos; - LLVector3 p4 = mMesh[(s+1)*sizeT + (t+1)].mPos; + return lp[0] < rp[0]; + } - // compute the area of the quad by taking the length of the cross product of the two triangles - LLVector3 cross1 = (p1 - p2) % (p1 - p3); - LLVector3 cross2 = (p4 - p2) % (p4 - p3); - area += (cross1.magVec() + cross2.magVec()) / 2.0; + if (rp[1] != lp[1]) + { + return lp[1] < rp[1]; + } + + if (rp[2] != lp[2]) + { + return lp[2] < rp[2]; + } + + if (mTexCoord.mV[0] != rhs.mTexCoord.mV[0]) + { + return mTexCoord.mV[0] < rhs.mTexCoord.mV[0]; + } + + return mTexCoord.mV[1] < rhs.mTexCoord.mV[1]; +} + +bool LLVolumeFace::VertexData::operator==(const LLVolumeFace::VertexData& rhs)const +{ + return mData[POSITION].equals3(rhs.getPosition()) && + mData[NORMAL].equals3(rhs.getNormal()) && + mTexCoord == rhs.mTexCoord; +} + +bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs, F32 angle_cutoff) const +{ + bool retval = false; + if (rhs.mData[POSITION].equals3(mData[POSITION]) && rhs.mTexCoord == mTexCoord) + { + if (angle_cutoff > 1.f) + { + retval = (mData[NORMAL].equals3(rhs.mData[NORMAL])); + } + else + { + F32 cur_angle = rhs.mData[NORMAL].dot3(mData[NORMAL]).getF32(); + retval = cur_angle > angle_cutoff; } } - return area; + return retval; } -// create placeholder shape -void LLVolume::sculptGeneratePlaceholder() +BOOL LLVolume::createVolumeFacesFromFile(const std::string& file_name) { - LLMemType m1(LLMemType::MTYPE_VOLUME); + std::ifstream is; - S32 sizeS = mPathp->mPath.size(); - S32 sizeT = mProfilep->mProfile.size(); + is.open(file_name.c_str(), std::ifstream::in | std::ifstream::binary); + + BOOL success = createVolumeFacesFromStream(is); - S32 line = 0; + is.close(); - // for now, this is a sphere. - for (S32 s = 0; s < sizeS; s++) + return success; +} + +BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) +{ + mSculptLevel = -1; // default is an error occured + + LLSD header; { - for (S32 t = 0; t < sizeT; t++) + if (!LLSDSerialize::deserialize(header, is, 1024*1024*1024)) { - S32 i = t + line; - Point& pt = mMesh[i]; + llwarns << "Mesh header parse error. Not a valid mesh asset!" << llendl; + return FALSE; + } + } + + std::string nm[] = + { + "lowest_lod", + "low_lod", + "medium_lod", + "high_lod" + }; - - F32 u = (F32)s/(sizeS-1); - F32 v = (F32)t/(sizeT-1); + S32 lod = llclamp((S32) mDetail, 0, 3); - const F32 RADIUS = (F32) 0.3; - - pt.mPos.mV[0] = (F32)(sin(F_PI * v) * cos(2.0 * F_PI * u) * RADIUS); - pt.mPos.mV[1] = (F32)(sin(F_PI * v) * sin(2.0 * F_PI * u) * RADIUS); - pt.mPos.mV[2] = (F32)(cos(F_PI * v) * RADIUS); + while (lod < 4 && + (header[nm[lod]]["offset"].asInteger() == -1 || + header[nm[lod]]["size"].asInteger() == 0 )) + { + ++lod; + } + + if (lod >= 4) + { + lod = llclamp((S32) mDetail, 0, 3); + while (lod >= 0 && + (header[nm[lod]]["offset"].asInteger() == -1 || + header[nm[lod]]["size"].asInteger() == 0) ) + { + --lod; + } + + if (lod < 0) + { + llwarns << "Mesh header missing LOD offsets. Not a valid mesh asset!" 
<< llendl; + return FALSE; } - line += sizeT; } + + is.seekg(header[nm[lod]]["offset"].asInteger(), std::ios_base::cur); + + return unpackVolumeFaces(is, header[nm[lod]]["size"].asInteger()); } -// create the vertices from the map -void LLVolume::sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type) +bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) { - U8 sculpt_stitching = sculpt_type & LL_SCULPT_TYPE_MASK; - BOOL sculpt_invert = sculpt_type & LL_SCULPT_FLAG_INVERT; - BOOL sculpt_mirror = sculpt_type & LL_SCULPT_FLAG_MIRROR; - BOOL reverse_horizontal = (sculpt_invert ? !sculpt_mirror : sculpt_mirror); // XOR - - - LLMemType m1(LLMemType::MTYPE_VOLUME); - - S32 sizeS = mPathp->mPath.size(); - S32 sizeT = mProfilep->mProfile.size(); + //input stream is now pointing at a zlib compressed block of LLSD + //decompress block + LLSD mdl; + if (!unzip_llsd(mdl, is, size)) + { + llwarns << "not a valid mesh asset!" << llendl; + return false; + } - S32 line = 0; - for (S32 s = 0; s < sizeS; s++) { - // Run along the profile. - for (S32 t = 0; t < sizeT; t++) + U32 face_count = mdl.size(); + + if (face_count == 0) { - S32 i = t + line; - Point& pt = mMesh[i]; + llerrs << "WTF?" << llendl; + } - S32 reversed_t = t; + mVolumeFaces.resize(face_count); - if (reverse_horizontal) - { - reversed_t = sizeT - t - 1; - } - - U32 x = (U32) ((F32)reversed_t/(sizeT-1) * (F32) sculpt_width); - U32 y = (U32) ((F32)s/(sizeS-1) * (F32) sculpt_height); + for (U32 i = 0; i < face_count; ++i) + { + LLSD::Binary pos = mdl[i]["Position"]; + LLSD::Binary norm = mdl[i]["Normal"]; + LLSD::Binary tc = mdl[i]["TexCoord0"]; + LLSD::Binary idx = mdl[i]["TriangleList"]; + + LLVolumeFace& face = mVolumeFaces[i]; + //copy out indices + face.resizeIndices(idx.size()/2); - if (y == 0) // top row stitching + if (idx.empty() || face.mNumIndices < 3) + { //why is there an empty index list? + llerrs <<"WTF?" << llendl; + continue; + } + + U16* indices = (U16*) &(idx[0]); + for (U32 j = 0; j < idx.size()/2; ++j) { - // pinch? - if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE) - { - x = sculpt_width / 2; - } + face.mIndices[j] = indices[j]; } - if (y == sculpt_height) // bottom row stitching + //copy out vertices + U32 num_verts = pos.size()/(3*2); + face.resizeVertices(num_verts); + + if (mdl[i].has("Weights")) { - // wrap? - if (sculpt_stitching == LL_SCULPT_TYPE_TORUS) + face.allocateWeights(num_verts); + + LLSD::Binary weights = mdl[i]["Weights"]; + + U32 idx = 0; + + U32 cur_vertex = 0; + while (idx < weights.size() && cur_vertex < num_verts) { - y = 0; + const U8 END_INFLUENCES = 0xFF; + U8 joint = weights[idx++]; + + U32 cur_influence = 0; + LLVector4 wght(0,0,0,0); + + while (joint != END_INFLUENCES && idx < weights.size()) + { + U16 influence = weights[idx++]; + influence |= ((U16) weights[idx++] << 8); + + F32 w = llclamp((F32) influence / 65535.f, 0.f, 0.99999f); + wght.mV[cur_influence++] = (F32) joint + w; + + if (cur_influence >= 4) + { + joint = END_INFLUENCES; + } + else + { + joint = weights[idx++]; + } + } + + face.mWeights[cur_vertex].loadua(wght.mV); + + cur_vertex++; } - else + + if (cur_vertex != num_verts || idx != weights.size()) { - y = sculpt_height - 1; + llwarns << "Vertex weight count does not match vertex count!" << llendl; } + + } - // pinch? 
- if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE) + LLVector3 minp; + LLVector3 maxp; + LLVector2 min_tc; + LLVector2 max_tc; + + minp.setValue(mdl[i]["PositionDomain"]["Min"]); + maxp.setValue(mdl[i]["PositionDomain"]["Max"]); + LLVector4a min_pos, max_pos; + min_pos.load3(minp.mV); + max_pos.load3(maxp.mV); + + min_tc.setValue(mdl[i]["TexCoord0Domain"]["Min"]); + max_tc.setValue(mdl[i]["TexCoord0Domain"]["Max"]); + + LLVector4a pos_range; + pos_range.setSub(max_pos, min_pos); + LLVector2 tc_range = max_tc - min_tc; + + LLVector4a* pos_out = face.mPositions; + LLVector4a* norm_out = face.mNormals; + LLVector2* tc_out = face.mTexCoords; + + for (U32 j = 0; j < num_verts; ++j) + { + U16* v = (U16*) &(pos[j*3*2]); + + pos_out->set((F32) v[0], (F32) v[1], (F32) v[2]); + pos_out->div(65535.f); + pos_out->mul(pos_range); + pos_out->add(min_pos); + + pos_out++; + + U16* n = (U16*) &(norm[j*3*2]); + + norm_out->set((F32) n[0], (F32) n[1], (F32) n[2]); + norm_out->div(65535.f); + norm_out->mul(2.f); + norm_out->sub(1.f); + norm_out++; + + U16* t = (U16*) &(tc[j*2*2]); + + tc_out->mV[0] = (F32) t[0] / 65535.f * tc_range.mV[0] + min_tc.mV[0]; + tc_out->mV[1] = (F32) t[1] / 65535.f * tc_range.mV[1] + min_tc.mV[1]; + + tc_out++; + } + + + // modifier flags? + bool do_mirror = (mParams.getSculptType() & LL_SCULPT_FLAG_MIRROR); + bool do_invert = (mParams.getSculptType() &LL_SCULPT_FLAG_INVERT); + + + // translate to actions: + bool do_reflect_x = false; + bool do_reverse_triangles = false; + bool do_invert_normals = false; + + if (do_mirror) + { + do_reflect_x = true; + do_reverse_triangles = !do_reverse_triangles; + } + + if (do_invert) + { + do_invert_normals = true; + do_reverse_triangles = !do_reverse_triangles; + } + + // now do the work + + if (do_reflect_x) + { + LLVector4a* p = (LLVector4a*) face.mPositions; + LLVector4a* n = (LLVector4a*) face.mNormals; + + for (S32 i = 0; i < face.mNumVertices; i++) { - x = sculpt_width / 2; + p[i].mul(-1.0f); + n[i].mul(-1.0f); } } - if (x == sculpt_width) // side stitching + if (do_invert_normals) { - // wrap? - if ((sculpt_stitching == LL_SCULPT_TYPE_SPHERE) || - (sculpt_stitching == LL_SCULPT_TYPE_TORUS) || - (sculpt_stitching == LL_SCULPT_TYPE_CYLINDER)) + LLVector4a* n = (LLVector4a*) face.mNormals; + + for (S32 i = 0; i < face.mNumVertices; i++) { - x = 0; + n[i].mul(-1.0f); } - - else + } + + if (do_reverse_triangles) + { + for (U32 j = 0; j < face.mNumIndices; j += 3) { - x = sculpt_width - 1; + // swap the 2nd and 3rd index + S32 swap = face.mIndices[j+1]; + face.mIndices[j+1] = face.mIndices[j+2]; + face.mIndices[j+2] = swap; } } - pt.mPos = sculpt_xy_to_vector(x, y, sculpt_width, sculpt_height, sculpt_components, sculpt_data); + //calculate bounding box + LLVector4a& min = face.mExtents[0]; + LLVector4a& max = face.mExtents[1]; - if (sculpt_mirror) + min.clear(); + max.clear(); + min = max = face.mPositions[0]; + + for (S32 i = 1; i < face.mNumVertices; ++i) { - pt.mPos.mV[VX] *= -1.f; + min.setMin(min, face.mPositions[i]); + max.setMax(max, face.mPositions[i]); } } - - line += sizeT; } + + mSculptLevel = 0; // success! 
+ + cacheOptimize(); + + return true; } +void tetrahedron_set_normal(LLVolumeFace::VertexData* cv) +{ + LLVector4a v0; + v0.setSub(cv[1].getPosition(), cv[0].getNormal()); + LLVector4a v1; + v1.setSub(cv[2].getNormal(), cv[0].getPosition()); + + cv[0].getNormal().setCross3(v0,v1); + cv[0].getNormal().normalize3fast(); + cv[1].setNormal(cv[0].getNormal()); + cv[2].setNormal(cv[1].getNormal()); +} -const S32 SCULPT_REZ_1 = 6; // changed from 4 to 6 - 6 looks round whereas 4 looks square -const S32 SCULPT_REZ_2 = 8; -const S32 SCULPT_REZ_3 = 16; -const S32 SCULPT_REZ_4 = 32; +BOOL LLVolume::isTetrahedron() +{ + return mIsTetrahedron; +} -S32 sculpt_sides(F32 detail) +void LLVolume::makeTetrahedron() { + mVolumeFaces.clear(); - // detail is usually one of: 1, 1.5, 2.5, 4.0. + LLVolumeFace face; + + F32 x = 0.25f; + LLVector4a p[] = + { //unit tetrahedron corners + LLVector4a(x,x,x), + LLVector4a(-x,-x,x), + LLVector4a(-x,x,-x), + LLVector4a(x,-x,-x) + }; + + face.mExtents[0].splat(-x); + face.mExtents[1].splat(x); - if (detail <= 1.0) - { - return SCULPT_REZ_1; - } - if (detail <= 2.0) + LLVolumeFace::VertexData cv[3]; + + //set texture coordinates + cv[0].mTexCoord = LLVector2(0,0); + cv[1].mTexCoord = LLVector2(1,0); + cv[2].mTexCoord = LLVector2(0.5f, 0.5f*F_SQRT3); + + + //side 1 + cv[0].setPosition(p[1]); + cv[1].setPosition(p[0]); + cv[2].setPosition(p[2]); + + tetrahedron_set_normal(cv); + + face.resizeVertices(12); + face.resizeIndices(12); + + LLVector4a* v = (LLVector4a*) face.mPositions; + LLVector4a* n = (LLVector4a*) face.mNormals; + LLVector2* tc = (LLVector2*) face.mTexCoords; + + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); + v += 3; + + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; + + + //side 2 + cv[0].setPosition(p[3]); + cv[1].setPosition(p[0]); + cv[2].setPosition(p[1]); + + tetrahedron_set_normal(cv); + + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); + v += 3; + + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; + + //side 3 + cv[0].setPosition(p[3]); + cv[1].setPosition(p[1]); + cv[2].setPosition(p[2]); + + tetrahedron_set_normal(cv); + + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); + v += 3; + + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; + + //side 4 + cv[0].setPosition(p[2]); + cv[1].setPosition(p[0]); + cv[2].setPosition(p[3]); + + tetrahedron_set_normal(cv); + + v[0] = cv[0].getPosition(); + v[1] = cv[1].getPosition(); + v[2] = cv[2].getPosition(); + v += 3; + + n[0] = cv[0].getNormal(); + n[1] = cv[1].getNormal(); + n[2] = cv[2].getNormal(); + n += 3; + + tc[0] = cv[0].mTexCoord; + tc[1] = cv[1].mTexCoord; + tc[2] = cv[2].mTexCoord; + tc += 3; + + //set index buffer + for (U16 i = 0; i < 12; i++) { - return SCULPT_REZ_2; + face.mIndices[i] = i; } - if (detail <= 3.0) + + mVolumeFaces.push_back(face); + mSculptLevel = 0; + mIsTetrahedron = TRUE; +} + +void LLVolume::copyVolumeFaces(const LLVolume* volume) +{ + mVolumeFaces = volume->mVolumeFaces; + mSculptLevel = 0; + mIsTetrahedron = FALSE; +} + +void LLVolume::cacheOptimize() 
+{ + for (S32 i = 0; i < mVolumeFaces.size(); ++i) { - return SCULPT_REZ_3; + mVolumeFaces[i].cacheOptimize(); } - else +} + + +S32 LLVolume::getNumFaces() const +{ + U8 sculpt_type = (mParams.getSculptType() & LL_SCULPT_TYPE_MASK); + + if (sculpt_type == LL_SCULPT_TYPE_MESH) { - return SCULPT_REZ_4; + return LL_SCULPT_MESH_MAX_FACES; } -} + return (S32)mProfilep->mFaces.size(); +} -// determine the number of vertices in both s and t direction for this sculpt -void sculpt_calc_mesh_resolution(U16 width, U16 height, U8 type, F32 detail, S32& s, S32& t) -{ - // this code has the following properties: - // 1) the aspect ratio of the mesh is as close as possible to the ratio of the map - // while still using all available verts - // 2) the mesh cannot have more verts than is allowed by LOD - // 3) the mesh cannot have more verts than is allowed by the map - - S32 max_vertices_lod = (S32)pow((double)sculpt_sides(detail), 2.0); - S32 max_vertices_map = width * height / 4; - - S32 vertices; - if (max_vertices_map > 0) - vertices = llmin(max_vertices_lod, max_vertices_map); - else - vertices = max_vertices_lod; - - - F32 ratio; - if ((width == 0) || (height == 0)) - ratio = 1.f; - else - ratio = (F32) width / (F32) height; - - - s = (S32)fsqrtf(((F32)vertices / ratio)); - - s = llmax(s, 4); // no degenerate sizes, please - t = vertices / s; - - t = llmax(t, 4); // no degenerate sizes, please - s = vertices / t; -} - -// sculpt replaces generate() for sculpted surfaces -void LLVolume::sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level) +void LLVolume::createVolumeFaces() { LLMemType m1(LLMemType::MTYPE_VOLUME); - U8 sculpt_type = mParams.getSculptType(); - - BOOL data_is_empty = FALSE; - if (sculpt_width == 0 || sculpt_height == 0 || sculpt_components < 3 || sculpt_data == NULL) + if (mGenerateSingleFace) { - sculpt_level = -1; - data_is_empty = TRUE; + // do nothing } - - S32 requested_sizeS = 0; - S32 requested_sizeT = 0; - - sculpt_calc_mesh_resolution(sculpt_width, sculpt_height, sculpt_type, mDetail, requested_sizeS, requested_sizeT); - - mPathp->generate(mParams.getPathParams(), mDetail, 0, TRUE, requested_sizeS); - mProfilep->generate(mParams.getProfileParams(), mPathp->isOpen(), mDetail, 0, TRUE, requested_sizeT); - - S32 sizeS = mPathp->mPath.size(); // we requested a specific size, now see what we really got - S32 sizeT = mProfilep->mProfile.size(); // we requested a specific size, now see what we really got - - // weird crash bug - DEV-11158 - trying to collect more data: - if ((sizeS == 0) || (sizeT == 0)) + else { - llwarns << "sculpt bad mesh size " << sizeS << " " << sizeT << llendl; - } - - sNumMeshPoints -= mMesh.size(); - mMesh.resize(sizeS * sizeT); - sNumMeshPoints += mMesh.size(); + S32 num_faces = getNumFaces(); + BOOL partial_build = TRUE; + if (num_faces != mVolumeFaces.size()) + { + partial_build = FALSE; + mVolumeFaces.resize(num_faces); + } + // Initialize volume faces with parameter data + for (S32 i = 0; i < (S32)mVolumeFaces.size(); i++) + { + LLVolumeFace& vf = mVolumeFaces[i]; + LLProfile::Face& face = mProfilep->mFaces[i]; + vf.mBeginS = face.mIndex; + vf.mNumS = face.mCount; + if (vf.mNumS < 0) + { + llerrs << "Volume face corruption detected." 
<< llendl; + } - //generate vertex positions - if (!data_is_empty) - { - sculptGenerateMapVertices(sculpt_width, sculpt_height, sculpt_components, sculpt_data, sculpt_type); + vf.mBeginT = 0; + vf.mNumT= getPath().mPath.size(); + vf.mID = i; - // don't test lowest LOD to support legacy content DEV-33670 - if (mDetail > SCULPT_MIN_AREA_DETAIL) - { - if (sculptGetSurfaceArea() < SCULPT_MIN_AREA) + // Set the type mask bits correctly + if (mParams.getProfileParams().getHollow() > 0) { - data_is_empty = TRUE; + vf.mTypeMask |= LLVolumeFace::HOLLOW_MASK; + } + if (mProfilep->isOpen()) + { + vf.mTypeMask |= LLVolumeFace::OPEN_MASK; + } + if (face.mCap) + { + vf.mTypeMask |= LLVolumeFace::CAP_MASK; + if (face.mFaceID == LL_FACE_PATH_BEGIN) + { + vf.mTypeMask |= LLVolumeFace::TOP_MASK; + } + else + { + llassert(face.mFaceID == LL_FACE_PATH_END); + vf.mTypeMask |= LLVolumeFace::BOTTOM_MASK; + } + } + else if (face.mFaceID & (LL_FACE_PROFILE_BEGIN | LL_FACE_PROFILE_END)) + { + vf.mTypeMask |= LLVolumeFace::FLAT_MASK | LLVolumeFace::END_MASK; + } + else + { + vf.mTypeMask |= LLVolumeFace::SIDE_MASK; + if (face.mFlat) + { + vf.mTypeMask |= LLVolumeFace::FLAT_MASK; + } + if (face.mFaceID & LL_FACE_INNER_SIDE) + { + vf.mTypeMask |= LLVolumeFace::INNER_MASK; + if (face.mFlat && vf.mNumS > 2) + { //flat inner faces have to copy vert normals + vf.mNumS = vf.mNumS*2; + if (vf.mNumS < 0) + { + llerrs << "Volume face corruption detected." << llendl; + } + } + } + else + { + vf.mTypeMask |= LLVolumeFace::OUTER_MASK; + } } } - } - if (data_is_empty) - { - sculptGeneratePlaceholder(); + for (face_list_t::iterator iter = mVolumeFaces.begin(); + iter != mVolumeFaces.end(); ++iter) + { + (*iter).create(this, partial_build); + } } +} - - for (S32 i = 0; i < (S32)mProfilep->mFaces.size(); i++) - { - mFaceMask |= mProfilep->mFaces[i].mFaceID; - } - - mSculptLevel = sculpt_level; +inline LLVector3 sculpt_rgb_to_vector(U8 r, U8 g, U8 b) +{ + // maps RGB values to vector values [0..255] -> [-0.5..0.5] + LLVector3 value; + value.mV[VX] = r / 255.f - 0.5f; + value.mV[VY] = g / 255.f - 0.5f; + value.mV[VZ] = b / 255.f - 0.5f; - // Delete any existing faces so that they get regenerated - mVolumeFaces.clear(); - - createVolumeFaces(); + return value; } +inline U32 sculpt_xy_to_index(U32 x, U32 y, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components) +{ + U32 index = (x + y * sculpt_width) * sculpt_components; + return index; +} - -BOOL LLVolume::isCap(S32 face) +inline U32 sculpt_st_to_index(S32 s, S32 t, S32 size_s, S32 size_t, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components) { - return mProfilep->mFaces[face].mCap; + U32 x = (U32) ((F32)s/(size_s) * (F32) sculpt_width); + U32 y = (U32) ((F32)t/(size_t) * (F32) sculpt_height); + + return sculpt_xy_to_index(x, y, sculpt_width, sculpt_height, sculpt_components); } -BOOL LLVolume::isFlat(S32 face) + +inline LLVector3 sculpt_index_to_vector(U32 index, const U8* sculpt_data) { - return mProfilep->mFaces[face].mFlat; -} + LLVector3 v = sculpt_rgb_to_vector(sculpt_data[index], sculpt_data[index+1], sculpt_data[index+2]); + return v; +} -bool LLVolumeParams::operator==(const LLVolumeParams ¶ms) const +inline LLVector3 sculpt_st_to_vector(S32 s, S32 t, S32 size_s, S32 size_t, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data) { - return ( (getPathParams() == params.getPathParams()) && - (getProfileParams() == params.getProfileParams()) && - (mSculptID == params.mSculptID) && - (mSculptType == params.mSculptType) ); + U32 index = 
sculpt_st_to_index(s, t, size_s, size_t, sculpt_width, sculpt_height, sculpt_components); + + return sculpt_index_to_vector(index, sculpt_data); } -bool LLVolumeParams::operator!=(const LLVolumeParams ¶ms) const +inline LLVector3 sculpt_xy_to_vector(U32 x, U32 y, U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data) { - return ( (getPathParams() != params.getPathParams()) || - (getProfileParams() != params.getProfileParams()) || - (mSculptID != params.mSculptID) || - (mSculptType != params.mSculptType) ); + U32 index = sculpt_xy_to_index(x, y, sculpt_width, sculpt_height, sculpt_components); + + return sculpt_index_to_vector(index, sculpt_data); } -bool LLVolumeParams::operator<(const LLVolumeParams ¶ms) const + +F32 LLVolume::sculptGetSurfaceArea() { - if( getPathParams() != params.getPathParams() ) - { - return getPathParams() < params.getPathParams(); - } - - if (getProfileParams() != params.getProfileParams()) - { - return getProfileParams() < params.getProfileParams(); - } - - if (mSculptID != params.mSculptID) - { - return mSculptID < params.mSculptID; - } + // test to see if image has enough variation to create non-degenerate geometry + F32 area = 0; - return mSculptType < params.mSculptType; + S32 sizeS = mPathp->mPath.size(); + S32 sizeT = mProfilep->mProfile.size(); + + for (S32 s = 0; s < sizeS-1; s++) + { + for (S32 t = 0; t < sizeT-1; t++) + { + // get four corners of quad + LLVector3 p1 = mMesh[(s )*sizeT + (t )].mPos; + LLVector3 p2 = mMesh[(s+1)*sizeT + (t )].mPos; + LLVector3 p3 = mMesh[(s )*sizeT + (t+1)].mPos; + LLVector3 p4 = mMesh[(s+1)*sizeT + (t+1)].mPos; + // compute the area of the quad by taking the length of the cross product of the two triangles + LLVector3 cross1 = (p1 - p2) % (p1 - p3); + LLVector3 cross2 = (p4 - p2) % (p4 - p3); + area += (cross1.magVec() + cross2.magVec()) / 2.0; + } + } + return area; } -void LLVolumeParams::copyParams(const LLVolumeParams ¶ms) +// create placeholder shape +void LLVolume::sculptGeneratePlaceholder() { LLMemType m1(LLMemType::MTYPE_VOLUME); - mProfileParams.copyParams(params.mProfileParams); - mPathParams.copyParams(params.mPathParams); - mSculptID = params.getSculptID(); - mSculptType = params.getSculptType(); -} - -// Less restricitve approx 0 for volumes -const F32 APPROXIMATELY_ZERO = 0.001f; -bool approx_zero( F32 f, F32 tolerance = APPROXIMATELY_ZERO) -{ - return (f >= -tolerance) && (f <= tolerance); -} + + S32 sizeS = mPathp->mPath.size(); + S32 sizeT = mProfilep->mProfile.size(); + + S32 line = 0; -// return true if in range (or nearly so) -static bool limit_range(F32& v, F32 min, F32 max, F32 tolerance = APPROXIMATELY_ZERO) -{ - F32 min_delta = v - min; - if (min_delta < 0.f) - { - v = min; - if (!approx_zero(min_delta, tolerance)) - return false; - } - F32 max_delta = max - v; - if (max_delta < 0.f) + // for now, this is a sphere. + for (S32 s = 0; s < sizeS; s++) { - v = max; - if (!approx_zero(max_delta, tolerance)) - return false; - } - return true; -} - -bool LLVolumeParams::setBeginAndEndS(const F32 b, const F32 e) -{ - bool valid = true; - - // First, clamp to valid ranges. 
- F32 begin = b; - valid &= limit_range(begin, 0.f, 1.f - MIN_CUT_DELTA); - - F32 end = e; - if (end >= .0149f && end < MIN_CUT_DELTA) end = MIN_CUT_DELTA; // eliminate warning for common rounding error - valid &= limit_range(end, MIN_CUT_DELTA, 1.f); + for (S32 t = 0; t < sizeT; t++) + { + S32 i = t + line; + Point& pt = mMesh[i]; - valid &= limit_range(begin, 0.f, end - MIN_CUT_DELTA, .01f); + + F32 u = (F32)s/(sizeS-1); + F32 v = (F32)t/(sizeT-1); - // Now set them. - mProfileParams.setBegin(begin); - mProfileParams.setEnd(end); + const F32 RADIUS = (F32) 0.3; + + pt.mPos.mV[0] = (F32)(sin(F_PI * v) * cos(2.0 * F_PI * u) * RADIUS); + pt.mPos.mV[1] = (F32)(sin(F_PI * v) * sin(2.0 * F_PI * u) * RADIUS); + pt.mPos.mV[2] = (F32)(cos(F_PI * v) * RADIUS); - return valid; + } + line += sizeT; + } } -bool LLVolumeParams::setBeginAndEndT(const F32 b, const F32 e) +// create the vertices from the map +void LLVolume::sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type) { - bool valid = true; + U8 sculpt_stitching = sculpt_type & LL_SCULPT_TYPE_MASK; + BOOL sculpt_invert = sculpt_type & LL_SCULPT_FLAG_INVERT; + BOOL sculpt_mirror = sculpt_type & LL_SCULPT_FLAG_MIRROR; + BOOL reverse_horizontal = (sculpt_invert ? !sculpt_mirror : sculpt_mirror); // XOR + + + LLMemType m1(LLMemType::MTYPE_VOLUME); + + S32 sizeS = mPathp->mPath.size(); + S32 sizeT = mProfilep->mProfile.size(); + + S32 line = 0; + for (S32 s = 0; s < sizeS; s++) + { + // Run along the profile. + for (S32 t = 0; t < sizeT; t++) + { + S32 i = t + line; + Point& pt = mMesh[i]; - // First, clamp to valid ranges. - F32 begin = b; - valid &= limit_range(begin, 0.f, 1.f - MIN_CUT_DELTA); + S32 reversed_t = t; - F32 end = e; - valid &= limit_range(end, MIN_CUT_DELTA, 1.f); + if (reverse_horizontal) + { + reversed_t = sizeT - t - 1; + } + + U32 x = (U32) ((F32)reversed_t/(sizeT-1) * (F32) sculpt_width); + U32 y = (U32) ((F32)s/(sizeS-1) * (F32) sculpt_height); - valid &= limit_range(begin, 0.f, end - MIN_CUT_DELTA, .01f); + + if (y == 0) // top row stitching + { + // pinch? + if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE) + { + x = sculpt_width / 2; + } + } - // Now set them. - mPathParams.setBegin(begin); - mPathParams.setEnd(end); + if (y == sculpt_height) // bottom row stitching + { + // wrap? + if (sculpt_stitching == LL_SCULPT_TYPE_TORUS) + { + y = 0; + } + else + { + y = sculpt_height - 1; + } - return valid; -} + // pinch? + if (sculpt_stitching == LL_SCULPT_TYPE_SPHERE) + { + x = sculpt_width / 2; + } + } -bool LLVolumeParams::setHollow(const F32 h) -{ - // Validate the hollow based on path and profile. - U8 profile = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; - U8 hole_type = mProfileParams.getCurveType() & LL_PCODE_HOLE_MASK; - - F32 max_hollow = HOLLOW_MAX; + if (x == sculpt_width) // side stitching + { + // wrap? + if ((sculpt_stitching == LL_SCULPT_TYPE_SPHERE) || + (sculpt_stitching == LL_SCULPT_TYPE_TORUS) || + (sculpt_stitching == LL_SCULPT_TYPE_CYLINDER)) + { + x = 0; + } + + else + { + x = sculpt_width - 1; + } + } - // Only square holes have trouble. 
- if (LL_PCODE_HOLE_SQUARE == hole_type) - { - switch(profile) - { - case LL_PCODE_PROFILE_CIRCLE: - case LL_PCODE_PROFILE_CIRCLE_HALF: - case LL_PCODE_PROFILE_EQUALTRI: - max_hollow = HOLLOW_MAX_SQUARE; + pt.mPos = sculpt_xy_to_vector(x, y, sculpt_width, sculpt_height, sculpt_components, sculpt_data); + + if (sculpt_mirror) + { + pt.mPos.mV[VX] *= -1.f; + } } + + line += sizeT; } +} - F32 hollow = h; - bool valid = limit_range(hollow, HOLLOW_MIN, max_hollow); - mProfileParams.setHollow(hollow); - return valid; -} +const S32 SCULPT_REZ_1 = 6; // changed from 4 to 6 - 6 looks round whereas 4 looks square +const S32 SCULPT_REZ_2 = 8; +const S32 SCULPT_REZ_3 = 16; +const S32 SCULPT_REZ_4 = 32; -bool LLVolumeParams::setTwistBegin(const F32 b) +S32 sculpt_sides(F32 detail) { - F32 twist_begin = b; - bool valid = limit_range(twist_begin, TWIST_MIN, TWIST_MAX); - mPathParams.setTwistBegin(twist_begin); - return valid; -} - -bool LLVolumeParams::setTwistEnd(const F32 e) -{ - F32 twist_end = e; - bool valid = limit_range(twist_end, TWIST_MIN, TWIST_MAX); - mPathParams.setTwistEnd(twist_end); - return valid; -} -bool LLVolumeParams::setRatio(const F32 x, const F32 y) -{ - F32 min_x = RATIO_MIN; - F32 max_x = RATIO_MAX; - F32 min_y = RATIO_MIN; - F32 max_y = RATIO_MAX; - // If this is a circular path (and not a sphere) then 'ratio' is actually hole size. - U8 path_type = mPathParams.getCurveType(); - U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; - if ( LL_PCODE_PATH_CIRCLE == path_type && - LL_PCODE_PROFILE_CIRCLE_HALF != profile_type) + // detail is usually one of: 1, 1.5, 2.5, 4.0. + + if (detail <= 1.0) { - // Holes are more restricted... - min_x = HOLE_X_MIN; - max_x = HOLE_X_MAX; - min_y = HOLE_Y_MIN; - max_y = HOLE_Y_MAX; + return SCULPT_REZ_1; + } + if (detail <= 2.0) + { + return SCULPT_REZ_2; + } + if (detail <= 3.0) + { + return SCULPT_REZ_3; + } + else + { + return SCULPT_REZ_4; } - - F32 ratio_x = x; - bool valid = limit_range(ratio_x, min_x, max_x); - F32 ratio_y = y; - valid &= limit_range(ratio_y, min_y, max_y); - - mPathParams.setScale(ratio_x, ratio_y); - - return valid; } -bool LLVolumeParams::setShear(const F32 x, const F32 y) -{ - F32 shear_x = x; - bool valid = limit_range(shear_x, SHEAR_MIN, SHEAR_MAX); - F32 shear_y = y; - valid &= limit_range(shear_y, SHEAR_MIN, SHEAR_MAX); - mPathParams.setShear(shear_x, shear_y); - return valid; -} -bool LLVolumeParams::setTaperX(const F32 v) -{ - F32 taper = v; - bool valid = limit_range(taper, TAPER_MIN, TAPER_MAX); - mPathParams.setTaperX(taper); - return valid; -} -bool LLVolumeParams::setTaperY(const F32 v) +// determine the number of vertices in both s and t direction for this sculpt +void sculpt_calc_mesh_resolution(U16 width, U16 height, U8 type, F32 detail, S32& s, S32& t) { - F32 taper = v; - bool valid = limit_range(taper, TAPER_MIN, TAPER_MAX); - mPathParams.setTaperY(taper); - return valid; -} + // this code has the following properties: + // 1) the aspect ratio of the mesh is as close as possible to the ratio of the map + // while still using all available verts + // 2) the mesh cannot have more verts than is allowed by LOD + // 3) the mesh cannot have more verts than is allowed by the map + + S32 max_vertices_lod = (S32)pow((double)sculpt_sides(detail), 2.0); + S32 max_vertices_map = width * height / 4; + + S32 vertices; + if (max_vertices_map > 0) + vertices = llmin(max_vertices_lod, max_vertices_map); + else + vertices = max_vertices_lod; + -bool LLVolumeParams::setRevolutions(const F32 r) -{ - F32 
revolutions = r; - bool valid = limit_range(revolutions, REV_MIN, REV_MAX); - mPathParams.setRevolutions(revolutions); - return valid; + F32 ratio; + if ((width == 0) || (height == 0)) + ratio = 1.f; + else + ratio = (F32) width / (F32) height; + + + s = (S32)(F32) sqrt(((F32)vertices / ratio)); + + s = llmax(s, 4); // no degenerate sizes, please + t = vertices / s; + + t = llmax(t, 4); // no degenerate sizes, please + s = vertices / t; } -bool LLVolumeParams::setRadiusOffset(const F32 offset) +// sculpt replaces generate() for sculpted surfaces +void LLVolume::sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level) { - bool valid = true; + LLMemType m1(LLMemType::MTYPE_VOLUME); + U8 sculpt_type = mParams.getSculptType(); - // If this is a sphere, just set it to 0 and get out. - U8 path_type = mPathParams.getCurveType(); - U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; - if ( LL_PCODE_PROFILE_CIRCLE_HALF == profile_type || - LL_PCODE_PATH_CIRCLE != path_type ) + BOOL data_is_empty = FALSE; + + if (sculpt_width == 0 || sculpt_height == 0 || sculpt_components < 3 || sculpt_data == NULL) { - mPathParams.setRadiusOffset(0.f); - return true; + sculpt_level = -1; + data_is_empty = TRUE; } - // Limit radius offset, based on taper and hole size y. - F32 radius_offset = offset; - F32 taper_y = getTaperY(); - F32 radius_mag = fabs(radius_offset); - F32 hole_y_mag = fabs(getRatioY()); - F32 taper_y_mag = fabs(taper_y); - // Check to see if the taper effects us. - if ( (radius_offset > 0.f && taper_y < 0.f) || - (radius_offset < 0.f && taper_y > 0.f) ) + S32 requested_sizeS = 0; + S32 requested_sizeT = 0; + + sculpt_calc_mesh_resolution(sculpt_width, sculpt_height, sculpt_type, mDetail, requested_sizeS, requested_sizeT); + + mPathp->generate(mParams.getPathParams(), mDetail, 0, TRUE, requested_sizeS); + mProfilep->generate(mParams.getProfileParams(), mPathp->isOpen(), mDetail, 0, TRUE, requested_sizeT); + + S32 sizeS = mPathp->mPath.size(); // we requested a specific size, now see what we really got + S32 sizeT = mProfilep->mProfile.size(); // we requested a specific size, now see what we really got + + // weird crash bug - DEV-11158 - trying to collect more data: + if ((sizeS == 0) || (sizeT == 0)) { - // The taper does not help increase the radius offset range. - taper_y_mag = 0.f; + llwarns << "sculpt bad mesh size " << sizeS << " " << sizeT << llendl; } - F32 max_radius_mag = 1.f - hole_y_mag * (1.f - taper_y_mag) / (1.f - hole_y_mag); + + sNumMeshPoints -= mMesh.size(); + mMesh.resize(sizeS * sizeT); + sNumMeshPoints += mMesh.size(); - // Enforce the maximum magnitude. - F32 delta = max_radius_mag - radius_mag; - if (delta < 0.f) + //generate vertex positions + if (!data_is_empty) { - // Check radius offset sign. 
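For reference, a minimal standalone sketch of the vertex budget picked by sculpt_sides() and sculpt_calc_mesh_resolution() above; the constants mirror the hunk, and the worked example in the comments is purely illustrative.

    #include <algorithm>
    #include <cmath>

    // Illustrative restatement of the sculpt resolution logic above (not part of the patch).
    static int sketch_sculpt_sides(float detail)
    {
        if (detail <= 1.0f) return 6;   // SCULPT_REZ_1
        if (detail <= 2.0f) return 8;   // SCULPT_REZ_2
        if (detail <= 3.0f) return 16;  // SCULPT_REZ_3
        return 32;                      // SCULPT_REZ_4
    }

    static void sketch_sculpt_resolution(int width, int height, float detail, int& s, int& t)
    {
        int max_lod = sketch_sculpt_sides(detail) * sketch_sculpt_sides(detail);
        int max_map = width * height / 4;
        int verts   = (max_map > 0) ? std::min(max_lod, max_map) : max_lod;

        float ratio = (width == 0 || height == 0) ? 1.f : (float)width / (float)height;

        s = std::max((int)std::sqrt((float)verts / ratio), 4); // no degenerate sizes
        t = std::max(verts / s, 4);
        s = verts / t;
        // Example: detail 2.5 with a 64x64 map -> 16 sides, 256 verts, s = t = 16.
    }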
- if (radius_offset < 0.f) - { - radius_offset = -max_radius_mag; - } - else + sculptGenerateMapVertices(sculpt_width, sculpt_height, sculpt_components, sculpt_data, sculpt_type); + + // don't test lowest LOD to support legacy content DEV-33670 + if (mDetail > SCULPT_MIN_AREA_DETAIL) { - radius_offset = max_radius_mag; + if (sculptGetSurfaceArea() < SCULPT_MIN_AREA) + { + data_is_empty = TRUE; + } } - valid = approx_zero(delta, .1f); } - mPathParams.setRadiusOffset(radius_offset); - return valid; + if (data_is_empty) + { + sculptGeneratePlaceholder(); + } + + + + for (S32 i = 0; i < (S32)mProfilep->mFaces.size(); i++) + { + mFaceMask |= mProfilep->mFaces[i].mFaceID; + } + + mSculptLevel = sculpt_level; + + // Delete any existing faces so that they get regenerated + mVolumeFaces.clear(); + + createVolumeFaces(); } -bool LLVolumeParams::setSkew(const F32 skew_value) + + + +BOOL LLVolume::isCap(S32 face) { - bool valid = true; + return mProfilep->mFaces[face].mCap; +} - // Check the skew value against the revolutions. - F32 skew = llclamp(skew_value, SKEW_MIN, SKEW_MAX); - F32 skew_mag = fabs(skew); - F32 revolutions = getRevolutions(); - F32 scale_x = getRatioX(); - F32 min_skew_mag = 1.0f - 1.0f / (revolutions * scale_x + 1.0f); - // Discontinuity; A revolution of 1 allows skews below 0.5. - if ( fabs(revolutions - 1.0f) < 0.001) - min_skew_mag = 0.0f; +BOOL LLVolume::isFlat(S32 face) +{ + return mProfilep->mFaces[face].mFlat; +} - // Clip skew. - F32 delta = skew_mag - min_skew_mag; - if (delta < 0.f) - { - // Check skew sign. - if (skew < 0.0f) - { - skew = -min_skew_mag; - } - else - { - skew = min_skew_mag; - } - valid = approx_zero(delta, .01f); - } - mPathParams.setSkew(skew); - return valid; +bool LLVolumeParams::isSculpt() const +{ + return mSculptID.notNull(); } -bool LLVolumeParams::setSculptID(const LLUUID sculpt_id, U8 sculpt_type) +bool LLVolumeParams::isMeshSculpt() const { - mSculptID = sculpt_id; - mSculptType = sculpt_type; - return true; + return isSculpt() && ((mSculptType & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH); } -bool LLVolumeParams::setType(U8 profile, U8 path) +bool LLVolumeParams::operator==(const LLVolumeParams ¶ms) const { - bool result = true; - // First, check profile and path for validity. - U8 profile_type = profile & LL_PCODE_PROFILE_MASK; - U8 hole_type = (profile & LL_PCODE_HOLE_MASK) >> 4; - U8 path_type = path >> 4; + return ( (getPathParams() == params.getPathParams()) && + (getProfileParams() == params.getProfileParams()) && + (mSculptID == params.mSculptID) && + (mSculptType == params.mSculptType) ); +} - if (profile_type > LL_PCODE_PROFILE_MAX) +bool LLVolumeParams::operator!=(const LLVolumeParams ¶ms) const +{ + return ( (getPathParams() != params.getPathParams()) || + (getProfileParams() != params.getProfileParams()) || + (mSculptID != params.mSculptID) || + (mSculptType != params.mSculptType) ); +} + +bool LLVolumeParams::operator<(const LLVolumeParams ¶ms) const +{ + if( getPathParams() != params.getPathParams() ) { - // Bad profile. Make it square. - profile = LL_PCODE_PROFILE_SQUARE; - result = false; - llwarns << "LLVolumeParams::setType changing bad profile type (" << profile_type - << ") to be LL_PCODE_PROFILE_SQUARE" << llendl; + return getPathParams() < params.getPathParams(); } - else if (hole_type > LL_PCODE_HOLE_MAX) + + if (getProfileParams() != params.getProfileParams()) { - // Bad hole. Make it the same. 
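The sculpt() path above falls back to a placeholder mesh when the map is unusable or the generated surface collapses. A sketch of that decision as a free function; min_area and min_area_detail stand in for SCULPT_MIN_AREA and SCULPT_MIN_AREA_DETAIL, whose values are defined outside this hunk.

    // Sketch of the placeholder decision in LLVolume::sculpt() above (not part of the patch).
    static bool sketch_use_placeholder(bool map_missing, float detail, float surface_area,
                                       float min_area, float min_area_detail)
    {
        if (map_missing)
        {
            return true;                            // no usable sculpt data at all
        }
        // don't test the lowest LOD, to keep legacy content visible (DEV-33670)
        if (detail > min_area_detail && surface_area < min_area)
        {
            return true;                            // mesh collapsed to (nearly) zero area
        }
        return false;
    }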
- profile = profile_type; - result = false; - llwarns << "LLVolumeParams::setType changing bad hole type (" << hole_type - << ") to be LL_PCODE_HOLE_SAME" << llendl; + return getProfileParams() < params.getProfileParams(); } - - if (path_type < LL_PCODE_PATH_MIN || - path_type > LL_PCODE_PATH_MAX) + + if (mSculptID != params.mSculptID) { - // Bad path. Make it linear. - result = false; - llwarns << "LLVolumeParams::setType changing bad path (" << path - << ") to be LL_PCODE_PATH_LINE" << llendl; - path = LL_PCODE_PATH_LINE; + return mSculptID < params.mSculptID; } - mProfileParams.setCurveType(profile); - mPathParams.setCurveType(path); - return result; + return mSculptType < params.mSculptType; + + } -// static -bool LLVolumeParams::validate(U8 prof_curve, F32 prof_begin, F32 prof_end, F32 hollow, - U8 path_curve, F32 path_begin, F32 path_end, - F32 scx, F32 scy, F32 shx, F32 shy, - F32 twistend, F32 twistbegin, F32 radiusoffset, - F32 tx, F32 ty, F32 revolutions, F32 skew) +void LLVolumeParams::copyParams(const LLVolumeParams ¶ms) { - LLVolumeParams test_params; - if (!test_params.setType (prof_curve, path_curve)) + LLMemType m1(LLMemType::MTYPE_VOLUME); + mProfileParams.copyParams(params.mProfileParams); + mPathParams.copyParams(params.mPathParams); + mSculptID = params.getSculptID(); + mSculptType = params.getSculptType(); +} + +// Less restricitve approx 0 for volumes +const F32 APPROXIMATELY_ZERO = 0.001f; +bool approx_zero( F32 f, F32 tolerance = APPROXIMATELY_ZERO) +{ + return (f >= -tolerance) && (f <= tolerance); +} + +// return true if in range (or nearly so) +static bool limit_range(F32& v, F32 min, F32 max, F32 tolerance = APPROXIMATELY_ZERO) +{ + F32 min_delta = v - min; + if (min_delta < 0.f) { - return false; + v = min; + if (!approx_zero(min_delta, tolerance)) + return false; + } + F32 max_delta = max - v; + if (max_delta < 0.f) + { + v = max; + if (!approx_zero(max_delta, tolerance)) + return false; + } + return true; +} + +bool LLVolumeParams::setBeginAndEndS(const F32 b, const F32 e) +{ + bool valid = true; + + // First, clamp to valid ranges. + F32 begin = b; + valid &= limit_range(begin, 0.f, 1.f - MIN_CUT_DELTA); + + F32 end = e; + if (end >= .0149f && end < MIN_CUT_DELTA) end = MIN_CUT_DELTA; // eliminate warning for common rounding error + valid &= limit_range(end, MIN_CUT_DELTA, 1.f); + + valid &= limit_range(begin, 0.f, end - MIN_CUT_DELTA, .01f); + + // Now set them. + mProfileParams.setBegin(begin); + mProfileParams.setEnd(end); + + return valid; +} + +bool LLVolumeParams::setBeginAndEndT(const F32 b, const F32 e) +{ + bool valid = true; + + // First, clamp to valid ranges. + F32 begin = b; + valid &= limit_range(begin, 0.f, 1.f - MIN_CUT_DELTA); + + F32 end = e; + valid &= limit_range(end, MIN_CUT_DELTA, 1.f); + + valid &= limit_range(begin, 0.f, end - MIN_CUT_DELTA, .01f); + + // Now set them. + mPathParams.setBegin(begin); + mPathParams.setEnd(end); + + return valid; +} + +bool LLVolumeParams::setHollow(const F32 h) +{ + // Validate the hollow based on path and profile. + U8 profile = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; + U8 hole_type = mProfileParams.getCurveType() & LL_PCODE_HOLE_MASK; + + F32 max_hollow = HOLLOW_MAX; + + // Only square holes have trouble. 
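All of the parameter setters in this hunk funnel through the same tolerant clamp: the value is always forced into range, and the return value only reports whether the caller was out of range by more than the tolerance. A self-contained sketch of that pattern, with a cut-range example modeled on setBeginAndEndS() (the MIN_CUT value here is an assumption; the real MIN_CUT_DELTA is defined elsewhere):

    #include <cmath>

    // Sketch of the limit_range()/approx_zero() pattern used by the setters above.
    static bool sketch_limit_range(float& v, float lo, float hi, float tol = 0.001f)
    {
        if (v < lo) { bool ok = (lo - v) <= tol; v = lo; if (!ok) return false; }
        if (v > hi) { bool ok = (v - hi) <= tol; v = hi; if (!ok) return false; }
        return true;  // false means the input was out of range by more than the tolerance
    }

    // Usage, loosely mirroring setBeginAndEndS().
    static bool sketch_set_cut(float b, float e, float& begin, float& end)
    {
        const float MIN_CUT = 0.02f;  // assumption standing in for MIN_CUT_DELTA
        bool valid = true;
        begin = b;  valid &= sketch_limit_range(begin, 0.f, 1.f - MIN_CUT);
        end   = e;  valid &= sketch_limit_range(end,   MIN_CUT, 1.f);
        valid &= sketch_limit_range(begin, 0.f, end - MIN_CUT, 0.01f);
        return valid;
    }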
+ if (LL_PCODE_HOLE_SQUARE == hole_type) + { + switch(profile) + { + case LL_PCODE_PROFILE_CIRCLE: + case LL_PCODE_PROFILE_CIRCLE_HALF: + case LL_PCODE_PROFILE_EQUALTRI: + max_hollow = HOLLOW_MAX_SQUARE; + } + } + + F32 hollow = h; + bool valid = limit_range(hollow, HOLLOW_MIN, max_hollow); + mProfileParams.setHollow(hollow); + + return valid; +} + +bool LLVolumeParams::setTwistBegin(const F32 b) +{ + F32 twist_begin = b; + bool valid = limit_range(twist_begin, TWIST_MIN, TWIST_MAX); + mPathParams.setTwistBegin(twist_begin); + return valid; +} + +bool LLVolumeParams::setTwistEnd(const F32 e) +{ + F32 twist_end = e; + bool valid = limit_range(twist_end, TWIST_MIN, TWIST_MAX); + mPathParams.setTwistEnd(twist_end); + return valid; +} + +bool LLVolumeParams::setRatio(const F32 x, const F32 y) +{ + F32 min_x = RATIO_MIN; + F32 max_x = RATIO_MAX; + F32 min_y = RATIO_MIN; + F32 max_y = RATIO_MAX; + // If this is a circular path (and not a sphere) then 'ratio' is actually hole size. + U8 path_type = mPathParams.getCurveType(); + U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; + if ( LL_PCODE_PATH_CIRCLE == path_type && + LL_PCODE_PROFILE_CIRCLE_HALF != profile_type) + { + // Holes are more restricted... + min_x = HOLE_X_MIN; + max_x = HOLE_X_MAX; + min_y = HOLE_Y_MIN; + max_y = HOLE_Y_MAX; + } + + F32 ratio_x = x; + bool valid = limit_range(ratio_x, min_x, max_x); + F32 ratio_y = y; + valid &= limit_range(ratio_y, min_y, max_y); + + mPathParams.setScale(ratio_x, ratio_y); + + return valid; +} + +bool LLVolumeParams::setShear(const F32 x, const F32 y) +{ + F32 shear_x = x; + bool valid = limit_range(shear_x, SHEAR_MIN, SHEAR_MAX); + F32 shear_y = y; + valid &= limit_range(shear_y, SHEAR_MIN, SHEAR_MAX); + mPathParams.setShear(shear_x, shear_y); + return valid; +} + +bool LLVolumeParams::setTaperX(const F32 v) +{ + F32 taper = v; + bool valid = limit_range(taper, TAPER_MIN, TAPER_MAX); + mPathParams.setTaperX(taper); + return valid; +} + +bool LLVolumeParams::setTaperY(const F32 v) +{ + F32 taper = v; + bool valid = limit_range(taper, TAPER_MIN, TAPER_MAX); + mPathParams.setTaperY(taper); + return valid; +} + +bool LLVolumeParams::setRevolutions(const F32 r) +{ + F32 revolutions = r; + bool valid = limit_range(revolutions, REV_MIN, REV_MAX); + mPathParams.setRevolutions(revolutions); + return valid; +} + +bool LLVolumeParams::setRadiusOffset(const F32 offset) +{ + bool valid = true; + + // If this is a sphere, just set it to 0 and get out. + U8 path_type = mPathParams.getCurveType(); + U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; + if ( LL_PCODE_PROFILE_CIRCLE_HALF == profile_type || + LL_PCODE_PATH_CIRCLE != path_type ) + { + mPathParams.setRadiusOffset(0.f); + return true; + } + + // Limit radius offset, based on taper and hole size y. + F32 radius_offset = offset; + F32 taper_y = getTaperY(); + F32 radius_mag = fabs(radius_offset); + F32 hole_y_mag = fabs(getRatioY()); + F32 taper_y_mag = fabs(taper_y); + // Check to see if the taper effects us. + if ( (radius_offset > 0.f && taper_y < 0.f) || + (radius_offset < 0.f && taper_y > 0.f) ) + { + // The taper does not help increase the radius offset range. + taper_y_mag = 0.f; + } + F32 max_radius_mag = 1.f - hole_y_mag * (1.f - taper_y_mag) / (1.f - hole_y_mag); + + // Enforce the maximum magnitude. + F32 delta = max_radius_mag - radius_mag; + if (delta < 0.f) + { + // Check radius offset sign. 
+ if (radius_offset < 0.f) + { + radius_offset = -max_radius_mag; + } + else + { + radius_offset = max_radius_mag; + } + valid = approx_zero(delta, .1f); + } + + mPathParams.setRadiusOffset(radius_offset); + return valid; +} + +bool LLVolumeParams::setSkew(const F32 skew_value) +{ + bool valid = true; + + // Check the skew value against the revolutions. + F32 skew = llclamp(skew_value, SKEW_MIN, SKEW_MAX); + F32 skew_mag = fabs(skew); + F32 revolutions = getRevolutions(); + F32 scale_x = getRatioX(); + F32 min_skew_mag = 1.0f - 1.0f / (revolutions * scale_x + 1.0f); + // Discontinuity; A revolution of 1 allows skews below 0.5. + if ( fabs(revolutions - 1.0f) < 0.001) + min_skew_mag = 0.0f; + + // Clip skew. + F32 delta = skew_mag - min_skew_mag; + if (delta < 0.f) + { + // Check skew sign. + if (skew < 0.0f) + { + skew = -min_skew_mag; + } + else + { + skew = min_skew_mag; + } + valid = approx_zero(delta, .01f); + } + + mPathParams.setSkew(skew); + return valid; +} + +bool LLVolumeParams::setSculptID(const LLUUID sculpt_id, U8 sculpt_type) +{ + mSculptID = sculpt_id; + mSculptType = sculpt_type; + return true; +} + +bool LLVolumeParams::setType(U8 profile, U8 path) +{ + bool result = true; + // First, check profile and path for validity. + U8 profile_type = profile & LL_PCODE_PROFILE_MASK; + U8 hole_type = (profile & LL_PCODE_HOLE_MASK) >> 4; + U8 path_type = path >> 4; + + if (profile_type > LL_PCODE_PROFILE_MAX) + { + // Bad profile. Make it square. + profile = LL_PCODE_PROFILE_SQUARE; + result = false; + llwarns << "LLVolumeParams::setType changing bad profile type (" << profile_type + << ") to be LL_PCODE_PROFILE_SQUARE" << llendl; + } + else if (hole_type > LL_PCODE_HOLE_MAX) + { + // Bad hole. Make it the same. + profile = profile_type; + result = false; + llwarns << "LLVolumeParams::setType changing bad hole type (" << hole_type + << ") to be LL_PCODE_HOLE_SAME" << llendl; + } + + if (path_type < LL_PCODE_PATH_MIN || + path_type > LL_PCODE_PATH_MAX) + { + // Bad path. Make it linear. 
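The skew clamp in setSkew() above enforces a minimum skew magnitude that grows with the number of revolutions. A small sketch of that bound, with an illustrative worked example:

    #include <cmath>

    // Restatement of the bound computed in setSkew() above (not part of the patch).
    static float sketch_min_skew_mag(float revolutions, float scale_x)
    {
        // A path with a single revolution is the special case with no lower bound on skew.
        if (std::fabs(revolutions - 1.0f) < 0.001f)
        {
            return 0.0f;
        }
        return 1.0f - 1.0f / (revolutions * scale_x + 1.0f);
    }

    // e.g. revolutions = 3, scale_x = 0.5  ->  1 - 1/2.5 = 0.6, so a requested skew of
    // 0.25 is pushed out to +/-0.6 and reported as out of range.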
+ result = false; + llwarns << "LLVolumeParams::setType changing bad path (" << path + << ") to be LL_PCODE_PATH_LINE" << llendl; + path = LL_PCODE_PATH_LINE; + } + + mProfileParams.setCurveType(profile); + mPathParams.setCurveType(path); + return result; +} + +// static +bool LLVolumeParams::validate(U8 prof_curve, F32 prof_begin, F32 prof_end, F32 hollow, + U8 path_curve, F32 path_begin, F32 path_end, + F32 scx, F32 scy, F32 shx, F32 shy, + F32 twistend, F32 twistbegin, F32 radiusoffset, + F32 tx, F32 ty, F32 revolutions, F32 skew) +{ + LLVolumeParams test_params; + if (!test_params.setType (prof_curve, path_curve)) + { + return false; } if (!test_params.setBeginAndEndS (prof_begin, prof_end)) { @@ -2692,1768 +3480,2536 @@ bool LLVolumeParams::validate(U8 prof_curve, F32 prof_begin, F32 prof_end, F32 h return true; } -S32 *LLVolume::getTriangleIndices(U32 &num_indices) const -{ - LLMemType m1(LLMemType::MTYPE_VOLUME); - - S32 expected_num_triangle_indices = getNumTriangleIndices(); - if (expected_num_triangle_indices > MAX_VOLUME_TRIANGLE_INDICES) - { - // we don't allow LLVolumes with this many vertices - llwarns << "Couldn't allocate triangle indices" << llendl; - num_indices = 0; - return NULL; - } +S32 *LLVolume::getTriangleIndices(U32 &num_indices) const +{ + LLMemType m1(LLMemType::MTYPE_VOLUME); + + S32 expected_num_triangle_indices = getNumTriangleIndices(); + if (expected_num_triangle_indices > MAX_VOLUME_TRIANGLE_INDICES) + { + // we don't allow LLVolumes with this many vertices + llwarns << "Couldn't allocate triangle indices" << llendl; + num_indices = 0; + return NULL; + } + + S32* index = new S32[expected_num_triangle_indices]; + S32 count = 0; + + // Let's do this totally diffently, as we don't care about faces... + // Counter-clockwise triangles are forward facing... + + BOOL open = getProfile().isOpen(); + BOOL hollow = (mParams.getProfileParams().getHollow() > 0); + BOOL path_open = getPath().isOpen(); + S32 size_s, size_s_out, size_t; + S32 s, t, i; + size_s = getProfile().getTotal(); + size_s_out = getProfile().getTotalOut(); + size_t = getPath().mPath.size(); + + // NOTE -- if the construction of the triangles below ever changes + // then getNumTriangleIndices() method may also have to be updated. + + if (open) /* Flawfinder: ignore */ + { + if (hollow) + { + // Open hollow -- much like the closed solid, except we + // we need to stitch up the gap between s=0 and s=size_s-1 + + for (t = 0; t < size_t - 1; t++) + { + // The outer face, first cut, and inner face + for (s = 0; s < size_s - 1; s++) + { + i = s + t*size_s; + index[count++] = i; // x,y + index[count++] = i + 1; // x+1,y + index[count++] = i + size_s; // x,y+1 + + index[count++] = i + size_s; // x,y+1 + index[count++] = i + 1; // x+1,y + index[count++] = i + size_s + 1; // x+1,y+1 + } + + // The other cut face + index[count++] = s + t*size_s; // x,y + index[count++] = 0 + t*size_s; // x+1,y + index[count++] = s + (t+1)*size_s; // x,y+1 + + index[count++] = s + (t+1)*size_s; // x,y+1 + index[count++] = 0 + t*size_s; // x+1,y + index[count++] = 0 + (t+1)*size_s; // x+1,y+1 + } + + // Do the top and bottom caps, if necessary + if (path_open) + { + // Top cap + S32 pt1 = 0; + S32 pt2 = size_s-1; + S32 i = (size_t - 1)*size_s; + + while (pt2 - pt1 > 1) + { + // Use the profile points instead of the mesh, since you want + // the un-transformed profile distances. 
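A simplified sketch of the index pattern used by getTriangleIndices() above for open hollow shapes: two counter-clockwise triangles per (s,t) grid cell, plus one extra quad per ring that stitches the last profile point back to the first. This is a restatement for clarity, not part of the patch.

    #include <vector>

    static std::vector<int> sketch_stitched_grid(int size_s, int size_t)
    {
        std::vector<int> idx;
        for (int t = 0; t < size_t - 1; t++)
        {
            for (int s = 0; s < size_s - 1; s++)
            {
                int i = s + t * size_s;
                int tri[6] = { i, i + 1, i + size_s,
                               i + size_s, i + 1, i + size_s + 1 };
                idx.insert(idx.end(), tri, tri + 6);
            }
            // close the row: connect column size_s-1 back to column 0
            int last = (size_s - 1) + t * size_s;
            int wrap[6] = { last, t * size_s, last + size_s,
                            last + size_s, t * size_s, t * size_s + size_s };
            idx.insert(idx.end(), wrap, wrap + 6);
        }
        return idx;
    }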
+ LLVector3 p1 = getProfile().mProfile[pt1]; + LLVector3 p2 = getProfile().mProfile[pt2]; + LLVector3 pa = getProfile().mProfile[pt1+1]; + LLVector3 pb = getProfile().mProfile[pt2-1]; + + p1.mV[VZ] = 0.f; + p2.mV[VZ] = 0.f; + pa.mV[VZ] = 0.f; + pb.mV[VZ] = 0.f; + + // Use area of triangle to determine backfacing + F32 area_1a2, area_1ba, area_21b, area_2ab; + area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + + (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + + (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); - S32* index = new S32[expected_num_triangle_indices]; - S32 count = 0; + area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + + (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); - // Let's do this totally diffently, as we don't care about faces... - // Counter-clockwise triangles are forward facing... + area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + + (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); - BOOL open = getProfile().isOpen(); - BOOL hollow = (mParams.getProfileParams().getHollow() > 0); - BOOL path_open = getPath().isOpen(); - S32 size_s, size_s_out, size_t; - S32 s, t, i; - size_s = getProfile().getTotal(); - size_s_out = getProfile().getTotalOut(); - size_t = getPath().mPath.size(); + area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + + (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); - // NOTE -- if the construction of the triangles below ever changes - // then getNumTriangleIndices() method may also have to be updated. + BOOL use_tri1a2 = TRUE; + BOOL tri_1a2 = TRUE; + BOOL tri_21b = TRUE; - if (open) /* Flawfinder: ignore */ - { - if (hollow) + if (area_1a2 < 0) + { + tri_1a2 = FALSE; + } + if (area_2ab < 0) + { + // Can't use, because it contains point b + tri_1a2 = FALSE; + } + if (area_21b < 0) + { + tri_21b = FALSE; + } + if (area_1ba < 0) + { + // Can't use, because it contains point b + tri_21b = FALSE; + } + + if (!tri_1a2) + { + use_tri1a2 = FALSE; + } + else if (!tri_21b) + { + use_tri1a2 = TRUE; + } + else + { + LLVector3 d1 = p1 - pa; + LLVector3 d2 = p2 - pb; + + if (d1.magVecSquared() < d2.magVecSquared()) + { + use_tri1a2 = TRUE; + } + else + { + use_tri1a2 = FALSE; + } + } + + if (use_tri1a2) + { + index[count++] = pt1 + i; + index[count++] = pt1 + 1 + i; + index[count++] = pt2 + i; + pt1++; + } + else + { + index[count++] = pt1 + i; + index[count++] = pt2 - 1 + i; + index[count++] = pt2 + i; + pt2--; + } + } + + // Bottom cap + pt1 = 0; + pt2 = size_s-1; + while (pt2 - pt1 > 1) + { + // Use the profile points instead of the mesh, since you want + // the un-transformed profile distances. 
+ LLVector3 p1 = getProfile().mProfile[pt1]; + LLVector3 p2 = getProfile().mProfile[pt2]; + LLVector3 pa = getProfile().mProfile[pt1+1]; + LLVector3 pb = getProfile().mProfile[pt2-1]; + + p1.mV[VZ] = 0.f; + p2.mV[VZ] = 0.f; + pa.mV[VZ] = 0.f; + pb.mV[VZ] = 0.f; + + // Use area of triangle to determine backfacing + F32 area_1a2, area_1ba, area_21b, area_2ab; + area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + + (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + + (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); + + area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + + (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); + + area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + + (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + + area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + + (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + + BOOL use_tri1a2 = TRUE; + BOOL tri_1a2 = TRUE; + BOOL tri_21b = TRUE; + + if (area_1a2 < 0) + { + tri_1a2 = FALSE; + } + if (area_2ab < 0) + { + // Can't use, because it contains point b + tri_1a2 = FALSE; + } + if (area_21b < 0) + { + tri_21b = FALSE; + } + if (area_1ba < 0) + { + // Can't use, because it contains point b + tri_21b = FALSE; + } + + if (!tri_1a2) + { + use_tri1a2 = FALSE; + } + else if (!tri_21b) + { + use_tri1a2 = TRUE; + } + else + { + LLVector3 d1 = p1 - pa; + LLVector3 d2 = p2 - pb; + + if (d1.magVecSquared() < d2.magVecSquared()) + { + use_tri1a2 = TRUE; + } + else + { + use_tri1a2 = FALSE; + } + } + + if (use_tri1a2) + { + index[count++] = pt1; + index[count++] = pt2; + index[count++] = pt1 + 1; + pt1++; + } + else + { + index[count++] = pt1; + index[count++] = pt2; + index[count++] = pt2 - 1; + pt2--; + } + } + } + } + else { - // Open hollow -- much like the closed solid, except we - // we need to stitch up the gap between s=0 and s=size_s-1 + // Open solid for (t = 0; t < size_t - 1; t++) { - // The outer face, first cut, and inner face + // Outer face + 1 cut face for (s = 0; s < size_s - 1; s++) { i = s + t*size_s; + index[count++] = i; // x,y index[count++] = i + 1; // x+1,y index[count++] = i + size_s; // x,y+1 - + index[count++] = i + size_s; // x,y+1 index[count++] = i + 1; // x+1,y index[count++] = i + size_s + 1; // x+1,y+1 } - // The other cut face - index[count++] = s + t*size_s; // x,y - index[count++] = 0 + t*size_s; // x+1,y - index[count++] = s + (t+1)*size_s; // x,y+1 - - index[count++] = s + (t+1)*size_s; // x,y+1 - index[count++] = 0 + t*size_s; // x+1,y - index[count++] = 0 + (t+1)*size_s; // x+1,y+1 + // The other cut face + index[count++] = (size_s - 1) + (t*size_s); // x,y + index[count++] = 0 + t*size_s; // x+1,y + index[count++] = (size_s - 1) + (t+1)*size_s; // x,y+1 + + index[count++] = (size_s - 1) + (t+1)*size_s; // x,y+1 + index[count++] = 0 + (t*size_s); // x+1,y + index[count++] = 0 + (t+1)*size_s; // x+1,y+1 + } + + // Do the top and bottom caps, if necessary + if (path_open) + { + for (s = 0; s < size_s - 2; s++) + { + index[count++] = s+1; + index[count++] = s; + index[count++] = size_s - 1; + } + + // We've got a top cap + S32 offset = (size_t - 1)*size_s; + for (s = 0; s < size_s - 2; s++) + { + // Inverted ordering from bottom cap. 
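The cap triangulation above walks pt1 forward and pt2 backward and, at each step, uses the sign of twice the 2D triangle area to reject inverted candidate triangles before choosing the shorter diagonal. A minimal sketch of that test:

    // Twice the signed area of a 2D triangle, the same quantity as area_1a2 etc. above.
    static float sketch_signed_area_x2(const float a[2], const float b[2], const float c[2])
    {
        return (a[0] * b[1] - b[0] * a[1])
             + (b[0] * c[1] - c[0] * b[1])
             + (c[0] * a[1] - a[0] * c[1]);
    }
    // e.g. a=(0,0), b=(1,0), c=(0,1) gives +1 (counter-clockwise, accepted);
    // swapping b and c gives -1 (clockwise, rejected as back-facing).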
+ index[count++] = offset + size_s - 1; + index[count++] = offset + s; + index[count++] = offset + s + 1; + } + } + } + } + else if (hollow) + { + // Closed hollow + // Outer face + + for (t = 0; t < size_t - 1; t++) + { + for (s = 0; s < size_s_out - 1; s++) + { + i = s + t*size_s; + + index[count++] = i; // x,y + index[count++] = i + 1; // x+1,y + index[count++] = i + size_s; // x,y+1 + + index[count++] = i + size_s; // x,y+1 + index[count++] = i + 1; // x+1,y + index[count++] = i + 1 + size_s; // x+1,y+1 } + } - // Do the top and bottom caps, if necessary - if (path_open) + // Inner face + // Invert facing from outer face + for (t = 0; t < size_t - 1; t++) + { + for (s = size_s_out; s < size_s - 1; s++) { - // Top cap - S32 pt1 = 0; - S32 pt2 = size_s-1; - S32 i = (size_t - 1)*size_s; + i = s + t*size_s; - while (pt2 - pt1 > 1) - { - // Use the profile points instead of the mesh, since you want - // the un-transformed profile distances. - LLVector3 p1 = getProfile().mProfile[pt1]; - LLVector3 p2 = getProfile().mProfile[pt2]; - LLVector3 pa = getProfile().mProfile[pt1+1]; - LLVector3 pb = getProfile().mProfile[pt2-1]; + index[count++] = i; // x,y + index[count++] = i + 1; // x+1,y + index[count++] = i + size_s; // x,y+1 - p1.mV[VZ] = 0.f; - p2.mV[VZ] = 0.f; - pa.mV[VZ] = 0.f; - pb.mV[VZ] = 0.f; + index[count++] = i + size_s; // x,y+1 + index[count++] = i + 1; // x+1,y + index[count++] = i + 1 + size_s; // x+1,y+1 + } + } - // Use area of triangle to determine backfacing - F32 area_1a2, area_1ba, area_21b, area_2ab; - area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + - (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + - (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); + // Do the top and bottom caps, if necessary + if (path_open) + { + // Top cap + S32 pt1 = 0; + S32 pt2 = size_s-1; + S32 i = (size_t - 1)*size_s; - area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + - (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); + while (pt2 - pt1 > 1) + { + // Use the profile points instead of the mesh, since you want + // the un-transformed profile distances. 
+ LLVector3 p1 = getProfile().mProfile[pt1]; + LLVector3 p2 = getProfile().mProfile[pt2]; + LLVector3 pa = getProfile().mProfile[pt1+1]; + LLVector3 pb = getProfile().mProfile[pt2-1]; - area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + - (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + p1.mV[VZ] = 0.f; + p2.mV[VZ] = 0.f; + pa.mV[VZ] = 0.f; + pb.mV[VZ] = 0.f; - area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + - (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + // Use area of triangle to determine backfacing + F32 area_1a2, area_1ba, area_21b, area_2ab; + area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + + (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + + (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); - BOOL use_tri1a2 = TRUE; - BOOL tri_1a2 = TRUE; - BOOL tri_21b = TRUE; + area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + + (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); - if (area_1a2 < 0) - { - tri_1a2 = FALSE; - } - if (area_2ab < 0) - { - // Can't use, because it contains point b - tri_1a2 = FALSE; - } - if (area_21b < 0) - { - tri_21b = FALSE; - } - if (area_1ba < 0) - { - // Can't use, because it contains point b - tri_21b = FALSE; - } + area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + + (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); - if (!tri_1a2) - { - use_tri1a2 = FALSE; - } - else if (!tri_21b) - { - use_tri1a2 = TRUE; - } - else - { - LLVector3 d1 = p1 - pa; - LLVector3 d2 = p2 - pb; + area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + + (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); - if (d1.magVecSquared() < d2.magVecSquared()) - { - use_tri1a2 = TRUE; - } - else - { - use_tri1a2 = FALSE; - } - } + BOOL use_tri1a2 = TRUE; + BOOL tri_1a2 = TRUE; + BOOL tri_21b = TRUE; - if (use_tri1a2) + if (area_1a2 < 0) + { + tri_1a2 = FALSE; + } + if (area_2ab < 0) + { + // Can't use, because it contains point b + tri_1a2 = FALSE; + } + if (area_21b < 0) + { + tri_21b = FALSE; + } + if (area_1ba < 0) + { + // Can't use, because it contains point b + tri_21b = FALSE; + } + + if (!tri_1a2) + { + use_tri1a2 = FALSE; + } + else if (!tri_21b) + { + use_tri1a2 = TRUE; + } + else + { + LLVector3 d1 = p1 - pa; + LLVector3 d2 = p2 - pb; + + if (d1.magVecSquared() < d2.magVecSquared()) { - index[count++] = pt1 + i; - index[count++] = pt1 + 1 + i; - index[count++] = pt2 + i; - pt1++; + use_tri1a2 = TRUE; } else { - index[count++] = pt1 + i; - index[count++] = pt2 - 1 + i; - index[count++] = pt2 + i; - pt2--; + use_tri1a2 = FALSE; } } - // Bottom cap - pt1 = 0; - pt2 = size_s-1; - while (pt2 - pt1 > 1) + if (use_tri1a2) { - // Use the profile points instead of the mesh, since you want - // the un-transformed profile distances. - LLVector3 p1 = getProfile().mProfile[pt1]; - LLVector3 p2 = getProfile().mProfile[pt2]; - LLVector3 pa = getProfile().mProfile[pt1+1]; - LLVector3 pb = getProfile().mProfile[pt2-1]; + index[count++] = pt1 + i; + index[count++] = pt1 + 1 + i; + index[count++] = pt2 + i; + pt1++; + } + else + { + index[count++] = pt1 + i; + index[count++] = pt2 - 1 + i; + index[count++] = pt2 + i; + pt2--; + } + } - p1.mV[VZ] = 0.f; - p2.mV[VZ] = 0.f; - pa.mV[VZ] = 0.f; - pb.mV[VZ] = 0.f; + // Bottom cap + pt1 = 0; + pt2 = size_s-1; + while (pt2 - pt1 > 1) + { + // Use the profile points instead of the mesh, since you want + // the un-transformed profile distances. 
+ LLVector3 p1 = getProfile().mProfile[pt1]; + LLVector3 p2 = getProfile().mProfile[pt2]; + LLVector3 pa = getProfile().mProfile[pt1+1]; + LLVector3 pb = getProfile().mProfile[pt2-1]; - // Use area of triangle to determine backfacing - F32 area_1a2, area_1ba, area_21b, area_2ab; - area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + - (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + - (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); + p1.mV[VZ] = 0.f; + p2.mV[VZ] = 0.f; + pa.mV[VZ] = 0.f; + pb.mV[VZ] = 0.f; - area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + - (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); + // Use area of triangle to determine backfacing + F32 area_1a2, area_1ba, area_21b, area_2ab; + area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + + (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + + (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); - area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + - (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + + (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); - area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + - (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + + (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + + area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + + (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + + (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); - BOOL use_tri1a2 = TRUE; - BOOL tri_1a2 = TRUE; - BOOL tri_21b = TRUE; + BOOL use_tri1a2 = TRUE; + BOOL tri_1a2 = TRUE; + BOOL tri_21b = TRUE; - if (area_1a2 < 0) - { - tri_1a2 = FALSE; - } - if (area_2ab < 0) - { - // Can't use, because it contains point b - tri_1a2 = FALSE; - } - if (area_21b < 0) - { - tri_21b = FALSE; - } - if (area_1ba < 0) - { - // Can't use, because it contains point b - tri_21b = FALSE; - } + if (area_1a2 < 0) + { + tri_1a2 = FALSE; + } + if (area_2ab < 0) + { + // Can't use, because it contains point b + tri_1a2 = FALSE; + } + if (area_21b < 0) + { + tri_21b = FALSE; + } + if (area_1ba < 0) + { + // Can't use, because it contains point b + tri_21b = FALSE; + } - if (!tri_1a2) - { - use_tri1a2 = FALSE; - } - else if (!tri_21b) + if (!tri_1a2) + { + use_tri1a2 = FALSE; + } + else if (!tri_21b) + { + use_tri1a2 = TRUE; + } + else + { + LLVector3 d1 = p1 - pa; + LLVector3 d2 = p2 - pb; + + if (d1.magVecSquared() < d2.magVecSquared()) { use_tri1a2 = TRUE; } else { - LLVector3 d1 = p1 - pa; - LLVector3 d2 = p2 - pb; - - if (d1.magVecSquared() < d2.magVecSquared()) - { - use_tri1a2 = TRUE; - } - else - { - use_tri1a2 = FALSE; - } + use_tri1a2 = FALSE; } + } - if (use_tri1a2) - { - index[count++] = pt1; - index[count++] = pt2; - index[count++] = pt1 + 1; - pt1++; - } - else - { - index[count++] = pt1; - index[count++] = pt2; - index[count++] = pt2 - 1; - pt2--; - } + if (use_tri1a2) + { + index[count++] = pt1; + index[count++] = pt2; + index[count++] = pt1 + 1; + pt1++; + } + else + { + index[count++] = pt1; + index[count++] = pt2; + index[count++] = pt2 - 1; + pt2--; } } + } + } + else + { + // Closed solid. Easy case. + for (t = 0; t < size_t - 1; t++) + { + for (s = 0; s < size_s - 1; s++) + { + // Should wrap properly, but for now... 
+ i = s + t*size_s; + + index[count++] = i; // x,y + index[count++] = i + 1; // x+1,y + index[count++] = i + size_s; // x,y+1 + + index[count++] = i + size_s; // x,y+1 + index[count++] = i + 1; // x+1,y + index[count++] = i + size_s + 1; // x+1,y+1 + } } - else + + // Do the top and bottom caps, if necessary + if (path_open) { - // Open solid + // bottom cap + for (s = 1; s < size_s - 2; s++) + { + index[count++] = s+1; + index[count++] = s; + index[count++] = 0; + } - for (t = 0; t < size_t - 1; t++) + // top cap + S32 offset = (size_t - 1)*size_s; + for (s = 1; s < size_s - 2; s++) { - // Outer face + 1 cut face - for (s = 0; s < size_s - 1; s++) - { - i = s + t*size_s; + // Inverted ordering from bottom cap. + index[count++] = offset; + index[count++] = offset + s; + index[count++] = offset + s + 1; + } + } + } - index[count++] = i; // x,y - index[count++] = i + 1; // x+1,y - index[count++] = i + size_s; // x,y+1 +#ifdef LL_DEBUG + // assert that we computed the correct number of indices + if (count != expected_num_triangle_indices ) + { + llerrs << "bad index count prediciton:" + << " expected=" << expected_num_triangle_indices + << " actual=" << count << llendl; + } +#endif - index[count++] = i + size_s; // x,y+1 - index[count++] = i + 1; // x+1,y - index[count++] = i + size_s + 1; // x+1,y+1 - } +#if 0 + // verify that each index does not point beyond the size of the mesh + S32 num_vertices = mMesh.size(); + for (i = 0; i < count; i+=3) + { + llinfos << index[i] << ":" << index[i+1] << ":" << index[i+2] << llendl; + llassert(index[i] < num_vertices); + llassert(index[i+1] < num_vertices); + llassert(index[i+2] < num_vertices); + } +#endif - // The other cut face - index[count++] = (size_s - 1) + (t*size_s); // x,y - index[count++] = 0 + t*size_s; // x+1,y - index[count++] = (size_s - 1) + (t+1)*size_s; // x,y+1 + num_indices = count; + return index; +} - index[count++] = (size_s - 1) + (t+1)*size_s; // x,y+1 - index[count++] = 0 + (t*size_s); // x+1,y - index[count++] = 0 + (t+1)*size_s; // x+1,y+1 - } +S32 LLVolume::getNumTriangleIndices() const +{ + BOOL profile_open = getProfile().isOpen(); + BOOL hollow = (mParams.getProfileParams().getHollow() > 0); + BOOL path_open = getPath().isOpen(); - // Do the top and bottom caps, if necessary - if (path_open) - { - for (s = 0; s < size_s - 2; s++) - { - index[count++] = s+1; - index[count++] = s; - index[count++] = size_s - 1; - } + S32 size_s, size_s_out, size_t; + size_s = getProfile().getTotal(); + size_s_out = getProfile().getTotalOut(); + size_t = getPath().mPath.size(); - // We've got a top cap - S32 offset = (size_t - 1)*size_s; - for (s = 0; s < size_s - 2; s++) - { - // Inverted ordering from bottom cap. - index[count++] = offset + size_s - 1; - index[count++] = offset + s; - index[count++] = offset + s + 1; - } - } + S32 count = 0; + if (profile_open) /* Flawfinder: ignore */ + { + if (hollow) + { + // Open hollow -- much like the closed solid, except we + // we need to stitch up the gap between s=0 and s=size_s-1 + count = (size_t - 1) * (((size_s -1) * 6) + 6); + } + else + { + count = (size_t - 1) * (((size_s -1) * 6) + 6); } } else if (hollow) { // Closed hollow // Outer face - - for (t = 0; t < size_t - 1; t++) + count = (size_t - 1) * (size_s_out - 1) * 6; + + // Inner face + count += (size_t - 1) * ((size_s - 1) - size_s_out) * 6; + } + else + { + // Closed solid. Easy case. 
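A quick sanity check on the per-ring index count that getNumTriangleIndices() predicts above for open profiles: each ring contributes (size_s - 1) grid cells plus one stitching quad, at 6 indices apiece.

    // Illustrative check only; mirrors the open-profile formula above.
    constexpr int sketch_open_ring_indices(int size_s, int size_t)
    {
        return (size_t - 1) * (((size_s - 1) * 6) + 6);
    }
    static_assert(sketch_open_ring_indices(5, 4) == 90, "3 rings x 5 quads x 6 = 90");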
+ count = (size_t - 1) * (size_s - 1) * 6; + } + + if (path_open) + { + S32 cap_triangle_count = size_s - 3; + if ( profile_open + || hollow ) { - for (s = 0; s < size_s_out - 1; s++) - { - i = s + t*size_s; + cap_triangle_count = size_s - 2; + } + if ( cap_triangle_count > 0 ) + { + // top and bottom caps + count += cap_triangle_count * 2 * 3; + } + } + return count; +} - index[count++] = i; // x,y - index[count++] = i + 1; // x+1,y - index[count++] = i + size_s; // x,y+1 - index[count++] = i + size_s; // x,y+1 - index[count++] = i + 1; // x+1,y - index[count++] = i + 1 + size_s; // x+1,y+1 - } - } +S32 LLVolume::getNumTriangles() const +{ + U32 triangle_count = 0; - // Inner face - // Invert facing from outer face - for (t = 0; t < size_t - 1; t++) - { - for (s = size_s_out; s < size_s - 1; s++) - { - i = s + t*size_s; + for (S32 i = 0; i < getNumVolumeFaces(); ++i) + { + triangle_count += getVolumeFace(i).mNumIndices/3; + } - index[count++] = i; // x,y - index[count++] = i + 1; // x+1,y - index[count++] = i + size_s; // x,y+1 + return triangle_count; +} - index[count++] = i + size_s; // x,y+1 - index[count++] = i + 1; // x+1,y - index[count++] = i + 1 + size_s; // x+1,y+1 - } - } - // Do the top and bottom caps, if necessary - if (path_open) - { - // Top cap - S32 pt1 = 0; - S32 pt2 = size_s-1; - S32 i = (size_t - 1)*size_s; +//----------------------------------------------------------------------------- +// generateSilhouetteVertices() +//----------------------------------------------------------------------------- +void LLVolume::generateSilhouetteVertices(std::vector &vertices, + std::vector &normals, + std::vector &segments, + const LLVector3& obj_cam_vec_in, + const LLMatrix4& mat_in, + const LLMatrix3& norm_mat_in, + S32 face_mask) +{ + LLMemType m1(LLMemType::MTYPE_VOLUME); - while (pt2 - pt1 > 1) - { - // Use the profile points instead of the mesh, since you want - // the un-transformed profile distances. 
- LLVector3 p1 = getProfile().mProfile[pt1]; - LLVector3 p2 = getProfile().mProfile[pt2]; - LLVector3 pa = getProfile().mProfile[pt1+1]; - LLVector3 pb = getProfile().mProfile[pt2-1]; + LLMatrix4a mat; + mat.loadu(mat_in); - p1.mV[VZ] = 0.f; - p2.mV[VZ] = 0.f; - pa.mV[VZ] = 0.f; - pb.mV[VZ] = 0.f; + LLMatrix4a norm_mat; + norm_mat.loadu(norm_mat_in); + + LLVector4a obj_cam_vec; + obj_cam_vec.load3(obj_cam_vec_in.mV); - // Use area of triangle to determine backfacing - F32 area_1a2, area_1ba, area_21b, area_2ab; - area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + - (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + - (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); + vertices.clear(); + normals.clear(); + segments.clear(); - area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + - (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); + if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH) + { + return; + } + + S32 cur_index = 0; + //for each face + for (face_list_t::iterator iter = mVolumeFaces.begin(); + iter != mVolumeFaces.end(); ++iter) + { + LLVolumeFace& face = *iter; + + if (!(face_mask & (0x1 << cur_index++)) || + face.mNumIndices == 0 || face.mEdge.empty()) + { + continue; + } - area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + - (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + if (face.mTypeMask & (LLVolumeFace::CAP_MASK)) { + + } + else { - area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + - (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + //============================================== + //DEBUG draw edge map instead of silhouette edge + //============================================== - BOOL use_tri1a2 = TRUE; - BOOL tri_1a2 = TRUE; - BOOL tri_21b = TRUE; +#if DEBUG_SILHOUETTE_EDGE_MAP - if (area_1a2 < 0) - { - tri_1a2 = FALSE; - } - if (area_2ab < 0) - { - // Can't use, because it contains point b - tri_1a2 = FALSE; - } - if (area_21b < 0) - { - tri_21b = FALSE; - } - if (area_1ba < 0) - { - // Can't use, because it contains point b - tri_21b = FALSE; - } + //for each triangle + U32 count = face.mNumIndices; + for (U32 j = 0; j < count/3; j++) { + //get vertices + S32 v1 = face.mIndices[j*3+0]; + S32 v2 = face.mIndices[j*3+1]; + S32 v3 = face.mIndices[j*3+2]; - if (!tri_1a2) - { - use_tri1a2 = FALSE; - } - else if (!tri_21b) - { - use_tri1a2 = TRUE; - } - else - { - LLVector3 d1 = p1 - pa; - LLVector3 d2 = p2 - pb; + //get current face center + LLVector3 cCenter = (face.mVertices[v1].getPosition() + + face.mVertices[v2].getPosition() + + face.mVertices[v3].getPosition()) / 3.0f; - if (d1.magVecSquared() < d2.magVecSquared()) - { - use_tri1a2 = TRUE; + //for each edge + for (S32 k = 0; k < 3; k++) { + S32 nIndex = face.mEdge[j*3+k]; + if (nIndex <= -1) { + continue; } - else - { - use_tri1a2 = FALSE; + + if (nIndex >= (S32) count/3) { + continue; } - } + //get neighbor vertices + v1 = face.mIndices[nIndex*3+0]; + v2 = face.mIndices[nIndex*3+1]; + v3 = face.mIndices[nIndex*3+2]; - if (use_tri1a2) - { - index[count++] = pt1 + i; - index[count++] = pt1 + 1 + i; - index[count++] = pt2 + i; - pt1++; - } - else - { - index[count++] = pt1 + i; - index[count++] = pt2 - 1 + i; - index[count++] = pt2 + i; - pt2--; + //get neighbor face center + LLVector3 nCenter = (face.mVertices[v1].getPosition() + + face.mVertices[v2].getPosition() + + face.mVertices[v3].getPosition()) / 3.0f; + + //draw line + vertices.push_back(cCenter); + vertices.push_back(nCenter); + 
normals.push_back(LLVector3(1,1,1)); + normals.push_back(LLVector3(1,1,1)); + segments.push_back(vertices.size()); } } + + continue; - // Bottom cap - pt1 = 0; - pt2 = size_s-1; - while (pt2 - pt1 > 1) - { - // Use the profile points instead of the mesh, since you want - // the un-transformed profile distances. - LLVector3 p1 = getProfile().mProfile[pt1]; - LLVector3 p2 = getProfile().mProfile[pt2]; - LLVector3 pa = getProfile().mProfile[pt1+1]; - LLVector3 pb = getProfile().mProfile[pt2-1]; - - p1.mV[VZ] = 0.f; - p2.mV[VZ] = 0.f; - pa.mV[VZ] = 0.f; - pb.mV[VZ] = 0.f; + //============================================== + //DEBUG + //============================================== - // Use area of triangle to determine backfacing - F32 area_1a2, area_1ba, area_21b, area_2ab; - area_1a2 = (p1.mV[0]*pa.mV[1] - pa.mV[0]*p1.mV[1]) + - (pa.mV[0]*p2.mV[1] - p2.mV[0]*pa.mV[1]) + - (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]); + //============================================== + //DEBUG draw normals instead of silhouette edge + //============================================== +#elif DEBUG_SILHOUETTE_NORMALS - area_1ba = (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*pa.mV[1] - pa.mV[0]*pb.mV[1]) + - (pa.mV[0]*p1.mV[1] - p1.mV[0]*pa.mV[1]); + //for each vertex + for (U32 j = 0; j < face.mNumVertices; j++) { + vertices.push_back(face.mVertices[j].getPosition()); + vertices.push_back(face.mVertices[j].getPosition() + face.mVertices[j].getNormal()*0.1f); + normals.push_back(LLVector3(0,0,1)); + normals.push_back(LLVector3(0,0,1)); + segments.push_back(vertices.size()); +#if DEBUG_SILHOUETTE_BINORMALS + vertices.push_back(face.mVertices[j].getPosition()); + vertices.push_back(face.mVertices[j].getPosition() + face.mVertices[j].mBinormal*0.1f); + normals.push_back(LLVector3(0,0,1)); + normals.push_back(LLVector3(0,0,1)); + segments.push_back(vertices.size()); +#endif + } + + continue; +#else + //============================================== + //DEBUG + //============================================== - area_21b = (p2.mV[0]*p1.mV[1] - p1.mV[0]*p2.mV[1]) + - (p1.mV[0]*pb.mV[1] - pb.mV[0]*p1.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + static const U8 AWAY = 0x01, + TOWARDS = 0x02; - area_2ab = (p2.mV[0]*pa.mV[1] - pa.mV[0]*p2.mV[1]) + - (pa.mV[0]*pb.mV[1] - pb.mV[0]*pa.mV[1]) + - (pb.mV[0]*p2.mV[1] - p2.mV[0]*pb.mV[1]); + //for each triangle + std::vector fFacing; + vector_append(fFacing, face.mNumIndices/3); - BOOL use_tri1a2 = TRUE; - BOOL tri_1a2 = TRUE; - BOOL tri_21b = TRUE; + LLVector4a* v = (LLVector4a*) face.mPositions; + LLVector4a* n = (LLVector4a*) face.mNormals; - if (area_1a2 < 0) - { - tri_1a2 = FALSE; - } - if (area_2ab < 0) - { - // Can't use, because it contains point b - tri_1a2 = FALSE; - } - if (area_21b < 0) - { - tri_21b = FALSE; - } - if (area_1ba < 0) - { - // Can't use, because it contains point b - tri_21b = FALSE; - } + for (U32 j = 0; j < face.mNumIndices/3; j++) + { + //approximate normal + S32 v1 = face.mIndices[j*3+0]; + S32 v2 = face.mIndices[j*3+1]; + S32 v3 = face.mIndices[j*3+2]; - if (!tri_1a2) - { - use_tri1a2 = FALSE; - } - else if (!tri_21b) + LLVector4a c1,c2; + c1.setSub(v[v1], v[v2]); + c2.setSub(v[v2], v[v3]); + + LLVector4a norm; + + norm.setCross3(c1, c2); + + if (norm.dot3(norm) < 0.00000001f) { - use_tri1a2 = TRUE; + fFacing[j] = AWAY | TOWARDS; } - else + else { - LLVector3 d1 = p1 - pa; - LLVector3 d2 = p2 - pb; - - if (d1.magVecSquared() < d2.magVecSquared()) + //get view vector + LLVector4a view; + view.setSub(obj_cam_vec, v[v1]); + bool away = 
view.dot3(norm) > 0.0f; + if (away) { - use_tri1a2 = TRUE; + fFacing[j] = AWAY; } - else + else { - use_tri1a2 = FALSE; + fFacing[j] = TOWARDS; } } - - if (use_tri1a2) - { - index[count++] = pt1; - index[count++] = pt2; - index[count++] = pt1 + 1; - pt1++; - } - else - { - index[count++] = pt1; - index[count++] = pt2; - index[count++] = pt2 - 1; - pt2--; + } + + //for each triangle + for (U32 j = 0; j < face.mNumIndices/3; j++) + { + if (fFacing[j] == (AWAY | TOWARDS)) + { //this is a degenerate triangle + //take neighbor facing (degenerate faces get facing of one of their neighbors) + // *FIX IF NEEDED: this does not deal with neighboring degenerate faces + for (S32 k = 0; k < 3; k++) + { + S32 index = face.mEdge[j*3+k]; + if (index != -1) + { + fFacing[j] = fFacing[index]; + break; + } + } + continue; //skip degenerate face } + + //for each edge + for (S32 k = 0; k < 3; k++) { + S32 index = face.mEdge[j*3+k]; + if (index != -1 && fFacing[index] == (AWAY | TOWARDS)) { + //our neighbor is degenerate, make him face our direction + fFacing[face.mEdge[j*3+k]] = fFacing[j]; + continue; + } + + if (index == -1 || //edge has no neighbor, MUST be a silhouette edge + (fFacing[index] & fFacing[j]) == 0) { //we found a silhouette edge + + S32 v1 = face.mIndices[j*3+k]; + S32 v2 = face.mIndices[j*3+((k+1)%3)]; + + LLVector4a t; + mat.affineTransform(v[v1], t); + vertices.push_back(LLVector3(t[0], t[1], t[2])); + + norm_mat.rotate(n[v1], t); + + t.normalize3fast(); + normals.push_back(LLVector3(t[0], t[1], t[2])); + + mat.affineTransform(v[v2], t); + vertices.push_back(LLVector3(t[0], t[1], t[2])); + + norm_mat.rotate(n[v2], t); + t.normalize3fast(); + normals.push_back(LLVector3(t[0], t[1], t[2])); + + segments.push_back(vertices.size()); + } + } } - } +#endif + } + } +} + +S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, + S32 face, + LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) +{ + LLVector4a starta, enda; + starta.load3(start.mV); + enda.load3(end.mV); + + return lineSegmentIntersect(starta, enda, face, intersection, tex_coord, normal, bi_normal); + +} + + +S32 LLVolume::lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, + S32 face, + LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) +{ + S32 hit_face = -1; + + S32 start_face; + S32 end_face; + + if (face == -1) // ALL_SIDES + { + start_face = 0; + end_face = getNumVolumeFaces() - 1; } else { - // Closed solid. Easy case. - for (t = 0; t < size_t - 1; t++) - { - for (s = 0; s < size_s - 1; s++) - { - // Should wrap properly, but for now... 
- i = s + t*size_s; + start_face = face; + end_face = face; + } - index[count++] = i; // x,y - index[count++] = i + 1; // x+1,y - index[count++] = i + size_s; // x,y+1 + LLVector4a dir; + dir.setSub(end, start); - index[count++] = i + size_s; // x,y+1 - index[count++] = i + 1; // x+1,y - index[count++] = i + size_s + 1; // x+1,y+1 - } - } + F32 closest_t = 2.f; // must be larger than 1 + + end_face = llmin(end_face, getNumVolumeFaces()-1); - // Do the top and bottom caps, if necessary - if (path_open) + for (S32 i = start_face; i <= end_face; i++) + { + LLVolumeFace &face = mVolumeFaces[i]; + + LLVector4a box_center; + box_center.setAdd(face.mExtents[0], face.mExtents[1]); + box_center.mul(0.5f); + + LLVector4a box_size; + box_size.setSub(face.mExtents[1], face.mExtents[0]); + + if (LLLineSegmentBoxIntersect(start, end, box_center, box_size)) { - // bottom cap - for (s = 1; s < size_s - 2; s++) + if (bi_normal != NULL) // if the caller wants binormals, we may need to generate them { - index[count++] = s+1; - index[count++] = s; - index[count++] = 0; + genBinormals(i); } - // top cap - S32 offset = (size_t - 1)*size_s; - for (s = 1; s < size_s - 2; s++) + if (!face.mOctree) { - // Inverted ordering from bottom cap. - index[count++] = offset; - index[count++] = offset + s; - index[count++] = offset + s + 1; + face.createOctree(); } - } - } + + //LLVector4a* p = (LLVector4a*) face.mPositions; -#ifdef LL_DEBUG - // assert that we computed the correct number of indices - if (count != expected_num_triangle_indices ) - { - llerrs << "bad index count prediciton:" - << " expected=" << expected_num_triangle_indices - << " actual=" << count << llendl; + LLOctreeTriangleRayIntersect intersect(start, dir, &face, &closest_t, intersection, tex_coord, normal, bi_normal); + intersect.traverse(face.mOctree); + if (intersect.mHitFace) + { + hit_face = i; + } + } } -#endif + + + return hit_face; +} -#if 0 - // verify that each index does not point beyond the size of the mesh - S32 num_vertices = mMesh.size(); - for (i = 0; i < count; i+=3) - { - llinfos << index[i] << ":" << index[i+1] << ":" << index[i+2] << llendl; - llassert(index[i] < num_vertices); - llassert(index[i+1] < num_vertices); - llassert(index[i+2] < num_vertices); - } -#endif +class LLVertexIndexPair +{ +public: + LLVertexIndexPair(const LLVector3 &vertex, const S32 index); - num_indices = count; - return index; + LLVector3 mVertex; + S32 mIndex; +}; + +LLVertexIndexPair::LLVertexIndexPair(const LLVector3 &vertex, const S32 index) +{ + mVertex = vertex; + mIndex = index; } -S32 LLVolume::getNumTriangleIndices() const +const F32 VERTEX_SLOP = 0.00001f; +const F32 VERTEX_SLOP_SQRD = VERTEX_SLOP * VERTEX_SLOP; + +struct lessVertex { - BOOL profile_open = getProfile().isOpen(); - BOOL hollow = (mParams.getProfileParams().getHollow() > 0); - BOOL path_open = getPath().isOpen(); + bool operator()(const LLVertexIndexPair *a, const LLVertexIndexPair *b) + { + const F32 slop = VERTEX_SLOP; - S32 size_s, size_s_out, size_t; - size_s = getProfile().getTotal(); - size_s_out = getProfile().getTotalOut(); - size_t = getPath().mPath.size(); + if (a->mVertex.mV[0] + slop < b->mVertex.mV[0]) + { + return TRUE; + } + else if (a->mVertex.mV[0] - slop > b->mVertex.mV[0]) + { + return FALSE; + } + + if (a->mVertex.mV[1] + slop < b->mVertex.mV[1]) + { + return TRUE; + } + else if (a->mVertex.mV[1] - slop > b->mVertex.mV[1]) + { + return FALSE; + } + + if (a->mVertex.mV[2] + slop < b->mVertex.mV[2]) + { + return TRUE; + } + else if (a->mVertex.mV[2] - slop > 
b->mVertex.mV[2]) + { + return FALSE; + } + + return FALSE; + } +}; - S32 count = 0; - if (profile_open) /* Flawfinder: ignore */ +struct lessTriangle +{ + bool operator()(const S32 *a, const S32 *b) { - if (hollow) + if (*a < *b) + { + return TRUE; + } + else if (*a > *b) + { + return FALSE; + } + + if (*(a+1) < *(b+1)) { - // Open hollow -- much like the closed solid, except we - // we need to stitch up the gap between s=0 and s=size_s-1 - count = (size_t - 1) * (((size_s -1) * 6) + 6); + return TRUE; } - else + else if (*(a+1) > *(b+1)) { - count = (size_t - 1) * (((size_s -1) * 6) + 6); + return FALSE; } - } - else if (hollow) - { - // Closed hollow - // Outer face - count = (size_t - 1) * (size_s_out - 1) * 6; - - // Inner face - count += (size_t - 1) * ((size_s - 1) - size_s_out) * 6; - } - else - { - // Closed solid. Easy case. - count = (size_t - 1) * (size_s - 1) * 6; - } - if (path_open) - { - S32 cap_triangle_count = size_s - 3; - if ( profile_open - || hollow ) + if (*(a+2) < *(b+2)) { - cap_triangle_count = size_s - 2; + return TRUE; } - if ( cap_triangle_count > 0 ) + else if (*(a+2) > *(b+2)) { - // top and bottom caps - count += cap_triangle_count * 2 * 3; + return FALSE; } + + return FALSE; } - return count; +}; + +BOOL equalTriangle(const S32 *a, const S32 *b) +{ + if ((*a == *b) && (*(a+1) == *(b+1)) && (*(a+2) == *(b+2))) + { + return TRUE; + } + return FALSE; } -//----------------------------------------------------------------------------- -// generateSilhouetteVertices() -//----------------------------------------------------------------------------- -void LLVolume::generateSilhouetteVertices(std::vector &vertices, - std::vector &normals, - std::vector &segments, - const LLVector3& obj_cam_vec, - const LLMatrix4& mat, - const LLMatrix3& norm_mat, - S32 face_mask) +BOOL LLVolume::cleanupTriangleData( const S32 num_input_vertices, + const std::vector& input_vertices, + const S32 num_input_triangles, + S32 *input_triangles, + S32 &num_output_vertices, + LLVector3 **output_vertices, + S32 &num_output_triangles, + S32 **output_triangles) { LLMemType m1(LLMemType::MTYPE_VOLUME); - vertices.clear(); - normals.clear(); - segments.clear(); - - S32 cur_index = 0; - //for each face - for (face_list_t::iterator iter = mVolumeFaces.begin(); - iter != mVolumeFaces.end(); ++iter) + /* Testing: avoid any cleanup + static BOOL skip_cleanup = TRUE; + if ( skip_cleanup ) { - const LLVolumeFace& face = *iter; - - if (!(face_mask & (0x1 << cur_index++))) + num_output_vertices = num_input_vertices; + num_output_triangles = num_input_triangles; + + *output_vertices = new LLVector3[num_input_vertices]; + for (S32 index = 0; index < num_input_vertices; index++) { - continue; - } - if (face.mTypeMask & (LLVolumeFace::CAP_MASK)) { - + (*output_vertices)[index] = input_vertices[index].mPos; } - else { - - //============================================== - //DEBUG draw edge map instead of silhouette edge - //============================================== - -#if DEBUG_SILHOUETTE_EDGE_MAP - - //for each triangle - U32 count = face.mIndices.size(); - for (U32 j = 0; j < count/3; j++) { - //get vertices - S32 v1 = face.mIndices[j*3+0]; - S32 v2 = face.mIndices[j*3+1]; - S32 v3 = face.mIndices[j*3+2]; - - //get current face center - LLVector3 cCenter = (face.mVertices[v1].mPosition + - face.mVertices[v2].mPosition + - face.mVertices[v3].mPosition) / 3.0f; - - //for each edge - for (S32 k = 0; k < 3; k++) { - S32 nIndex = face.mEdge[j*3+k]; - if (nIndex <= -1) { - continue; - } - - if (nIndex >= (S32) 
count/3) { - continue; - } - //get neighbor vertices - v1 = face.mIndices[nIndex*3+0]; - v2 = face.mIndices[nIndex*3+1]; - v3 = face.mIndices[nIndex*3+2]; - - //get neighbor face center - LLVector3 nCenter = (face.mVertices[v1].mPosition + - face.mVertices[v2].mPosition + - face.mVertices[v3].mPosition) / 3.0f; - //draw line - vertices.push_back(cCenter); - vertices.push_back(nCenter); - normals.push_back(LLVector3(1,1,1)); - normals.push_back(LLVector3(1,1,1)); - segments.push_back(vertices.size()); - } - } - - continue; + *output_triangles = new S32[num_input_triangles*3]; + memcpy(*output_triangles, input_triangles, 3*num_input_triangles*sizeof(S32)); // Flawfinder: ignore + return TRUE; + } + */ - //============================================== - //DEBUG - //============================================== + // Here's how we do this: + // Create a structure which contains the original vertex index and the + // LLVector3 data. + // "Sort" the data by the vectors + // Create an array the size of the old vertex list, with a mapping of + // old indices to new indices. + // Go through triangles, shift so the lowest index is first + // Sort triangles by first index + // Remove duplicate triangles + // Allocate and pack new triangle data. - //============================================== - //DEBUG draw normals instead of silhouette edge - //============================================== -#elif DEBUG_SILHOUETTE_NORMALS + //LLTimer cleanupTimer; + //llinfos << "In vertices: " << num_input_vertices << llendl; + //llinfos << "In triangles: " << num_input_triangles << llendl; - //for each vertex - for (U32 j = 0; j < face.mVertices.size(); j++) { - vertices.push_back(face.mVertices[j].mPosition); - vertices.push_back(face.mVertices[j].mPosition + face.mVertices[j].mNormal*0.1f); - normals.push_back(LLVector3(0,0,1)); - normals.push_back(LLVector3(0,0,1)); - segments.push_back(vertices.size()); -#if DEBUG_SILHOUETTE_BINORMALS - vertices.push_back(face.mVertices[j].mPosition); - vertices.push_back(face.mVertices[j].mPosition + face.mVertices[j].mBinormal*0.1f); - normals.push_back(LLVector3(0,0,1)); - normals.push_back(LLVector3(0,0,1)); - segments.push_back(vertices.size()); -#endif - } - - continue; -#else - //============================================== - //DEBUG - //============================================== + S32 i; + typedef std::multiset vertex_set_t; + vertex_set_t vertex_list; - static const U8 AWAY = 0x01, - TOWARDS = 0x02; + LLVertexIndexPair *pairp = NULL; + for (i = 0; i < num_input_vertices; i++) + { + LLVertexIndexPair *new_pairp = new LLVertexIndexPair(input_vertices[i].mPos, i); + vertex_list.insert(new_pairp); + } - //for each triangle - std::vector fFacing; - vector_append(fFacing, face.mIndices.size()/3); - for (U32 j = 0; j < face.mIndices.size()/3; j++) - { - //approximate normal - S32 v1 = face.mIndices[j*3+0]; - S32 v2 = face.mIndices[j*3+1]; - S32 v3 = face.mIndices[j*3+2]; + // Generate the vertex mapping and the list of vertices without + // duplicates. This will crash if there are no vertices. + llassert(num_input_vertices > 0); // check for no vertices! 
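cleanupTriangleData() collapses near-duplicate vertices within VERTEX_SLOP of each other and remaps the triangle list onto the surviving vertices. A simplified standalone sketch of that weld (a plain sort stands in for the multiset, and the coordinate ordering is exact rather than slop-tolerant):

    #include <algorithm>
    #include <vector>

    struct SketchVert { float p[3]; int old_index; };

    // Returns old-index -> welded-index mapping; "out" receives the surviving vertices.
    static std::vector<int> sketch_weld(std::vector<SketchVert> verts,
                                        std::vector<SketchVert>& out, float slop)
    {
        std::sort(verts.begin(), verts.end(), [](const SketchVert& a, const SketchVert& b) {
            if (a.p[0] != b.p[0]) return a.p[0] < b.p[0];
            if (a.p[1] != b.p[1]) return a.p[1] < b.p[1];
            return a.p[2] < b.p[2];
        });

        std::vector<int> mapping(verts.size());
        out.clear();
        for (const SketchVert& v : verts)
        {
            bool merge = false;
            if (!out.empty())
            {
                float dx = v.p[0] - out.back().p[0];
                float dy = v.p[1] - out.back().p[1];
                float dz = v.p[2] - out.back().p[2];
                merge = (dx * dx + dy * dy + dz * dz) < slop * slop;  // within weld distance
            }
            if (!merge)
            {
                out.push_back(v);
            }
            mapping[v.old_index] = (int)out.size() - 1;  // old index -> welded index
        }
        return mapping;
    }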
+ S32 *vertex_mapping = new S32[num_input_vertices]; + LLVector3 *new_vertices = new LLVector3[num_input_vertices]; + LLVertexIndexPair *prev_pairp = NULL; - LLVector3 norm = (face.mVertices[v1].mPosition - face.mVertices[v2].mPosition) % - (face.mVertices[v2].mPosition - face.mVertices[v3].mPosition); - - if (norm.magVecSquared() < 0.00000001f) - { - fFacing[j] = AWAY | TOWARDS; - } - else - { - //get view vector - LLVector3 view = (obj_cam_vec-face.mVertices[v1].mPosition); - bool away = view * norm > 0.0f; - if (away) - { - fFacing[j] = AWAY; - } - else - { - fFacing[j] = TOWARDS; - } - } - } - - //for each triangle - for (U32 j = 0; j < face.mIndices.size()/3; j++) - { - if (fFacing[j] == (AWAY | TOWARDS)) - { //this is a degenerate triangle - //take neighbor facing (degenerate faces get facing of one of their neighbors) - // *FIX IF NEEDED: this does not deal with neighboring degenerate faces - for (S32 k = 0; k < 3; k++) - { - S32 index = face.mEdge[j*3+k]; - if (index != -1) - { - fFacing[j] = fFacing[index]; - break; - } - } - continue; //skip degenerate face - } + S32 new_num_vertices; - //for each edge - for (S32 k = 0; k < 3; k++) { - S32 index = face.mEdge[j*3+k]; - if (index != -1 && fFacing[index] == (AWAY | TOWARDS)) { - //our neighbor is degenerate, make him face our direction - fFacing[face.mEdge[j*3+k]] = fFacing[j]; - continue; - } + new_num_vertices = 0; + for (vertex_set_t::iterator iter = vertex_list.begin(), + end = vertex_list.end(); + iter != end; iter++) + { + pairp = *iter; + if (!prev_pairp || ((pairp->mVertex - prev_pairp->mVertex).magVecSquared() >= VERTEX_SLOP_SQRD)) + { + new_vertices[new_num_vertices] = pairp->mVertex; + //llinfos << "Added vertex " << new_num_vertices << " : " << pairp->mVertex << llendl; + new_num_vertices++; + // Update the previous + prev_pairp = pairp; + } + else + { + //llinfos << "Removed duplicate vertex " << pairp->mVertex << ", distance magVecSquared() is " << (pairp->mVertex - prev_pairp->mVertex).magVecSquared() << llendl; + } + vertex_mapping[pairp->mIndex] = new_num_vertices - 1; + } - if (index == -1 || //edge has no neighbor, MUST be a silhouette edge - (fFacing[index] & fFacing[j]) == 0) { //we found a silhouette edge + // Iterate through triangles and remove degenerates, re-ordering vertices + // along the way. 
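The welding pass above sorts (vertex, original-index) pairs by position and collapses any vertex that lands within VERTEX_SLOP of its predecessor, recording an old-to-new index map as it goes. The same idea in a minimal standalone form, keyed on a quantized position grid rather than a sorted multiset; all names are hypothetical and this is a sketch, not the viewer's implementation.

#include <cmath>
#include <map>
#include <tuple>
#include <vector>

struct Vec3 { float x, y, z; };

// Quantize a position to the weld tolerance so nearby vertices share a key.
static std::tuple<long, long, long> weld_key(const Vec3& v, float slop)
{
    return std::make_tuple(std::lround(v.x / slop),
                           std::lround(v.y / slop),
                           std::lround(v.z / slop));
}

// Returns the welded vertex list and fills mapping[old_index] = new_index,
// analogous to vertex_mapping above.
static std::vector<Vec3> weld_vertices(const std::vector<Vec3>& in,
                                       std::vector<int>& mapping,
                                       float slop = 0.00001f)
{
    std::map<std::tuple<long, long, long>, int> seen;
    std::vector<Vec3> out;
    mapping.assign(in.size(), -1);
    for (size_t i = 0; i < in.size(); ++i)
    {
        auto key = weld_key(in[i], slop);
        auto it = seen.find(key);
        if (it == seen.end())
        {
            it = seen.insert(std::make_pair(key, (int)out.size())).first;
            out.push_back(in[i]);
        }
        mapping[i] = it->second;
    }
    return out;
}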
+ S32 *new_triangles = new S32[num_input_triangles * 3]; + S32 new_num_triangles = 0; - S32 v1 = face.mIndices[j*3+k]; - S32 v2 = face.mIndices[j*3+((k+1)%3)]; - - vertices.push_back(face.mVertices[v1].mPosition*mat); - LLVector3 norm1 = face.mVertices[v1].mNormal * norm_mat; - norm1.normVec(); - normals.push_back(norm1); + for (i = 0; i < num_input_triangles; i++) + { + S32 v1 = i*3; + S32 v2 = v1 + 1; + S32 v3 = v1 + 2; - vertices.push_back(face.mVertices[v2].mPosition*mat); - LLVector3 norm2 = face.mVertices[v2].mNormal * norm_mat; - norm2.normVec(); - normals.push_back(norm2); + //llinfos << "Checking triangle " << input_triangles[v1] << ":" << input_triangles[v2] << ":" << input_triangles[v3] << llendl; + input_triangles[v1] = vertex_mapping[input_triangles[v1]]; + input_triangles[v2] = vertex_mapping[input_triangles[v2]]; + input_triangles[v3] = vertex_mapping[input_triangles[v3]]; - segments.push_back(vertices.size()); - } - } + if ((input_triangles[v1] == input_triangles[v2]) + || (input_triangles[v1] == input_triangles[v3]) + || (input_triangles[v2] == input_triangles[v3])) + { + //llinfos << "Removing degenerate triangle " << input_triangles[v1] << ":" << input_triangles[v2] << ":" << input_triangles[v3] << llendl; + // Degenerate triangle, skip + continue; + } + + if (input_triangles[v1] < input_triangles[v2]) + { + if (input_triangles[v1] < input_triangles[v3]) + { + // (0 < 1) && (0 < 2) + new_triangles[new_num_triangles*3] = input_triangles[v1]; + new_triangles[new_num_triangles*3+1] = input_triangles[v2]; + new_triangles[new_num_triangles*3+2] = input_triangles[v3]; + } + else + { + // (0 < 1) && (2 < 0) + new_triangles[new_num_triangles*3] = input_triangles[v3]; + new_triangles[new_num_triangles*3+1] = input_triangles[v1]; + new_triangles[new_num_triangles*3+2] = input_triangles[v2]; } -#endif } + else if (input_triangles[v2] < input_triangles[v3]) + { + // (1 < 0) && (1 < 2) + new_triangles[new_num_triangles*3] = input_triangles[v2]; + new_triangles[new_num_triangles*3+1] = input_triangles[v3]; + new_triangles[new_num_triangles*3+2] = input_triangles[v1]; + } + else + { + // (1 < 0) && (2 < 1) + new_triangles[new_num_triangles*3] = input_triangles[v3]; + new_triangles[new_num_triangles*3+1] = input_triangles[v1]; + new_triangles[new_num_triangles*3+2] = input_triangles[v2]; + } + new_num_triangles++; } -} -S32 LLVolume::lineSegmentIntersect(const LLVector3& start, const LLVector3& end, - S32 face, - LLVector3* intersection,LLVector2* tex_coord, LLVector3* normal, LLVector3* bi_normal) -{ - S32 hit_face = -1; - - S32 start_face; - S32 end_face; - - if (face == -1) // ALL_SIDES - { - start_face = 0; - end_face = getNumVolumeFaces() - 1; - } - else + if (new_num_triangles == 0) { - start_face = face; - end_face = face; + llwarns << "Created volume object with 0 faces." 
<< llendl; + delete[] new_triangles; + delete[] vertex_mapping; + delete[] new_vertices; + return FALSE; } - LLVector3 dir = end - start; + typedef std::set triangle_set_t; + triangle_set_t triangle_list; - F32 closest_t = 2.f; // must be larger than 1 - - for (S32 i = start_face; i <= end_face; i++) + for (i = 0; i < new_num_triangles; i++) { - const LLVolumeFace &face = getVolumeFace((U32)i); + triangle_list.insert(&new_triangles[i*3]); + } - LLVector3 box_center = (face.mExtents[0] + face.mExtents[1]) / 2.f; - LLVector3 box_size = face.mExtents[1] - face.mExtents[0]; + // Sort through the triangle list, and delete duplicates - if (LLLineSegmentBoxIntersect(start, end, box_center, box_size)) - { - if (bi_normal != NULL) // if the caller wants binormals, we may need to generate them - { - genBinormals(i); - } - - for (U32 tri = 0; tri < face.mIndices.size()/3; tri++) - { - S32 index1 = face.mIndices[tri*3+0]; - S32 index2 = face.mIndices[tri*3+1]; - S32 index3 = face.mIndices[tri*3+2]; + S32 *prevp = NULL; + S32 *curp = NULL; - F32 a, b, t; - - if (LLTriangleRayIntersect(face.mVertices[index1].mPosition, - face.mVertices[index2].mPosition, - face.mVertices[index3].mPosition, - start, dir, &a, &b, &t, FALSE)) - { - if ((t >= 0.f) && // if hit is after start - (t <= 1.f) && // and before end - (t < closest_t)) // and this hit is closer + S32 *sorted_tris = new S32[new_num_triangles*3]; + S32 cur_tri = 0; + for (triangle_set_t::iterator iter = triangle_list.begin(), + end = triangle_list.end(); + iter != end; iter++) + { + curp = *iter; + if (!prevp || !equalTriangle(prevp, curp)) { - closest_t = t; - hit_face = i; - - if (intersection != NULL) - { - *intersection = start + dir * closest_t; - } - - if (tex_coord != NULL) - { - *tex_coord = ((1.f - a - b) * face.mVertices[index1].mTexCoord + - a * face.mVertices[index2].mTexCoord + - b * face.mVertices[index3].mTexCoord); - - } + //llinfos << "Added triangle " << *curp << ":" << *(curp+1) << ":" << *(curp+2) << llendl; + sorted_tris[cur_tri*3] = *curp; + sorted_tris[cur_tri*3+1] = *(curp+1); + sorted_tris[cur_tri*3+2] = *(curp+2); + cur_tri++; + prevp = curp; + } + else + { + //llinfos << "Skipped triangle " << *curp << ":" << *(curp+1) << ":" << *(curp+2) << llendl; + } + } - if (normal != NULL) - { - *normal = ((1.f - a - b) * face.mVertices[index1].mNormal + - a * face.mVertices[index2].mNormal + - b * face.mVertices[index3].mNormal); - } + *output_vertices = new LLVector3[new_num_vertices]; + num_output_vertices = new_num_vertices; + for (i = 0; i < new_num_vertices; i++) + { + (*output_vertices)[i] = new_vertices[i]; + } - if (bi_normal != NULL) - { - *bi_normal = ((1.f - a - b) * face.mVertices[index1].mBinormal + - a * face.mVertices[index2].mBinormal + - b * face.mVertices[index3].mBinormal); - } + *output_triangles = new S32[cur_tri*3]; + num_output_triangles = cur_tri; + memcpy(*output_triangles, sorted_tris, 3*cur_tri*sizeof(S32)); /* Flawfinder: ignore */ - } - } - } - } + /* + llinfos << "Out vertices: " << num_output_vertices << llendl; + llinfos << "Out triangles: " << num_output_triangles << llendl; + for (i = 0; i < num_output_vertices; i++) + { + llinfos << i << ":" << (*output_vertices)[i] << llendl; } + for (i = 0; i < num_output_triangles; i++) + { + llinfos << i << ":" << (*output_triangles)[i*3] << ":" << (*output_triangles)[i*3+1] << ":" << (*output_triangles)[i*3+2] << llendl; + } + */ + + //llinfos << "Out vertices: " << num_output_vertices << llendl; + //llinfos << "Out triangles: " << num_output_triangles << 
llendl; + delete[] vertex_mapping; + vertex_mapping = NULL; + delete[] new_vertices; + new_vertices = NULL; + delete[] new_triangles; + new_triangles = NULL; + delete[] sorted_tris; + sorted_tris = NULL; + triangle_list.clear(); + std::for_each(vertex_list.begin(), vertex_list.end(), DeletePointer()); + vertex_list.clear(); - - return hit_face; + return TRUE; } -class LLVertexIndexPair -{ -public: - LLVertexIndexPair(const LLVector3 &vertex, const S32 index); - - LLVector3 mVertex; - S32 mIndex; -}; -LLVertexIndexPair::LLVertexIndexPair(const LLVector3 &vertex, const S32 index) +BOOL LLVolumeParams::importFile(LLFILE *fp) { - mVertex = vertex; - mIndex = index; -} - -const F32 VERTEX_SLOP = 0.00001f; -const F32 VERTEX_SLOP_SQRD = VERTEX_SLOP * VERTEX_SLOP; + LLMemType m1(LLMemType::MTYPE_VOLUME); + + //llinfos << "importing volume" << llendl; + const S32 BUFSIZE = 16384; + char buffer[BUFSIZE]; /* Flawfinder: ignore */ + // *NOTE: changing the size or type of this buffer will require + // changing the sscanf below. + char keyword[256]; /* Flawfinder: ignore */ + keyword[0] = 0; -struct lessVertex -{ - bool operator()(const LLVertexIndexPair *a, const LLVertexIndexPair *b) + while (!feof(fp)) { - const F32 slop = VERTEX_SLOP; - - if (a->mVertex.mV[0] + slop < b->mVertex.mV[0]) + if (fgets(buffer, BUFSIZE, fp) == NULL) { - return TRUE; + buffer[0] = '\0'; } - else if (a->mVertex.mV[0] - slop > b->mVertex.mV[0]) + + sscanf(buffer, " %255s", keyword); /* Flawfinder: ignore */ + if (!strcmp("{", keyword)) { - return FALSE; + continue; } - - if (a->mVertex.mV[1] + slop < b->mVertex.mV[1]) + if (!strcmp("}",keyword)) { - return TRUE; + break; } - else if (a->mVertex.mV[1] - slop > b->mVertex.mV[1]) + else if (!strcmp("profile", keyword)) { - return FALSE; + mProfileParams.importFile(fp); } - - if (a->mVertex.mV[2] + slop < b->mVertex.mV[2]) + else if (!strcmp("path",keyword)) { - return TRUE; + mPathParams.importFile(fp); } - else if (a->mVertex.mV[2] - slop > b->mVertex.mV[2]) + else { - return FALSE; + llwarns << "unknown keyword " << keyword << " in volume import" << llendl; } - - return FALSE; } -}; -struct lessTriangle + return TRUE; +} + +BOOL LLVolumeParams::exportFile(LLFILE *fp) const +{ + fprintf(fp,"\tshape 0\n"); + fprintf(fp,"\t{\n"); + mPathParams.exportFile(fp); + mProfileParams.exportFile(fp); + fprintf(fp, "\t}\n"); + return TRUE; +} + + +BOOL LLVolumeParams::importLegacyStream(std::istream& input_stream) { - bool operator()(const S32 *a, const S32 *b) + LLMemType m1(LLMemType::MTYPE_VOLUME); + + //llinfos << "importing volume" << llendl; + const S32 BUFSIZE = 16384; + // *NOTE: changing the size or type of this buffer will require + // changing the sscanf below. 
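importFile() above, and the importLegacyStream() variant that continues in the next hunk, both use the same pattern: read a line, pull the first whitespace-delimited token with sscanf, and dispatch on "{", "}", "profile" and "path". A standalone sketch of that scan; it only prints the keywords and skips the nested blocks, so it illustrates the pattern rather than reproducing viewer code.

#include <cstdio>
#include <cstring>
#include <istream>

static void scan_shape_block(std::istream& in)
{
    char buffer[16384];
    char keyword[256];
    keyword[0] = '\0';

    while (in.good())
    {
        in.getline(buffer, sizeof(buffer));
        if (sscanf(buffer, " %255s", keyword) != 1) continue;  // blank line
        if (!strcmp(keyword, "{")) continue;                    // block open
        if (!strcmp(keyword, "}")) break;                       // block close
        printf("keyword: %s\n", keyword);                       // e.g. "path", "profile"
    }
}

For reference, the exportFile() counterpart above writes the outer wrapper as "\tshape 0", "\t{", the path and profile blocks, then "\t}".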
+ char buffer[BUFSIZE]; /* Flawfinder: ignore */ + char keyword[256]; /* Flawfinder: ignore */ + keyword[0] = 0; + + while (input_stream.good()) { - if (*a < *b) - { - return TRUE; - } - else if (*a > *b) + input_stream.getline(buffer, BUFSIZE); + sscanf(buffer, " %255s", keyword); + if (!strcmp("{", keyword)) { - return FALSE; + continue; } - - if (*(a+1) < *(b+1)) + if (!strcmp("}",keyword)) { - return TRUE; + break; } - else if (*(a+1) > *(b+1)) + else if (!strcmp("profile", keyword)) { - return FALSE; + mProfileParams.importLegacyStream(input_stream); } - - if (*(a+2) < *(b+2)) + else if (!strcmp("path",keyword)) { - return TRUE; + mPathParams.importLegacyStream(input_stream); } - else if (*(a+2) > *(b+2)) + else { - return FALSE; + llwarns << "unknown keyword " << keyword << " in volume import" << llendl; } - - return FALSE; } -}; -BOOL equalTriangle(const S32 *a, const S32 *b) -{ - if ((*a == *b) && (*(a+1) == *(b+1)) && (*(a+2) == *(b+2))) - { - return TRUE; - } - return FALSE; + return TRUE; } -BOOL LLVolume::cleanupTriangleData( const S32 num_input_vertices, - const std::vector& input_vertices, - const S32 num_input_triangles, - S32 *input_triangles, - S32 &num_output_vertices, - LLVector3 **output_vertices, - S32 &num_output_triangles, - S32 **output_triangles) +BOOL LLVolumeParams::exportLegacyStream(std::ostream& output_stream) const { LLMemType m1(LLMemType::MTYPE_VOLUME); - /* Testing: avoid any cleanup - static BOOL skip_cleanup = TRUE; - if ( skip_cleanup ) - { - num_output_vertices = num_input_vertices; - num_output_triangles = num_input_triangles; + output_stream <<"\tshape 0\n"; + output_stream <<"\t{\n"; + mPathParams.exportLegacyStream(output_stream); + mProfileParams.exportLegacyStream(output_stream); + output_stream << "\t}\n"; + return TRUE; +} - *output_vertices = new LLVector3[num_input_vertices]; - for (S32 index = 0; index < num_input_vertices; index++) - { - (*output_vertices)[index] = input_vertices[index].mPos; - } +LLSD LLVolumeParams::sculptAsLLSD() const +{ + LLSD sd = LLSD(); + sd["id"] = getSculptID(); + sd["type"] = getSculptType(); - *output_triangles = new S32[num_input_triangles*3]; - memcpy(*output_triangles, input_triangles, 3*num_input_triangles*sizeof(S32)); // Flawfinder: ignore - return TRUE; - } - */ + return sd; +} - // Here's how we do this: - // Create a structure which contains the original vertex index and the - // LLVector3 data. - // "Sort" the data by the vectors - // Create an array the size of the old vertex list, with a mapping of - // old indices to new indices. - // Go through triangles, shift so the lowest index is first - // Sort triangles by first index - // Remove duplicate triangles - // Allocate and pack new triangle data. 
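The comment block above (from the code being replaced) spells out the old cleanup strategy: weld vertices, rewrite triangle indices, rotate each triangle so its lowest index comes first, then sort and drop duplicates. The canonical rotation and duplicate test can be sketched on their own as follows; the helper names are hypothetical and this is not the patch's code.

#include <array>
#include <set>
#include <vector>

// Rotate a triangle's indices, preserving winding, so the smallest index comes first.
static std::array<int, 3> canonical(int a, int b, int c)
{
    if (a <= b && a <= c) return {a, b, c};
    if (b <= a && b <= c) return {b, c, a};
    return {c, a, b};
}

// Drop degenerate triangles and exact duplicates of the canonical form.
static std::vector<std::array<int, 3>> cleanup_triangles(const std::vector<std::array<int, 3>>& tris)
{
    std::set<std::array<int, 3>> seen;
    std::vector<std::array<int, 3>> out;
    for (const auto& t : tris)
    {
        if (t[0] == t[1] || t[1] == t[2] || t[0] == t[2]) continue;  // degenerate
        auto key = canonical(t[0], t[1], t[2]);
        if (seen.insert(key).second) out.push_back(key);              // first occurrence only
    }
    return out;
}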
+bool LLVolumeParams::sculptFromLLSD(LLSD& sd) +{ + setSculptID(sd["id"].asUUID(), (U8)sd["type"].asInteger()); + return true; +} - //LLTimer cleanupTimer; - //llinfos << "In vertices: " << num_input_vertices << llendl; - //llinfos << "In triangles: " << num_input_triangles << llendl; +LLSD LLVolumeParams::asLLSD() const +{ + LLSD sd = LLSD(); + sd["path"] = mPathParams; + sd["profile"] = mProfileParams; + sd["sculpt"] = sculptAsLLSD(); + + return sd; +} - S32 i; - typedef std::multiset vertex_set_t; - vertex_set_t vertex_list; +bool LLVolumeParams::fromLLSD(LLSD& sd) +{ + mPathParams.fromLLSD(sd["path"]); + mProfileParams.fromLLSD(sd["profile"]); + sculptFromLLSD(sd["sculpt"]); + + return true; +} - LLVertexIndexPair *pairp = NULL; - for (i = 0; i < num_input_vertices; i++) +void LLVolumeParams::reduceS(F32 begin, F32 end) +{ + begin = llclampf(begin); + end = llclampf(end); + if (begin > end) { - LLVertexIndexPair *new_pairp = new LLVertexIndexPair(input_vertices[i].mPos, i); - vertex_list.insert(new_pairp); + F32 temp = begin; + begin = end; + end = temp; } + F32 a = mProfileParams.getBegin(); + F32 b = mProfileParams.getEnd(); + mProfileParams.setBegin(a + begin * (b - a)); + mProfileParams.setEnd(a + end * (b - a)); +} - // Generate the vertex mapping and the list of vertices without - // duplicates. This will crash if there are no vertices. - llassert(num_input_vertices > 0); // check for no vertices! - S32 *vertex_mapping = new S32[num_input_vertices]; - LLVector3 *new_vertices = new LLVector3[num_input_vertices]; - LLVertexIndexPair *prev_pairp = NULL; - - S32 new_num_vertices; - - new_num_vertices = 0; - for (vertex_set_t::iterator iter = vertex_list.begin(), - end = vertex_list.end(); - iter != end; iter++) +void LLVolumeParams::reduceT(F32 begin, F32 end) +{ + begin = llclampf(begin); + end = llclampf(end); + if (begin > end) { - pairp = *iter; - if (!prev_pairp || ((pairp->mVertex - prev_pairp->mVertex).magVecSquared() >= VERTEX_SLOP_SQRD)) - { - new_vertices[new_num_vertices] = pairp->mVertex; - //llinfos << "Added vertex " << new_num_vertices << " : " << pairp->mVertex << llendl; - new_num_vertices++; - // Update the previous - prev_pairp = pairp; - } - else - { - //llinfos << "Removed duplicate vertex " << pairp->mVertex << ", distance magVecSquared() is " << (pairp->mVertex - prev_pairp->mVertex).magVecSquared() << llendl; - } - vertex_mapping[pairp->mIndex] = new_num_vertices - 1; + F32 temp = begin; + begin = end; + end = temp; } + F32 a = mPathParams.getBegin(); + F32 b = mPathParams.getEnd(); + mPathParams.setBegin(a + begin * (b - a)); + mPathParams.setEnd(a + end * (b - a)); +} - // Iterate through triangles and remove degenerates, re-ordering vertices - // along the way. 
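reduceS() and reduceT() above take a normalized [begin, end] sub-range and map it into the parameter range the profile or path already covers, i.e. new = a + t * (b - a). A small self-contained worked example of that remap, with arbitrarily chosen values:

#include <cstdio>

static void remap_example()
{
    float a = 0.2f, b = 0.8f;               // current profile begin/end
    float begin = 0.25f, end = 0.75f;       // requested normalized sub-range
    float new_begin = a + begin * (b - a);  // 0.2 + 0.25 * 0.6 = 0.35
    float new_end   = a + end   * (b - a);  // 0.2 + 0.75 * 0.6 = 0.65
    printf("%.2f .. %.2f\n", new_begin, new_end);
}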
- S32 *new_triangles = new S32[num_input_triangles * 3]; - S32 new_num_triangles = 0; +const F32 MIN_CONCAVE_PROFILE_WEDGE = 0.125f; // 1/8 unity +const F32 MIN_CONCAVE_PATH_WEDGE = 0.111111f; // 1/9 unity - for (i = 0; i < num_input_triangles; i++) +// returns TRUE if the shape can be approximated with a convex shape +// for collison purposes +BOOL LLVolumeParams::isConvex() const +{ + if (!getSculptID().isNull()) { - S32 v1 = i*3; - S32 v2 = v1 + 1; - S32 v3 = v1 + 2; - - //llinfos << "Checking triangle " << input_triangles[v1] << ":" << input_triangles[v2] << ":" << input_triangles[v3] << llendl; - input_triangles[v1] = vertex_mapping[input_triangles[v1]]; - input_triangles[v2] = vertex_mapping[input_triangles[v2]]; - input_triangles[v3] = vertex_mapping[input_triangles[v3]]; - - if ((input_triangles[v1] == input_triangles[v2]) - || (input_triangles[v1] == input_triangles[v3]) - || (input_triangles[v2] == input_triangles[v3])) - { - //llinfos << "Removing degenerate triangle " << input_triangles[v1] << ":" << input_triangles[v2] << ":" << input_triangles[v3] << llendl; - // Degenerate triangle, skip - continue; - } + // can't determine, be safe and say no: + return FALSE; + } + + F32 path_length = mPathParams.getEnd() - mPathParams.getBegin(); + F32 hollow = mProfileParams.getHollow(); + + U8 path_type = mPathParams.getCurveType(); + if ( path_length > MIN_CONCAVE_PATH_WEDGE + && ( mPathParams.getTwist() != mPathParams.getTwistBegin() + || (hollow > 0.f + && LL_PCODE_PATH_LINE != path_type) ) ) + { + // twist along a "not too short" path is concave + return FALSE; + } - if (input_triangles[v1] < input_triangles[v2]) - { - if (input_triangles[v1] < input_triangles[v3]) - { - // (0 < 1) && (0 < 2) - new_triangles[new_num_triangles*3] = input_triangles[v1]; - new_triangles[new_num_triangles*3+1] = input_triangles[v2]; - new_triangles[new_num_triangles*3+2] = input_triangles[v3]; - } - else - { - // (0 < 1) && (2 < 0) - new_triangles[new_num_triangles*3] = input_triangles[v3]; - new_triangles[new_num_triangles*3+1] = input_triangles[v1]; - new_triangles[new_num_triangles*3+2] = input_triangles[v2]; - } - } - else if (input_triangles[v2] < input_triangles[v3]) - { - // (1 < 0) && (1 < 2) - new_triangles[new_num_triangles*3] = input_triangles[v2]; - new_triangles[new_num_triangles*3+1] = input_triangles[v3]; - new_triangles[new_num_triangles*3+2] = input_triangles[v1]; - } - else - { - // (1 < 0) && (2 < 1) - new_triangles[new_num_triangles*3] = input_triangles[v3]; - new_triangles[new_num_triangles*3+1] = input_triangles[v1]; - new_triangles[new_num_triangles*3+2] = input_triangles[v2]; - } - new_num_triangles++; + F32 profile_length = mProfileParams.getEnd() - mProfileParams.getBegin(); + BOOL same_hole = hollow == 0.f + || (mProfileParams.getCurveType() & LL_PCODE_HOLE_MASK) == LL_PCODE_HOLE_SAME; + + F32 min_profile_wedge = MIN_CONCAVE_PROFILE_WEDGE; + U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; + if ( LL_PCODE_PROFILE_CIRCLE_HALF == profile_type ) + { + // it is a sphere and spheres get twice the minimum profile wedge + min_profile_wedge = 2.f * MIN_CONCAVE_PROFILE_WEDGE; } - if (new_num_triangles == 0) + BOOL convex_profile = ( ( profile_length == 1.f + || profile_length <= 0.5f ) + && hollow == 0.f ) // trivially convex + || ( profile_length <= min_profile_wedge + && same_hole ); // effectvely convex (even when hollow) + + if (!convex_profile) { - llwarns << "Created volume object with 0 faces." 
<< llendl; - delete[] new_triangles; - delete[] vertex_mapping; - delete[] new_vertices; + // profile is concave return FALSE; } - typedef std::set triangle_set_t; - triangle_set_t triangle_list; + if ( LL_PCODE_PATH_LINE == path_type ) + { + // straight paths with convex profile + return TRUE; + } - for (i = 0; i < new_num_triangles; i++) + BOOL concave_path = (path_length < 1.0f) && (path_length > 0.5f); + if (concave_path) { - triangle_list.insert(&new_triangles[i*3]); + return FALSE; } - // Sort through the triangle list, and delete duplicates + // we're left with spheres, toroids and tubes + if ( LL_PCODE_PROFILE_CIRCLE_HALF == profile_type ) + { + // at this stage all spheres must be convex + return TRUE; + } - S32 *prevp = NULL; - S32 *curp = NULL; + // it's a toroid or tube + if ( path_length <= MIN_CONCAVE_PATH_WEDGE ) + { + // effectively convex + return TRUE; + } - S32 *sorted_tris = new S32[new_num_triangles*3]; - S32 cur_tri = 0; - for (triangle_set_t::iterator iter = triangle_list.begin(), - end = triangle_list.end(); - iter != end; iter++) + return FALSE; +} + +// debug +void LLVolumeParams::setCube() +{ + mProfileParams.setCurveType(LL_PCODE_PROFILE_SQUARE); + mProfileParams.setBegin(0.f); + mProfileParams.setEnd(1.f); + mProfileParams.setHollow(0.f); + + mPathParams.setBegin(0.f); + mPathParams.setEnd(1.f); + mPathParams.setScale(1.f, 1.f); + mPathParams.setShear(0.f, 0.f); + mPathParams.setCurveType(LL_PCODE_PATH_LINE); + mPathParams.setTwistBegin(0.f); + mPathParams.setTwistEnd(0.f); + mPathParams.setRadiusOffset(0.f); + mPathParams.setTaper(0.f, 0.f); + mPathParams.setRevolutions(0.f); + mPathParams.setSkew(0.f); +} + +LLFaceID LLVolume::generateFaceMask() +{ + LLFaceID new_mask = 0x0000; + + switch(mParams.getProfileParams().getCurveType() & LL_PCODE_PROFILE_MASK) { - curp = *iter; - if (!prevp || !equalTriangle(prevp, curp)) + case LL_PCODE_PROFILE_CIRCLE: + case LL_PCODE_PROFILE_CIRCLE_HALF: + new_mask |= LL_FACE_OUTER_SIDE_0; + break; + case LL_PCODE_PROFILE_SQUARE: { - //llinfos << "Added triangle " << *curp << ":" << *(curp+1) << ":" << *(curp+2) << llendl; - sorted_tris[cur_tri*3] = *curp; - sorted_tris[cur_tri*3+1] = *(curp+1); - sorted_tris[cur_tri*3+2] = *(curp+2); - cur_tri++; - prevp = curp; + for(S32 side = (S32)(mParams.getProfileParams().getBegin() * 4.f); side < llceil(mParams.getProfileParams().getEnd() * 4.f); side++) + { + new_mask |= LL_FACE_OUTER_SIDE_0 << side; + } } - else + break; + case LL_PCODE_PROFILE_ISOTRI: + case LL_PCODE_PROFILE_EQUALTRI: + case LL_PCODE_PROFILE_RIGHTTRI: { - //llinfos << "Skipped triangle " << *curp << ":" << *(curp+1) << ":" << *(curp+2) << llendl; + for(S32 side = (S32)(mParams.getProfileParams().getBegin() * 3.f); side < llceil(mParams.getProfileParams().getEnd() * 3.f); side++) + { + new_mask |= LL_FACE_OUTER_SIDE_0 << side; + } } + break; + default: + llerrs << "Unknown profile!" 
<< llendl; + break; } - *output_vertices = new LLVector3[new_num_vertices]; - num_output_vertices = new_num_vertices; - for (i = 0; i < new_num_vertices; i++) + // handle hollow objects + if (mParams.getProfileParams().getHollow() > 0) { - (*output_vertices)[i] = new_vertices[i]; + new_mask |= LL_FACE_INNER_SIDE; } - *output_triangles = new S32[cur_tri*3]; - num_output_triangles = cur_tri; - memcpy(*output_triangles, sorted_tris, 3*cur_tri*sizeof(S32)); /* Flawfinder: ignore */ + // handle open profile curves + if (mProfilep->isOpen()) + { + new_mask |= LL_FACE_PROFILE_BEGIN | LL_FACE_PROFILE_END; + } - /* - llinfos << "Out vertices: " << num_output_vertices << llendl; - llinfos << "Out triangles: " << num_output_triangles << llendl; - for (i = 0; i < num_output_vertices; i++) + // handle open path curves + if (mPathp->isOpen()) { - llinfos << i << ":" << (*output_vertices)[i] << llendl; + new_mask |= LL_FACE_PATH_BEGIN | LL_FACE_PATH_END; } - for (i = 0; i < num_output_triangles; i++) + + return new_mask; +} + +BOOL LLVolume::isFaceMaskValid(LLFaceID face_mask) +{ + LLFaceID test_mask = 0; + for(S32 i = 0; i < getNumFaces(); i++) { - llinfos << i << ":" << (*output_triangles)[i*3] << ":" << (*output_triangles)[i*3+1] << ":" << (*output_triangles)[i*3+2] << llendl; + test_mask |= mProfilep->mFaces[i].mFaceID; } - */ - //llinfos << "Out vertices: " << num_output_vertices << llendl; - //llinfos << "Out triangles: " << num_output_triangles << llendl; - delete[] vertex_mapping; - vertex_mapping = NULL; - delete[] new_vertices; - new_vertices = NULL; - delete[] new_triangles; - new_triangles = NULL; - delete[] sorted_tris; - sorted_tris = NULL; - triangle_list.clear(); - std::for_each(vertex_list.begin(), vertex_list.end(), DeletePointer()); - vertex_list.clear(); - - return TRUE; + return test_mask == face_mask; +} + +BOOL LLVolume::isConvex() const +{ + // mParams.isConvex() may return FALSE even though the final + // geometry is actually convex due to LOD approximations. + // TODO -- provide LLPath and LLProfile with isConvex() methods + // that correctly determine convexity. 
-- Leviathan + return mParams.isConvex(); +} + + +std::ostream& operator<<(std::ostream &s, const LLProfileParams &profile_params) +{ + s << "{type=" << (U32) profile_params.mCurveType; + s << ", begin=" << profile_params.mBegin; + s << ", end=" << profile_params.mEnd; + s << ", hollow=" << profile_params.mHollow; + s << "}"; + return s; +} + + +std::ostream& operator<<(std::ostream &s, const LLPathParams &path_params) +{ + s << "{type=" << (U32) path_params.mCurveType; + s << ", begin=" << path_params.mBegin; + s << ", end=" << path_params.mEnd; + s << ", twist=" << path_params.mTwistEnd; + s << ", scale=" << path_params.mScale; + s << ", shear=" << path_params.mShear; + s << ", twist_begin=" << path_params.mTwistBegin; + s << ", radius_offset=" << path_params.mRadiusOffset; + s << ", taper=" << path_params.mTaper; + s << ", revolutions=" << path_params.mRevolutions; + s << ", skew=" << path_params.mSkew; + s << "}"; + return s; +} + + +std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params) +{ + s << "{profileparams = " << volume_params.mProfileParams; + s << ", pathparams = " << volume_params.mPathParams; + s << "}"; + return s; +} + + +std::ostream& operator<<(std::ostream &s, const LLProfile &profile) +{ + s << " {open=" << (U32) profile.mOpen; + s << ", dirty=" << profile.mDirty; + s << ", totalout=" << profile.mTotalOut; + s << ", total=" << profile.mTotal; + s << "}"; + return s; +} + + +std::ostream& operator<<(std::ostream &s, const LLPath &path) +{ + s << "{open=" << (U32) path.mOpen; + s << ", dirty=" << path.mDirty; + s << ", step=" << path.mStep; + s << ", total=" << path.mTotal; + s << "}"; + return s; +} + +std::ostream& operator<<(std::ostream &s, const LLVolume &volume) +{ + s << "{params = " << volume.getParams(); + s << ", path = " << *volume.mPathp; + s << ", profile = " << *volume.mProfilep; + s << "}"; + return s; +} + + +std::ostream& operator<<(std::ostream &s, const LLVolume *volumep) +{ + s << "{params = " << volumep->getParams(); + s << ", path = " << *(volumep->mPathp); + s << ", profile = " << *(volumep->mProfilep); + s << "}"; + return s; +} + +LLVolumeFace::LLVolumeFace() : + mID(0), + mTypeMask(0), + mBeginS(0), + mBeginT(0), + mNumS(0), + mNumT(0), + mNumVertices(0), + mNumIndices(0), + mPositions(NULL), + mNormals(NULL), + mBinormals(NULL), + mTexCoords(NULL), + mIndices(NULL), + mWeights(NULL), + mOctree(NULL) +{ + mExtents = (LLVector4a*) malloc(sizeof(LLVector4a)*3); + mCenter = mExtents+2; } +LLVolumeFace::LLVolumeFace(const LLVolumeFace& src) +: mID(0), + mTypeMask(0), + mBeginS(0), + mBeginT(0), + mNumS(0), + mNumT(0), + mNumVertices(0), + mNumIndices(0), + mPositions(NULL), + mNormals(NULL), + mBinormals(NULL), + mTexCoords(NULL), + mIndices(NULL), + mWeights(NULL), + mOctree(NULL) +{ + mExtents = (LLVector4a*) malloc(sizeof(LLVector4a)*3); + mCenter = mExtents+2; + *this = src; +} -BOOL LLVolumeParams::importFile(LLFILE *fp) +LLVolumeFace& LLVolumeFace::operator=(const LLVolumeFace& src) { - LLMemType m1(LLMemType::MTYPE_VOLUME); + if (&src == this) + { //self assignment, do nothing + return *this; + } + + mID = src.mID; + mTypeMask = src.mTypeMask; + mBeginS = src.mBeginS; + mBeginT = src.mBeginT; + mNumS = src.mNumS; + mNumT = src.mNumT; + + mExtents[0] = src.mExtents[0]; + mExtents[1] = src.mExtents[1]; + *mCenter = *src.mCenter; + + mNumVertices = 0; + mNumIndices = 0; + + freeData(); - //llinfos << "importing volume" << llendl; - const S32 BUFSIZE = 16384; - char buffer[BUFSIZE]; /* Flawfinder: ignore */ - // *NOTE: 
changing the size or type of this buffer will require - // changing the sscanf below. - char keyword[256]; /* Flawfinder: ignore */ - keyword[0] = 0; + LLVector4a::memcpyNonAliased16((F32*) mExtents, (F32*) src.mExtents, 3*sizeof(LLVector4a)); - while (!feof(fp)) + resizeVertices(src.mNumVertices); + resizeIndices(src.mNumIndices); + + if (mNumVertices) { - if (fgets(buffer, BUFSIZE, fp) == NULL) - { - buffer[0] = '\0'; - } - - sscanf(buffer, " %255s", keyword); /* Flawfinder: ignore */ - if (!strcmp("{", keyword)) - { - continue; - } - if (!strcmp("}",keyword)) + S32 vert_size = mNumVertices*sizeof(LLVector4a); + S32 tc_size = (mNumVertices*sizeof(LLVector2)+0xF) & ~0xF; + + LLVector4a::memcpyNonAliased16((F32*) mPositions, (F32*) src.mPositions, vert_size); + LLVector4a::memcpyNonAliased16((F32*) mNormals, (F32*) src.mNormals, vert_size); + LLVector4a::memcpyNonAliased16((F32*) mTexCoords, (F32*) src.mTexCoords, tc_size); + + + if (src.mBinormals) { - break; + allocateBinormals(src.mNumVertices); + LLVector4a::memcpyNonAliased16((F32*) mBinormals, (F32*) src.mBinormals, vert_size); } - else if (!strcmp("profile", keyword)) + else { - mProfileParams.importFile(fp); + free(mBinormals); + mBinormals = NULL; } - else if (!strcmp("path",keyword)) + + if (src.mWeights) { - mPathParams.importFile(fp); + allocateWeights(src.mNumVertices); + LLVector4a::memcpyNonAliased16((F32*) mWeights, (F32*) src.mWeights, vert_size); } else { - llwarns << "unknown keyword " << keyword << " in volume import" << llendl; + free(mWeights); + mWeights = NULL; } } - return TRUE; + if (mNumIndices) + { + S32 idx_size = (mNumIndices*sizeof(U16)+0xF) & ~0xF; + + LLVector4a::memcpyNonAliased16((F32*) mIndices, (F32*) src.mIndices, idx_size); + } + + //delete + return *this; } -BOOL LLVolumeParams::exportFile(LLFILE *fp) const +LLVolumeFace::~LLVolumeFace() { - fprintf(fp,"\tshape 0\n"); - fprintf(fp,"\t{\n"); - mPathParams.exportFile(fp); - mProfileParams.exportFile(fp); - fprintf(fp, "\t}\n"); - return TRUE; + free(mExtents); + mExtents = NULL; + + freeData(); } +void LLVolumeFace::freeData() +{ + free(mPositions); + mPositions = NULL; + free( mNormals); + mNormals = NULL; + free(mTexCoords); + mTexCoords = NULL; + free(mIndices); + mIndices = NULL; + free(mBinormals); + mBinormals = NULL; + free(mWeights); + mWeights = NULL; + + delete mOctree; + mOctree = NULL; +} -BOOL LLVolumeParams::importLegacyStream(std::istream& input_stream) +BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) { - LLMemType m1(LLMemType::MTYPE_VOLUME); - - //llinfos << "importing volume" << llendl; - const S32 BUFSIZE = 16384; - // *NOTE: changing the size or type of this buffer will require - // changing the sscanf below. 
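The copy-assignment hunks above pad the tex-coord and index buffer byte counts up to 16-byte multiples, e.g. (mNumVertices*sizeof(LLVector2)+0xF) & ~0xF, before handing them to LLVector4a::memcpyNonAliased16(), presumably so the 16-byte-wide copies stay within the allocation. The padding idiom in isolation, with a hypothetical helper name:

#include <cassert>
#include <cstddef>

// Round a byte count up to the next multiple of 16.
static size_t round_up_16(size_t bytes)
{
    return (bytes + 0xF) & ~size_t(0xF);
}

static void padding_example()
{
    assert(round_up_16(56) == 64);  // 56 bytes -> padded to 64
    assert(round_up_16(64) == 64);  // already aligned
    assert(round_up_16(1)  == 16);
}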
- char buffer[BUFSIZE]; /* Flawfinder: ignore */ - char keyword[256]; /* Flawfinder: ignore */ - keyword[0] = 0; + //tree for this face is no longer valid + delete mOctree; + mOctree = NULL; - while (input_stream.good()) + if (mTypeMask & CAP_MASK) { - input_stream.getline(buffer, BUFSIZE); - sscanf(buffer, " %255s", keyword); - if (!strcmp("{", keyword)) - { - continue; - } - if (!strcmp("}",keyword)) - { - break; - } - else if (!strcmp("profile", keyword)) - { - mProfileParams.importLegacyStream(input_stream); - } - else if (!strcmp("path",keyword)) - { - mPathParams.importLegacyStream(input_stream); - } - else - { - llwarns << "unknown keyword " << keyword << " in volume import" << llendl; - } + return createCap(volume, partial_build); + } + else if ((mTypeMask & END_MASK) || (mTypeMask & SIDE_MASK)) + { + return createSide(volume, partial_build); + } + else + { + llerrs << "Unknown/uninitialized face type!" << llendl; + return FALSE; } - - return TRUE; -} - -BOOL LLVolumeParams::exportLegacyStream(std::ostream& output_stream) const -{ - LLMemType m1(LLMemType::MTYPE_VOLUME); - - output_stream <<"\tshape 0\n"; - output_stream <<"\t{\n"; - mPathParams.exportLegacyStream(output_stream); - mProfileParams.exportLegacyStream(output_stream); - output_stream << "\t}\n"; - return TRUE; } -LLSD LLVolumeParams::asLLSD() const +void LLVolumeFace::getVertexData(U16 index, LLVolumeFace::VertexData& cv) { - LLSD sd = LLSD(); - sd["path"] = mPathParams; - sd["profile"] = mProfileParams; - return sd; + cv.setPosition(mPositions[index]); + cv.setNormal(mNormals[index]); + cv.mTexCoord = mTexCoords[index]; } -bool LLVolumeParams::fromLLSD(LLSD& sd) +bool LLVolumeFace::VertexMapData::operator==(const LLVolumeFace::VertexData& rhs) const { - mPathParams.fromLLSD(sd["path"]); - mProfileParams.fromLLSD(sd["profile"]); - return true; + return getPosition().equals3(rhs.getPosition()) && + mTexCoord == rhs.mTexCoord && + getNormal().equals3(rhs.getNormal()); } -void LLVolumeParams::reduceS(F32 begin, F32 end) +bool LLVolumeFace::VertexMapData::ComparePosition::operator()(const LLVector3& a, const LLVector3& b) const { - begin = llclampf(begin); - end = llclampf(end); - if (begin > end) + if (a.mV[0] != b.mV[0]) { - F32 temp = begin; - begin = end; - end = temp; + return a.mV[0] < b.mV[0]; } - F32 a = mProfileParams.getBegin(); - F32 b = mProfileParams.getEnd(); - mProfileParams.setBegin(a + begin * (b - a)); - mProfileParams.setEnd(a + end * (b - a)); + + if (a.mV[1] != b.mV[1]) + { + return a.mV[1] < b.mV[1]; + } + + return a.mV[2] < b.mV[2]; } -void LLVolumeParams::reduceT(F32 begin, F32 end) +void LLVolumeFace::optimize(F32 angle_cutoff) { - begin = llclampf(begin); - end = llclampf(end); - if (begin > end) + LLVolumeFace new_face; + + //map of points to vector of vertices at that point + VertexMapData::PointMap point_map; + + //remove redundant vertices + for (U32 i = 0; i < mNumIndices; ++i) { - F32 temp = begin; - begin = end; - end = temp; + U16 index = mIndices[i]; + + LLVolumeFace::VertexData cv; + getVertexData(index, cv); + + BOOL found = FALSE; + VertexMapData::PointMap::iterator point_iter = point_map.find(LLVector3(cv.getPosition().getF32ptr())); + if (point_iter != point_map.end()) + { //duplicate point might exist + for (U32 j = 0; j < point_iter->second.size(); ++j) + { + LLVolumeFace::VertexData& tv = (point_iter->second)[j]; + if (tv.compareNormal(cv, angle_cutoff)) + { + found = TRUE; + new_face.pushIndex((point_iter->second)[j].mIndex); + break; + } + } + } + + if (!found) + { + 
new_face.pushVertex(cv); + U16 index = (U16) new_face.mNumVertices-1; + new_face.pushIndex(index); + + VertexMapData d; + d.setPosition(cv.getPosition()); + d.mTexCoord = cv.mTexCoord; + d.setNormal(cv.getNormal()); + d.mIndex = index; + if (point_iter != point_map.end()) + { + point_iter->second.push_back(d); + } + else + { + point_map[LLVector3(d.getPosition().getF32ptr())].push_back(d); + } + } } - F32 a = mPathParams.getBegin(); - F32 b = mPathParams.getEnd(); - mPathParams.setBegin(a + begin * (b - a)); - mPathParams.setEnd(a + end * (b - a)); + + swapData(new_face); } -const F32 MIN_CONCAVE_PROFILE_WEDGE = 0.125f; // 1/8 unity -const F32 MIN_CONCAVE_PATH_WEDGE = 0.111111f; // 1/9 unity +class LLVCacheTriangleData; -// returns TRUE if the shape can be approximated with a convex shape -// for collison purposes -BOOL LLVolumeParams::isConvex() const +class LLVCacheVertexData { - F32 path_length = mPathParams.getEnd() - mPathParams.getBegin(); - F32 hollow = mProfileParams.getHollow(); - - U8 path_type = mPathParams.getCurveType(); - if ( path_length > MIN_CONCAVE_PATH_WEDGE - && ( mPathParams.getTwist() != mPathParams.getTwistBegin() - || (hollow > 0.f - && LL_PCODE_PATH_LINE != path_type) ) ) +public: + S32 mIdx; + S32 mCacheTag; + F32 mScore; + U32 mActiveTriangles; + std::vector mTriangles; + + LLVCacheVertexData() { - // twist along a "not too short" path is concave - return FALSE; + mCacheTag = -1; + mScore = 0.f; + mActiveTriangles = 0; + mIdx = -1; } +}; - F32 profile_length = mProfileParams.getEnd() - mProfileParams.getBegin(); - BOOL same_hole = hollow == 0.f - || (mProfileParams.getCurveType() & LL_PCODE_HOLE_MASK) == LL_PCODE_HOLE_SAME; +class LLVCacheTriangleData +{ +public: + bool mActive; + F32 mScore; + LLVCacheVertexData* mVertex[3]; - F32 min_profile_wedge = MIN_CONCAVE_PROFILE_WEDGE; - U8 profile_type = mProfileParams.getCurveType() & LL_PCODE_PROFILE_MASK; - if ( LL_PCODE_PROFILE_CIRCLE_HALF == profile_type ) + LLVCacheTriangleData() { - // it is a sphere and spheres get twice the minimum profile wedge - min_profile_wedge = 2.f * MIN_CONCAVE_PROFILE_WEDGE; + mActive = true; + mScore = 0.f; + mVertex[0] = mVertex[1] = mVertex[2] = NULL; } - BOOL convex_profile = ( ( profile_length == 1.f - || profile_length <= 0.5f ) - && hollow == 0.f ) // trivially convex - || ( profile_length <= min_profile_wedge - && same_hole ); // effectvely convex (even when hollow) - - if (!convex_profile) + void complete() { - // profile is concave - return FALSE; + mActive = false; + for (S32 i = 0; i < 3; ++i) + { + if (mVertex[i]) + { + llassert_always(mVertex[i]->mActiveTriangles > 0); + mVertex[i]->mActiveTriangles--; + } + } } - if ( LL_PCODE_PATH_LINE == path_type ) - { - // straight paths with convex profile - return TRUE; + bool operator<(const LLVCacheTriangleData& rhs) const + { //highest score first + return rhs.mScore < mScore; } +}; - BOOL concave_path = (path_length < 1.0f) && (path_length > 0.5f); - if (concave_path) - { - return FALSE; +const F32 FindVertexScore_CacheDecayPower = 1.5f; +const F32 FindVertexScore_LastTriScore = 0.75f; +const F32 FindVertexScore_ValenceBoostScale = 2.0f; +const F32 FindVertexScore_ValenceBoostPower = 0.5f; +const U32 MaxSizeVertexCache = 32; + +F32 find_vertex_score(LLVCacheVertexData& data) +{ + if (data.mActiveTriangles == 0) + { //no triangle references this vertex + return -1.f; } - // we're left with spheres, toroids and tubes - if ( LL_PCODE_PROFILE_CIRCLE_HALF == profile_type ) + F32 score = 0.f; + + S32 cache_idx = data.mCacheTag; + + if 
(cache_idx < 0) { - // at this stage all spheres must be convex - return TRUE; + //not in cache } - - // it's a toroid or tube - if ( path_length <= MIN_CONCAVE_PATH_WEDGE ) + else { - // effectively convex - return TRUE; + if (cache_idx < 3) + { //vertex was in the last triangle + score = FindVertexScore_LastTriScore; + } + else + { //more points for being higher in the cache + F32 scaler = 1.f/(MaxSizeVertexCache-3); + score = 1.f-((cache_idx-3)*scaler); + score = powf(score, FindVertexScore_CacheDecayPower); + } } - return FALSE; -} - -// debug -void LLVolumeParams::setCube() -{ - mProfileParams.setCurveType(LL_PCODE_PROFILE_SQUARE); - mProfileParams.setBegin(0.f); - mProfileParams.setEnd(1.f); - mProfileParams.setHollow(0.f); + //bonus points for having low valence + F32 valence_boost = powf(data.mActiveTriangles, -FindVertexScore_ValenceBoostPower); + score += FindVertexScore_ValenceBoostScale * valence_boost; - mPathParams.setBegin(0.f); - mPathParams.setEnd(1.f); - mPathParams.setScale(1.f, 1.f); - mPathParams.setShear(0.f, 0.f); - mPathParams.setCurveType(LL_PCODE_PATH_LINE); - mPathParams.setTwistBegin(0.f); - mPathParams.setTwistEnd(0.f); - mPathParams.setRadiusOffset(0.f); - mPathParams.setTaper(0.f, 0.f); - mPathParams.setRevolutions(0.f); - mPathParams.setSkew(0.f); + return score; } -LLFaceID LLVolume::generateFaceMask() +class LLVCacheFIFO { - LLFaceID new_mask = 0x0000; +public: + LLVCacheVertexData* mCache[MaxSizeVertexCache]; + U32 mMisses; - switch(mParams.getProfileParams().getCurveType() & LL_PCODE_PROFILE_MASK) + LLVCacheFIFO() { - case LL_PCODE_PROFILE_CIRCLE: - case LL_PCODE_PROFILE_CIRCLE_HALF: - new_mask |= LL_FACE_OUTER_SIDE_0; - break; - case LL_PCODE_PROFILE_SQUARE: + mMisses = 0; + for (U32 i = 0; i < MaxSizeVertexCache; ++i) { - for(S32 side = (S32)(mParams.getProfileParams().getBegin() * 4.f); side < llceil(mParams.getProfileParams().getEnd() * 4.f); side++) - { - new_mask |= LL_FACE_OUTER_SIDE_0 << side; - } + mCache[i] = NULL; } - break; - case LL_PCODE_PROFILE_ISOTRI: - case LL_PCODE_PROFILE_EQUALTRI: - case LL_PCODE_PROFILE_RIGHTTRI: + } + + void addVertex(LLVCacheVertexData* data) + { + if (data->mCacheTag == -1) { - for(S32 side = (S32)(mParams.getProfileParams().getBegin() * 3.f); side < llceil(mParams.getProfileParams().getEnd() * 3.f); side++) + mMisses++; + + S32 end = MaxSizeVertexCache-1; + + if (mCache[end]) { - new_mask |= LL_FACE_OUTER_SIDE_0 << side; + mCache[end]->mCacheTag = -1; + } + + for (S32 i = end; i > 0; --i) + { + mCache[i] = mCache[i-1]; + if (mCache[i]) + { + mCache[i]->mCacheTag = i; + } } + + mCache[0] = data; + data->mCacheTag = 0; } - break; - default: - llerrs << "Unknown profile!" 
<< llendl; - break; } +}; - // handle hollow objects - if (mParams.getProfileParams().getHollow() > 0) +class LLVCacheLRU +{ +public: + LLVCacheVertexData* mCache[MaxSizeVertexCache+3]; + + LLVCacheTriangleData* mBestTriangle; + + U32 mMisses; + + LLVCacheLRU() { - new_mask |= LL_FACE_INNER_SIDE; + for (U32 i = 0; i < MaxSizeVertexCache+3; ++i) + { + mCache[i] = NULL; + } + + mBestTriangle = NULL; + mMisses = 0; } - // handle open profile curves - if (mProfilep->isOpen()) + void addVertex(LLVCacheVertexData* data) { - new_mask |= LL_FACE_PROFILE_BEGIN | LL_FACE_PROFILE_END; + S32 end = MaxSizeVertexCache+2; + if (data->mCacheTag != -1) + { //just moving a vertex to the front of the cache + end = data->mCacheTag; + } + else + { + mMisses++; + if (mCache[end]) + { //adding a new vertex, vertex at end of cache falls off + mCache[end]->mCacheTag = -1; + } + } + + for (S32 i = end; i > 0; --i) + { //adjust cache pointers and tags + mCache[i] = mCache[i-1]; + + if (mCache[i]) + { + mCache[i]->mCacheTag = i; + } + } + + mCache[0] = data; + mCache[0]->mCacheTag = 0; } - // handle open path curves - if (mPathp->isOpen()) + void addTriangle(LLVCacheTriangleData* data) { - new_mask |= LL_FACE_PATH_BEGIN | LL_FACE_PATH_END; + addVertex(data->mVertex[0]); + addVertex(data->mVertex[1]); + addVertex(data->mVertex[2]); } - return new_mask; -} - -BOOL LLVolume::isFaceMaskValid(LLFaceID face_mask) -{ - LLFaceID test_mask = 0; - for(S32 i = 0; i < getNumFaces(); i++) + void updateScores() { - test_mask |= mProfilep->mFaces[i].mFaceID; + for (U32 i = MaxSizeVertexCache; i < MaxSizeVertexCache+3; ++i) + { //trailing 3 vertices aren't actually in the cache for scoring purposes + if (mCache[i]) + { + mCache[i]->mCacheTag = -1; + } + } + + for (U32 i = 0; i < MaxSizeVertexCache; ++i) + { //update scores of vertices in cache + if (mCache[i]) + { + mCache[i]->mScore = find_vertex_score(*(mCache[i])); + llassert_always(mCache[i]->mCacheTag == i); + } + } + + mBestTriangle = NULL; + //update triangle scores + for (U32 i = 0; i < MaxSizeVertexCache+3; ++i) + { + if (mCache[i]) + { + for (U32 j = 0; j < mCache[i]->mTriangles.size(); ++j) + { + LLVCacheTriangleData* tri = mCache[i]->mTriangles[j]; + if (tri->mActive) + { + tri->mScore = tri->mVertex[0]->mScore; + tri->mScore += tri->mVertex[1]->mScore; + tri->mScore += tri->mVertex[2]->mScore; + + if (!mBestTriangle || mBestTriangle->mScore < tri->mScore) + { + mBestTriangle = tri; + } + } + } + } + } + + //knock trailing 3 vertices off the cache + for (U32 i = MaxSizeVertexCache; i < MaxSizeVertexCache+3; ++i) + { + if (mCache[i]) + { + llassert_always(mCache[i]->mCacheTag == -1); + mCache[i] = NULL; + } + } } +}; - return test_mask == face_mask; -} -BOOL LLVolume::isConvex() const -{ - // mParams.isConvex() may return FALSE even though the final - // geometry is actually convex due to LOD approximations. - // TODO -- provide LLPath and LLProfile with isConvex() methods - // that correctly determine convexity. 
-- Leviathan - return mParams.isConvex(); -} +void LLVolumeFace::cacheOptimize() +{ //optimize for vertex cache according to Forsyth method: + // http://home.comcast.net/~tom_forsyth/papers/fast_vert_cache_opt.html + + LLVCacheLRU cache; + + //mapping of vertices to triangles and indices + std::vector vertex_data; + //mapping of triangles do vertices + std::vector triangle_data; -std::ostream& operator<<(std::ostream &s, const LLProfileParams &profile_params) -{ - s << "{type=" << (U32) profile_params.mCurveType; - s << ", begin=" << profile_params.mBegin; - s << ", end=" << profile_params.mEnd; - s << ", hollow=" << profile_params.mHollow; - s << "}"; - return s; -} + triangle_data.resize(mNumIndices/3); + vertex_data.resize(mNumVertices); + for (U32 i = 0; i < mNumIndices; i++) + { //populate vertex data and triangle data arrays + U16 idx = mIndices[i]; + U32 tri_idx = i/3; -std::ostream& operator<<(std::ostream &s, const LLPathParams &path_params) -{ - s << "{type=" << (U32) path_params.mCurveType; - s << ", begin=" << path_params.mBegin; - s << ", end=" << path_params.mEnd; - s << ", twist=" << path_params.mTwistEnd; - s << ", scale=" << path_params.mScale; - s << ", shear=" << path_params.mShear; - s << ", twist_begin=" << path_params.mTwistBegin; - s << ", radius_offset=" << path_params.mRadiusOffset; - s << ", taper=" << path_params.mTaper; - s << ", revolutions=" << path_params.mRevolutions; - s << ", skew=" << path_params.mSkew; - s << "}"; - return s; -} + vertex_data[idx].mTriangles.push_back(&(triangle_data[tri_idx])); + vertex_data[idx].mIdx = idx; + triangle_data[tri_idx].mVertex[i%3] = &(vertex_data[idx]); + } + /*F32 pre_acmr = 1.f; + //measure cache misses from before rebuild + { + LLVCacheFIFO test_cache; + for (U32 i = 0; i < mNumIndices; ++i) + { + test_cache.addVertex(&vertex_data[mIndices[i]]); + } -std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params) -{ - s << "{profileparams = " << volume_params.mProfileParams; - s << ", pathparams = " << volume_params.mPathParams; - s << "}"; - return s; -} + for (U32 i = 0; i < mNumVertices; i++) + { + vertex_data[i].mCacheTag = -1; + } + pre_acmr = (F32) test_cache.mMisses/(mNumIndices/3); + }*/ -std::ostream& operator<<(std::ostream &s, const LLProfile &profile) -{ - s << " {open=" << (U32) profile.mOpen; - s << ", dirty=" << profile.mDirty; - s << ", totalout=" << profile.mTotalOut; - s << ", total=" << profile.mTotal; - s << "}"; - return s; -} + for (U32 i = 0; i < mNumVertices; i++) + { //initialize score values (no cache -- might try a fifo cache here) + vertex_data[i].mScore = find_vertex_score(vertex_data[i]); + vertex_data[i].mActiveTriangles = vertex_data[i].mTriangles.size(); + for (U32 j = 0; j < vertex_data[i].mTriangles.size(); ++j) + { + vertex_data[i].mTriangles[j]->mScore += vertex_data[i].mScore; + } + } -std::ostream& operator<<(std::ostream &s, const LLPath &path) -{ - s << "{open=" << (U32) path.mOpen; - s << ", dirty=" << path.mDirty; - s << ", step=" << path.mStep; - s << ", total=" << path.mTotal; - s << "}"; - return s; -} + //sort triangle data by score + std::sort(triangle_data.begin(), triangle_data.end()); -std::ostream& operator<<(std::ostream &s, const LLVolume &volume) -{ - s << "{params = " << volume.getParams(); - s << ", path = " << *volume.mPathp; - s << ", profile = " << *volume.mProfilep; - s << "}"; - return s; -} + std::vector new_indices; + LLVCacheTriangleData* tri; -std::ostream& operator<<(std::ostream &s, const LLVolume *volumep) -{ - s << "{params = " << 
volumep->getParams(); - s << ", path = " << *(volumep->mPathp); - s << ", profile = " << *(volumep->mProfilep); - s << "}"; - return s; -} + //prime pump by adding first triangle to cache; + tri = &(triangle_data[0]); + cache.addTriangle(tri); + new_indices.push_back(tri->mVertex[0]->mIdx); + new_indices.push_back(tri->mVertex[1]->mIdx); + new_indices.push_back(tri->mVertex[2]->mIdx); + tri->complete(); + U32 breaks = 0; + for (U32 i = 1; i < mNumIndices/3; ++i) + { + cache.updateScores(); + tri = cache.mBestTriangle; + if (!tri) + { + breaks++; + for (U32 j = 0; j < triangle_data.size(); ++j) + { + if (triangle_data[j].mActive) + { + tri = &(triangle_data[j]); + break; + } + } + } + + cache.addTriangle(tri); + new_indices.push_back(tri->mVertex[0]->mIdx); + new_indices.push_back(tri->mVertex[1]->mIdx); + new_indices.push_back(tri->mVertex[2]->mIdx); + tri->complete(); + } -BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) -{ - BOOL ret = FALSE ; - if (mTypeMask & CAP_MASK) + for (U32 i = 0; i < mNumIndices; ++i) { - ret = createCap(volume, partial_build); + mIndices[i] = new_indices[i]; } - else if ((mTypeMask & END_MASK) || (mTypeMask & SIDE_MASK)) + + /*F32 post_acmr = 1.f; + //measure cache misses from after rebuild + { + LLVCacheFIFO test_cache; + for (U32 i = 0; i < mNumVertices; i++) + { + vertex_data[i].mCacheTag = -1; + } + + for (U32 i = 0; i < mNumIndices; ++i) + { + test_cache.addVertex(&vertex_data[mIndices[i]]); + } + + post_acmr = (F32) test_cache.mMisses/(mNumIndices/3); + }*/ + + //optimize for pre-TnL cache + + //allocate space for new buffer + S32 num_verts = mNumVertices; + LLVector4a* pos = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + LLVector4a* norm = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + S32 size = ((num_verts*sizeof(LLVector2)) + 0xF) & ~0xF; + LLVector2* tc = (LLVector2*) malloc(size); + + LLVector4a* wght = NULL; + if (mWeights) { - ret = createSide(volume, partial_build); + wght = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); } - else + + LLVector4a* binorm = NULL; + if (mBinormals) { - llerrs << "Unknown/uninitialized face type!" 
<< llendl; + binorm = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); } - //update the range of the texture coordinates - if(ret) + //allocate mapping of old indices to new indices + std::vector new_idx; + new_idx.resize(mNumVertices, -1); + + S32 cur_idx = 0; + for (U32 i = 0; i < mNumIndices; ++i) { - mTexCoordExtents[0].setVec(1.f, 1.f) ; - mTexCoordExtents[1].setVec(0.f, 0.f) ; + U16 idx = mIndices[i]; + if (new_idx[idx] == -1) + { //this vertex hasn't been added yet + new_idx[idx] = cur_idx; - U32 end = mVertices.size() ; - for(U32 i = 0 ; i < end ; i++) - { - if(mTexCoordExtents[0].mV[0] > mVertices[i].mTexCoord.mV[0]) + //copy vertex data + pos[cur_idx] = mPositions[idx]; + norm[cur_idx] = mNormals[idx]; + tc[cur_idx] = mTexCoords[idx]; + if (mWeights) { - mTexCoordExtents[0].mV[0] = mVertices[i].mTexCoord.mV[0] ; + wght[cur_idx] = mWeights[idx]; } - if(mTexCoordExtents[1].mV[0] < mVertices[i].mTexCoord.mV[0]) + if (mBinormals) { - mTexCoordExtents[1].mV[0] = mVertices[i].mTexCoord.mV[0] ; + binorm[cur_idx] = mBinormals[idx]; } - if(mTexCoordExtents[0].mV[1] > mVertices[i].mTexCoord.mV[1]) - { - mTexCoordExtents[0].mV[1] = mVertices[i].mTexCoord.mV[1] ; - } - if(mTexCoordExtents[1].mV[1] < mVertices[i].mTexCoord.mV[1]) - { - mTexCoordExtents[1].mV[1] = mVertices[i].mTexCoord.mV[1] ; - } + cur_idx++; } - mTexCoordExtents[0].mV[0] = llmax(0.f, mTexCoordExtents[0].mV[0]) ; - mTexCoordExtents[0].mV[1] = llmax(0.f, mTexCoordExtents[0].mV[1]) ; - mTexCoordExtents[1].mV[0] = llmin(1.f, mTexCoordExtents[1].mV[0]) ; - mTexCoordExtents[1].mV[1] = llmin(1.f, mTexCoordExtents[1].mV[1]) ; } - return ret ; + for (U32 i = 0; i < mNumIndices; ++i) + { + mIndices[i] = new_idx[mIndices[i]]; + } + + free(mPositions); + free(mNormals); + free(mTexCoords); + free(mWeights); + free(mBinormals); + + mPositions = pos; + mNormals = norm; + mTexCoords = tc; + mWeights = wght; + mBinormals = binorm; + + //std::string result = llformat("ACMR pre/post: %.3f/%.3f -- %d triangles %d breaks", pre_acmr, post_acmr, mNumIndices/3, breaks); + //llinfos << result << llendl; + +} + +void LLVolumeFace::createOctree(F32 scaler, const LLVector4a& center, const LLVector4a& size) +{ + if (mOctree) + { + return; + } + + mOctree = new LLOctreeRoot(center, size, NULL); + new LLVolumeOctreeListener(mOctree); + + for (U32 i = 0; i < mNumIndices; i+= 3) + { //for each triangle + LLPointer tri = new LLVolumeTriangle(); + + const LLVector4a& v0 = mPositions[mIndices[i]]; + const LLVector4a& v1 = mPositions[mIndices[i+1]]; + const LLVector4a& v2 = mPositions[mIndices[i+2]]; + + //store pointers to vertex data + tri->mV[0] = &v0; + tri->mV[1] = &v1; + tri->mV[2] = &v2; + + //store indices + tri->mIndex[0] = mIndices[i]; + tri->mIndex[1] = mIndices[i+1]; + tri->mIndex[2] = mIndices[i+2]; + + //get minimum point + LLVector4a min = v0; + min.setMin(min, v1); + min.setMin(min, v2); + + //get maximum point + LLVector4a max = v0; + max.setMax(max, v1); + max.setMax(max, v2); + + //compute center + LLVector4a center; + center.setAdd(min, max); + center.mul(0.5f); + + tri->mPositionGroup = center; + + //compute "radius" + LLVector4a size; + size.setSub(max,min); + + tri->mRadius = size.getLength3().getF32() * scaler; + + //insert + mOctree->insert(tri); + } + + //remove unneeded octree layers + while (!mOctree->balance()) { } + + //calculate AABB for each node + LLVolumeOctreeRebound rebound(this); + rebound.traverse(mOctree); + + if (gDebugGL) + { + LLVolumeOctreeValidate validate; + validate.traverse(mOctree); + } +} + + +void 
LLVolumeFace::swapData(LLVolumeFace& rhs) +{ + llswap(rhs.mPositions, mPositions); + llswap(rhs.mNormals, mNormals); + llswap(rhs.mBinormals, mBinormals); + llswap(rhs.mTexCoords, mTexCoords); + llswap(rhs.mIndices,mIndices); + llswap(rhs.mNumVertices, mNumVertices); + llswap(rhs.mNumIndices, mNumIndices); } void LerpPlanarVertex(LLVolumeFace::VertexData& v0, @@ -4463,10 +6019,21 @@ void LerpPlanarVertex(LLVolumeFace::VertexData& v0, F32 coef01, F32 coef02) { - vout.mPosition = v0.mPosition + ((v1.mPosition-v0.mPosition)*coef01)+((v2.mPosition-v0.mPosition)*coef02); + + LLVector4a lhs; + lhs.setSub(v1.getPosition(), v0.getPosition()); + lhs.mul(coef01); + LLVector4a rhs; + rhs.setSub(v2.getPosition(), v0.getPosition()); + rhs.mul(coef02); + + rhs.add(lhs); + rhs.add(v0.getPosition()); + + vout.setPosition(rhs); + vout.mTexCoord = v0.mTexCoord + ((v1.mTexCoord-v0.mTexCoord)*coef01)+((v2.mTexCoord-v0.mTexCoord)*coef02); - vout.mNormal = v0.mNormal; - vout.mBinormal = v0.mBinormal; + vout.setNormal(v0.getNormal()); } BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) @@ -4486,84 +6053,113 @@ BOOL LLVolumeFace::createUnCutCubeCap(LLVolume* volume, BOOL partial_build) num_vertices = (grid_size+1)*(grid_size+1); num_indices = quad_count * 4; - LLVector3& min = mExtents[0]; - LLVector3& max = mExtents[1]; + LLVector4a& min = mExtents[0]; + LLVector4a& max = mExtents[1]; S32 offset = 0; if (mTypeMask & TOP_MASK) + { offset = (max_t-1) * max_s; + } else + { offset = mBeginS; + } + + { + VertexData corners[4]; + VertexData baseVert; + for(S32 t = 0; t < 4; t++) + { + corners[t].getPosition().load3( mesh[offset + (grid_size*t)].mPos.mV); + corners[t].mTexCoord.mV[0] = profile[grid_size*t].mV[0]+0.5f; + corners[t].mTexCoord.mV[1] = 0.5f - profile[grid_size*t].mV[1]; + } + + { + LLVector4a lhs; + lhs.setSub(corners[1].getPosition(), corners[0].getPosition()); + LLVector4a rhs; + rhs.setSub(corners[2].getPosition(), corners[1].getPosition()); + baseVert.getNormal().setCross3(lhs, rhs); + baseVert.getNormal().normalize3fast(); + } + + if(!(mTypeMask & TOP_MASK)) + { + baseVert.getNormal().mul(-1.0f); + } + else + { + //Swap the UVs on the U(X) axis for top face + LLVector2 swap; + swap = corners[0].mTexCoord; + corners[0].mTexCoord=corners[3].mTexCoord; + corners[3].mTexCoord=swap; + swap = corners[1].mTexCoord; + corners[1].mTexCoord=corners[2].mTexCoord; + corners[2].mTexCoord=swap; + } - VertexData corners[4]; - VertexData baseVert; - for(int t = 0; t < 4; t++){ - corners[t].mPosition = mesh[offset + (grid_size*t)].mPos; - corners[t].mTexCoord.mV[0] = profile[grid_size*t].mV[0]+0.5f; - corners[t].mTexCoord.mV[1] = 0.5f - profile[grid_size*t].mV[1]; - } - baseVert.mNormal = - ((corners[1].mPosition-corners[0].mPosition) % - (corners[2].mPosition-corners[1].mPosition)); - baseVert.mNormal.normVec(); - if(!(mTypeMask & TOP_MASK)){ - baseVert.mNormal *= -1.0f; - }else{ - //Swap the UVs on the U(X) axis for top face - LLVector2 swap; - swap = corners[0].mTexCoord; - corners[0].mTexCoord=corners[3].mTexCoord; - corners[3].mTexCoord=swap; - swap = corners[1].mTexCoord; - corners[1].mTexCoord=corners[2].mTexCoord; - corners[2].mTexCoord=swap; - } - baseVert.mBinormal = calc_binormal_from_triangle( - corners[0].mPosition, corners[0].mTexCoord, - corners[1].mPosition, corners[1].mTexCoord, - corners[2].mPosition, corners[2].mTexCoord); - for(int t = 0; t < 4; t++){ - corners[t].mBinormal = baseVert.mBinormal; - corners[t].mNormal = baseVert.mNormal; - } - mHasBinormals = TRUE; + 
LLVector4a binormal; + + calc_binormal_from_triangle( binormal, + corners[0].getPosition(), corners[0].mTexCoord, + corners[1].getPosition(), corners[1].mTexCoord, + corners[2].getPosition(), corners[2].mTexCoord); + + binormal.normalize3fast(); - if (partial_build) - { - mVertices.clear(); - } + S32 size = (grid_size+1)*(grid_size+1); + resizeVertices(size); + allocateBinormals(size); - S32 vtop = mVertices.size(); - for(int gx = 0;gxsetAdd(min, max); + mCenter->mul(0.5f); + } if (!partial_build) { -#if GEN_TRI_STRIP - mTriStrip.clear(); -#endif + resizeIndices(grid_size*grid_size*6); + + U16* out = mIndices; + S32 idxs[] = {0,1,(grid_size+1)+1,(grid_size+1)+1,(grid_size+1),0}; for(S32 gx = 0;gx=0;i--) { - mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); - } - -#if GEN_TRI_STRIP - if (gy == 0) - { - mTriStrip.push_back((gx+1)*(grid_size+1)); - mTriStrip.push_back((gx+1)*(grid_size+1)); - mTriStrip.push_back(gx*(grid_size+1)); - } - - mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); - mTriStrip.push_back(gy+1+gx*(grid_size+1)); - - - if (gy == grid_size-1) - { - mTriStrip.push_back(gy+1+gx*(grid_size+1)); - } -#endif + *out++ = ((gy*(grid_size+1))+gx+idxs[i]); + } } else { for(S32 i=0;i<6;i++) { - mIndices.push_back(vtop+(gy*(grid_size+1))+gx+idxs[i]); - } - -#if GEN_TRI_STRIP - if (gy == 0) - { - mTriStrip.push_back(gx*(grid_size+1)); - mTriStrip.push_back(gx*(grid_size+1)); - mTriStrip.push_back((gx+1)*(grid_size+1)); - } - - mTriStrip.push_back(gy+1+gx*(grid_size+1)); - mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); - - if (gy == grid_size-1) - { - mTriStrip.push_back(gy+1+(gx+1)*(grid_size+1)); + *out++ = ((gy*(grid_size+1))+gx+idxs[i]); } -#endif } - } - - } - -#if GEN_TRI_STRIP - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); + } } -#endif } return TRUE; @@ -4658,17 +6211,31 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) num_vertices = profile.size(); num_indices = (profile.size() - 2)*3; - mVertices.resize(num_vertices); + if (!(mTypeMask & HOLLOW_MASK) && !(mTypeMask & OPEN_MASK)) + { + resizeVertices(num_vertices+1); + allocateBinormals(num_vertices+1); - if (!partial_build) + if (!partial_build) + { + resizeIndices(num_indices+3); + } + } + else { - mIndices.resize(num_indices); + resizeVertices(num_vertices); + allocateBinormals(num_vertices); + + if (!partial_build) + { + resizeIndices(num_indices); + } } S32 max_s = volume->getProfile().getTotal(); S32 max_t = volume->getPath().mPath.size(); - mCenter.clearVec(); + mCenter->clear(); S32 offset = 0; if (mTypeMask & TOP_MASK) @@ -4686,82 +6253,91 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) LLVector2 cuv; LLVector2 min_uv, max_uv; - LLVector3& min = mExtents[0]; - LLVector3& max = mExtents[1]; + LLVector4a& min = mExtents[0]; + LLVector4a& max = mExtents[1]; + + LLVector2* tc = (LLVector2*) mTexCoords; + LLVector4a* pos = (LLVector4a*) mPositions; + LLVector4a* norm = (LLVector4a*) mNormals; + LLVector4a* binorm = (LLVector4a*) mBinormals; // Copy the vertices into the array for (S32 i = 0; i < num_vertices; i++) { if (mTypeMask & TOP_MASK) { - mVertices[i].mTexCoord.mV[0] = profile[i].mV[0]+0.5f; - mVertices[i].mTexCoord.mV[1] = profile[i].mV[1]+0.5f; + tc[i].mV[0] = profile[i].mV[0]+0.5f; + tc[i].mV[1] = profile[i].mV[1]+0.5f; } else { // Mirror for underside. 
- mVertices[i].mTexCoord.mV[0] = profile[i].mV[0]+0.5f; - mVertices[i].mTexCoord.mV[1] = 0.5f - profile[i].mV[1]; + tc[i].mV[0] = profile[i].mV[0]+0.5f; + tc[i].mV[1] = 0.5f - profile[i].mV[1]; } - mVertices[i].mPosition = mesh[i + offset].mPos; + pos[i].load3(mesh[i + offset].mPos.mV); if (i == 0) { - min = max = mVertices[i].mPosition; - min_uv = max_uv = mVertices[i].mTexCoord; + max = pos[i]; + min = max; + min_uv = max_uv = tc[i]; } else { - update_min_max(min,max, mVertices[i].mPosition); - update_min_max(min_uv, max_uv, mVertices[i].mTexCoord); + update_min_max(min,max,pos[i]); + update_min_max(min_uv, max_uv, tc[i]); } } - mCenter = (min+max)*0.5f; + mCenter->setAdd(min, max); + mCenter->mul(0.5f); + cuv = (min_uv + max_uv)*0.5f; - LLVector3 binormal = calc_binormal_from_triangle( - mCenter, cuv, - mVertices[0].mPosition, mVertices[0].mTexCoord, - mVertices[1].mPosition, mVertices[1].mTexCoord); - binormal.normVec(); + LLVector4a binormal; + calc_binormal_from_triangle(binormal, + *mCenter, cuv, + pos[0], tc[0], + pos[1], tc[1]); + binormal.normalize3fast(); + + LLVector4a normal; + LLVector4a d0, d1; + - LLVector3 d0; - LLVector3 d1; - LLVector3 normal; + d0.setSub(*mCenter, pos[0]); + d1.setSub(*mCenter, pos[1]); - d0 = mCenter-mVertices[0].mPosition; - d1 = mCenter-mVertices[1].mPosition; + if (mTypeMask & TOP_MASK) + { + normal.setCross3(d0, d1); + } + else + { + normal.setCross3(d1, d0); + } - normal = (mTypeMask & TOP_MASK) ? (d0%d1) : (d1%d0); - normal.normVec(); + normal.normalize3fast(); VertexData vd; - vd.mPosition = mCenter; - vd.mNormal = normal; - vd.mBinormal = binormal; + vd.setPosition(*mCenter); vd.mTexCoord = cuv; if (!(mTypeMask & HOLLOW_MASK) && !(mTypeMask & OPEN_MASK)) { - mVertices.push_back(vd); + pos[num_vertices] = *mCenter; + tc[num_vertices] = cuv; num_vertices++; - if (!partial_build) - { - vector_append(mIndices, 3); - } } - for (S32 i = 0; i < num_vertices; i++) { - mVertices[i].mBinormal = binormal; - mVertices[i].mNormal = normal; + binorm[i].load4a(binormal.getF32ptr()); + norm[i].load4a(normal.getF32ptr()); } - mHasBinormals = TRUE; - if (partial_build) { return TRUE; @@ -4869,8 +6445,6 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) pt2--; } } - - makeTriStrip(); } else { @@ -4975,8 +6549,6 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) pt2--; } } - - makeTriStrip(); } } else @@ -4998,131 +6570,277 @@ BOOL LLVolumeFace::createCap(LLVolume* volume, BOOL partial_build) mIndices[3*i+v2] = i + 1; } -#if GEN_TRI_STRIP - //make tri strip - if (mTypeMask & OPEN_MASK) - { - makeTriStrip(); - } - else - { - S32 j = num_vertices-2; - if (mTypeMask & TOP_MASK) - { - mTriStrip.push_back(0); - for (S32 i = 0; i <= j; ++i) - { - mTriStrip.push_back(i); - if (i != j) - { - mTriStrip.push_back(j); - } - --j; - } - } - else - { - mTriStrip.push_back(j); - for (S32 i = 0; i <= j; ++i) - { - if (i != j) - { - mTriStrip.push_back(j); - } - mTriStrip.push_back(i); - --j; - } - } - - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } - } -#endif } return TRUE; } -void LLVolumeFace::makeTriStrip() +void LLVolumeFace::createBinormals() { -#if GEN_TRI_STRIP - for (U32 i = 0; i < mIndices.size(); i+=3) + LLMemType m1(LLMemType::MTYPE_VOLUME); + + if (!mBinormals) { - U16 i0 = mIndices[i]; - U16 i1 = mIndices[i+1]; - U16 i2 = mIndices[i+2]; + allocateBinormals(mNumVertices); - if ((i/3)%2 == 1) - { - mTriStrip.push_back(i0); - 
mTriStrip.push_back(i0); - mTriStrip.push_back(i1); - mTriStrip.push_back(i2); - mTriStrip.push_back(i2); - } - else + //generate binormals + LLVector4a* pos = mPositions; + LLVector2* tc = (LLVector2*) mTexCoords; + LLVector4a* binorm = (LLVector4a*) mBinormals; + + LLVector4a* end = mBinormals+mNumVertices; + while (binorm < end) { - mTriStrip.push_back(i2); - mTriStrip.push_back(i2); - mTriStrip.push_back(i1); - mTriStrip.push_back(i0); - mTriStrip.push_back(i0); + (*binorm++).clear(); } - } - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } -#endif -} + binorm = mBinormals; -void LLVolumeFace::createBinormals() -{ - LLMemType m1(LLMemType::MTYPE_VOLUME); - - if (!mHasBinormals) - { - //generate binormals - for (U32 i = 0; i < mIndices.size()/3; i++) + for (U32 i = 0; i < mNumIndices/3; i++) { //for each triangle - const VertexData& v0 = mVertices[mIndices[i*3+0]]; - const VertexData& v1 = mVertices[mIndices[i*3+1]]; - const VertexData& v2 = mVertices[mIndices[i*3+2]]; + const U16& i0 = mIndices[i*3+0]; + const U16& i1 = mIndices[i*3+1]; + const U16& i2 = mIndices[i*3+2]; //calculate binormal - LLVector3 binorm = calc_binormal_from_triangle(v0.mPosition, v0.mTexCoord, - v1.mPosition, v1.mTexCoord, - v2.mPosition, v2.mTexCoord); + LLVector4a binormal; + calc_binormal_from_triangle(binormal, + pos[i0], tc[i0], + pos[i1], tc[i1], + pos[i2], tc[i2]); - for (U32 j = 0; j < 3; j++) - { //add triangle normal to vertices - mVertices[mIndices[i*3+j]].mBinormal += binorm; // * (weight_sum - d[j])/weight_sum; - } + + //add triangle normal to vertices + binorm[i0].add(binormal); + binorm[i1].add(binormal); + binorm[i2].add(binormal); //even out quad contributions if (i % 2 == 0) { - mVertices[mIndices[i*3+2]].mBinormal += binorm; + binorm[i2].add(binormal); } else { - mVertices[mIndices[i*3+1]].mBinormal += binorm; + binorm[i1].add(binormal); } } //normalize binormals - for (U32 i = 0; i < mVertices.size(); i++) + for (U32 i = 0; i < mNumVertices; i++) + { + binorm[i].normalize3fast(); + //bump map/planar projection code requires normals to be normalized + mNormals[i].normalize3fast(); + } + } +} + +void LLVolumeFace::resizeVertices(S32 num_verts) +{ + free(mPositions); + free(mNormals); + free(mBinormals); + free(mTexCoords); + + mBinormals = NULL; + + if (num_verts) + { + mPositions = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + assert_aligned(mPositions, 16); + mNormals = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); + assert_aligned(mNormals, 16); + + //pad texture coordinate block end to allow for QWORD reads + S32 size = ((num_verts*sizeof(LLVector2)) + 0xF) & ~0xF; + mTexCoords = (LLVector2*) malloc(size); + assert_aligned(mTexCoords, 16); + } + else + { + mPositions = NULL; + mNormals = NULL; + mTexCoords = NULL; + } + + mNumVertices = num_verts; +} + +void LLVolumeFace::pushVertex(const LLVolumeFace::VertexData& cv) +{ + pushVertex(cv.getPosition(), cv.getNormal(), cv.mTexCoord); +} + +void LLVolumeFace::pushVertex(const LLVector4a& pos, const LLVector4a& norm, const LLVector2& tc) +{ + S32 new_verts = mNumVertices+1; + S32 new_size = new_verts*16; +// S32 old_size = mNumVertices*16; + + //positions + mPositions = (LLVector4a*) realloc(mPositions, new_size); + + //normals + mNormals = (LLVector4a*) realloc(mNormals, new_size); + + //tex coords + new_size = ((new_verts*8)+0xF) & ~0xF; + mTexCoords = (LLVector2*) realloc(mTexCoords, new_size); + + + //just clear binormals + free(mBinormals); + mBinormals = NULL; + + 
mPositions[mNumVertices] = pos; + mNormals[mNumVertices] = norm; + mTexCoords[mNumVertices] = tc; + + mNumVertices++; +} + +void LLVolumeFace::allocateBinormals(S32 num_verts) +{ + free(mBinormals); + mBinormals = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); +} + +void LLVolumeFace::allocateWeights(S32 num_verts) +{ + free(mWeights); + mWeights = (LLVector4a*) malloc(sizeof(LLVector4a)*num_verts); +} + +void LLVolumeFace::resizeIndices(S32 num_indices) +{ + free(mIndices); + + if (num_indices) + { + //pad index block end to allow for QWORD reads + S32 size = ((num_indices*sizeof(U16)) + 0xF) & ~0xF; + + mIndices = (U16*) malloc(size); + } + else + { + mIndices = NULL; + } + + mNumIndices = num_indices; +} + +void LLVolumeFace::pushIndex(const U16& idx) +{ + S32 new_count = mNumIndices + 1; + S32 new_size = ((new_count*2)+0xF) & ~0xF; + + S32 old_size = ((mNumIndices*2)+0xF) & ~0xF; + if (new_size != old_size) + { + mIndices = (U16*) realloc(mIndices, new_size); + } + + mIndices[mNumIndices++] = idx; +} + +void LLVolumeFace::fillFromLegacyData(std::vector& v, std::vector& idx) +{ + resizeVertices(v.size()); + resizeIndices(idx.size()); + + for (U32 i = 0; i < v.size(); ++i) + { + mPositions[i] = v[i].getPosition(); + mNormals[i] = v[i].getNormal(); + mTexCoords[i] = v[i].mTexCoord; + } + + for (U32 i = 0; i < idx.size(); ++i) + { + mIndices[i] = idx[i]; + } +} + +void LLVolumeFace::appendFace(const LLVolumeFace& face, LLMatrix4& mat_in, LLMatrix4& norm_mat_in) +{ + U16 offset = mNumVertices; + + S32 new_count = face.mNumVertices + mNumVertices; + + if (new_count > 65536) + { + llerrs << "Cannot append face -- 16-bit overflow will occur." << llendl; + } + + if (face.mNumVertices == 0) + { + llerrs << "Cannot append empty face." << llendl; + } + + //allocate new buffer space + mPositions = (LLVector4a*) realloc(mPositions, new_count*sizeof(LLVector4a)); + assert_aligned(mPositions, 16); + mNormals = (LLVector4a*) realloc(mNormals, new_count*sizeof(LLVector4a)); + assert_aligned(mNormals, 16); + mTexCoords = (LLVector2*) realloc(mTexCoords, (new_count*sizeof(LLVector2)+0xF) & ~0xF); + assert_aligned(mTexCoords, 16); + + mNumVertices = new_count; + + //get destination address of appended face + LLVector4a* dst_pos = mPositions+offset; + LLVector2* dst_tc = mTexCoords+offset; + LLVector4a* dst_norm = mNormals+offset; + + //get source addresses of appended face + const LLVector4a* src_pos = face.mPositions; + const LLVector2* src_tc = face.mTexCoords; + const LLVector4a* src_norm = face.mNormals; + + //load aligned matrices + LLMatrix4a mat, norm_mat; + mat.loadu(mat_in); + norm_mat.loadu(norm_mat_in); + + for (U32 i = 0; i < face.mNumVertices; ++i) + { + //transform appended face position and store + mat.affineTransform(src_pos[i], dst_pos[i]); + + //transform appended face normal and store + norm_mat.rotate(src_norm[i], dst_norm[i]); + dst_norm[i].normalize3fast(); + + //copy appended face texture coordinate + dst_tc[i] = src_tc[i]; + + if (offset == 0 && i == 0) + { //initialize bounding box + mExtents[0] = mExtents[1] = dst_pos[i]; + } + else { - mVertices[i].mBinormal.normVec(); - mVertices[i].mNormal.normVec(); + //stretch bounding box + update_min_max(mExtents[0], mExtents[1], dst_pos[i]); } + } + - mHasBinormals = TRUE; + new_count = mNumIndices + face.mNumIndices; + + //allocate new index buffer + mIndices = (U16*) realloc(mIndices, (new_count*sizeof(U16)+0xF) & ~0xF); + + //get destination address into new index buffer + U16* dst_idx = mIndices+mNumIndices; + mNumIndices = 
new_count; + + for (U32 i = 0; i < face.mNumIndices; ++i) + { //copy indices, offsetting by old vertex count + dst_idx[i] = face.mIndices[i]+offset; } } @@ -5152,18 +6870,20 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) num_vertices = mNumS*mNumT; num_indices = (mNumS-1)*(mNumT-1)*6; - mVertices.resize(num_vertices); - if (!partial_build) { - mIndices.resize(num_indices); - mEdge.resize(num_indices); - } - else - { - mHasBinormals = FALSE; + resizeVertices(num_vertices); + resizeIndices(num_indices); + + if ((volume->getParams().getSculptType() & LL_SCULPT_TYPE_MASK) != LL_SCULPT_TYPE_MESH) + { + mEdge.resize(num_indices); + } } + LLVector4a* pos = (LLVector4a*) mPositions; + LLVector4a* norm = (LLVector4a*) mNormals; + LLVector2* tc = (LLVector2*) mTexCoords; S32 begin_stex = llfloor( profile[mBeginS].mV[2] ); S32 num_s = ((mTypeMask & INNER_MASK) && (mTypeMask & FLAT_MASK) && mNumS > 2) ? mNumS/2 : mNumS; @@ -5214,21 +6934,20 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) i = mBeginS + s + max_s*t; } - mVertices[cur_vertex].mPosition = mesh[i].mPos; - mVertices[cur_vertex].mTexCoord = LLVector2(ss,tt); + pos[cur_vertex].load3(mesh[i].mPos.mV); + tc[cur_vertex] = LLVector2(ss,tt); - mVertices[cur_vertex].mNormal = LLVector3(0,0,0); - mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - + norm[cur_vertex].clear(); cur_vertex++; if ((mTypeMask & INNER_MASK) && (mTypeMask & FLAT_MASK) && mNumS > 2 && s > 0) { - mVertices[cur_vertex].mPosition = mesh[i].mPos; - mVertices[cur_vertex].mTexCoord = LLVector2(ss,tt); + + pos[cur_vertex].load3(mesh[i].mPos.mV); + tc[cur_vertex] = LLVector2(ss,tt); - mVertices[cur_vertex].mNormal = LLVector3(0,0,0); - mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); + norm[cur_vertex].clear(); + cur_vertex++; } } @@ -5246,29 +6965,29 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) i = mBeginS + s + max_s*t; ss = profile[mBeginS + s].mV[2] - begin_stex; - mVertices[cur_vertex].mPosition = mesh[i].mPos; - mVertices[cur_vertex].mTexCoord = LLVector2(ss,tt); - - mVertices[cur_vertex].mNormal = LLVector3(0,0,0); - mVertices[cur_vertex].mBinormal = LLVector3(0,0,0); - + pos[cur_vertex].load3(mesh[i].mPos.mV); + tc[cur_vertex] = LLVector2(ss,tt); + norm[cur_vertex].clear(); + cur_vertex++; } } //get bounding box for this side - LLVector3& face_min = mExtents[0]; - LLVector3& face_max = mExtents[1]; - mCenter.clearVec(); + LLVector4a& face_min = mExtents[0]; + LLVector4a& face_max = mExtents[1]; + mCenter->clear(); - face_min = face_max = mVertices[0].mPosition; - for (U32 i = 1; i < mVertices.size(); ++i) + face_min = face_max = pos[0]; + + for (U32 i = 1; i < mNumVertices; ++i) { - update_min_max(face_min, face_max, mVertices[i].mPosition); + update_min_max(face_min, face_max, pos[i]); } - mCenter = (face_min + face_max) * 0.5f; + mCenter->setAdd(face_min, face_max); + mCenter->mul(0.5f); S32 cur_index = 0; S32 cur_edge = 0; @@ -5276,18 +6995,9 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) if (!partial_build) { -#if GEN_TRI_STRIP - mTriStrip.clear(); -#endif - // Now we generate the indices. 
for (t = 0; t < (mNumT-1); t++) { -#if GEN_TRI_STRIP - //prepend terminating index to strip - mTriStrip.push_back(mNumS*t); -#endif - for (s = 0; s < (mNumS-1); s++) { mIndices[cur_index++] = s + mNumS*t; //bottom left @@ -5297,16 +7007,6 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) mIndices[cur_index++] = s+1 + mNumS*t; //bottom right mIndices[cur_index++] = s+1 + mNumS*(t+1); //top right -#if GEN_TRI_STRIP - if (s == 0) - { - mTriStrip.push_back(s+mNumS*t); - mTriStrip.push_back(s+mNumS*(t+1)); - } - mTriStrip.push_back(s+1+mNumS*t); - mTriStrip.push_back(s+1+mNumS*(t+1)); -#endif - mEdge[cur_edge++] = (mNumS-1)*2*t+s*2+1; //bottom left/top right neighbor face if (t < mNumT-2) { //top right/top left neighbor face mEdge[cur_edge++] = (mNumS-1)*2*(t+1)+s*2+1; @@ -5347,52 +7047,61 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) } mEdge[cur_edge++] = (mNumS-1)*2*t+s*2; //top right/bottom left neighbor face } -#if GEN_TRI_STRIP - //append terminating vertex to strip - mTriStrip.push_back(mNumS-1+mNumS*(t+1)); -#endif } + } -#if GEN_TRI_STRIP - if (mTriStrip.size()%2 == 1) - { - mTriStrip.push_back(mTriStrip[mTriStrip.size()-1]); - } -#endif + //clear normals + for (U32 i = 0; i < mNumVertices; i++) + { + mNormals[i].clear(); } //generate normals - for (U32 i = 0; i < mIndices.size()/3; i++) //for each triangle + for (U32 i = 0; i < mNumIndices/3; i++) //for each triangle { const U16* idx = &(mIndices[i*3]); - - VertexData* v[] = - { &mVertices[idx[0]], &mVertices[idx[1]], &mVertices[idx[2]] }; - - //calculate triangle normal - LLVector3 norm = (v[0]->mPosition-v[1]->mPosition) % (v[0]->mPosition-v[2]->mPosition); + - v[0]->mNormal += norm; - v[1]->mNormal += norm; - v[2]->mNormal += norm; + LLVector4a* v[] = + { pos+idx[0], pos+idx[1], pos+idx[2] }; + + LLVector4a* n[] = + { norm+idx[0], norm+idx[1], norm+idx[2] }; + + //calculate triangle normal + LLVector4a a, b, c; + + a.setSub(*v[0], *v[1]); + b.setSub(*v[0], *v[2]); + c.setCross3(a,b); + n[0]->add(c); + n[1]->add(c); + n[2]->add(c); + //even out quad contributions - v[i%2+1]->mNormal += norm; + n[i%2+1]->add(c); } // adjust normals based on wrapping and stitching - BOOL s_bottom_converges = ((mVertices[0].mPosition - mVertices[mNumS*(mNumT-2)].mPosition).magVecSquared() < 0.000001f); - BOOL s_top_converges = ((mVertices[mNumS-1].mPosition - mVertices[mNumS*(mNumT-2)+mNumS-1].mPosition).magVecSquared() < 0.000001f); + LLVector4a top; + top.setSub(pos[0], pos[mNumS*(mNumT-2)]); + BOOL s_bottom_converges = (top.dot3(top) < 0.000001f); + + top.setSub(pos[mNumS-1], pos[mNumS*(mNumT-2)+mNumS-1]); + BOOL s_top_converges = (top.dot3(top) < 0.000001f); + if (sculpt_stitching == LL_SCULPT_TYPE_NONE) // logic for non-sculpt volumes { if (volume->getPath().isOpen() == FALSE) { //wrap normals on T for (S32 i = 0; i < mNumS; i++) { - LLVector3 norm = mVertices[i].mNormal + mVertices[mNumS*(mNumT-1)+i].mNormal; - mVertices[i].mNormal = norm; - mVertices[mNumS*(mNumT-1)+i].mNormal = norm; + LLVector4a n; + n.setAdd(norm[i], norm[mNumS*(mNumT-1)+i]); + norm[i] = n; + norm[mNumS*(mNumT-1)+i] = n; } } @@ -5400,9 +7109,10 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { //wrap normals on S for (S32 i = 0; i < mNumT; i++) { - LLVector3 norm = mVertices[mNumS*i].mNormal + mVertices[mNumS*i+mNumS-1].mNormal; - mVertices[mNumS * i].mNormal = norm; - mVertices[mNumS * i+mNumS-1].mNormal = norm; + LLVector4a n; + n.setAdd(norm[mNumS*i], norm[mNumS*i+mNumS-1]); + norm[mNumS * i] = n; + 
norm[mNumS * i+mNumS-1] = n; } } @@ -5413,7 +7123,7 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { //all lower S have same normal for (S32 i = 0; i < mNumT; i++) { - mVertices[mNumS*i].mNormal = LLVector3(1,0,0); + norm[mNumS*i].set(1,0,0); } } @@ -5421,12 +7131,11 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { //all upper S have same normal for (S32 i = 0; i < mNumT; i++) { - mVertices[mNumS*i+mNumS-1].mNormal = LLVector3(-1,0,0); + norm[mNumS*i+mNumS-1].set(-1,0,0); } } } } - else // logic for sculpt volumes { BOOL average_poles = FALSE; @@ -5449,30 +7158,33 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { // average normals for north pole - LLVector3 average(0.0, 0.0, 0.0); + LLVector4a average; + average.clear(); + for (S32 i = 0; i < mNumS; i++) { - average += mVertices[i].mNormal; + average.add(norm[i]); } // set average for (S32 i = 0; i < mNumS; i++) { - mVertices[i].mNormal = average; + norm[i] = average; } // average normals for south pole - average = LLVector3(0.0, 0.0, 0.0); + average.clear(); + for (S32 i = 0; i < mNumS; i++) { - average += mVertices[i + mNumS * (mNumT - 1)].mNormal; + average.add(norm[i + mNumS * (mNumT - 1)]); } // set average for (S32 i = 0; i < mNumS; i++) { - mVertices[i + mNumS * (mNumT - 1)].mNormal = average; + norm[i + mNumS * (mNumT - 1)] = average; } } @@ -5482,23 +7194,22 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) { for (S32 i = 0; i < mNumT; i++) { - LLVector3 norm = mVertices[mNumS*i].mNormal + mVertices[mNumS*i+mNumS-1].mNormal; - mVertices[mNumS * i].mNormal = norm; - mVertices[mNumS * i+mNumS-1].mNormal = norm; + LLVector4a n; + n.setAdd(norm[mNumS*i], norm[mNumS*i+mNumS-1]); + norm[mNumS * i] = n; + norm[mNumS * i+mNumS-1] = n; } } - - if (wrap_t) { for (S32 i = 0; i < mNumS; i++) { - LLVector3 norm = mVertices[i].mNormal + mVertices[mNumS*(mNumT-1)+i].mNormal; - mVertices[i].mNormal = norm; - mVertices[mNumS*(mNumT-1)+i].mNormal = norm; + LLVector4a n; + n.setAdd(norm[i], norm[mNumS*(mNumT-1)+i]); + norm[i] = n; + norm[mNumS*(mNumT-1)+i] = n; } - } } @@ -5508,41 +7219,51 @@ BOOL LLVolumeFace::createSide(LLVolume* volume, BOOL partial_build) // Finds binormal based on three vertices with texture coordinates. // Fills in dummy values if the triangle has degenerate texture coordinates. 
-LLVector3 calc_binormal_from_triangle( - const LLVector3& pos0, +void calc_binormal_from_triangle(LLVector4a& binormal, + + const LLVector4a& pos0, const LLVector2& tex0, - const LLVector3& pos1, + const LLVector4a& pos1, const LLVector2& tex1, - const LLVector3& pos2, + const LLVector4a& pos2, const LLVector2& tex2) { - LLVector3 rx0( pos0.mV[VX], tex0.mV[VX], tex0.mV[VY] ); - LLVector3 rx1( pos1.mV[VX], tex1.mV[VX], tex1.mV[VY] ); - LLVector3 rx2( pos2.mV[VX], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a rx0( pos0[VX], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a rx1( pos1[VX], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a rx2( pos2[VX], tex2.mV[VX], tex2.mV[VY] ); - LLVector3 ry0( pos0.mV[VY], tex0.mV[VX], tex0.mV[VY] ); - LLVector3 ry1( pos1.mV[VY], tex1.mV[VX], tex1.mV[VY] ); - LLVector3 ry2( pos2.mV[VY], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a ry0( pos0[VY], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a ry1( pos1[VY], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a ry2( pos2[VY], tex2.mV[VX], tex2.mV[VY] ); - LLVector3 rz0( pos0.mV[VZ], tex0.mV[VX], tex0.mV[VY] ); - LLVector3 rz1( pos1.mV[VZ], tex1.mV[VX], tex1.mV[VY] ); - LLVector3 rz2( pos2.mV[VZ], tex2.mV[VX], tex2.mV[VY] ); + LLVector4a rz0( pos0[VZ], tex0.mV[VX], tex0.mV[VY] ); + LLVector4a rz1( pos1[VZ], tex1.mV[VX], tex1.mV[VY] ); + LLVector4a rz2( pos2[VZ], tex2.mV[VX], tex2.mV[VY] ); - LLVector3 r0 = (rx0 - rx1) % (rx0 - rx2); - LLVector3 r1 = (ry0 - ry1) % (ry0 - ry2); - LLVector3 r2 = (rz0 - rz1) % (rz0 - rz2); + LLVector4a lhs, rhs; + + LLVector4a r0; + lhs.setSub(rx0, rx1); rhs.setSub(rx0, rx2); + r0.setCross3(lhs, rhs); + + LLVector4a r1; + lhs.setSub(ry0, ry1); rhs.setSub(ry0, ry2); + r1.setCross3(lhs, rhs); + + LLVector4a r2; + lhs.setSub(rz0, rz1); rhs.setSub(rz0, rz2); + r2.setCross3(lhs, rhs); - if( r0.mV[VX] && r1.mV[VX] && r2.mV[VX] ) + if( r0[VX] && r1[VX] && r2[VX] ) { - LLVector3 binormal( - -r0.mV[VZ] / r0.mV[VX], - -r1.mV[VZ] / r1.mV[VX], - -r2.mV[VZ] / r2.mV[VX]); + binormal.set( + -r0[VZ] / r0[VX], + -r1[VZ] / r1[VX], + -r2[VZ] / r2[VX]); // binormal.normVec(); - return binormal; } else { - return LLVector3( 0, 1 , 0 ); + binormal.set( 0, 1 , 0 ); } } diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 4a25f586da..eceaced9e2 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -1,1106 +1,1106 @@ -/** - * @file llvolume.h - * @brief LLVolume base class. - * - * $LicenseInfo:firstyear=2002&license=viewerlgpl$ - * Second Life Viewer Source Code - * Copyright (C) 2010, Linden Research, Inc. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; - * version 2.1 of the License only. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA - * $/LicenseInfo$ - */ - -#ifndef LL_LLVOLUME_H -#define LL_LLVOLUME_H - -#include - -class LLProfileParams; -class LLPathParams; -class LLVolumeParams; -class LLProfile; -class LLPath; - -template class LLOctreeNode; - -class LLVector4a; -class LLVolumeFace; -class LLVolume; -class LLVolumeTriangle; - -#include "lldarray.h" -#include "lluuid.h" -#include "v4color.h" -//#include "vmath.h" -#include "v2math.h" -#include "v3math.h" -#include "v3dmath.h" -#include "v4math.h" -#include "llquaternion.h" -#include "llstrider.h" -#include "v4coloru.h" -#include "llrefcount.h" -#include "llfile.h" - -//============================================================================ - -const S32 MIN_DETAIL_FACES = 6; -const S32 MIN_LOD = 0; -const S32 MAX_LOD = 3; - -// These are defined here but are not enforced at this level, -// rather they are here for the convenience of code that uses -// the LLVolume class. -const F32 MIN_VOLUME_PROFILE_WIDTH = 0.05f; -const F32 MIN_VOLUME_PATH_WIDTH = 0.05f; - -const F32 CUT_QUANTA = 0.00002f; -const F32 SCALE_QUANTA = 0.01f; -const F32 SHEAR_QUANTA = 0.01f; -const F32 TAPER_QUANTA = 0.01f; -const F32 REV_QUANTA = 0.015f; -const F32 HOLLOW_QUANTA = 0.00002f; - -const S32 MAX_VOLUME_TRIANGLE_INDICES = 10000; - -//============================================================================ - -// useful masks -const LLPCode LL_PCODE_HOLLOW_MASK = 0x80; // has a thickness -const LLPCode LL_PCODE_SEGMENT_MASK = 0x40; // segments (1 angle) -const LLPCode LL_PCODE_PATCH_MASK = 0x20; // segmented segments (2 angles) -const LLPCode LL_PCODE_HEMI_MASK = 0x10; // half-primitives get their own type per PR's dictum -const LLPCode LL_PCODE_BASE_MASK = 0x0F; - - // primitive shapes -const LLPCode LL_PCODE_CUBE = 1; -const LLPCode LL_PCODE_PRISM = 2; -const LLPCode LL_PCODE_TETRAHEDRON = 3; -const LLPCode LL_PCODE_PYRAMID = 4; -const LLPCode LL_PCODE_CYLINDER = 5; -const LLPCode LL_PCODE_CONE = 6; -const LLPCode LL_PCODE_SPHERE = 7; -const LLPCode LL_PCODE_TORUS = 8; -const LLPCode LL_PCODE_VOLUME = 9; - - // surfaces -//const LLPCode LL_PCODE_SURFACE_TRIANGLE = 10; -//const LLPCode LL_PCODE_SURFACE_SQUARE = 11; -//const LLPCode LL_PCODE_SURFACE_DISC = 12; - -const LLPCode LL_PCODE_APP = 14; // App specific pcode (for viewer/sim side only objects) -const LLPCode LL_PCODE_LEGACY = 15; - -// Pcodes for legacy objects -//const LLPCode LL_PCODE_LEGACY_ATOR = 0x10 | LL_PCODE_LEGACY; // ATOR -const LLPCode LL_PCODE_LEGACY_AVATAR = 0x20 | LL_PCODE_LEGACY; // PLAYER -//const LLPCode LL_PCODE_LEGACY_BIRD = 0x30 | LL_PCODE_LEGACY; // BIRD -//const LLPCode LL_PCODE_LEGACY_DEMON = 0x40 | LL_PCODE_LEGACY; // DEMON -const LLPCode LL_PCODE_LEGACY_GRASS = 0x50 | LL_PCODE_LEGACY; // GRASS -const LLPCode LL_PCODE_TREE_NEW = 0x60 | LL_PCODE_LEGACY; // new trees -//const LLPCode LL_PCODE_LEGACY_ORACLE = 0x70 | LL_PCODE_LEGACY; // ORACLE -const LLPCode LL_PCODE_LEGACY_PART_SYS = 0x80 | LL_PCODE_LEGACY; // PART_SYS -const LLPCode LL_PCODE_LEGACY_ROCK = 0x90 | LL_PCODE_LEGACY; // ROCK -//const LLPCode LL_PCODE_LEGACY_SHOT = 0xA0 | LL_PCODE_LEGACY; // BASIC_SHOT -//const LLPCode LL_PCODE_LEGACY_SHOT_BIG = 0xB0 | LL_PCODE_LEGACY; -//const LLPCode LL_PCODE_LEGACY_SMOKE = 0xC0 | LL_PCODE_LEGACY; // SMOKE 
-//const LLPCode LL_PCODE_LEGACY_SPARK = 0xD0 | LL_PCODE_LEGACY;// SPARK -const LLPCode LL_PCODE_LEGACY_TEXT_BUBBLE = 0xE0 | LL_PCODE_LEGACY; // TEXTBUBBLE -const LLPCode LL_PCODE_LEGACY_TREE = 0xF0 | LL_PCODE_LEGACY; // TREE - - // hemis -const LLPCode LL_PCODE_CYLINDER_HEMI = LL_PCODE_CYLINDER | LL_PCODE_HEMI_MASK; -const LLPCode LL_PCODE_CONE_HEMI = LL_PCODE_CONE | LL_PCODE_HEMI_MASK; -const LLPCode LL_PCODE_SPHERE_HEMI = LL_PCODE_SPHERE | LL_PCODE_HEMI_MASK; -const LLPCode LL_PCODE_TORUS_HEMI = LL_PCODE_TORUS | LL_PCODE_HEMI_MASK; - - -// Volumes consist of a profile at the base that is swept around -// a path to make a volume. -// The profile code -const U8 LL_PCODE_PROFILE_MASK = 0x0f; -const U8 LL_PCODE_PROFILE_MIN = 0x00; -const U8 LL_PCODE_PROFILE_CIRCLE = 0x00; -const U8 LL_PCODE_PROFILE_SQUARE = 0x01; -const U8 LL_PCODE_PROFILE_ISOTRI = 0x02; -const U8 LL_PCODE_PROFILE_EQUALTRI = 0x03; -const U8 LL_PCODE_PROFILE_RIGHTTRI = 0x04; -const U8 LL_PCODE_PROFILE_CIRCLE_HALF = 0x05; -const U8 LL_PCODE_PROFILE_MAX = 0x05; - -// Stored in the profile byte -const U8 LL_PCODE_HOLE_MASK = 0xf0; -const U8 LL_PCODE_HOLE_MIN = 0x00; -const U8 LL_PCODE_HOLE_SAME = 0x00; // same as outside profile -const U8 LL_PCODE_HOLE_CIRCLE = 0x10; -const U8 LL_PCODE_HOLE_SQUARE = 0x20; -const U8 LL_PCODE_HOLE_TRIANGLE = 0x30; -const U8 LL_PCODE_HOLE_MAX = 0x03; // min/max needs to be >> 4 of real min/max - -const U8 LL_PCODE_PATH_IGNORE = 0x00; -const U8 LL_PCODE_PATH_MIN = 0x01; // min/max needs to be >> 4 of real min/max -const U8 LL_PCODE_PATH_LINE = 0x10; -const U8 LL_PCODE_PATH_CIRCLE = 0x20; -const U8 LL_PCODE_PATH_CIRCLE2 = 0x30; -const U8 LL_PCODE_PATH_TEST = 0x40; -const U8 LL_PCODE_PATH_FLEXIBLE = 0x80; -const U8 LL_PCODE_PATH_MAX = 0x08; - -//============================================================================ - -// face identifiers -typedef U16 LLFaceID; - -const LLFaceID LL_FACE_PATH_BEGIN = 0x1 << 0; -const LLFaceID LL_FACE_PATH_END = 0x1 << 1; -const LLFaceID LL_FACE_INNER_SIDE = 0x1 << 2; -const LLFaceID LL_FACE_PROFILE_BEGIN = 0x1 << 3; -const LLFaceID LL_FACE_PROFILE_END = 0x1 << 4; -const LLFaceID LL_FACE_OUTER_SIDE_0 = 0x1 << 5; -const LLFaceID LL_FACE_OUTER_SIDE_1 = 0x1 << 6; -const LLFaceID LL_FACE_OUTER_SIDE_2 = 0x1 << 7; -const LLFaceID LL_FACE_OUTER_SIDE_3 = 0x1 << 8; - -//============================================================================ - -// sculpt types + flags - -const U8 LL_SCULPT_TYPE_NONE = 0; -const U8 LL_SCULPT_TYPE_SPHERE = 1; -const U8 LL_SCULPT_TYPE_TORUS = 2; -const U8 LL_SCULPT_TYPE_PLANE = 3; -const U8 LL_SCULPT_TYPE_CYLINDER = 4; -const U8 LL_SCULPT_TYPE_MESH = 5; -const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | - LL_SCULPT_TYPE_CYLINDER | LL_SCULPT_TYPE_MESH; - -const U8 LL_SCULPT_FLAG_INVERT = 64; -const U8 LL_SCULPT_FLAG_MIRROR = 128; - -const S32 LL_SCULPT_MESH_MAX_FACES = 8; - -class LLProfileParams -{ -public: - LLProfileParams() - : mCurveType(LL_PCODE_PROFILE_SQUARE), - mBegin(0.f), - mEnd(1.f), - mHollow(0.f), - mCRC(0) - { - } - - LLProfileParams(U8 curve, F32 begin, F32 end, F32 hollow) - : mCurveType(curve), - mBegin(begin), - mEnd(end), - mHollow(hollow), - mCRC(0) - { - } - - LLProfileParams(U8 curve, U16 begin, U16 end, U16 hollow) - { - mCurveType = curve; - F32 temp_f32 = begin * CUT_QUANTA; - if (temp_f32 > 1.f) - { - temp_f32 = 1.f; - } - mBegin = temp_f32; - temp_f32 = end * CUT_QUANTA; - if (temp_f32 > 1.f) - { - temp_f32 = 1.f; - } - mEnd = 1.f - temp_f32; - temp_f32 = hollow 
* HOLLOW_QUANTA; - if (temp_f32 > 1.f) - { - temp_f32 = 1.f; - } - mHollow = temp_f32; - mCRC = 0; - } - - bool operator==(const LLProfileParams ¶ms) const; - bool operator!=(const LLProfileParams ¶ms) const; - bool operator<(const LLProfileParams ¶ms) const; - - void copyParams(const LLProfileParams ¶ms); - - BOOL importFile(LLFILE *fp); - BOOL exportFile(LLFILE *fp) const; - - BOOL importLegacyStream(std::istream& input_stream); - BOOL exportLegacyStream(std::ostream& output_stream) const; - - LLSD asLLSD() const; - operator LLSD() const { return asLLSD(); } - bool fromLLSD(LLSD& sd); - - const F32& getBegin () const { return mBegin; } - const F32& getEnd () const { return mEnd; } - const F32& getHollow() const { return mHollow; } - const U8& getCurveType () const { return mCurveType; } - - void setCurveType(const U32 type) { mCurveType = type;} - void setBegin(const F32 begin) { mBegin = (begin >= 1.0f) ? 0.0f : ((int) (begin * 100000))/100000.0f;} - void setEnd(const F32 end) { mEnd = (end <= 0.0f) ? 1.0f : ((int) (end * 100000))/100000.0f;} - void setHollow(const F32 hollow) { mHollow = ((int) (hollow * 100000))/100000.0f;} - - friend std::ostream& operator<<(std::ostream &s, const LLProfileParams &profile_params); - -protected: - // Profile params - U8 mCurveType; - F32 mBegin; - F32 mEnd; - F32 mHollow; - - U32 mCRC; -}; - -inline bool LLProfileParams::operator==(const LLProfileParams ¶ms) const -{ - return - (getCurveType() == params.getCurveType()) && - (getBegin() == params.getBegin()) && - (getEnd() == params.getEnd()) && - (getHollow() == params.getHollow()); -} - -inline bool LLProfileParams::operator!=(const LLProfileParams ¶ms) const -{ - return - (getCurveType() != params.getCurveType()) || - (getBegin() != params.getBegin()) || - (getEnd() != params.getEnd()) || - (getHollow() != params.getHollow()); -} - - -inline bool LLProfileParams::operator<(const LLProfileParams ¶ms) const -{ - if (getCurveType() != params.getCurveType()) - { - return getCurveType() < params.getCurveType(); - } - else - if (getBegin() != params.getBegin()) - { - return getBegin() < params.getBegin(); - } - else - if (getEnd() != params.getEnd()) - { - return getEnd() < params.getEnd(); - } - else - { - return getHollow() < params.getHollow(); - } -} - -#define U8_TO_F32(x) (F32)(*((S8 *)&x)) - -class LLPathParams -{ -public: - LLPathParams() - : - mCurveType(LL_PCODE_PATH_LINE), - mBegin(0.f), - mEnd(1.f), - mScale(1.f,1.f), - mShear(0.f,0.f), - mTwistBegin(0.f), - mTwistEnd(0.f), - mRadiusOffset(0.f), - mTaper(0.f,0.f), - mRevolutions(1.f), - mSkew(0.f), - mCRC(0) - { - } - - LLPathParams(U8 curve, F32 begin, F32 end, F32 scx, F32 scy, F32 shx, F32 shy, F32 twistend, F32 twistbegin, F32 radiusoffset, F32 tx, F32 ty, F32 revolutions, F32 skew) - : mCurveType(curve), - mBegin(begin), - mEnd(end), - mScale(scx,scy), - mShear(shx,shy), - mTwistBegin(twistbegin), - mTwistEnd(twistend), - mRadiusOffset(radiusoffset), - mTaper(tx,ty), - mRevolutions(revolutions), - mSkew(skew), - mCRC(0) - { - } - - LLPathParams(U8 curve, U16 begin, U16 end, U8 scx, U8 scy, U8 shx, U8 shy, U8 twistend, U8 twistbegin, U8 radiusoffset, U8 tx, U8 ty, U8 revolutions, U8 skew) - { - mCurveType = curve; - mBegin = (F32)(begin * CUT_QUANTA); - mEnd = (F32)(100.f - end) * CUT_QUANTA; - if (mEnd > 1.f) - mEnd = 1.f; - mScale.setVec((F32) (200 - scx) * SCALE_QUANTA,(F32) (200 - scy) * SCALE_QUANTA); - mShear.setVec(U8_TO_F32(shx) * SHEAR_QUANTA,U8_TO_F32(shy) * SHEAR_QUANTA); - mTwistBegin = U8_TO_F32(twistbegin) * SCALE_QUANTA; - 
mTwistEnd = U8_TO_F32(twistend) * SCALE_QUANTA; - mRadiusOffset = U8_TO_F32(radiusoffset) * SCALE_QUANTA; - mTaper.setVec(U8_TO_F32(tx) * TAPER_QUANTA,U8_TO_F32(ty) * TAPER_QUANTA); - mRevolutions = ((F32)revolutions) * REV_QUANTA + 1.0f; - mSkew = U8_TO_F32(skew) * SCALE_QUANTA; - - mCRC = 0; - } - - bool operator==(const LLPathParams ¶ms) const; - bool operator!=(const LLPathParams ¶ms) const; - bool operator<(const LLPathParams ¶ms) const; - - void copyParams(const LLPathParams ¶ms); - - BOOL importFile(LLFILE *fp); - BOOL exportFile(LLFILE *fp) const; - - BOOL importLegacyStream(std::istream& input_stream); - BOOL exportLegacyStream(std::ostream& output_stream) const; - - LLSD asLLSD() const; - operator LLSD() const { return asLLSD(); } - bool fromLLSD(LLSD& sd); - - const F32& getBegin() const { return mBegin; } - const F32& getEnd() const { return mEnd; } - const LLVector2 &getScale() const { return mScale; } - const F32& getScaleX() const { return mScale.mV[0]; } - const F32& getScaleY() const { return mScale.mV[1]; } - const LLVector2 getBeginScale() const; - const LLVector2 getEndScale() const; - const LLVector2 &getShear() const { return mShear; } - const F32& getShearX() const { return mShear.mV[0]; } - const F32& getShearY() const { return mShear.mV[1]; } - const U8& getCurveType () const { return mCurveType; } - - const F32& getTwistBegin() const { return mTwistBegin; } - const F32& getTwistEnd() const { return mTwistEnd; } - const F32& getTwist() const { return mTwistEnd; } // deprecated - const F32& getRadiusOffset() const { return mRadiusOffset; } - const LLVector2 &getTaper() const { return mTaper; } - const F32& getTaperX() const { return mTaper.mV[0]; } - const F32& getTaperY() const { return mTaper.mV[1]; } - const F32& getRevolutions() const { return mRevolutions; } - const F32& getSkew() const { return mSkew; } - - void setCurveType(const U8 type) { mCurveType = type; } - void setBegin(const F32 begin) { mBegin = begin; } - void setEnd(const F32 end) { mEnd = end; } - - void setScale(const F32 x, const F32 y) { mScale.setVec(x,y); } - void setScaleX(const F32 v) { mScale.mV[VX] = v; } - void setScaleY(const F32 v) { mScale.mV[VY] = v; } - void setShear(const F32 x, const F32 y) { mShear.setVec(x,y); } - void setShearX(const F32 v) { mShear.mV[VX] = v; } - void setShearY(const F32 v) { mShear.mV[VY] = v; } - - void setTwistBegin(const F32 twist_begin) { mTwistBegin = twist_begin; } - void setTwistEnd(const F32 twist_end) { mTwistEnd = twist_end; } - void setTwist(const F32 twist) { setTwistEnd(twist); } // deprecated - void setRadiusOffset(const F32 radius_offset){ mRadiusOffset = radius_offset; } - void setTaper(const F32 x, const F32 y) { mTaper.setVec(x,y); } - void setTaperX(const F32 v) { mTaper.mV[VX] = v; } - void setTaperY(const F32 v) { mTaper.mV[VY] = v; } - void setRevolutions(const F32 revolutions) { mRevolutions = revolutions; } - void setSkew(const F32 skew) { mSkew = skew; } - - friend std::ostream& operator<<(std::ostream &s, const LLPathParams &path_params); - -protected: - // Path params - U8 mCurveType; - F32 mBegin; - F32 mEnd; - LLVector2 mScale; - LLVector2 mShear; - - F32 mTwistBegin; - F32 mTwistEnd; - F32 mRadiusOffset; - LLVector2 mTaper; - F32 mRevolutions; - F32 mSkew; - - U32 mCRC; -}; - -inline bool LLPathParams::operator==(const LLPathParams ¶ms) const -{ - return - (getCurveType() == params.getCurveType()) && - (getScale() == params.getScale()) && - (getBegin() == params.getBegin()) && - (getEnd() == params.getEnd()) && - (getShear() == 
params.getShear()) && - (getTwist() == params.getTwist()) && - (getTwistBegin() == params.getTwistBegin()) && - (getRadiusOffset() == params.getRadiusOffset()) && - (getTaper() == params.getTaper()) && - (getRevolutions() == params.getRevolutions()) && - (getSkew() == params.getSkew()); -} - -inline bool LLPathParams::operator!=(const LLPathParams ¶ms) const -{ - return - (getCurveType() != params.getCurveType()) || - (getScale() != params.getScale()) || - (getBegin() != params.getBegin()) || - (getEnd() != params.getEnd()) || - (getShear() != params.getShear()) || - (getTwist() != params.getTwist()) || - (getTwistBegin() !=params.getTwistBegin()) || - (getRadiusOffset() != params.getRadiusOffset()) || - (getTaper() != params.getTaper()) || - (getRevolutions() != params.getRevolutions()) || - (getSkew() != params.getSkew()); -} - - -inline bool LLPathParams::operator<(const LLPathParams ¶ms) const -{ - if( getCurveType() != params.getCurveType()) - { - return getCurveType() < params.getCurveType(); - } - else - if( getScale() != params.getScale()) - { - return getScale() < params.getScale(); - } - else - if( getBegin() != params.getBegin()) - { - return getBegin() < params.getBegin(); - } - else - if( getEnd() != params.getEnd()) - { - return getEnd() < params.getEnd(); - } - else - if( getShear() != params.getShear()) - { - return getShear() < params.getShear(); - } - else - if( getTwist() != params.getTwist()) - { - return getTwist() < params.getTwist(); - } - else - if( getTwistBegin() != params.getTwistBegin()) - { - return getTwistBegin() < params.getTwistBegin(); - } - else - if( getRadiusOffset() != params.getRadiusOffset()) - { - return getRadiusOffset() < params.getRadiusOffset(); - } - else - if( getTaper() != params.getTaper()) - { - return getTaper() < params.getTaper(); - } - else - if( getRevolutions() != params.getRevolutions()) - { - return getRevolutions() < params.getRevolutions(); - } - else - { - return getSkew() < params.getSkew(); - } -} - -typedef LLVolumeParams* LLVolumeParamsPtr; -typedef const LLVolumeParams* const_LLVolumeParamsPtr; - -class LLVolumeParams -{ -public: - LLVolumeParams() - : mSculptType(LL_SCULPT_TYPE_NONE) - { - } - - LLVolumeParams(LLProfileParams &profile, LLPathParams &path, - LLUUID sculpt_id = LLUUID::null, U8 sculpt_type = LL_SCULPT_TYPE_NONE) - : mProfileParams(profile), mPathParams(path), mSculptID(sculpt_id), mSculptType(sculpt_type) - { - } - - bool operator==(const LLVolumeParams ¶ms) const; - bool operator!=(const LLVolumeParams ¶ms) const; - bool operator<(const LLVolumeParams ¶ms) const; - - - void copyParams(const LLVolumeParams ¶ms); - - const LLProfileParams &getProfileParams() const {return mProfileParams;} - LLProfileParams &getProfileParams() {return mProfileParams;} - const LLPathParams &getPathParams() const {return mPathParams;} - LLPathParams &getPathParams() {return mPathParams;} - - BOOL importFile(LLFILE *fp); - BOOL exportFile(LLFILE *fp) const; - - BOOL importLegacyStream(std::istream& input_stream); - BOOL exportLegacyStream(std::ostream& output_stream) const; - - LLSD sculptAsLLSD() const; - bool sculptFromLLSD(LLSD& sd); - - LLSD asLLSD() const; - operator LLSD() const { return asLLSD(); } - bool fromLLSD(LLSD& sd); - - bool setType(U8 profile, U8 path); - - //void setBeginS(const F32 beginS) { mProfileParams.setBegin(beginS); } // range 0 to 1 - //void setBeginT(const F32 beginT) { mPathParams.setBegin(beginT); } // range 0 to 1 - //void setEndS(const F32 endS) { mProfileParams.setEnd(endS); } // range 0 to 1, 
must be greater than begin - //void setEndT(const F32 endT) { mPathParams.setEnd(endT); } // range 0 to 1, must be greater than begin - - bool setBeginAndEndS(const F32 begin, const F32 end); // both range from 0 to 1, begin must be less than end - bool setBeginAndEndT(const F32 begin, const F32 end); // both range from 0 to 1, begin must be less than end - - bool setHollow(const F32 hollow); // range 0 to 1 - bool setRatio(const F32 x) { return setRatio(x,x); } // 0 = point, 1 = same as base - bool setShear(const F32 x) { return setShear(x,x); } // 0 = no movement, - bool setRatio(const F32 x, const F32 y); // 0 = point, 1 = same as base - bool setShear(const F32 x, const F32 y); // 0 = no movement - - bool setTwistBegin(const F32 twist_begin); // range -1 to 1 - bool setTwistEnd(const F32 twist_end); // range -1 to 1 - bool setTwist(const F32 twist) { return setTwistEnd(twist); } // deprecated - bool setTaper(const F32 x, const F32 y) { bool pass_x = setTaperX(x); bool pass_y = setTaperY(y); return pass_x && pass_y; } - bool setTaperX(const F32 v); // -1 to 1 - bool setTaperY(const F32 v); // -1 to 1 - bool setRevolutions(const F32 revolutions); // 1 to 4 - bool setRadiusOffset(const F32 radius_offset); - bool setSkew(const F32 skew); - bool setSculptID(const LLUUID sculpt_id, U8 sculpt_type); - - static bool validate(U8 prof_curve, F32 prof_begin, F32 prof_end, F32 hollow, - U8 path_curve, F32 path_begin, F32 path_end, - F32 scx, F32 scy, F32 shx, F32 shy, - F32 twistend, F32 twistbegin, F32 radiusoffset, - F32 tx, F32 ty, F32 revolutions, F32 skew); - - const F32& getBeginS() const { return mProfileParams.getBegin(); } - const F32& getBeginT() const { return mPathParams.getBegin(); } - const F32& getEndS() const { return mProfileParams.getEnd(); } - const F32& getEndT() const { return mPathParams.getEnd(); } - - const F32& getHollow() const { return mProfileParams.getHollow(); } - const F32& getTwist() const { return mPathParams.getTwist(); } - const F32& getRatio() const { return mPathParams.getScaleX(); } - const F32& getRatioX() const { return mPathParams.getScaleX(); } - const F32& getRatioY() const { return mPathParams.getScaleY(); } - const F32& getShearX() const { return mPathParams.getShearX(); } - const F32& getShearY() const { return mPathParams.getShearY(); } - - const F32& getTwistBegin()const { return mPathParams.getTwistBegin(); } - const F32& getRadiusOffset() const { return mPathParams.getRadiusOffset(); } - const F32& getTaper() const { return mPathParams.getTaperX(); } - const F32& getTaperX() const { return mPathParams.getTaperX(); } - const F32& getTaperY() const { return mPathParams.getTaperY(); } - const F32& getRevolutions() const { return mPathParams.getRevolutions(); } - const F32& getSkew() const { return mPathParams.getSkew(); } - const LLUUID& getSculptID() const { return mSculptID; } - const U8& getSculptType() const { return mSculptType; } - bool isSculpt() const; - bool isMeshSculpt() const; - BOOL isConvex() const; - - // 'begin' and 'end' should be in range [0, 1] (they will be clamped) - // (begin, end) = (0, 1) will not change the volume - // (begin, end) = (0, 0.5) will reduce the volume to the first half of its profile/path (S/T) - void reduceS(F32 begin, F32 end); - void reduceT(F32 begin, F32 end); - - struct compare - { - bool operator()( const const_LLVolumeParamsPtr& first, const const_LLVolumeParamsPtr& second) const - { - return (*first < *second); - } - }; - - friend std::ostream& operator<<(std::ostream &s, const LLVolumeParams 
&volume_params); - - // debug helper functions - void setCube(); - -protected: - LLProfileParams mProfileParams; - LLPathParams mPathParams; - LLUUID mSculptID; - U8 mSculptType; -}; - - -class LLProfile -{ -public: - LLProfile() - : mOpen(FALSE), - mConcave(FALSE), - mDirty(TRUE), - mTotalOut(0), - mTotal(2) - { - } - - ~LLProfile(); - - S32 getTotal() const { return mTotal; } - S32 getTotalOut() const { return mTotalOut; } // Total number of outside points - BOOL isFlat(S32 face) const { return (mFaces[face].mCount == 2); } - BOOL isOpen() const { return mOpen; } - void setDirty() { mDirty = TRUE; } - BOOL generate(const LLProfileParams& params, BOOL path_open, F32 detail = 1.0f, S32 split = 0, - BOOL is_sculpted = FALSE, S32 sculpt_size = 0); - BOOL isConcave() const { return mConcave; } -public: - struct Face - { - S32 mIndex; - S32 mCount; - F32 mScaleU; - BOOL mCap; - BOOL mFlat; - LLFaceID mFaceID; - }; - - std::vector mProfile; - std::vector mNormals; - std::vector mFaces; - std::vector mEdgeNormals; - std::vector mEdgeCenters; - - friend std::ostream& operator<<(std::ostream &s, const LLProfile &profile); - -protected: - void genNormals(const LLProfileParams& params); - void genNGon(const LLProfileParams& params, S32 sides, F32 offset=0.0f, F32 bevel = 0.0f, F32 ang_scale = 1.f, S32 split = 0); - - Face* addHole(const LLProfileParams& params, BOOL flat, F32 sides, F32 offset, F32 box_hollow, F32 ang_scale, S32 split = 0); - Face* addCap (S16 faceID); - Face* addFace(S32 index, S32 count, F32 scaleU, S16 faceID, BOOL flat); - -protected: - BOOL mOpen; - BOOL mConcave; - BOOL mDirty; - - S32 mTotalOut; - S32 mTotal; -}; - -//------------------------------------------------------------------- -// SWEEP/EXTRUDE PATHS -//------------------------------------------------------------------- - -class LLPath -{ -public: - struct PathPt - { - LLVector3 mPos; - LLVector2 mScale; - LLQuaternion mRot; - F32 mTexT; - PathPt() { mPos.setVec(0,0,0); mTexT = 0; mScale.setVec(0,0); mRot.loadIdentity(); } - }; - -public: - LLPath() - : mOpen(FALSE), - mTotal(0), - mDirty(TRUE), - mStep(1) - { - } - - virtual ~LLPath(); - - void genNGon(const LLPathParams& params, S32 sides, F32 offset=0.0f, F32 end_scale = 1.f, F32 twist_scale = 1.f); - virtual BOOL generate(const LLPathParams& params, F32 detail=1.0f, S32 split = 0, - BOOL is_sculpted = FALSE, S32 sculpt_size = 0); - - BOOL isOpen() const { return mOpen; } - F32 getStep() const { return mStep; } - void setDirty() { mDirty = TRUE; } - - S32 getPathLength() const { return (S32)mPath.size(); } - - void resizePath(S32 length) { mPath.resize(length); } - - friend std::ostream& operator<<(std::ostream &s, const LLPath &path); - -public: - std::vector mPath; - -protected: - BOOL mOpen; - S32 mTotal; - BOOL mDirty; - F32 mStep; -}; - -class LLDynamicPath : public LLPath -{ -public: - LLDynamicPath() : LLPath() { } - /*virtual*/ BOOL generate(const LLPathParams& params, F32 detail=1.0f, S32 split = 0, - BOOL is_sculpted = FALSE, S32 sculpt_size = 0); -}; - -// Yet another "face" class - caches volume-specific, but not instance-specific data for faces) -class LLVolumeFace -{ -public: - class VertexData - { - enum - { - POSITION = 0, - NORMAL = 1 - }; - - private: - void init(); - public: - VertexData(); - VertexData(const VertexData& rhs); - const VertexData& operator=(const VertexData& rhs); - - ~VertexData(); - LLVector4a& getPosition(); - LLVector4a& getNormal(); - const LLVector4a& getPosition() const; - const LLVector4a& getNormal() const; - void 
setPosition(const LLVector4a& pos); - void setNormal(const LLVector4a& norm); - - - LLVector2 mTexCoord; - - bool operator<(const VertexData& rhs) const; - bool operator==(const VertexData& rhs) const; - bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; - - private: - LLVector4a* mData; - }; - - LLVolumeFace(); - LLVolumeFace(const LLVolumeFace& src); - LLVolumeFace& operator=(const LLVolumeFace& rhs); - - ~LLVolumeFace(); -private: - void freeData(); -public: - - BOOL create(LLVolume* volume, BOOL partial_build = FALSE); - void createBinormals(); - - void appendFace(const LLVolumeFace& face, LLMatrix4& transform, LLMatrix4& normal_tranform); - - void resizeVertices(S32 num_verts); - void allocateBinormals(S32 num_verts); - void allocateWeights(S32 num_verts); - void resizeIndices(S32 num_indices); - void fillFromLegacyData(std::vector& v, std::vector& idx); - - void pushVertex(const VertexData& cv); - void pushVertex(const LLVector4a& pos, const LLVector4a& norm, const LLVector2& tc); - void pushIndex(const U16& idx); - - void swapData(LLVolumeFace& rhs); - - void getVertexData(U16 indx, LLVolumeFace::VertexData& cv); - - class VertexMapData : public LLVolumeFace::VertexData - { - public: - U16 mIndex; - - bool operator==(const LLVolumeFace::VertexData& rhs) const; - - struct ComparePosition - { - bool operator()(const LLVector3& a, const LLVector3& b) const; - }; - - typedef std::map, VertexMapData::ComparePosition > PointMap; - }; - - void optimize(F32 angle_cutoff = 2.f); - void cacheOptimize(); - - void createOctree(F32 scaler = 0.25f, const LLVector4a& center = LLVector4a(0,0,0), const LLVector4a& size = LLVector4a(0.5f,0.5f,0.5f)); - - enum - { - SINGLE_MASK = 0x0001, - CAP_MASK = 0x0002, - END_MASK = 0x0004, - SIDE_MASK = 0x0008, - INNER_MASK = 0x0010, - OUTER_MASK = 0x0020, - HOLLOW_MASK = 0x0040, - OPEN_MASK = 0x0080, - FLAT_MASK = 0x0100, - TOP_MASK = 0x0200, - BOTTOM_MASK = 0x0400 - }; - -public: - S32 mID; - U32 mTypeMask; - - // Only used for INNER/OUTER faces - S32 mBeginS; - S32 mBeginT; - S32 mNumS; - S32 mNumT; - - LLVector4a* mExtents; //minimum and maximum point of face - LLVector4a* mCenter; - LLVector2 mTexCoordExtents[2]; //minimum and maximum of texture coordinates of the face. - - S32 mNumVertices; - S32 mNumIndices; - - LLVector4a* mPositions; - LLVector4a* mNormals; - LLVector4a* mBinormals; - LLVector2* mTexCoords; - U16* mIndices; - - std::vector mEdge; - - //list of skin weights for rigged volumes - // format is mWeights[vertex_index].mV[influence] = . 
- // mWeights.size() should be empty or match mVertices.size() - LLVector4a* mWeights; - - LLOctreeNode* mOctree; - -private: - BOOL createUnCutCubeCap(LLVolume* volume, BOOL partial_build = FALSE); - BOOL createCap(LLVolume* volume, BOOL partial_build = FALSE); - BOOL createSide(LLVolume* volume, BOOL partial_build = FALSE); -}; - -class LLVolume : public LLRefCount -{ - friend class LLVolumeLODGroup; - -protected: - ~LLVolume(); // use unref - -public: - struct Point - { - LLVector3 mPos; - }; - - struct FaceParams - { - LLFaceID mFaceID; - S32 mBeginS; - S32 mCountS; - S32 mBeginT; - S32 mCountT; - }; - - LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL generate_single_face = FALSE, const BOOL is_unique = FALSE); - - U8 getProfileType() const { return mParams.getProfileParams().getCurveType(); } - U8 getPathType() const { return mParams.getPathParams().getCurveType(); } - S32 getNumFaces() const; - S32 getNumVolumeFaces() const { return mVolumeFaces.size(); } - F32 getDetail() const { return mDetail; } - const LLVolumeParams& getParams() const { return mParams; } - LLVolumeParams getCopyOfParams() const { return mParams; } - const LLProfile& getProfile() const { return *mProfilep; } - LLPath& getPath() const { return *mPathp; } - void resizePath(S32 length); - const std::vector& getMesh() const { return mMesh; } - const LLVector3& getMeshPt(const U32 i) const { return mMesh[i].mPos; } - - void setDirty() { mPathp->setDirty(); mProfilep->setDirty(); } - - void regen(); - void genBinormals(S32 face); - - BOOL isConvex() const; - BOOL isCap(S32 face); - BOOL isFlat(S32 face); - BOOL isUnique() const { return mUnique; } - - S32 getSculptLevel() const { return mSculptLevel; } - void setSculptLevel(S32 level) { mSculptLevel = level; } - - S32 *getTriangleIndices(U32 &num_indices) const; - - // returns number of triangle indeces required for path/profile mesh - S32 getNumTriangleIndices() const; - - S32 getNumTriangles() const; - - void generateSilhouetteVertices(std::vector &vertices, - std::vector &normals, - std::vector &segments, - const LLVector3& view_vec, - const LLMatrix4& mat, - const LLMatrix3& norm_mat, - S32 face_index); - - //get the face index of the face that intersects with the given line segment at the point - //closest to start. Moves end to the point of intersection. Returns -1 if no intersection. - //Line segment must be in volume space. - S32 lineSegmentIntersect(const LLVector3& start, const LLVector3& end, - S32 face = -1, // which face to check, -1 = ALL_SIDES - LLVector3* intersection = NULL, // return the intersection point - LLVector2* tex_coord = NULL, // return the texture coordinates of the intersection point - LLVector3* normal = NULL, // return the surface normal at the intersection point - LLVector3* bi_normal = NULL // return the surface bi-normal at the intersection point - ); - - S32 lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, - S32 face = 1, - LLVector3* intersection = NULL, - LLVector2* tex_coord = NULL, - LLVector3* normal = NULL, - LLVector3* bi_normal = NULL); - - // The following cleans up vertices and triangles, - // getting rid of degenerate triangles and duplicate vertices, - // and allocates new arrays with the clean data. 
- static BOOL cleanupTriangleData( const S32 num_input_vertices, - const std::vector &input_vertices, - const S32 num_input_triangles, - S32 *input_triangles, - S32 &num_output_vertices, - LLVector3 **output_vertices, - S32 &num_output_triangles, - S32 **output_triangles); - LLFaceID generateFaceMask(); - - BOOL isFaceMaskValid(LLFaceID face_mask); - static S32 sNumMeshPoints; - - friend std::ostream& operator<<(std::ostream &s, const LLVolume &volume); - friend std::ostream& operator<<(std::ostream &s, const LLVolume *volumep); // HACK to bypass Windoze confusion over - // conversion if *(LLVolume*) to LLVolume& - const LLVolumeFace &getVolumeFace(const S32 f) const {return mVolumeFaces[f];} // DO NOT DELETE VOLUME WHILE USING THIS REFERENCE, OR HOLD A POINTER TO THIS VOLUMEFACE - - U32 mFaceMask; // bit array of which faces exist in this volume - LLVector3 mLODScaleBias; // vector for biasing LOD based on scale - - void sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level); - void copyVolumeFaces(const LLVolume* volume); - void cacheOptimize(); - -private: - void sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type); - F32 sculptGetSurfaceArea(); - void sculptGeneratePlaceholder(); - void sculptCalcMeshResolution(U16 width, U16 height, U8 type, S32& s, S32& t); - - -protected: - BOOL generate(); - void createVolumeFaces(); -public: - virtual BOOL createVolumeFacesFromFile(const std::string& file_name); - virtual BOOL createVolumeFacesFromStream(std::istream& is); - virtual bool unpackVolumeFaces(std::istream& is, S32 size); - - virtual void makeTetrahedron(); - virtual BOOL isTetrahedron(); - - protected: - BOOL mUnique; - F32 mDetail; - S32 mSculptLevel; - BOOL mIsTetrahedron; - - LLVolumeParams mParams; - LLPath *mPathp; - LLProfile *mProfilep; - std::vector mMesh; - - BOOL mGenerateSingleFace; - typedef std::vector face_list_t; - face_list_t mVolumeFaces; - -public: - LLVector4a* mHullPoints; - U16* mHullIndices; - S32 mNumHullPoints; - S32 mNumHullIndices; -}; - -std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params); - -void calc_binormal_from_triangle( - LLVector4a& binormal, - const LLVector4a& pos0, - const LLVector2& tex0, - const LLVector4a& pos1, - const LLVector2& tex1, - const LLVector4a& pos2, - const LLVector2& tex2); - -BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* center, const F32* size); -BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size); -BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size); - -BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, - F32& intersection_a, F32& intersection_b, F32& intersection_t, BOOL two_sided); - -BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, - F32& intersection_a, F32& intersection_b, F32& intersection_t); -BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, - F32& intersection_a, F32& intersection_b, F32& intersection_t); - - - -#endif +/** + * @file llvolume.h + * @brief LLVolume base class. 
+ * + * $LicenseInfo:firstyear=2002&license=viewerlgpl$ + * Second Life Viewer Source Code + * Copyright (C) 2010, Linden Research, Inc. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; + * version 2.1 of the License only. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA + * $/LicenseInfo$ + */ + +#ifndef LL_LLVOLUME_H +#define LL_LLVOLUME_H + +#include + +class LLProfileParams; +class LLPathParams; +class LLVolumeParams; +class LLProfile; +class LLPath; + +template class LLOctreeNode; + +class LLVector4a; +class LLVolumeFace; +class LLVolume; +class LLVolumeTriangle; + +#include "lldarray.h" +#include "lluuid.h" +#include "v4color.h" +//#include "vmath.h" +#include "v2math.h" +#include "v3math.h" +#include "v3dmath.h" +#include "v4math.h" +#include "llquaternion.h" +#include "llstrider.h" +#include "v4coloru.h" +#include "llrefcount.h" +#include "llfile.h" + +//============================================================================ + +const S32 MIN_DETAIL_FACES = 6; +const S32 MIN_LOD = 0; +const S32 MAX_LOD = 3; + +// These are defined here but are not enforced at this level, +// rather they are here for the convenience of code that uses +// the LLVolume class. 
+const F32 MIN_VOLUME_PROFILE_WIDTH = 0.05f; +const F32 MIN_VOLUME_PATH_WIDTH = 0.05f; + +const F32 CUT_QUANTA = 0.00002f; +const F32 SCALE_QUANTA = 0.01f; +const F32 SHEAR_QUANTA = 0.01f; +const F32 TAPER_QUANTA = 0.01f; +const F32 REV_QUANTA = 0.015f; +const F32 HOLLOW_QUANTA = 0.00002f; + +const S32 MAX_VOLUME_TRIANGLE_INDICES = 10000; + +//============================================================================ + +// useful masks +const LLPCode LL_PCODE_HOLLOW_MASK = 0x80; // has a thickness +const LLPCode LL_PCODE_SEGMENT_MASK = 0x40; // segments (1 angle) +const LLPCode LL_PCODE_PATCH_MASK = 0x20; // segmented segments (2 angles) +const LLPCode LL_PCODE_HEMI_MASK = 0x10; // half-primitives get their own type per PR's dictum +const LLPCode LL_PCODE_BASE_MASK = 0x0F; + + // primitive shapes +const LLPCode LL_PCODE_CUBE = 1; +const LLPCode LL_PCODE_PRISM = 2; +const LLPCode LL_PCODE_TETRAHEDRON = 3; +const LLPCode LL_PCODE_PYRAMID = 4; +const LLPCode LL_PCODE_CYLINDER = 5; +const LLPCode LL_PCODE_CONE = 6; +const LLPCode LL_PCODE_SPHERE = 7; +const LLPCode LL_PCODE_TORUS = 8; +const LLPCode LL_PCODE_VOLUME = 9; + + // surfaces +//const LLPCode LL_PCODE_SURFACE_TRIANGLE = 10; +//const LLPCode LL_PCODE_SURFACE_SQUARE = 11; +//const LLPCode LL_PCODE_SURFACE_DISC = 12; + +const LLPCode LL_PCODE_APP = 14; // App specific pcode (for viewer/sim side only objects) +const LLPCode LL_PCODE_LEGACY = 15; + +// Pcodes for legacy objects +//const LLPCode LL_PCODE_LEGACY_ATOR = 0x10 | LL_PCODE_LEGACY; // ATOR +const LLPCode LL_PCODE_LEGACY_AVATAR = 0x20 | LL_PCODE_LEGACY; // PLAYER +//const LLPCode LL_PCODE_LEGACY_BIRD = 0x30 | LL_PCODE_LEGACY; // BIRD +//const LLPCode LL_PCODE_LEGACY_DEMON = 0x40 | LL_PCODE_LEGACY; // DEMON +const LLPCode LL_PCODE_LEGACY_GRASS = 0x50 | LL_PCODE_LEGACY; // GRASS +const LLPCode LL_PCODE_TREE_NEW = 0x60 | LL_PCODE_LEGACY; // new trees +//const LLPCode LL_PCODE_LEGACY_ORACLE = 0x70 | LL_PCODE_LEGACY; // ORACLE +const LLPCode LL_PCODE_LEGACY_PART_SYS = 0x80 | LL_PCODE_LEGACY; // PART_SYS +const LLPCode LL_PCODE_LEGACY_ROCK = 0x90 | LL_PCODE_LEGACY; // ROCK +//const LLPCode LL_PCODE_LEGACY_SHOT = 0xA0 | LL_PCODE_LEGACY; // BASIC_SHOT +//const LLPCode LL_PCODE_LEGACY_SHOT_BIG = 0xB0 | LL_PCODE_LEGACY; +//const LLPCode LL_PCODE_LEGACY_SMOKE = 0xC0 | LL_PCODE_LEGACY; // SMOKE +//const LLPCode LL_PCODE_LEGACY_SPARK = 0xD0 | LL_PCODE_LEGACY;// SPARK +const LLPCode LL_PCODE_LEGACY_TEXT_BUBBLE = 0xE0 | LL_PCODE_LEGACY; // TEXTBUBBLE +const LLPCode LL_PCODE_LEGACY_TREE = 0xF0 | LL_PCODE_LEGACY; // TREE + + // hemis +const LLPCode LL_PCODE_CYLINDER_HEMI = LL_PCODE_CYLINDER | LL_PCODE_HEMI_MASK; +const LLPCode LL_PCODE_CONE_HEMI = LL_PCODE_CONE | LL_PCODE_HEMI_MASK; +const LLPCode LL_PCODE_SPHERE_HEMI = LL_PCODE_SPHERE | LL_PCODE_HEMI_MASK; +const LLPCode LL_PCODE_TORUS_HEMI = LL_PCODE_TORUS | LL_PCODE_HEMI_MASK; + + +// Volumes consist of a profile at the base that is swept around +// a path to make a volume. 
+// The profile code +const U8 LL_PCODE_PROFILE_MASK = 0x0f; +const U8 LL_PCODE_PROFILE_MIN = 0x00; +const U8 LL_PCODE_PROFILE_CIRCLE = 0x00; +const U8 LL_PCODE_PROFILE_SQUARE = 0x01; +const U8 LL_PCODE_PROFILE_ISOTRI = 0x02; +const U8 LL_PCODE_PROFILE_EQUALTRI = 0x03; +const U8 LL_PCODE_PROFILE_RIGHTTRI = 0x04; +const U8 LL_PCODE_PROFILE_CIRCLE_HALF = 0x05; +const U8 LL_PCODE_PROFILE_MAX = 0x05; + +// Stored in the profile byte +const U8 LL_PCODE_HOLE_MASK = 0xf0; +const U8 LL_PCODE_HOLE_MIN = 0x00; +const U8 LL_PCODE_HOLE_SAME = 0x00; // same as outside profile +const U8 LL_PCODE_HOLE_CIRCLE = 0x10; +const U8 LL_PCODE_HOLE_SQUARE = 0x20; +const U8 LL_PCODE_HOLE_TRIANGLE = 0x30; +const U8 LL_PCODE_HOLE_MAX = 0x03; // min/max needs to be >> 4 of real min/max + +const U8 LL_PCODE_PATH_IGNORE = 0x00; +const U8 LL_PCODE_PATH_MIN = 0x01; // min/max needs to be >> 4 of real min/max +const U8 LL_PCODE_PATH_LINE = 0x10; +const U8 LL_PCODE_PATH_CIRCLE = 0x20; +const U8 LL_PCODE_PATH_CIRCLE2 = 0x30; +const U8 LL_PCODE_PATH_TEST = 0x40; +const U8 LL_PCODE_PATH_FLEXIBLE = 0x80; +const U8 LL_PCODE_PATH_MAX = 0x08; + +//============================================================================ + +// face identifiers +typedef U16 LLFaceID; + +const LLFaceID LL_FACE_PATH_BEGIN = 0x1 << 0; +const LLFaceID LL_FACE_PATH_END = 0x1 << 1; +const LLFaceID LL_FACE_INNER_SIDE = 0x1 << 2; +const LLFaceID LL_FACE_PROFILE_BEGIN = 0x1 << 3; +const LLFaceID LL_FACE_PROFILE_END = 0x1 << 4; +const LLFaceID LL_FACE_OUTER_SIDE_0 = 0x1 << 5; +const LLFaceID LL_FACE_OUTER_SIDE_1 = 0x1 << 6; +const LLFaceID LL_FACE_OUTER_SIDE_2 = 0x1 << 7; +const LLFaceID LL_FACE_OUTER_SIDE_3 = 0x1 << 8; + +//============================================================================ + +// sculpt types + flags + +const U8 LL_SCULPT_TYPE_NONE = 0; +const U8 LL_SCULPT_TYPE_SPHERE = 1; +const U8 LL_SCULPT_TYPE_TORUS = 2; +const U8 LL_SCULPT_TYPE_PLANE = 3; +const U8 LL_SCULPT_TYPE_CYLINDER = 4; +const U8 LL_SCULPT_TYPE_MESH = 5; +const U8 LL_SCULPT_TYPE_MASK = LL_SCULPT_TYPE_SPHERE | LL_SCULPT_TYPE_TORUS | LL_SCULPT_TYPE_PLANE | + LL_SCULPT_TYPE_CYLINDER | LL_SCULPT_TYPE_MESH; + +const U8 LL_SCULPT_FLAG_INVERT = 64; +const U8 LL_SCULPT_FLAG_MIRROR = 128; + +const S32 LL_SCULPT_MESH_MAX_FACES = 8; + +class LLProfileParams +{ +public: + LLProfileParams() + : mCurveType(LL_PCODE_PROFILE_SQUARE), + mBegin(0.f), + mEnd(1.f), + mHollow(0.f), + mCRC(0) + { + } + + LLProfileParams(U8 curve, F32 begin, F32 end, F32 hollow) + : mCurveType(curve), + mBegin(begin), + mEnd(end), + mHollow(hollow), + mCRC(0) + { + } + + LLProfileParams(U8 curve, U16 begin, U16 end, U16 hollow) + { + mCurveType = curve; + F32 temp_f32 = begin * CUT_QUANTA; + if (temp_f32 > 1.f) + { + temp_f32 = 1.f; + } + mBegin = temp_f32; + temp_f32 = end * CUT_QUANTA; + if (temp_f32 > 1.f) + { + temp_f32 = 1.f; + } + mEnd = 1.f - temp_f32; + temp_f32 = hollow * HOLLOW_QUANTA; + if (temp_f32 > 1.f) + { + temp_f32 = 1.f; + } + mHollow = temp_f32; + mCRC = 0; + } + + bool operator==(const LLProfileParams ¶ms) const; + bool operator!=(const LLProfileParams ¶ms) const; + bool operator<(const LLProfileParams ¶ms) const; + + void copyParams(const LLProfileParams ¶ms); + + BOOL importFile(LLFILE *fp); + BOOL exportFile(LLFILE *fp) const; + + BOOL importLegacyStream(std::istream& input_stream); + BOOL exportLegacyStream(std::ostream& output_stream) const; + + LLSD asLLSD() const; + operator LLSD() const { return asLLSD(); } + bool fromLLSD(LLSD& sd); + + const F32& getBegin () const { return 
mBegin; } + const F32& getEnd () const { return mEnd; } + const F32& getHollow() const { return mHollow; } + const U8& getCurveType () const { return mCurveType; } + + void setCurveType(const U32 type) { mCurveType = type;} + void setBegin(const F32 begin) { mBegin = (begin >= 1.0f) ? 0.0f : ((int) (begin * 100000))/100000.0f;} + void setEnd(const F32 end) { mEnd = (end <= 0.0f) ? 1.0f : ((int) (end * 100000))/100000.0f;} + void setHollow(const F32 hollow) { mHollow = ((int) (hollow * 100000))/100000.0f;} + + friend std::ostream& operator<<(std::ostream &s, const LLProfileParams &profile_params); + +protected: + // Profile params + U8 mCurveType; + F32 mBegin; + F32 mEnd; + F32 mHollow; + + U32 mCRC; +}; + +inline bool LLProfileParams::operator==(const LLProfileParams ¶ms) const +{ + return + (getCurveType() == params.getCurveType()) && + (getBegin() == params.getBegin()) && + (getEnd() == params.getEnd()) && + (getHollow() == params.getHollow()); +} + +inline bool LLProfileParams::operator!=(const LLProfileParams ¶ms) const +{ + return + (getCurveType() != params.getCurveType()) || + (getBegin() != params.getBegin()) || + (getEnd() != params.getEnd()) || + (getHollow() != params.getHollow()); +} + + +inline bool LLProfileParams::operator<(const LLProfileParams ¶ms) const +{ + if (getCurveType() != params.getCurveType()) + { + return getCurveType() < params.getCurveType(); + } + else + if (getBegin() != params.getBegin()) + { + return getBegin() < params.getBegin(); + } + else + if (getEnd() != params.getEnd()) + { + return getEnd() < params.getEnd(); + } + else + { + return getHollow() < params.getHollow(); + } +} + +#define U8_TO_F32(x) (F32)(*((S8 *)&x)) + +class LLPathParams +{ +public: + LLPathParams() + : + mCurveType(LL_PCODE_PATH_LINE), + mBegin(0.f), + mEnd(1.f), + mScale(1.f,1.f), + mShear(0.f,0.f), + mTwistBegin(0.f), + mTwistEnd(0.f), + mRadiusOffset(0.f), + mTaper(0.f,0.f), + mRevolutions(1.f), + mSkew(0.f), + mCRC(0) + { + } + + LLPathParams(U8 curve, F32 begin, F32 end, F32 scx, F32 scy, F32 shx, F32 shy, F32 twistend, F32 twistbegin, F32 radiusoffset, F32 tx, F32 ty, F32 revolutions, F32 skew) + : mCurveType(curve), + mBegin(begin), + mEnd(end), + mScale(scx,scy), + mShear(shx,shy), + mTwistBegin(twistbegin), + mTwistEnd(twistend), + mRadiusOffset(radiusoffset), + mTaper(tx,ty), + mRevolutions(revolutions), + mSkew(skew), + mCRC(0) + { + } + + LLPathParams(U8 curve, U16 begin, U16 end, U8 scx, U8 scy, U8 shx, U8 shy, U8 twistend, U8 twistbegin, U8 radiusoffset, U8 tx, U8 ty, U8 revolutions, U8 skew) + { + mCurveType = curve; + mBegin = (F32)(begin * CUT_QUANTA); + mEnd = (F32)(100.f - end) * CUT_QUANTA; + if (mEnd > 1.f) + mEnd = 1.f; + mScale.setVec((F32) (200 - scx) * SCALE_QUANTA,(F32) (200 - scy) * SCALE_QUANTA); + mShear.setVec(U8_TO_F32(shx) * SHEAR_QUANTA,U8_TO_F32(shy) * SHEAR_QUANTA); + mTwistBegin = U8_TO_F32(twistbegin) * SCALE_QUANTA; + mTwistEnd = U8_TO_F32(twistend) * SCALE_QUANTA; + mRadiusOffset = U8_TO_F32(radiusoffset) * SCALE_QUANTA; + mTaper.setVec(U8_TO_F32(tx) * TAPER_QUANTA,U8_TO_F32(ty) * TAPER_QUANTA); + mRevolutions = ((F32)revolutions) * REV_QUANTA + 1.0f; + mSkew = U8_TO_F32(skew) * SCALE_QUANTA; + + mCRC = 0; + } + + bool operator==(const LLPathParams ¶ms) const; + bool operator!=(const LLPathParams ¶ms) const; + bool operator<(const LLPathParams ¶ms) const; + + void copyParams(const LLPathParams ¶ms); + + BOOL importFile(LLFILE *fp); + BOOL exportFile(LLFILE *fp) const; + + BOOL importLegacyStream(std::istream& input_stream); + BOOL 
exportLegacyStream(std::ostream& output_stream) const; + + LLSD asLLSD() const; + operator LLSD() const { return asLLSD(); } + bool fromLLSD(LLSD& sd); + + const F32& getBegin() const { return mBegin; } + const F32& getEnd() const { return mEnd; } + const LLVector2 &getScale() const { return mScale; } + const F32& getScaleX() const { return mScale.mV[0]; } + const F32& getScaleY() const { return mScale.mV[1]; } + const LLVector2 getBeginScale() const; + const LLVector2 getEndScale() const; + const LLVector2 &getShear() const { return mShear; } + const F32& getShearX() const { return mShear.mV[0]; } + const F32& getShearY() const { return mShear.mV[1]; } + const U8& getCurveType () const { return mCurveType; } + + const F32& getTwistBegin() const { return mTwistBegin; } + const F32& getTwistEnd() const { return mTwistEnd; } + const F32& getTwist() const { return mTwistEnd; } // deprecated + const F32& getRadiusOffset() const { return mRadiusOffset; } + const LLVector2 &getTaper() const { return mTaper; } + const F32& getTaperX() const { return mTaper.mV[0]; } + const F32& getTaperY() const { return mTaper.mV[1]; } + const F32& getRevolutions() const { return mRevolutions; } + const F32& getSkew() const { return mSkew; } + + void setCurveType(const U8 type) { mCurveType = type; } + void setBegin(const F32 begin) { mBegin = begin; } + void setEnd(const F32 end) { mEnd = end; } + + void setScale(const F32 x, const F32 y) { mScale.setVec(x,y); } + void setScaleX(const F32 v) { mScale.mV[VX] = v; } + void setScaleY(const F32 v) { mScale.mV[VY] = v; } + void setShear(const F32 x, const F32 y) { mShear.setVec(x,y); } + void setShearX(const F32 v) { mShear.mV[VX] = v; } + void setShearY(const F32 v) { mShear.mV[VY] = v; } + + void setTwistBegin(const F32 twist_begin) { mTwistBegin = twist_begin; } + void setTwistEnd(const F32 twist_end) { mTwistEnd = twist_end; } + void setTwist(const F32 twist) { setTwistEnd(twist); } // deprecated + void setRadiusOffset(const F32 radius_offset){ mRadiusOffset = radius_offset; } + void setTaper(const F32 x, const F32 y) { mTaper.setVec(x,y); } + void setTaperX(const F32 v) { mTaper.mV[VX] = v; } + void setTaperY(const F32 v) { mTaper.mV[VY] = v; } + void setRevolutions(const F32 revolutions) { mRevolutions = revolutions; } + void setSkew(const F32 skew) { mSkew = skew; } + + friend std::ostream& operator<<(std::ostream &s, const LLPathParams &path_params); + +protected: + // Path params + U8 mCurveType; + F32 mBegin; + F32 mEnd; + LLVector2 mScale; + LLVector2 mShear; + + F32 mTwistBegin; + F32 mTwistEnd; + F32 mRadiusOffset; + LLVector2 mTaper; + F32 mRevolutions; + F32 mSkew; + + U32 mCRC; +}; + +inline bool LLPathParams::operator==(const LLPathParams ¶ms) const +{ + return + (getCurveType() == params.getCurveType()) && + (getScale() == params.getScale()) && + (getBegin() == params.getBegin()) && + (getEnd() == params.getEnd()) && + (getShear() == params.getShear()) && + (getTwist() == params.getTwist()) && + (getTwistBegin() == params.getTwistBegin()) && + (getRadiusOffset() == params.getRadiusOffset()) && + (getTaper() == params.getTaper()) && + (getRevolutions() == params.getRevolutions()) && + (getSkew() == params.getSkew()); +} + +inline bool LLPathParams::operator!=(const LLPathParams ¶ms) const +{ + return + (getCurveType() != params.getCurveType()) || + (getScale() != params.getScale()) || + (getBegin() != params.getBegin()) || + (getEnd() != params.getEnd()) || + (getShear() != params.getShear()) || + (getTwist() != params.getTwist()) || + 
(getTwistBegin() !=params.getTwistBegin()) || + (getRadiusOffset() != params.getRadiusOffset()) || + (getTaper() != params.getTaper()) || + (getRevolutions() != params.getRevolutions()) || + (getSkew() != params.getSkew()); +} + + +inline bool LLPathParams::operator<(const LLPathParams ¶ms) const +{ + if( getCurveType() != params.getCurveType()) + { + return getCurveType() < params.getCurveType(); + } + else + if( getScale() != params.getScale()) + { + return getScale() < params.getScale(); + } + else + if( getBegin() != params.getBegin()) + { + return getBegin() < params.getBegin(); + } + else + if( getEnd() != params.getEnd()) + { + return getEnd() < params.getEnd(); + } + else + if( getShear() != params.getShear()) + { + return getShear() < params.getShear(); + } + else + if( getTwist() != params.getTwist()) + { + return getTwist() < params.getTwist(); + } + else + if( getTwistBegin() != params.getTwistBegin()) + { + return getTwistBegin() < params.getTwistBegin(); + } + else + if( getRadiusOffset() != params.getRadiusOffset()) + { + return getRadiusOffset() < params.getRadiusOffset(); + } + else + if( getTaper() != params.getTaper()) + { + return getTaper() < params.getTaper(); + } + else + if( getRevolutions() != params.getRevolutions()) + { + return getRevolutions() < params.getRevolutions(); + } + else + { + return getSkew() < params.getSkew(); + } +} + +typedef LLVolumeParams* LLVolumeParamsPtr; +typedef const LLVolumeParams* const_LLVolumeParamsPtr; + +class LLVolumeParams +{ +public: + LLVolumeParams() + : mSculptType(LL_SCULPT_TYPE_NONE) + { + } + + LLVolumeParams(LLProfileParams &profile, LLPathParams &path, + LLUUID sculpt_id = LLUUID::null, U8 sculpt_type = LL_SCULPT_TYPE_NONE) + : mProfileParams(profile), mPathParams(path), mSculptID(sculpt_id), mSculptType(sculpt_type) + { + } + + bool operator==(const LLVolumeParams ¶ms) const; + bool operator!=(const LLVolumeParams ¶ms) const; + bool operator<(const LLVolumeParams ¶ms) const; + + + void copyParams(const LLVolumeParams ¶ms); + + const LLProfileParams &getProfileParams() const {return mProfileParams;} + LLProfileParams &getProfileParams() {return mProfileParams;} + const LLPathParams &getPathParams() const {return mPathParams;} + LLPathParams &getPathParams() {return mPathParams;} + + BOOL importFile(LLFILE *fp); + BOOL exportFile(LLFILE *fp) const; + + BOOL importLegacyStream(std::istream& input_stream); + BOOL exportLegacyStream(std::ostream& output_stream) const; + + LLSD sculptAsLLSD() const; + bool sculptFromLLSD(LLSD& sd); + + LLSD asLLSD() const; + operator LLSD() const { return asLLSD(); } + bool fromLLSD(LLSD& sd); + + bool setType(U8 profile, U8 path); + + //void setBeginS(const F32 beginS) { mProfileParams.setBegin(beginS); } // range 0 to 1 + //void setBeginT(const F32 beginT) { mPathParams.setBegin(beginT); } // range 0 to 1 + //void setEndS(const F32 endS) { mProfileParams.setEnd(endS); } // range 0 to 1, must be greater than begin + //void setEndT(const F32 endT) { mPathParams.setEnd(endT); } // range 0 to 1, must be greater than begin + + bool setBeginAndEndS(const F32 begin, const F32 end); // both range from 0 to 1, begin must be less than end + bool setBeginAndEndT(const F32 begin, const F32 end); // both range from 0 to 1, begin must be less than end + + bool setHollow(const F32 hollow); // range 0 to 1 + bool setRatio(const F32 x) { return setRatio(x,x); } // 0 = point, 1 = same as base + bool setShear(const F32 x) { return setShear(x,x); } // 0 = no movement, + bool setRatio(const F32 x, const F32 y); 
// 0 = point, 1 = same as base + bool setShear(const F32 x, const F32 y); // 0 = no movement + + bool setTwistBegin(const F32 twist_begin); // range -1 to 1 + bool setTwistEnd(const F32 twist_end); // range -1 to 1 + bool setTwist(const F32 twist) { return setTwistEnd(twist); } // deprecated + bool setTaper(const F32 x, const F32 y) { bool pass_x = setTaperX(x); bool pass_y = setTaperY(y); return pass_x && pass_y; } + bool setTaperX(const F32 v); // -1 to 1 + bool setTaperY(const F32 v); // -1 to 1 + bool setRevolutions(const F32 revolutions); // 1 to 4 + bool setRadiusOffset(const F32 radius_offset); + bool setSkew(const F32 skew); + bool setSculptID(const LLUUID sculpt_id, U8 sculpt_type); + + static bool validate(U8 prof_curve, F32 prof_begin, F32 prof_end, F32 hollow, + U8 path_curve, F32 path_begin, F32 path_end, + F32 scx, F32 scy, F32 shx, F32 shy, + F32 twistend, F32 twistbegin, F32 radiusoffset, + F32 tx, F32 ty, F32 revolutions, F32 skew); + + const F32& getBeginS() const { return mProfileParams.getBegin(); } + const F32& getBeginT() const { return mPathParams.getBegin(); } + const F32& getEndS() const { return mProfileParams.getEnd(); } + const F32& getEndT() const { return mPathParams.getEnd(); } + + const F32& getHollow() const { return mProfileParams.getHollow(); } + const F32& getTwist() const { return mPathParams.getTwist(); } + const F32& getRatio() const { return mPathParams.getScaleX(); } + const F32& getRatioX() const { return mPathParams.getScaleX(); } + const F32& getRatioY() const { return mPathParams.getScaleY(); } + const F32& getShearX() const { return mPathParams.getShearX(); } + const F32& getShearY() const { return mPathParams.getShearY(); } + + const F32& getTwistBegin()const { return mPathParams.getTwistBegin(); } + const F32& getRadiusOffset() const { return mPathParams.getRadiusOffset(); } + const F32& getTaper() const { return mPathParams.getTaperX(); } + const F32& getTaperX() const { return mPathParams.getTaperX(); } + const F32& getTaperY() const { return mPathParams.getTaperY(); } + const F32& getRevolutions() const { return mPathParams.getRevolutions(); } + const F32& getSkew() const { return mPathParams.getSkew(); } + const LLUUID& getSculptID() const { return mSculptID; } + const U8& getSculptType() const { return mSculptType; } + bool isSculpt() const; + bool isMeshSculpt() const; + BOOL isConvex() const; + + // 'begin' and 'end' should be in range [0, 1] (they will be clamped) + // (begin, end) = (0, 1) will not change the volume + // (begin, end) = (0, 0.5) will reduce the volume to the first half of its profile/path (S/T) + void reduceS(F32 begin, F32 end); + void reduceT(F32 begin, F32 end); + + struct compare + { + bool operator()( const const_LLVolumeParamsPtr& first, const const_LLVolumeParamsPtr& second) const + { + return (*first < *second); + } + }; + + friend std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params); + + // debug helper functions + void setCube(); + +protected: + LLProfileParams mProfileParams; + LLPathParams mPathParams; + LLUUID mSculptID; + U8 mSculptType; +}; + + +class LLProfile +{ +public: + LLProfile() + : mOpen(FALSE), + mConcave(FALSE), + mDirty(TRUE), + mTotalOut(0), + mTotal(2) + { + } + + ~LLProfile(); + + S32 getTotal() const { return mTotal; } + S32 getTotalOut() const { return mTotalOut; } // Total number of outside points + BOOL isFlat(S32 face) const { return (mFaces[face].mCount == 2); } + BOOL isOpen() const { return mOpen; } + void setDirty() { mDirty = TRUE; } + BOOL 
generate(const LLProfileParams& params, BOOL path_open, F32 detail = 1.0f, S32 split = 0, + BOOL is_sculpted = FALSE, S32 sculpt_size = 0); + BOOL isConcave() const { return mConcave; } +public: + struct Face + { + S32 mIndex; + S32 mCount; + F32 mScaleU; + BOOL mCap; + BOOL mFlat; + LLFaceID mFaceID; + }; + + std::vector mProfile; + std::vector mNormals; + std::vector mFaces; + std::vector mEdgeNormals; + std::vector mEdgeCenters; + + friend std::ostream& operator<<(std::ostream &s, const LLProfile &profile); + +protected: + void genNormals(const LLProfileParams& params); + void genNGon(const LLProfileParams& params, S32 sides, F32 offset=0.0f, F32 bevel = 0.0f, F32 ang_scale = 1.f, S32 split = 0); + + Face* addHole(const LLProfileParams& params, BOOL flat, F32 sides, F32 offset, F32 box_hollow, F32 ang_scale, S32 split = 0); + Face* addCap (S16 faceID); + Face* addFace(S32 index, S32 count, F32 scaleU, S16 faceID, BOOL flat); + +protected: + BOOL mOpen; + BOOL mConcave; + BOOL mDirty; + + S32 mTotalOut; + S32 mTotal; +}; + +//------------------------------------------------------------------- +// SWEEP/EXTRUDE PATHS +//------------------------------------------------------------------- + +class LLPath +{ +public: + struct PathPt + { + LLVector3 mPos; + LLVector2 mScale; + LLQuaternion mRot; + F32 mTexT; + PathPt() { mPos.setVec(0,0,0); mTexT = 0; mScale.setVec(0,0); mRot.loadIdentity(); } + }; + +public: + LLPath() + : mOpen(FALSE), + mTotal(0), + mDirty(TRUE), + mStep(1) + { + } + + virtual ~LLPath(); + + void genNGon(const LLPathParams& params, S32 sides, F32 offset=0.0f, F32 end_scale = 1.f, F32 twist_scale = 1.f); + virtual BOOL generate(const LLPathParams& params, F32 detail=1.0f, S32 split = 0, + BOOL is_sculpted = FALSE, S32 sculpt_size = 0); + + BOOL isOpen() const { return mOpen; } + F32 getStep() const { return mStep; } + void setDirty() { mDirty = TRUE; } + + S32 getPathLength() const { return (S32)mPath.size(); } + + void resizePath(S32 length) { mPath.resize(length); } + + friend std::ostream& operator<<(std::ostream &s, const LLPath &path); + +public: + std::vector mPath; + +protected: + BOOL mOpen; + S32 mTotal; + BOOL mDirty; + F32 mStep; +}; + +class LLDynamicPath : public LLPath +{ +public: + LLDynamicPath() : LLPath() { } + /*virtual*/ BOOL generate(const LLPathParams& params, F32 detail=1.0f, S32 split = 0, + BOOL is_sculpted = FALSE, S32 sculpt_size = 0); +}; + +// Yet another "face" class - caches volume-specific, but not instance-specific data for faces) +class LLVolumeFace +{ +public: + class VertexData + { + enum + { + POSITION = 0, + NORMAL = 1 + }; + + private: + void init(); + public: + VertexData(); + VertexData(const VertexData& rhs); + const VertexData& operator=(const VertexData& rhs); + + ~VertexData(); + LLVector4a& getPosition(); + LLVector4a& getNormal(); + const LLVector4a& getPosition() const; + const LLVector4a& getNormal() const; + void setPosition(const LLVector4a& pos); + void setNormal(const LLVector4a& norm); + + + LLVector2 mTexCoord; + + bool operator<(const VertexData& rhs) const; + bool operator==(const VertexData& rhs) const; + bool compareNormal(const VertexData& rhs, F32 angle_cutoff) const; + + private: + LLVector4a* mData; + }; + + LLVolumeFace(); + LLVolumeFace(const LLVolumeFace& src); + LLVolumeFace& operator=(const LLVolumeFace& rhs); + + ~LLVolumeFace(); +private: + void freeData(); +public: + + BOOL create(LLVolume* volume, BOOL partial_build = FALSE); + void createBinormals(); + + void appendFace(const LLVolumeFace& face, 
LLMatrix4& transform, LLMatrix4& normal_tranform); + + void resizeVertices(S32 num_verts); + void allocateBinormals(S32 num_verts); + void allocateWeights(S32 num_verts); + void resizeIndices(S32 num_indices); + void fillFromLegacyData(std::vector& v, std::vector& idx); + + void pushVertex(const VertexData& cv); + void pushVertex(const LLVector4a& pos, const LLVector4a& norm, const LLVector2& tc); + void pushIndex(const U16& idx); + + void swapData(LLVolumeFace& rhs); + + void getVertexData(U16 indx, LLVolumeFace::VertexData& cv); + + class VertexMapData : public LLVolumeFace::VertexData + { + public: + U16 mIndex; + + bool operator==(const LLVolumeFace::VertexData& rhs) const; + + struct ComparePosition + { + bool operator()(const LLVector3& a, const LLVector3& b) const; + }; + + typedef std::map, VertexMapData::ComparePosition > PointMap; + }; + + void optimize(F32 angle_cutoff = 2.f); + void cacheOptimize(); + + void createOctree(F32 scaler = 0.25f, const LLVector4a& center = LLVector4a(0,0,0), const LLVector4a& size = LLVector4a(0.5f,0.5f,0.5f)); + + enum + { + SINGLE_MASK = 0x0001, + CAP_MASK = 0x0002, + END_MASK = 0x0004, + SIDE_MASK = 0x0008, + INNER_MASK = 0x0010, + OUTER_MASK = 0x0020, + HOLLOW_MASK = 0x0040, + OPEN_MASK = 0x0080, + FLAT_MASK = 0x0100, + TOP_MASK = 0x0200, + BOTTOM_MASK = 0x0400 + }; + +public: + S32 mID; + U32 mTypeMask; + + // Only used for INNER/OUTER faces + S32 mBeginS; + S32 mBeginT; + S32 mNumS; + S32 mNumT; + + LLVector4a* mExtents; //minimum and maximum point of face + LLVector4a* mCenter; + LLVector2 mTexCoordExtents[2]; //minimum and maximum of texture coordinates of the face. + + S32 mNumVertices; + S32 mNumIndices; + + LLVector4a* mPositions; + LLVector4a* mNormals; + LLVector4a* mBinormals; + LLVector2* mTexCoords; + U16* mIndices; + + std::vector mEdge; + + //list of skin weights for rigged volumes + // format is mWeights[vertex_index].mV[influence] = . 
+ // mWeights.size() should be empty or match mVertices.size() + LLVector4a* mWeights; + + LLOctreeNode* mOctree; + +private: + BOOL createUnCutCubeCap(LLVolume* volume, BOOL partial_build = FALSE); + BOOL createCap(LLVolume* volume, BOOL partial_build = FALSE); + BOOL createSide(LLVolume* volume, BOOL partial_build = FALSE); +}; + +class LLVolume : public LLRefCount +{ + friend class LLVolumeLODGroup; + +protected: + ~LLVolume(); // use unref + +public: + struct Point + { + LLVector3 mPos; + }; + + struct FaceParams + { + LLFaceID mFaceID; + S32 mBeginS; + S32 mCountS; + S32 mBeginT; + S32 mCountT; + }; + + LLVolume(const LLVolumeParams ¶ms, const F32 detail, const BOOL generate_single_face = FALSE, const BOOL is_unique = FALSE); + + U8 getProfileType() const { return mParams.getProfileParams().getCurveType(); } + U8 getPathType() const { return mParams.getPathParams().getCurveType(); } + S32 getNumFaces() const; + S32 getNumVolumeFaces() const { return mVolumeFaces.size(); } + F32 getDetail() const { return mDetail; } + const LLVolumeParams& getParams() const { return mParams; } + LLVolumeParams getCopyOfParams() const { return mParams; } + const LLProfile& getProfile() const { return *mProfilep; } + LLPath& getPath() const { return *mPathp; } + void resizePath(S32 length); + const std::vector& getMesh() const { return mMesh; } + const LLVector3& getMeshPt(const U32 i) const { return mMesh[i].mPos; } + + void setDirty() { mPathp->setDirty(); mProfilep->setDirty(); } + + void regen(); + void genBinormals(S32 face); + + BOOL isConvex() const; + BOOL isCap(S32 face); + BOOL isFlat(S32 face); + BOOL isUnique() const { return mUnique; } + + S32 getSculptLevel() const { return mSculptLevel; } + void setSculptLevel(S32 level) { mSculptLevel = level; } + + S32 *getTriangleIndices(U32 &num_indices) const; + + // returns number of triangle indeces required for path/profile mesh + S32 getNumTriangleIndices() const; + + S32 getNumTriangles() const; + + void generateSilhouetteVertices(std::vector &vertices, + std::vector &normals, + std::vector &segments, + const LLVector3& view_vec, + const LLMatrix4& mat, + const LLMatrix3& norm_mat, + S32 face_index); + + //get the face index of the face that intersects with the given line segment at the point + //closest to start. Moves end to the point of intersection. Returns -1 if no intersection. + //Line segment must be in volume space. + S32 lineSegmentIntersect(const LLVector3& start, const LLVector3& end, + S32 face = -1, // which face to check, -1 = ALL_SIDES + LLVector3* intersection = NULL, // return the intersection point + LLVector2* tex_coord = NULL, // return the texture coordinates of the intersection point + LLVector3* normal = NULL, // return the surface normal at the intersection point + LLVector3* bi_normal = NULL // return the surface bi-normal at the intersection point + ); + + S32 lineSegmentIntersect(const LLVector4a& start, const LLVector4a& end, + S32 face = 1, + LLVector3* intersection = NULL, + LLVector2* tex_coord = NULL, + LLVector3* normal = NULL, + LLVector3* bi_normal = NULL); + + // The following cleans up vertices and triangles, + // getting rid of degenerate triangles and duplicate vertices, + // and allocates new arrays with the clean data. 
+ static BOOL cleanupTriangleData( const S32 num_input_vertices, + const std::vector &input_vertices, + const S32 num_input_triangles, + S32 *input_triangles, + S32 &num_output_vertices, + LLVector3 **output_vertices, + S32 &num_output_triangles, + S32 **output_triangles); + LLFaceID generateFaceMask(); + + BOOL isFaceMaskValid(LLFaceID face_mask); + static S32 sNumMeshPoints; + + friend std::ostream& operator<<(std::ostream &s, const LLVolume &volume); + friend std::ostream& operator<<(std::ostream &s, const LLVolume *volumep); // HACK to bypass Windoze confusion over + // conversion if *(LLVolume*) to LLVolume& + const LLVolumeFace &getVolumeFace(const S32 f) const {return mVolumeFaces[f];} // DO NOT DELETE VOLUME WHILE USING THIS REFERENCE, OR HOLD A POINTER TO THIS VOLUMEFACE + + U32 mFaceMask; // bit array of which faces exist in this volume + LLVector3 mLODScaleBias; // vector for biasing LOD based on scale + + void sculpt(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, S32 sculpt_level); + void copyVolumeFaces(const LLVolume* volume); + void cacheOptimize(); + +private: + void sculptGenerateMapVertices(U16 sculpt_width, U16 sculpt_height, S8 sculpt_components, const U8* sculpt_data, U8 sculpt_type); + F32 sculptGetSurfaceArea(); + void sculptGeneratePlaceholder(); + void sculptCalcMeshResolution(U16 width, U16 height, U8 type, S32& s, S32& t); + + +protected: + BOOL generate(); + void createVolumeFaces(); +public: + virtual BOOL createVolumeFacesFromFile(const std::string& file_name); + virtual BOOL createVolumeFacesFromStream(std::istream& is); + virtual bool unpackVolumeFaces(std::istream& is, S32 size); + + virtual void makeTetrahedron(); + virtual BOOL isTetrahedron(); + + protected: + BOOL mUnique; + F32 mDetail; + S32 mSculptLevel; + BOOL mIsTetrahedron; + + LLVolumeParams mParams; + LLPath *mPathp; + LLProfile *mProfilep; + std::vector mMesh; + + BOOL mGenerateSingleFace; + typedef std::vector face_list_t; + face_list_t mVolumeFaces; + +public: + LLVector4a* mHullPoints; + U16* mHullIndices; + S32 mNumHullPoints; + S32 mNumHullIndices; +}; + +std::ostream& operator<<(std::ostream &s, const LLVolumeParams &volume_params); + +void calc_binormal_from_triangle( + LLVector4a& binormal, + const LLVector4a& pos0, + const LLVector2& tex0, + const LLVector4a& pos1, + const LLVector2& tex1, + const LLVector4a& pos2, + const LLVector2& tex2); + +BOOL LLLineSegmentBoxIntersect(const F32* start, const F32* end, const F32* center, const F32* size); +BOOL LLLineSegmentBoxIntersect(const LLVector3& start, const LLVector3& end, const LLVector3& center, const LLVector3& size); +BOOL LLLineSegmentBoxIntersect(const LLVector4a& start, const LLVector4a& end, const LLVector4a& center, const LLVector4a& size); + +BOOL LLTriangleRayIntersect(const LLVector3& vert0, const LLVector3& vert1, const LLVector3& vert2, const LLVector3& orig, const LLVector3& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t, BOOL two_sided); + +BOOL LLTriangleRayIntersect(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t); +BOOL LLTriangleRayIntersectTwoSided(const LLVector4a& vert0, const LLVector4a& vert1, const LLVector4a& vert2, const LLVector4a& orig, const LLVector4a& dir, + F32& intersection_a, F32& intersection_b, F32& intersection_t); + + + +#endif diff --git a/indra/llmath/llvolumeoctree.h b/indra/llmath/llvolumeoctree.h index 
f696cbd976..688d91dc40 100644 --- a/indra/llmath/llvolumeoctree.h +++ b/indra/llmath/llvolumeoctree.h @@ -34,6 +34,41 @@ #include "llvolume.h" #include "llvector4a.h" +class LLVolumeTriangle : public LLRefCount +{ +public: + LLVolumeTriangle() + { + + } + + LLVolumeTriangle(const LLVolumeTriangle& rhs) + { + *this = rhs; + } + + const LLVolumeTriangle& operator=(const LLVolumeTriangle& rhs) + { + llerrs << "Illegal operation!" << llendl; + return *this; + } + + ~LLVolumeTriangle() + { + + } + + LLVector4a mPositionGroup; + + const LLVector4a* mV[3]; + U16 mIndex[3]; + + F32 mRadius; + + virtual const LLVector4a& getPositionGroup() const; + virtual const F32& getBinRadius() const; +}; + class LLVolumeOctreeListener : public LLOctreeListener { public: @@ -91,41 +126,6 @@ public: virtual void visit(const LLOctreeNode* node); }; -class LLVolumeTriangle : public LLRefCount -{ -public: - LLVolumeTriangle() - { - - } - - LLVolumeTriangle(const LLVolumeTriangle& rhs) - { - *this = rhs; - } - - const LLVolumeTriangle& operator=(const LLVolumeTriangle& rhs) - { - llerrs << "Illegal operation!" << llendl; - return *this; - } - - ~LLVolumeTriangle() - { - - } - - LLVector4a mPositionGroup; - - const LLVector4a* mV[3]; - U16 mIndex[3]; - - F32 mRadius; - - virtual const LLVector4a& getPositionGroup() const; - virtual const F32& getBinRadius() const; -}; - class LLVolumeOctreeValidate : public LLOctreeTraveler { virtual void visit(const LLOctreeNode* branch); -- cgit v1.2.3 From d08372f71d575ebc3050806660655e1c7c7cff84 Mon Sep 17 00:00:00 2001 From: Xiaohong Bao Date: Tue, 1 Feb 2011 12:43:12 -0700 Subject: merge fix for SH-659 from v-d to mesh: small textures not loaded --- indra/llmath/llvolume.cpp | 40 +++++++++++++++++++++++++++++++++++++--- indra/llmath/llvolume.h | 4 ++-- 2 files changed, 39 insertions(+), 5 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 617a8b4ca3..2f662b757b 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -5393,19 +5393,53 @@ BOOL LLVolumeFace::create(LLVolume* volume, BOOL partial_build) delete mOctree; mOctree = NULL; + BOOL ret = FALSE ; if (mTypeMask & CAP_MASK) { - return createCap(volume, partial_build); + ret = createCap(volume, partial_build); } else if ((mTypeMask & END_MASK) || (mTypeMask & SIDE_MASK)) { - return createSide(volume, partial_build); + ret = createSide(volume, partial_build); } else { llerrs << "Unknown/uninitialized face type!" 
<< llendl; - return FALSE; } + + //update the range of the texture coordinates + if(ret) + { + mTexCoordExtents[0].setVec(1.f, 1.f) ; + mTexCoordExtents[1].setVec(0.f, 0.f) ; + + for(U32 i = 0 ; i < mNumVertices ; i++) + { + if(mTexCoordExtents[0].mV[0] > mTexCoords[i].mV[0]) + { + mTexCoordExtents[0].mV[0] = mTexCoords[i].mV[0] ; + } + if(mTexCoordExtents[1].mV[0] < mTexCoords[i].mV[0]) + { + mTexCoordExtents[1].mV[0] = mTexCoords[i].mV[0] ; + } + + if(mTexCoordExtents[0].mV[1] > mTexCoords[i].mV[1]) + { + mTexCoordExtents[0].mV[1] = mTexCoords[i].mV[1] ; + } + if(mTexCoordExtents[1].mV[1] < mTexCoords[i].mV[1]) + { + mTexCoordExtents[1].mV[1] = mTexCoords[i].mV[1] ; + } + } + mTexCoordExtents[0].mV[0] = llmax(0.f, mTexCoordExtents[0].mV[0]) ; + mTexCoordExtents[0].mV[1] = llmax(0.f, mTexCoordExtents[0].mV[1]) ; + mTexCoordExtents[1].mV[0] = llmin(1.f, mTexCoordExtents[1].mV[0]) ; + mTexCoordExtents[1].mV[1] = llmin(1.f, mTexCoordExtents[1].mV[1]) ; + } + + return ret ; } void LLVolumeFace::getVertexData(U16 index, LLVolumeFace::VertexData& cv) diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index eceaced9e2..5bd65c2bf2 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -901,7 +901,7 @@ public: LLVector4a* mExtents; //minimum and maximum point of face LLVector4a* mCenter; - LLVector2 mTexCoordExtents[2]; //minimum and maximum of texture coordinates of the face. + LLVector2 mTexCoordExtents[2]; //minimum and maximum of texture coordinates of the face. S32 mNumVertices; S32 mNumIndices; @@ -909,7 +909,7 @@ public: LLVector4a* mPositions; LLVector4a* mNormals; LLVector4a* mBinormals; - LLVector2* mTexCoords; + LLVector2* mTexCoords; U16* mIndices; std::vector mEdge; -- cgit v1.2.3 From a242129b571daa8c6137c79931e31f9d43422abc Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 8 Feb 2011 15:53:50 -0600 Subject: SH-523 Fix for non-finite values in silhouette rendering resulting in silhouette segments pointing at center of screen. 
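
Within indra/llmath this change only removes the now-unused segments output list from
generateSilhouetteVertices(); the non-finite handling itself evidently lands outside this
directory, since the excerpt is limited to 'indra/llmath'. As a rough, hypothetical sketch of
the kind of guard the subject line describes (std::isfinite and a stand-in Vec3 rather than
the viewer's LLVector3; this is not the actual fix):

    #include <cmath>
    #include <vector>

    struct Vec3 { float x, y, z; };   // stand-in for LLVector3

    static bool is_finite(const Vec3& v)
    {
        return std::isfinite(v.x) && std::isfinite(v.y) && std::isfinite(v.z);
    }

    // Skip any silhouette segment with a NaN/Inf endpoint; such a vertex
    // would otherwise be drawn toward an arbitrary spot (e.g. screen center).
    static void push_segment(std::vector<Vec3>& verts, const Vec3& a, const Vec3& b)
    {
        if (is_finite(a) && is_finite(b))
        {
            verts.push_back(a);
            verts.push_back(b);
        }
    }
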
--- indra/llmath/llvolume.cpp | 4 ---- indra/llmath/llvolume.h | 1 - 2 files changed, 5 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 2f662b757b..c4be176353 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -4179,7 +4179,6 @@ S32 LLVolume::getNumTriangles() const //----------------------------------------------------------------------------- void LLVolume::generateSilhouetteVertices(std::vector &vertices, std::vector &normals, - std::vector &segments, const LLVector3& obj_cam_vec_in, const LLMatrix4& mat_in, const LLMatrix3& norm_mat_in, @@ -4198,7 +4197,6 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, vertices.clear(); normals.clear(); - segments.clear(); if ((mParams.getSculptType() & LL_SCULPT_TYPE_MASK) == LL_SCULPT_TYPE_MESH) { @@ -4399,8 +4397,6 @@ void LLVolume::generateSilhouetteVertices(std::vector &vertices, norm_mat.rotate(n[v2], t); t.normalize3fast(); normals.push_back(LLVector3(t[0], t[1], t[2])); - - segments.push_back(vertices.size()); } } } diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 5bd65c2bf2..60b64b1285 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -986,7 +986,6 @@ public: void generateSilhouetteVertices(std::vector &vertices, std::vector &normals, - std::vector &segments, const LLVector3& view_vec, const LLMatrix4& mat, const LLMatrix3& norm_mat, -- cgit v1.2.3 From 9e0ee4dff0109326c31425581437a44351d08344 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 17 Feb 2011 17:18:57 -0600 Subject: SH-1006 Quick pass at cutting down the number of redundant GL calls based on data from gDEBugger. Reviewed by Nyx. --- indra/llmath/m4math.h | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h index 10c88464e5..3588f36758 100644 --- a/indra/llmath/m4math.h +++ b/indra/llmath/m4math.h @@ -132,6 +132,7 @@ public: // various useful matrix functions const LLMatrix4& setIdentity(); // Load identity matrix + bool isIdentity() const; const LLMatrix4& setZero(); // Clears matrix to all zeros. const LLMatrix4& initRotation(const F32 angle, const F32 x, const F32 y, const F32 z); // Calculate rotation matrix by rotating angle radians about (x, y, z) @@ -262,6 +263,30 @@ inline const LLMatrix4& LLMatrix4::setIdentity() return (*this); } +inline bool LLMatrix4::isIdentity() const +{ + return + mMatrix[0][0] == 1.f && + mMatrix[0][1] == 0.f && + mMatrix[0][2] == 0.f && + mMatrix[0][3] == 0.f && + + mMatrix[1][0] == 0.f && + mMatrix[1][1] == 1.f && + mMatrix[1][2] == 0.f && + mMatrix[1][3] == 0.f && + + mMatrix[2][0] == 0.f && + mMatrix[2][1] == 0.f && + mMatrix[2][2] == 1.f && + mMatrix[2][3] == 0.f && + + mMatrix[3][0] == 0.f && + mMatrix[3][1] == 0.f && + mMatrix[3][2] == 0.f && + mMatrix[3][3] == 1.f; +} + /* inline LLMatrix4 operator*(const LLMatrix4 &a, const LLMatrix4 &b) -- cgit v1.2.3 From 51a4867ae6bcf0ec71882ba77b1c995d98834126 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 21 Mar 2011 17:31:25 -0500 Subject: SH-1168 Export upload data to disk on upload. 
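
The m4math hunks below give LLMatrix4 a plain LLSD round trip (getValue()/setValue()),
presumably so a transform can be written to disk alongside the rest of the exported upload
data. A usage sketch: getValue(), setValue() and initRotation() come from m4math.h as patched
here; the surrounding setup is assumed context, not code from this change.

    LLMatrix4 xform;
    xform.initRotation(0.5f, 0.f, 0.f, 1.f);   // 0.5 radians about +Z

    LLSD exported = xform.getValue();          // 16 reals, rows flattened in order

    LLMatrix4 restored;
    restored.setValue(exported);               // restored now matches xform element for element
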
--- indra/llmath/m4math.cpp | 50 +++++++++++++++++++++++++++++++++++++++++++++++++ indra/llmath/m4math.h | 2 ++ 2 files changed, 52 insertions(+) (limited to 'indra/llmath') diff --git a/indra/llmath/m4math.cpp b/indra/llmath/m4math.cpp index 8741400e52..bad4deb4de 100644 --- a/indra/llmath/m4math.cpp +++ b/indra/llmath/m4math.cpp @@ -829,4 +829,54 @@ std::ostream& operator<<(std::ostream& s, const LLMatrix4 &a) return s; } +LLSD LLMatrix4::getValue() const +{ + LLSD ret; + + ret[0] = mMatrix[0][0]; + ret[1] = mMatrix[0][1]; + ret[2] = mMatrix[0][2]; + ret[3] = mMatrix[0][3]; + + ret[4] = mMatrix[1][0]; + ret[5] = mMatrix[1][1]; + ret[6] = mMatrix[1][2]; + ret[7] = mMatrix[1][3]; + + ret[8] = mMatrix[2][0]; + ret[9] = mMatrix[2][1]; + ret[10] = mMatrix[2][2]; + ret[11] = mMatrix[2][3]; + + ret[12] = mMatrix[3][0]; + ret[13] = mMatrix[3][1]; + ret[14] = mMatrix[3][2]; + ret[15] = mMatrix[3][3]; + + return ret; +} + +void LLMatrix4::setValue(const LLSD& data) +{ + mMatrix[0][0] = data[0].asReal(); + mMatrix[0][1] = data[1].asReal(); + mMatrix[0][2] = data[2].asReal(); + mMatrix[0][3] = data[3].asReal(); + + mMatrix[1][0] = data[4].asReal(); + mMatrix[1][1] = data[5].asReal(); + mMatrix[1][2] = data[6].asReal(); + mMatrix[1][3] = data[7].asReal(); + + mMatrix[2][0] = data[8].asReal(); + mMatrix[2][1] = data[9].asReal(); + mMatrix[2][2] = data[10].asReal(); + mMatrix[2][3] = data[11].asReal(); + + mMatrix[3][0] = data[12].asReal(); + mMatrix[3][1] = data[13].asReal(); + mMatrix[3][2] = data[14].asReal(); + mMatrix[3][3] = data[15].asReal(); +} + diff --git a/indra/llmath/m4math.h b/indra/llmath/m4math.h index 3588f36758..a7dce10397 100644 --- a/indra/llmath/m4math.h +++ b/indra/llmath/m4math.h @@ -119,6 +119,8 @@ public: ~LLMatrix4(void); // Destructor + LLSD getValue() const; + void setValue(const LLSD&); ////////////////////////////// // -- cgit v1.2.3 From 6ff87e2840c585711e2927028a11ba5ce78a192a Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Tue, 22 Mar 2011 17:23:48 -0500 Subject: SH-1169 Import from slm instead of dae when appropriate. --- indra/llmath/llvolume.cpp | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index c4be176353..7a2f06da8f 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2162,7 +2162,7 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) LLSD header; { - if (!LLSDSerialize::deserialize(header, is, 1024*1024*1024)) + if (!LLSDSerialize::fromBinary(header, is, 1024*1024*1024)) { llwarns << "Mesh header parse error. Not a valid mesh asset!" << llendl; return FALSE; @@ -2174,34 +2174,18 @@ BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) "lowest_lod", "low_lod", "medium_lod", - "high_lod" + "high_lod", + "physics_shape", }; - S32 lod = llclamp((S32) mDetail, 0, 3); + const S32 MODEL_LODS = 5; - while (lod < 4 && - (header[nm[lod]]["offset"].asInteger() == -1 || - header[nm[lod]]["size"].asInteger() == 0 )) - { - ++lod; - } - - if (lod >= 4) - { - lod = llclamp((S32) mDetail, 0, 3); + S32 lod = llclamp((S32) mDetail, 0, MODEL_LODS); - while (lod >= 0 && - (header[nm[lod]]["offset"].asInteger() == -1 || - header[nm[lod]]["size"].asInteger() == 0) ) - { - --lod; - } - - if (lod < 0) - { - llwarns << "Mesh header missing LOD offsets. Not a valid mesh asset!" 
<< llendl; - return FALSE; - } + if (header[nm[lod]]["offset"].asInteger() == -1 || + header[nm[lod]]["size"].asInteger() == 0 ) + { //cannot load requested LOD + return FALSE; } is.seekg(header[nm[lod]]["offset"].asInteger(), std::ios_base::cur); -- cgit v1.2.3 From 1ff79683128f09baf6dbaf081092fda7e5f2fe65 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Mon, 28 Mar 2011 23:50:23 -0500 Subject: SH-1225 Add skinning info to import path of .slm files. --- indra/llmath/llvolume.cpp | 50 ----------------------------------------------- indra/llmath/llvolume.h | 2 -- 2 files changed, 52 deletions(-) (limited to 'indra/llmath') diff --git a/indra/llmath/llvolume.cpp b/indra/llmath/llvolume.cpp index 7a2f06da8f..dc360818d6 100644 --- a/indra/llmath/llvolume.cpp +++ b/indra/llmath/llvolume.cpp @@ -2143,56 +2143,6 @@ bool LLVolumeFace::VertexData::compareNormal(const LLVolumeFace::VertexData& rhs return retval; } -BOOL LLVolume::createVolumeFacesFromFile(const std::string& file_name) -{ - std::ifstream is; - - is.open(file_name.c_str(), std::ifstream::in | std::ifstream::binary); - - BOOL success = createVolumeFacesFromStream(is); - - is.close(); - - return success; -} - -BOOL LLVolume::createVolumeFacesFromStream(std::istream& is) -{ - mSculptLevel = -1; // default is an error occured - - LLSD header; - { - if (!LLSDSerialize::fromBinary(header, is, 1024*1024*1024)) - { - llwarns << "Mesh header parse error. Not a valid mesh asset!" << llendl; - return FALSE; - } - } - - std::string nm[] = - { - "lowest_lod", - "low_lod", - "medium_lod", - "high_lod", - "physics_shape", - }; - - const S32 MODEL_LODS = 5; - - S32 lod = llclamp((S32) mDetail, 0, MODEL_LODS); - - if (header[nm[lod]]["offset"].asInteger() == -1 || - header[nm[lod]]["size"].asInteger() == 0 ) - { //cannot load requested LOD - return FALSE; - } - - is.seekg(header[nm[lod]]["offset"].asInteger(), std::ios_base::cur); - - return unpackVolumeFaces(is, header[nm[lod]]["size"].asInteger()); -} - bool LLVolume::unpackVolumeFaces(std::istream& is, S32 size) { //input stream is now pointing at a zlib compressed block of LLSD diff --git a/indra/llmath/llvolume.h b/indra/llmath/llvolume.h index 60b64b1285..01bfbd858b 100644 --- a/indra/llmath/llvolume.h +++ b/indra/llmath/llvolume.h @@ -1048,8 +1048,6 @@ protected: BOOL generate(); void createVolumeFaces(); public: - virtual BOOL createVolumeFacesFromFile(const std::string& file_name); - virtual BOOL createVolumeFacesFromStream(std::istream& is); virtual bool unpackVolumeFaces(std::istream& is, S32 size); virtual void makeTetrahedron(); -- cgit v1.2.3 From f90eb750f1e8c3e300d1a079fe99d15f35201072 Mon Sep 17 00:00:00 2001 From: Dave Parks Date: Thu, 7 Apr 2011 00:29:02 -0500 Subject: SH-1292 Remove outliers from graphs in scene console to make a more useful view. 
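
The helper added below trims samples that fall outside the fences
[Q1 - k*(Q3-Q1), Q3 + k*(Q3-Q1)]. It reads Q1 and Q3 by index and erases runs off both ends of
the vector, so it assumes the samples are already sorted ascending, and it does nothing for
fewer than 100 samples. (As written, the trailing erase starts at data.begin()+j, so the last
in-range sample is dropped along with the high outliers.) A usage sketch: frame_times and
gatherSamples() are hypothetical, k = 1.5 is just the conventional fence factor rather than
whatever the scene console passes, and llmath.h is assumed to be included for F32 and
ll_remove_outliers:

    #include <algorithm>
    #include <vector>

    std::vector<F32> frame_times = gatherSamples();      // hypothetical sample source

    std::sort(frame_times.begin(), frame_times.end());   // quartiles are read by index, so sort first
    ll_remove_outliers(frame_times, 1.5f);                // drop values outside [Q1-1.5*(Q3-Q1), Q3+1.5*(Q3-Q1)]

    // frame_times now holds the trimmed, still-sorted samples, ready to graph.
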
---
 indra/llmath/llmath.h | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
(limited to 'indra/llmath')

diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h
index 25b29ac1a8..97001b4062 100644
--- a/indra/llmath/llmath.h
+++ b/indra/llmath/llmath.h
@@ -29,6 +29,7 @@
 #include
 #include
+#include <vector>
 #include "lldefs.h"
 //#include "llstl.h" // *TODO: Remove when LLString is gone
 //#include "llstring.h" // *TODO: Remove when LLString is gone
@@ -497,6 +498,44 @@ inline F32 llgaussian(F32 x, F32 o)
 	return 1.f/(F_SQRT_TWO_PI*o)*powf(F_E, -(x*x)/(2*o*o));
 }
 
+//helper function for removing outliers
+template <class VEC_TYPE>
+inline void ll_remove_outliers(std::vector<VEC_TYPE>& data, F32 k)
+{
+	if (data.size() < 100)
+	{ //not enough samples
+		return;
+	}
+
+	VEC_TYPE Q1 = data[data.size()/4];
+	VEC_TYPE Q3 = data[data.size()-data.size()/4-1];
+
+	VEC_TYPE min = Q1-k*(Q3-Q1);
+	VEC_TYPE max = Q3+k*(Q3-Q1);
+
+	U32 i = 0;
+	while (i < data.size() && data[i] < min)
+	{
+		i++;
+	}
+
+	S32 j = data.size()-1;
+	while (j > 0 && data[j] > max)
+	{
+		j--;
+	}
+
+	if (j < data.size()-1)
+	{
+		data.erase(data.begin()+j, data.end());
+	}
+
+	if (i > 0)
+	{
+		data.erase(data.begin(), data.begin()+i);
+	}
+}
+
 // Include simd math header
 #include "llsimdmath.h"
-- cgit v1.2.3


From 8807496ed78b36658f488d901842852d63f73cb9 Mon Sep 17 00:00:00 2001
From: Dave Parks
Date: Fri, 8 Apr 2011 02:11:46 -0500
Subject: attempted fix for linux build

---
 indra/llmath/llmath.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
(limited to 'indra/llmath')

diff --git a/indra/llmath/llmath.h b/indra/llmath/llmath.h
index 97001b4062..eea7c977fb 100644
--- a/indra/llmath/llmath.h
+++ b/indra/llmath/llmath.h
@@ -510,8 +510,8 @@ inline void ll_remove_outliers(std::vector<VEC_TYPE>& data, F32 k)
 	VEC_TYPE Q1 = data[data.size()/4];
 	VEC_TYPE Q3 = data[data.size()-data.size()/4-1];
 
-	VEC_TYPE min = Q1-k*(Q3-Q1);
-	VEC_TYPE max = Q3+k*(Q3-Q1);
+	VEC_TYPE min = (VEC_TYPE) ((F32) Q1-k * (F32) (Q3-Q1));
+	VEC_TYPE max = (VEC_TYPE) ((F32) Q3+k * (F32) (Q3-Q1));
 
 	U32 i = 0;
 	while (i < data.size() && data[i] < min)
-- cgit v1.2.3
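
The follow-up above computes the fences in F32 and casts back to VEC_TYPE, but the arithmetic
is unchanged. With made-up numbers, for a sorted sample whose quartiles come out as Q1 = 10
and Q3 = 18, and with k = 1.5:

    // Q3 - Q1              = 8
    // min = Q1 - k*(Q3-Q1) = 10 - 1.5*8 = -2
    // max = Q3 + k*(Q3-Q1) = 18 + 1.5*8 = 30
    //
    // Samples below -2 or above 30 are treated as outliers and erased from
    // the ends of the (sorted) vector; everything in between is kept.
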