| author | JonathanLinden <none@none> | 2010-10-28 14:32:00 -0700 |
|---|---|---|
| committer | JonathanLinden <none@none> | 2010-10-28 14:32:00 -0700 |
| commit | dda21d67e5df62993c57be1d9d5c3195c02ddfef | |
| tree | f2bde3ec0437050b28b65e97dfbaa1e6ef74c75b | |
| parent | 9c1d9665f7aee1faad6eaf4d23407688cc7ba3b7 | |
Fix for CTS-305 'Uploaded meshes have uniform scale'
-rw-r--r-- | indra/llprimitive/llmodel.cpp | 51 |
1 file changed, 44 insertions, 7 deletions
diff --git a/indra/llprimitive/llmodel.cpp b/indra/llprimitive/llmodel.cpp
index 1cee5a6e15..6baf20a726 100644
--- a/indra/llprimitive/llmodel.cpp
+++ b/indra/llprimitive/llmodel.cpp
@@ -875,6 +875,15 @@ void LLModel::optimizeVolumeFaces()
 #endif
 }
 
+// Shrink the model so that it fits
+// in a 1x1x1 cube centered at the origin.
+// The positions and extents, multiplied
+// by mNormalizedScale and offset by
+// mNormalizedTranslation, give back the
+// "original" extents and positions.
+// Also, the positions will fit within
+// the unit cube and the extents will lie
+// on the corners of the unit cube.
 void LLModel::normalizeVolumeFaces()
 {
@@ -891,6 +900,10 @@ void LLModel::normalizeVolumeFaces()
 		llerrs << "WTF?" << llendl;
 	}
 
+	// Loop over all of the volume
+	// faces in the model and find
+	// the extents of the volume
+	// along each axis.
 	min = mVolumeFaces[0].mExtents[0];
 	max = mVolumeFaces[0].mExtents[1];
 
@@ -907,33 +920,57 @@ void LLModel::normalizeVolumeFaces()
 		update_min_max(min, max, face.mExtents[1]);
 	}
 
+	// Now that we have the extents of the model,
+	// we can compute the offset needed to center
+	// the model at the origin.
+
+	// Compute the center of the model and negate
+	// it to get the translation needed to center
+	// the model at the origin.
 	LLVector4a trans;
 	trans.setAdd(min, max);
 	trans.mul(-0.5f);
+
+	// Compute the total size of the model
+	// along each axis.
 	LLVector4a size;
 	size.setSub(max, min);
 
-	F32 scale = 1.f/llmax(llmax(size[0], size[1]), size[2]);
+	// To make the model's total size equal
+	// the size of the unit cube, compute a
+	// per-axis scale factor that can be
+	// applied to do that.
+	LLVector4a scale = LLVector4a(1, 1, 1);
+	scale.setDiv(scale, size);
 
 	for (U32 i = 0; i < mVolumeFaces.size(); ++i)
 	{
 		LLVolumeFace& face = mVolumeFaces[i];
-
+
+		// Shrink the extents so that they
+		// fall on the corners of the
+		// unit cube.
 		face.mExtents[0].add(trans);
-		face.mExtents[0].mul(scale);
+		face.mExtents[0].setMul(face.mExtents[0], scale);
 
 		face.mExtents[1].add(trans);
-		face.mExtents[1].mul(scale);
+		face.mExtents[1].setMul(face.mExtents[1], scale);
 
+		// Translate and scale all of the positions
+		// so that they fit within the unit cube.
 		LLVector4a* pos = (LLVector4a*) face.mPositions;
 
 		for (U32 j = 0; j < face.mNumVertices; ++j)
 		{
-			pos[j].add(trans);
-			pos[j].mul(scale);
+			pos[j].add(trans);
+			pos[j].setMul(pos[j], scale);
 		}
 	}
 
-	mNormalizedScale = LLVector3(1,1,1) / scale;
+	// mNormalizedScale is the factor that the
+	// normalized model must be multiplied by
+	// to get back the original size of the
+	// model instead of the normalized size.
+	mNormalizedScale.set(size.getF32ptr());
 	mNormalizedTranslation.set(trans.getF32ptr());
 	mNormalizedTranslation *= -1.f;
 }
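The heart of the change is the scale computation. The old code scaled every axis by 1 / (largest bounding-box dimension), so the normalized model kept its proportions but did not fill the unit cube, and mNormalizedScale held that single dimension replicated across all three components. The new code divides each axis by its own extent, so the extents land exactly on the unit cube's corners and mNormalizedScale stores the true per-axis size. The sketch below illustrates that per-axis normalization and the round trip back to the original coordinates; it is a standalone approximation using plain float structs rather than the viewer's LLVector4a/LLVector3 math types, and Vec3, NormalizeResult, and normalizeToUnitCube are hypothetical names used only for illustration.

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Plain stand-ins for the viewer's vector types (illustration only).
struct Vec3 { float x, y, z; };

struct NormalizeResult
{
    Vec3 scale;        // analogous to mNormalizedScale: per-axis original size
    Vec3 translation;  // analogous to mNormalizedTranslation: original center
};

// Translate the model to the origin and scale each axis independently so the
// bounding box becomes the unit cube, as the patched normalizeVolumeFaces() does.
NormalizeResult normalizeToUnitCube(std::vector<Vec3>& positions,
                                    Vec3 min, Vec3 max)
{
    // Translation that moves the bounding-box center to the origin.
    Vec3 trans = { -0.5f * (min.x + max.x),
                   -0.5f * (min.y + max.y),
                   -0.5f * (min.z + max.z) };

    // Per-axis size of the bounding box; the patch divides by this
    // component-wise instead of by the single largest dimension.
    Vec3 size = { max.x - min.x, max.y - min.y, max.z - min.z };

    for (Vec3& p : positions)
    {
        p.x = (p.x + trans.x) / size.x;
        p.y = (p.y + trans.y) / size.y;
        p.z = (p.z + trans.z) / size.z;
    }

    // normalized * scale + translation recovers the original coordinates.
    return { size, { -trans.x, -trans.y, -trans.z } };
}

int main()
{
    // Two opposite corners of a 2 x 4 x 1 box offset from the origin.
    std::vector<Vec3> pts = { { 1.f, 1.f, 1.f }, { 3.f, 5.f, 2.f } };
    NormalizeResult r = normalizeToUnitCube(pts, pts[0], pts[1]);

    // After normalization the corners land on the unit cube...
    assert(pts[0].x == -0.5f && pts[1].y == 0.5f && pts[1].z == 0.5f);

    // ...and scale/translation undo the normalization per axis.
    std::printf("recovered corner: %g %g %g\n",
                pts[1].x * r.scale.x + r.translation.x,
                pts[1].y * r.scale.y + r.translation.y,
                pts[1].z * r.scale.z + r.translation.z);  // prints 3 5 2
    return 0;
}
```

For comparison, the removed code would have normalized the same 2 x 4 x 1 box with the single factor 1/4, leaving a 0.5 x 1 x 0.25 box, and mNormalizedScale would have been (4, 4, 4) on every axis rather than the per-axis (2, 4, 1).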