path: root/indra/llprimitive/lldaeloader.cpp
author    Graham Linden <graham@lindenlab.com>    2014-04-22 13:57:56 -0700
committer Graham Linden <graham@lindenlab.com>    2014-04-22 13:57:56 -0700
commit    428f2aa84e4cb33e49e58ef115e88d057be664c1 (patch)
tree      e516559af5b907825e3e1faeee77213e096ec5ce /indra/llprimitive/lldaeloader.cpp
parent    ae035a0d66604e25b1277c4fa303aea8d798e719 (diff)
Added missing new files
Diffstat (limited to 'indra/llprimitive/lldaeloader.cpp')
-rw-r--r--    indra/llprimitive/lldaeloader.cpp    2326
1 file changed, 2326 insertions, 0 deletions
diff --git a/indra/llprimitive/lldaeloader.cpp b/indra/llprimitive/lldaeloader.cpp
new file mode 100644
index 0000000000..740ecf31c7
--- /dev/null
+++ b/indra/llprimitive/lldaeloader.cpp
@@ -0,0 +1,2326 @@
+/**
+ * @file lldaeloader.cpp
+ * @brief LLDAELoader class implementation
+ *
+ * $LicenseInfo:firstyear=2013&license=viewerlgpl$
+ * Second Life Viewer Source Code
+ * Copyright (C) 2013, Linden Research, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License only.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
+ * $/LicenseInfo$
+ */
+
+#if LL_MSVC
+#pragma warning (disable : 4263)
+#pragma warning (disable : 4264)
+#endif
+#include "dae.h"
+#include "dom/domAsset.h"
+#include "dom/domBind_material.h"
+#include "dom/domCOLLADA.h"
+#include "dom/domConstants.h"
+#include "dom/domController.h"
+#include "dom/domEffect.h"
+#include "dom/domGeometry.h"
+#include "dom/domInstance_geometry.h"
+#include "dom/domInstance_material.h"
+#include "dom/domInstance_node.h"
+#include "dom/domInstance_effect.h"
+#include "dom/domMaterial.h"
+#include "dom/domMatrix.h"
+#include "dom/domNode.h"
+#include "dom/domProfile_COMMON.h"
+#include "dom/domRotate.h"
+#include "dom/domScale.h"
+#include "dom/domTranslate.h"
+#include "dom/domVisual_scene.h"
+#if LL_MSVC
+#pragma warning (default : 4263)
+#pragma warning (default : 4264)
+#endif
+
+#include "lldaeloader.h"
+#include "llsdserialize.h"
+#include "lljoint.h"
+
+#include "glh/glh_linear.h"
+#include "llmatrix4a.h"
+
+std::string colladaVersion[VERSIONTYPE_COUNT+1] =
+{
+ "1.4.0",
+ "1.4.1",
+ "Unsupported"
+};
+
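+// Resolve the position, normal and texcoord <source> elements referenced by a
+// primitive's <input> list (including those nested under the VERTEX input), record
+// each one's index offset, and compute the index stride (max offset + 1).
+// Returns false when the VERTEX input cannot be resolved.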
+bool get_dom_sources(const domInputLocalOffset_Array& inputs, S32& pos_offset, S32& tc_offset, S32& norm_offset, S32 &idx_stride,
+ domSource* &pos_source, domSource* &tc_source, domSource* &norm_source)
+{
+ idx_stride = 0;
+
+ for (U32 j = 0; j < inputs.getCount(); ++j)
+ {
+ idx_stride = llmax((S32) inputs[j]->getOffset(), idx_stride);
+
+ if (strcmp(COMMON_PROFILE_INPUT_VERTEX, inputs[j]->getSemantic()) == 0)
+ { //found vertex array
+ const domURIFragmentType& uri = inputs[j]->getSource();
+ daeElementRef elem = uri.getElement();
+ domVertices* vertices = (domVertices*) elem.cast();
+ if ( !vertices )
+ {
+ return false;
+ }
+
+ domInputLocal_Array& v_inp = vertices->getInput_array();
+
+
+ for (U32 k = 0; k < v_inp.getCount(); ++k)
+ {
+ if (strcmp(COMMON_PROFILE_INPUT_POSITION, v_inp[k]->getSemantic()) == 0)
+ {
+ pos_offset = inputs[j]->getOffset();
+
+ const domURIFragmentType& uri = v_inp[k]->getSource();
+ daeElementRef elem = uri.getElement();
+ pos_source = (domSource*) elem.cast();
+ }
+
+ if (strcmp(COMMON_PROFILE_INPUT_NORMAL, v_inp[k]->getSemantic()) == 0)
+ {
+ norm_offset = inputs[j]->getOffset();
+
+ const domURIFragmentType& uri = v_inp[k]->getSource();
+ daeElementRef elem = uri.getElement();
+ norm_source = (domSource*) elem.cast();
+ }
+ }
+ }
+
+ if (strcmp(COMMON_PROFILE_INPUT_NORMAL, inputs[j]->getSemantic()) == 0)
+ {
+ //found normal array for this triangle list
+ norm_offset = inputs[j]->getOffset();
+ const domURIFragmentType& uri = inputs[j]->getSource();
+ daeElementRef elem = uri.getElement();
+ norm_source = (domSource*) elem.cast();
+ }
+ else if (strcmp(COMMON_PROFILE_INPUT_TEXCOORD, inputs[j]->getSemantic()) == 0)
+ { //found texCoords
+ tc_offset = inputs[j]->getOffset();
+ const domURIFragmentType& uri = inputs[j]->getSource();
+ daeElementRef elem = uri.getElement();
+ tc_source = (domSource*) elem.cast();
+ }
+ }
+
+ idx_stride += 1;
+
+ return true;
+}
+
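+// Build LLVolumeFace data from a COLLADA <triangles> element, welding duplicate
+// vertices through a position-keyed map and starting a new face before the
+// 16-bit index limit is reached.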
+LLModel::EModelStatus load_face_from_dom_triangles(std::vector<LLVolumeFace>& face_list, std::vector<std::string>& materials, domTrianglesRef& tri)
+{
+ LLVolumeFace face;
+ std::vector<LLVolumeFace::VertexData> verts;
+ std::vector<U16> indices;
+
+ const domInputLocalOffset_Array& inputs = tri->getInput_array();
+
+ S32 pos_offset = -1;
+ S32 tc_offset = -1;
+ S32 norm_offset = -1;
+
+ domSource* pos_source = NULL;
+ domSource* tc_source = NULL;
+ domSource* norm_source = NULL;
+
+ S32 idx_stride = 0;
+
+ if ( !get_dom_sources(inputs, pos_offset, tc_offset, norm_offset, idx_stride, pos_source, tc_source, norm_source) || !pos_source )
+ {
+ return LLModel::BAD_ELEMENT;
+ }
+
+
+ domPRef p = tri->getP();
+ domListOfUInts& idx = p->getValue();
+
+ domListOfFloats dummy ;
+ domListOfFloats& v = pos_source ? pos_source->getFloat_array()->getValue() : dummy ;
+ domListOfFloats& tc = tc_source ? tc_source->getFloat_array()->getValue() : dummy ;
+ domListOfFloats& n = norm_source ? norm_source->getFloat_array()->getValue() : dummy ;
+
+ if (pos_source)
+ {
+ face.mExtents[0].set(v[0], v[1], v[2]);
+ face.mExtents[1].set(v[0], v[1], v[2]);
+ }
+
+ LLVolumeFace::VertexMapData::PointMap point_map;
+
+ for (U32 i = 0; i < idx.getCount(); i += idx_stride)
+ {
+ LLVolumeFace::VertexData cv;
+ if (pos_source)
+ {
+ cv.setPosition(LLVector4a(v[idx[i+pos_offset]*3+0],
+ v[idx[i+pos_offset]*3+1],
+ v[idx[i+pos_offset]*3+2]));
+ }
+
+ if (tc_source)
+ {
+ cv.mTexCoord.setVec(tc[idx[i+tc_offset]*2+0],
+ tc[idx[i+tc_offset]*2+1]);
+ }
+
+ if (norm_source)
+ {
+ cv.setNormal(LLVector4a(n[idx[i+norm_offset]*3+0],
+ n[idx[i+norm_offset]*3+1],
+ n[idx[i+norm_offset]*3+2]));
+ }
+
+ BOOL found = FALSE;
+
+ LLVolumeFace::VertexMapData::PointMap::iterator point_iter;
+ point_iter = point_map.find(LLVector3(cv.getPosition().getF32ptr()));
+
+ if (point_iter != point_map.end())
+ {
+ for (U32 j = 0; j < point_iter->second.size(); ++j)
+ {
+ // We have a matching loc
+ //
+ if ((point_iter->second)[j] == cv)
+ {
+ U16 shared_index = (point_iter->second)[j].mIndex;
+
+ // Don't share verts within the same tri, degenerate
+ //
+ if ((indices.size() % 3) && (indices[indices.size()-1] != shared_index))
+ {
+ found = TRUE;
+ indices.push_back(shared_index);
+ }
+ break;
+ }
+ }
+ }
+
+ if (!found)
+ {
+ update_min_max(face.mExtents[0], face.mExtents[1], cv.getPosition());
+ verts.push_back(cv);
+ if (verts.size() >= 65535)
+ {
+ //llerrs << "Attempted to write model exceeding 16-bit index buffer limitation." << llendl;
+ return LLModel::VERTEX_NUMBER_OVERFLOW ;
+ }
+ U16 index = (U16) (verts.size()-1);
+ indices.push_back(index);
+
+ LLVolumeFace::VertexMapData d;
+ d.setPosition(cv.getPosition());
+ d.mTexCoord = cv.mTexCoord;
+ d.setNormal(cv.getNormal());
+ d.mIndex = index;
+ if (point_iter != point_map.end())
+ {
+ point_iter->second.push_back(d);
+ }
+ else
+ {
+ point_map[LLVector3(d.getPosition().getF32ptr())].push_back(d);
+ }
+ }
+
+ if (indices.size()%3 == 0 && verts.size() >= 65532)
+ {
+ face_list.push_back(face);
+ face_list.rbegin()->fillFromLegacyData(verts, indices);
+ LLVolumeFace& new_face = *face_list.rbegin();
+ if (!norm_source)
+ {
+ ll_aligned_free_16(new_face.mNormals);
+ new_face.mNormals = NULL;
+ }
+
+ if (!tc_source)
+ {
+ ll_aligned_free_16(new_face.mTexCoords);
+ new_face.mTexCoords = NULL;
+ }
+
+ // Reset the scratch buffers so the next face starts empty (as the
+ // polylist path below does); otherwise the split face would immediately
+ // hit the 16-bit vertex limit.
+ face = LLVolumeFace();
+ verts.clear();
+ indices.clear();
+ point_map.clear();
+ }
+ }
+
+ if (!verts.empty())
+ {
+ std::string material;
+
+ if (tri->getMaterial())
+ {
+ material = std::string(tri->getMaterial());
+ }
+
+ materials.push_back(material);
+ face_list.push_back(face);
+
+ face_list.rbegin()->fillFromLegacyData(verts, indices);
+ LLVolumeFace& new_face = *face_list.rbegin();
+ if (!norm_source)
+ {
+ ll_aligned_free_16(new_face.mNormals);
+ new_face.mNormals = NULL;
+ }
+
+ if (!tc_source)
+ {
+ ll_aligned_free_16(new_face.mTexCoords);
+ new_face.mTexCoords = NULL;
+ }
+ }
+
+ return LLModel::NO_ERRORS ;
+}
+
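+// Build LLVolumeFace data from a COLLADA <polylist> element, triangulating each
+// polygon as a fan while welding duplicate vertices and splitting faces near the
+// 16-bit index limit.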
+LLModel::EModelStatus load_face_from_dom_polylist(std::vector<LLVolumeFace>& face_list, std::vector<std::string>& materials, domPolylistRef& poly)
+{
+ domPRef p = poly->getP();
+ domListOfUInts& idx = p->getValue();
+
+ if (idx.getCount() == 0)
+ {
+ return LLModel::NO_ERRORS ;
+ }
+
+ const domInputLocalOffset_Array& inputs = poly->getInput_array();
+
+
+ domListOfUInts& vcount = poly->getVcount()->getValue();
+
+ S32 pos_offset = -1;
+ S32 tc_offset = -1;
+ S32 norm_offset = -1;
+
+ domSource* pos_source = NULL;
+ domSource* tc_source = NULL;
+ domSource* norm_source = NULL;
+
+ S32 idx_stride = 0;
+
+ if (!get_dom_sources(inputs, pos_offset, tc_offset, norm_offset, idx_stride, pos_source, tc_source, norm_source))
+ {
+ return LLModel::BAD_ELEMENT;
+ }
+
+ LLVolumeFace face;
+
+ std::vector<U16> indices;
+ std::vector<LLVolumeFace::VertexData> verts;
+
+ domListOfFloats v;
+ domListOfFloats tc;
+ domListOfFloats n;
+
+ if (pos_source)
+ {
+ v = pos_source->getFloat_array()->getValue();
+ face.mExtents[0].set(v[0], v[1], v[2]);
+ face.mExtents[1].set(v[0], v[1], v[2]);
+ }
+
+ if (tc_source)
+ {
+ tc = tc_source->getFloat_array()->getValue();
+ }
+
+ if (norm_source)
+ {
+ n = norm_source->getFloat_array()->getValue();
+ }
+
+ LLVolumeFace::VertexMapData::PointMap point_map;
+
+ U32 cur_idx = 0;
+ for (U32 i = 0; i < vcount.getCount(); ++i)
+ { //for each polygon
+ U32 first_index = 0;
+ U32 last_index = 0;
+ for (U32 j = 0; j < vcount[i]; ++j)
+ { //for each vertex
+
+ LLVolumeFace::VertexData cv;
+
+ if (pos_source)
+ {
+ cv.getPosition().set(v[idx[cur_idx+pos_offset]*3+0],
+ v[idx[cur_idx+pos_offset]*3+1],
+ v[idx[cur_idx+pos_offset]*3+2]);
+ }
+
+ if (tc_source)
+ {
+ cv.mTexCoord.setVec(tc[idx[cur_idx+tc_offset]*2+0],
+ tc[idx[cur_idx+tc_offset]*2+1]);
+ }
+
+ if (norm_source)
+ {
+ cv.getNormal().set(n[idx[cur_idx+norm_offset]*3+0],
+ n[idx[cur_idx+norm_offset]*3+1],
+ n[idx[cur_idx+norm_offset]*3+2]);
+ }
+
+ cur_idx += idx_stride;
+
+ BOOL found = FALSE;
+
+ LLVolumeFace::VertexMapData::PointMap::iterator point_iter;
+ LLVector3 pos3(cv.getPosition().getF32ptr());
+ point_iter = point_map.find(pos3);
+
+ if (point_iter != point_map.end())
+ {
+ for (U32 k = 0; k < point_iter->second.size(); ++k)
+ {
+ if ((point_iter->second)[k] == cv)
+ {
+ found = TRUE;
+ U32 index = (point_iter->second)[k].mIndex;
+ if (j == 0)
+ {
+ first_index = index;
+ }
+ else if (j == 1)
+ {
+ last_index = index;
+ }
+ else
+ {
+ // if these are the same, we have a very, very skinny triangle (coincident verts on one or more edges)
+ //
+ llassert((first_index != last_index) && (last_index != index) && (first_index != index));
+ indices.push_back(first_index);
+ indices.push_back(last_index);
+ indices.push_back(index);
+ last_index = index;
+ }
+
+ break;
+ }
+ }
+ }
+
+ if (!found)
+ {
+ update_min_max(face.mExtents[0], face.mExtents[1], cv.getPosition());
+ verts.push_back(cv);
+ if (verts.size() >= 65535)
+ {
+ //llerrs << "Attempted to write model exceeding 16-bit index buffer limitation." << llendl;
+ return LLModel::VERTEX_NUMBER_OVERFLOW ;
+ }
+ U16 index = (U16) (verts.size()-1);
+
+ if (j == 0)
+ {
+ first_index = index;
+ }
+ else if (j == 1)
+ {
+ last_index = index;
+ }
+ else
+ {
+ // detect very skinny degenerate triangles with collapsed edges
+ //
+ llassert((first_index != last_index) && (last_index != index) && (first_index != index));
+ indices.push_back(first_index);
+ indices.push_back(last_index);
+ indices.push_back(index);
+ last_index = index;
+ }
+
+ LLVolumeFace::VertexMapData d;
+ d.setPosition(cv.getPosition());
+ d.mTexCoord = cv.mTexCoord;
+ d.setNormal(cv.getNormal());
+ d.mIndex = index;
+ if (point_iter != point_map.end())
+ {
+ point_iter->second.push_back(d);
+ }
+ else
+ {
+ point_map[pos3].push_back(d);
+ }
+ }
+
+ if (indices.size()%3 == 0 && indices.size() >= 65532)
+ {
+ face_list.push_back(face);
+ face_list.rbegin()->fillFromLegacyData(verts, indices);
+ LLVolumeFace& new_face = *face_list.rbegin();
+ if (!norm_source)
+ {
+ ll_aligned_free_16(new_face.mNormals);
+ new_face.mNormals = NULL;
+ }
+
+ if (!tc_source)
+ {
+ ll_aligned_free_16(new_face.mTexCoords);
+ new_face.mTexCoords = NULL;
+ }
+
+ face = LLVolumeFace();
+ verts.clear();
+ indices.clear();
+ point_map.clear();
+ }
+ }
+ }
+
+ if (!verts.empty())
+ {
+ std::string material;
+
+ if (poly->getMaterial())
+ {
+ material = std::string(poly->getMaterial());
+ }
+
+ materials.push_back(material);
+ face_list.push_back(face);
+ face_list.rbegin()->fillFromLegacyData(verts, indices);
+
+ LLVolumeFace& new_face = *face_list.rbegin();
+ if (!norm_source)
+ {
+ ll_aligned_free_16(new_face.mNormals);
+ new_face.mNormals = NULL;
+ }
+
+ if (!tc_source)
+ {
+ ll_aligned_free_16(new_face.mTexCoords);
+ new_face.mTexCoords = NULL;
+ }
+ }
+
+ return LLModel::NO_ERRORS ;
+}
+
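+// Build LLVolumeFace data from a COLLADA <polygons> element: expand each polygon
+// into a triangle fan, then de-duplicate vertices through a map before filling
+// the face.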
+LLModel::EModelStatus load_face_from_dom_polygons(std::vector<LLVolumeFace>& face_list, std::vector<std::string>& materials, domPolygonsRef& poly)
+{
+ LLVolumeFace face;
+ std::vector<U16> indices;
+ std::vector<LLVolumeFace::VertexData> verts;
+
+ const domInputLocalOffset_Array& inputs = poly->getInput_array();
+
+ S32 v_offset = -1;
+ S32 n_offset = -1;
+ S32 t_offset = -1;
+
+ domListOfFloats* v = NULL;
+ domListOfFloats* n = NULL;
+ domListOfFloats* t = NULL;
+
+ U32 stride = 0;
+ for (U32 i = 0; i < inputs.getCount(); ++i)
+ {
+ stride = llmax((U32) inputs[i]->getOffset()+1, stride);
+
+ if (strcmp(COMMON_PROFILE_INPUT_VERTEX, inputs[i]->getSemantic()) == 0)
+ { //found vertex array
+ v_offset = inputs[i]->getOffset();
+
+ const domURIFragmentType& uri = inputs[i]->getSource();
+ daeElementRef elem = uri.getElement();
+ domVertices* vertices = (domVertices*) elem.cast();
+ if (!vertices)
+ {
+ return LLModel::BAD_ELEMENT;
+ }
+ domInputLocal_Array& v_inp = vertices->getInput_array();
+
+ for (U32 k = 0; k < v_inp.getCount(); ++k)
+ {
+ if (strcmp(COMMON_PROFILE_INPUT_POSITION, v_inp[k]->getSemantic()) == 0)
+ {
+ const domURIFragmentType& uri = v_inp[k]->getSource();
+ daeElementRef elem = uri.getElement();
+ domSource* src = (domSource*) elem.cast();
+ if (!src)
+ {
+ return LLModel::BAD_ELEMENT;
+ }
+ v = &(src->getFloat_array()->getValue());
+ }
+ }
+ }
+ else if (strcmp(COMMON_PROFILE_INPUT_NORMAL, inputs[i]->getSemantic()) == 0)
+ {
+ n_offset = inputs[i]->getOffset();
+ //found normal array for this triangle list
+ const domURIFragmentType& uri = inputs[i]->getSource();
+ daeElementRef elem = uri.getElement();
+ domSource* src = (domSource*) elem.cast();
+ if (!src)
+ {
+ return LLModel::BAD_ELEMENT;
+ }
+ n = &(src->getFloat_array()->getValue());
+ }
+ else if (strcmp(COMMON_PROFILE_INPUT_TEXCOORD, inputs[i]->getSemantic()) == 0 && inputs[i]->getSet() == 0)
+ { //found texCoords
+ t_offset = inputs[i]->getOffset();
+ const domURIFragmentType& uri = inputs[i]->getSource();
+ daeElementRef elem = uri.getElement();
+ domSource* src = (domSource*) elem.cast();
+ if (!src)
+ {
+ return LLModel::BAD_ELEMENT;
+ }
+ t = &(src->getFloat_array()->getValue());
+ }
+ }
+
+ domP_Array& ps = poly->getP_array();
+
+ //make a triangle list in <verts>
+ for (U32 i = 0; i < ps.getCount(); ++i)
+ { //for each polygon
+ domListOfUInts& idx = ps[i]->getValue();
+ for (U32 j = 0; j < idx.getCount()/stride; ++j)
+ { //for each vertex
+ if (j > 2)
+ {
+ U32 size = verts.size();
+ LLVolumeFace::VertexData v0 = verts[size-3];
+ LLVolumeFace::VertexData v1 = verts[size-1];
+
+ verts.push_back(v0);
+ verts.push_back(v1);
+ }
+
+ LLVolumeFace::VertexData vert;
+
+
+ if (v)
+ {
+ U32 v_idx = idx[j*stride+v_offset]*3;
+ v_idx = llclamp(v_idx, (U32) 0, (U32) v->getCount());
+ vert.getPosition().set(v->get(v_idx),
+ v->get(v_idx+1),
+ v->get(v_idx+2));
+ }
+
+ //bounds check n and t lookups because some FBX to DAE converters
+ //use negative indices and empty arrays to indicate data does not exist
+ //for a particular channel
+ if (n && n->getCount() > 0)
+ {
+ U32 n_idx = idx[j*stride+n_offset]*3;
+ n_idx = llclamp(n_idx, (U32) 0, (U32) n->getCount());
+ vert.getNormal().set(n->get(n_idx),
+ n->get(n_idx+1),
+ n->get(n_idx+2));
+ }
+ else
+ {
+ vert.getNormal().clear();
+ }
+
+
+ if (t && t->getCount() > 0)
+ {
+ U32 t_idx = idx[j*stride+t_offset]*2;
+ t_idx = llclamp(t_idx, (U32) 0, (U32) t->getCount());
+ vert.mTexCoord.setVec(t->get(t_idx),
+ t->get(t_idx+1));
+ }
+ else
+ {
+ vert.mTexCoord.clear();
+ }
+
+
+ verts.push_back(vert);
+ }
+ }
+
+ if (verts.empty())
+ {
+ return LLModel::NO_ERRORS;
+ }
+
+ face.mExtents[0] = verts[0].getPosition();
+ face.mExtents[1] = verts[0].getPosition();
+
+ //create a map of unique vertices to indices
+ std::map<LLVolumeFace::VertexData, U32> vert_idx;
+
+ U32 cur_idx = 0;
+ for (U32 i = 0; i < verts.size(); ++i)
+ {
+ std::map<LLVolumeFace::VertexData, U32>::iterator iter = vert_idx.find(verts[i]);
+ if (iter == vert_idx.end())
+ {
+ vert_idx[verts[i]] = cur_idx++;
+ }
+ }
+
+ //build vertex array from map
+ std::vector<LLVolumeFace::VertexData> new_verts;
+ new_verts.resize(vert_idx.size());
+
+ for (std::map<LLVolumeFace::VertexData, U32>::iterator iter = vert_idx.begin(); iter != vert_idx.end(); ++iter)
+ {
+ new_verts[iter->second] = iter->first;
+ update_min_max(face.mExtents[0], face.mExtents[1], iter->first.getPosition());
+ }
+
+ //build index array from map
+ indices.resize(verts.size());
+
+ for (U32 i = 0; i < verts.size(); ++i)
+ {
+ indices[i] = vert_idx[verts[i]];
+ llassert(!i || (indices[i-1] != indices[i]));
+ }
+
+ // DEBUG just build an expanded triangle list
+ /*for (U32 i = 0; i < verts.size(); ++i)
+ {
+ indices.push_back((U16) i);
+ update_min_max(face.mExtents[0], face.mExtents[1], verts[i].getPosition());
+ }*/
+
+ if (!new_verts.empty())
+ {
+ std::string material;
+
+ if (poly->getMaterial())
+ {
+ material = std::string(poly->getMaterial());
+ }
+
+ materials.push_back(material);
+ face_list.push_back(face);
+ face_list.rbegin()->fillFromLegacyData(new_verts, indices);
+
+ LLVolumeFace& new_face = *face_list.rbegin();
+ if (!n)
+ {
+ ll_aligned_free_16(new_face.mNormals);
+ new_face.mNormals = NULL;
+ }
+
+ if (!t)
+ {
+ ll_aligned_free_16(new_face.mTexCoords);
+ new_face.mTexCoords = NULL;
+ }
+ }
+
+ return LLModel::NO_ERRORS ;
+}
+
+//-----------------------------------------------------------------------------
+// LLDAELoader
+//-----------------------------------------------------------------------------
+LLDAELoader::LLDAELoader(
+ std::string filename,
+ S32 lod,
+ load_callback_t load_cb,
+ joint_lookup_func_t joint_lookup_func,
+ texture_load_func_t texture_load_func,
+ state_callback_t state_cb,
+ void* opaque_userdata,
+ JointTransformMap& jointMap,
+ JointSet& jointsFromNodes )
+: LLModelLoader(
+ filename,
+ lod,
+ load_cb,
+ joint_lookup_func,
+ texture_load_func,
+ state_cb,
+ opaque_userdata,
+ jointMap,
+ jointsFromNodes)
+{
+}
+
+LLDAELoader::~LLDAELoader()
+{
+}
+
+struct ModelSort
+{
+ bool operator()(const LLPointer< LLModel >& lhs, const LLPointer< LLModel >& rhs)
+ {
+ // Compare by submodel ID first, falling back to a case-insensitive label
+ // comparison; keeps this a strict weak ordering as std::sort requires.
+ if (lhs->mSubmodelID != rhs->mSubmodelID)
+ {
+ return lhs->mSubmodelID < rhs->mSubmodelID;
+ }
+ return LLStringUtil::compareInsensitive(lhs->mLabel, rhs->mLabel) < 0;
+ }
+};
+
+bool LLDAELoader::OpenFile(const std::string& filename)
+{
+ //no suitable slm exists, load from the .dae file
+ DAE dae;
+ domCOLLADA* dom = dae.open(filename);
+
+ if (!dom)
+ {
+ llinfos<<"Error with dae - typically indicates a corrupt file."<<llendl;
+ setLoadState( ERROR_PARSING );
+ return false;
+ }
+ //Dom version
+ daeString domVersion = dae.getDomVersion();
+ std::string sldom(domVersion);
+ llinfos<<"Collada Importer Version: "<<sldom<<llendl;
+ //Dae version
+ domVersionType docVersion = dom->getVersion();
+ //0=1.4
+ //1=1.4.1
+ //2=Currently unsupported, however may work
+ if (docVersion > 1 )
+ {
+ docVersion = VERSIONTYPE_COUNT;
+ }
+ llinfos<<"Dae version "<<colladaVersion[docVersion]<<llendl;
+
+
+ daeDatabase* db = dae.getDatabase();
+
+ daeInt count = db->getElementCount(NULL, COLLADA_TYPE_MESH);
+
+ daeDocument* doc = dae.getDoc(mFilename);
+ if (!doc)
+ {
+ llwarns << "can't find internal doc" << llendl;
+ return false;
+ }
+
+ daeElement* root = doc->getDomRoot();
+ if (!root)
+ {
+ llwarns << "document has no root" << llendl;
+ return false;
+ }
+
+ //Verify some basic properties of the dae
+ //1. Basic validity check on controller
+ U32 controllerCount = (U32) db->getElementCount( NULL, "controller" );
+ bool result = false;
+ for ( U32 i=0; i<controllerCount; ++i )
+ {
+ domController* pController = NULL;
+ db->getElement( (daeElement**) &pController, i , NULL, "controller" );
+ result = verifyController( pController );
+ if (!result)
+ {
+ setLoadState( ERROR_PARSING );
+ return true;
+ }
+ }
+
+
+ //get unit scale
+ mTransform.setIdentity();
+
+ domAsset::domUnit* unit = daeSafeCast<domAsset::domUnit>(root->getDescendant(daeElement::matchType(domAsset::domUnit::ID())));
+
+ if (unit)
+ {
+ F32 meter = unit->getMeter();
+ mTransform.mMatrix[0][0] = meter;
+ mTransform.mMatrix[1][1] = meter;
+ mTransform.mMatrix[2][2] = meter;
+ }
+
+ //get up axis rotation
+ LLMatrix4 rotation;
+
+ domUpAxisType up = UPAXISTYPE_Y_UP; // default is Y_UP
+ domAsset::domUp_axis* up_axis =
+ daeSafeCast<domAsset::domUp_axis>(root->getDescendant(daeElement::matchType(domAsset::domUp_axis::ID())));
+
+ if (up_axis)
+ {
+ up = up_axis->getValue();
+ }
+
+ if (up == UPAXISTYPE_X_UP)
+ {
+ rotation.initRotation(0.0f, 90.0f * DEG_TO_RAD, 0.0f);
+ }
+ else if (up == UPAXISTYPE_Y_UP)
+ {
+ rotation.initRotation(90.0f * DEG_TO_RAD, 0.0f, 0.0f);
+ }
+
+ rotation *= mTransform;
+ mTransform = rotation;
+
+ mTransform.condition();
+
+ for (daeInt idx = 0; idx < count; ++idx)
+ { //build map of domEntities to LLModel
+ domMesh* mesh = NULL;
+ db->getElement((daeElement**) &mesh, idx, NULL, COLLADA_TYPE_MESH);
+
+ if (mesh)
+ {
+
+ std::vector<LLModel*> models;
+
+ loadModelsFromDomMesh(mesh, models);
+
+ std::vector<LLModel*>::iterator i;
+ i = models.begin();
+ while (i != models.end())
+ {
+ LLModel* mdl = *i;
+ if(mdl->getStatus() != LLModel::NO_ERRORS)
+ {
+ setLoadState(ERROR_PARSING + mdl->getStatus()) ;
+ return false; //abort
+ }
+
+ if (mdl && validate_model(mdl))
+ {
+ mModelList.push_back(mdl);
+ mModelsMap[mesh].push_back(mdl);
+ }
+ i++;
+ }
+ }
+ }
+
+ std::sort(mModelList.begin(), mModelList.end(), ModelSort());
+
+ model_list::iterator model_iter = mModelList.begin();
+ while (model_iter != mModelList.end())
+ {
+ llinfos << "Importing " << (*model_iter)->mLabel << llendl;
+ std::vector<std::string>::iterator mat_iter = (*model_iter)->mMaterialList.begin();
+ while (mat_iter != (*model_iter)->mMaterialList.end())
+ {
+ llinfos << (*model_iter)->mLabel << " references " << (*mat_iter) << llendl;
+ mat_iter++;
+ }
+ model_iter++;
+ }
+
+ count = db->getElementCount(NULL, COLLADA_TYPE_SKIN);
+ for (daeInt idx = 0; idx < count; ++idx)
+ { //add skinned meshes as instances
+ domSkin* skin = NULL;
+ db->getElement((daeElement**) &skin, idx, NULL, COLLADA_TYPE_SKIN);
+
+ if (skin)
+ {
+ domGeometry* geom = daeSafeCast<domGeometry>(skin->getSource().getElement());
+
+ if (geom)
+ {
+ domMesh* mesh = geom->getMesh();
+ if (mesh)
+ {
+ std::vector< LLPointer< LLModel > >::iterator i = mModelsMap[mesh].begin();
+ while (i != mModelsMap[mesh].end())
+ {
+ LLPointer<LLModel> mdl = *i;
+ LLDAELoader::processDomModel(mdl, &dae, root, mesh, skin);
+ i++;
+ }
+ }
+ }
+ }
+ }
+
+ daeElement* scene = root->getDescendant("visual_scene");
+
+ if (!scene)
+ {
+ llwarns << "document has no visual_scene" << llendl;
+ setLoadState( ERROR_PARSING );
+ return true;
+ }
+
+ setLoadState( DONE );
+
+ bool badElement = false;
+
+ processElement( scene, badElement, &dae );
+
+ if ( badElement )
+ {
+ setLoadState( ERROR_PARSING );
+ }
+
+ return true;
+}
+
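+// Apply a <skin> controller to a model: extract the bind shape matrix, joint names,
+// inverse bind matrices and per-vertex weights, resolve joint transforms from the
+// skeleton (or from the visual scene when no skeleton root is present), and add the
+// skinned instance to the scene.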
+void LLDAELoader::processDomModel(LLModel* model, DAE* dae, daeElement* root, domMesh* mesh, domSkin* skin)
+{
+ llassert(model && dae && mesh && skin);
+
+ if (model)
+ {
+ LLVector3 mesh_scale_vector;
+ LLVector3 mesh_translation_vector;
+ model->getNormalizedScaleTranslation(mesh_scale_vector, mesh_translation_vector);
+
+ LLMatrix4 normalized_transformation;
+ normalized_transformation.setTranslation(mesh_translation_vector);
+
+ LLMatrix4 mesh_scale;
+ mesh_scale.initScale(mesh_scale_vector);
+ mesh_scale *= normalized_transformation;
+ normalized_transformation = mesh_scale;
+
+ glh::matrix4f inv_mat((F32*) normalized_transformation.mMatrix);
+ inv_mat = inv_mat.inverse();
+ LLMatrix4 inverse_normalized_transformation(inv_mat.m);
+
+ domSkin::domBind_shape_matrix* bind_mat = skin->getBind_shape_matrix();
+
+ if (bind_mat)
+ { //get bind shape matrix
+ domFloat4x4& dom_value = bind_mat->getValue();
+
+ LLMeshSkinInfo& skin_info = model->mSkinInfo;
+
+ for (int i = 0; i < 4; i++)
+ {
+ for(int j = 0; j < 4; j++)
+ {
+ skin_info.mBindShapeMatrix.mMatrix[i][j] = dom_value[i + j*4];
+ }
+ }
+
+ LLMatrix4 trans = normalized_transformation;
+ trans *= skin_info.mBindShapeMatrix;
+ skin_info.mBindShapeMatrix = trans;
+ }
+
+
+ //Some collada setup for accessing the skeleton
+ daeElement* pElement = 0;
+ dae->getDatabase()->getElement( &pElement, 0, 0, "skeleton" );
+
+ //Try to get at the skeletal instance controller
+ domInstance_controller::domSkeleton* pSkeleton = daeSafeCast<domInstance_controller::domSkeleton>( pElement );
+ bool missingSkeletonOrScene = false;
+
+ //If no skeleton, do a breadth-first search to get at specific joints
+ bool rootNode = false;
+
+ //Need to test for a skeleton that does not have a root node
+ //This occurs when your instance controller does not have an associated scene
+ if ( pSkeleton )
+ {
+ daeElement* pSkeletonRootNode = pSkeleton->getValue().getElement();
+ if ( pSkeletonRootNode )
+ {
+ rootNode = true;
+ }
+
+ }
+ if ( !pSkeleton || !rootNode )
+ {
+ daeElement* pScene = root->getDescendant("visual_scene");
+ if ( !pScene )
+ {
+ llwarns<<"No visual scene - unable to parse bone offsets "<<llendl;
+ missingSkeletonOrScene = true;
+ }
+ else
+ {
+ //Get the children at this level
+ daeTArray< daeSmartRef<daeElement> > children = pScene->getChildren();
+ S32 childCount = children.getCount();
+
+ //Process any children that are joints
+ //Not all children are joints; some could be ambient lights, cameras, geometry, etc.
+ for (S32 i = 0; i < childCount; ++i)
+ {
+ domNode* pNode = daeSafeCast<domNode>(children[i]);
+ if ( isNodeAJoint( pNode ) )
+ {
+ processJointNode( pNode, mJointList );
+ }
+ }
+ }
+ }
+ else
+ //Has Skeleton
+ {
+ //Get the root node of the skeleton
+ daeElement* pSkeletonRootNode = pSkeleton->getValue().getElement();
+ if ( pSkeletonRootNode )
+ {
+ //Once we have the root node - start accessing its joint components
+ const int jointCnt = mJointMap.size();
+ JointMap :: const_iterator jointIt = mJointMap.begin();
+
+ //Loop over all the possible joints within the .dae - using the allowed joint list in the ctor.
+ for ( int i=0; i<jointCnt; ++i, ++jointIt )
+ {
+ //Build a joint for the resolver to work with
+ char str[64]={0};
+ sprintf(str,"./%s",(*jointIt).first.c_str() );
+ //llwarns<<"Joint "<< str <<llendl;
+
+ //Setup the resolver
+ daeSIDResolver resolver( pSkeletonRootNode, str );
+
+ //Look for the joint
+ domNode* pJoint = daeSafeCast<domNode>( resolver.getElement() );
+ if ( pJoint )
+ {
+ //Pull out the translate id and store it in the jointTranslations map
+ daeSIDResolver jointResolverA( pJoint, "./translate" );
+ domTranslate* pTranslateA = daeSafeCast<domTranslate>( jointResolverA.getElement() );
+ daeSIDResolver jointResolverB( pJoint, "./location" );
+ domTranslate* pTranslateB = daeSafeCast<domTranslate>( jointResolverB.getElement() );
+
+ LLMatrix4 workingTransform;
+
+ //Translation via SID
+ if ( pTranslateA )
+ {
+ extractTranslation( pTranslateA, workingTransform );
+ }
+ else
+ if ( pTranslateB )
+ {
+ extractTranslation( pTranslateB, workingTransform );
+ }
+ else
+ {
+ //Translation via child from element
+ daeElement* pTranslateElement = getChildFromElement( pJoint, "translate" );
+ if ( pTranslateElement && pTranslateElement->typeID() != domTranslate::ID() )
+ {
+ llwarns<< "The found element is not a translate node" <<llendl;
+ missingSkeletonOrScene = true;
+ }
+ else
+ if ( pTranslateElement )
+ {
+ extractTranslationViaElement( pTranslateElement, workingTransform );
+ }
+ else
+ {
+ extractTranslationViaSID( pJoint, workingTransform );
+ }
+
+ }
+
+ //Store the joint transform with respect to its name.
+ mJointList[(*jointIt).second.c_str()] = workingTransform;
+ }
+ }
+
+ //If anything failed in regards to extracting the skeleton, joints or translation id,
+ //mention it
+ if ( missingSkeletonOrScene )
+ {
+ llwarns<< "Partial jointmap found in asset - did you mean to just have a partial map?" << llendl;
+ }
+ }//got skeleton?
+ }
+
+
+ domSkin::domJoints* joints = skin->getJoints();
+
+ domInputLocal_Array& joint_input = joints->getInput_array();
+
+ for (size_t i = 0; i < joint_input.getCount(); ++i)
+ {
+ domInputLocal* input = joint_input.get(i);
+ xsNMTOKEN semantic = input->getSemantic();
+
+ if (strcmp(semantic, COMMON_PROFILE_INPUT_JOINT) == 0)
+ { //found joint source, fill model->mJointMap and model->mSkinInfo.mJointNames
+ daeElement* elem = input->getSource().getElement();
+
+ domSource* source = daeSafeCast<domSource>(elem);
+ if (source)
+ {
+
+
+ domName_array* names_source = source->getName_array();
+
+ if (names_source)
+ {
+ domListOfNames &names = names_source->getValue();
+
+ for (size_t j = 0; j < names.getCount(); ++j)
+ {
+ std::string name(names.get(j));
+ if (mJointMap.find(name) != mJointMap.end())
+ {
+ name = mJointMap[name];
+ }
+ model->mSkinInfo.mJointNames.push_back(name);
+ model->mSkinInfo.mJointMap[name] = j;
+ }
+ }
+ else
+ {
+ domIDREF_array* names_source = source->getIDREF_array();
+ if (names_source)
+ {
+ xsIDREFS& names = names_source->getValue();
+
+ for (size_t j = 0; j < names.getCount(); ++j)
+ {
+ std::string name(names.get(j).getID());
+ if (mJointMap.find(name) != mJointMap.end())
+ {
+ name = mJointMap[name];
+ }
+ model->mSkinInfo.mJointNames.push_back(name);
+ model->mSkinInfo.mJointMap[name] = j;
+ }
+ }
+ }
+ }
+ }
+ else if (strcmp(semantic, COMMON_PROFILE_INPUT_INV_BIND_MATRIX) == 0)
+ { //found inv_bind_matrix array, fill model->mInvBindMatrix
+ domSource* source = daeSafeCast<domSource>(input->getSource().getElement());
+ if (source)
+ {
+ domFloat_array* t = source->getFloat_array();
+ if (t)
+ {
+ domListOfFloats& transform = t->getValue();
+ S32 count = transform.getCount()/16;
+
+ for (S32 k = 0; k < count; ++k)
+ {
+ LLMatrix4 mat;
+
+ for (int i = 0; i < 4; i++)
+ {
+ for(int j = 0; j < 4; j++)
+ {
+ mat.mMatrix[i][j] = transform[k*16 + i + j*4];
+ }
+ }
+
+ model->mSkinInfo.mInvBindMatrix.push_back(mat);
+ }
+ }
+ }
+ }
+ }
+
+ //Now that we've parsed the joint array, let's determine if we have a full rig
+ //(which means we have all the joints that are required for an avatar versus
+ //a skinned asset attached to a node in a file that contains an entire skeleton,
+ //but does not use the skeleton).
+ buildJointToNodeMappingFromScene( root );
+ critiqueRigForUploadApplicability( model->mSkinInfo.mJointNames );
+
+ if ( !missingSkeletonOrScene )
+ {
+ //Set the joint translations on the avatar - if it's a full mapping
+ //The joints are reset in the dtor
+ if ( getRigWithSceneParity() )
+ {
+ JointMap :: const_iterator masterJointIt = mJointMap.begin();
+ JointMap :: const_iterator masterJointItEnd = mJointMap.end();
+ for (;masterJointIt!=masterJointItEnd;++masterJointIt )
+ {
+ std::string lookingForJoint = (*masterJointIt).first.c_str();
+
+ if ( mJointList.find( lookingForJoint ) != mJointList.end() )
+ {
+ //llinfos<<"joint "<<lookingForJoint.c_str()<<llendl;
+ LLMatrix4 jointTransform = mJointList[lookingForJoint];
+ LLJoint* pJoint = mJointLookupFunc(lookingForJoint,mOpaqueData);
+ if ( pJoint )
+ {
+ pJoint->storeCurrentXform( jointTransform.getTranslation() );
+ }
+ else
+ {
+ //Most likely an error in the asset.
+ llwarns<<"Tried to apply joint position from .dae, but it did not exist in the avatar rig." << llendl;
+ }
+ }
+ }
+ }
+ } //missingSkeletonOrScene
+
+ //We need to construct the alternate bind matrix (which contains the new joint positions)
+ //in the same order as they were stored in the joint buffer. The joints associated
+ //with the skeleton are not stored in the same order as they are in the exported joint buffer.
+ //This remaps the skeletal joints to be in the same order as the joints stored in the model.
+ std::vector<std::string> :: const_iterator jointIt = model->mSkinInfo.mJointNames.begin();
+ const int jointCnt = model->mSkinInfo.mJointNames.size();
+ for ( int i=0; i<jointCnt; ++i, ++jointIt )
+ {
+ std::string lookingForJoint = (*jointIt).c_str();
+ //Look for the joint xform that we extracted from the skeleton, using the jointIt as the key
+ //and store it in the alternate bind matrix
+ if ( mJointList.find( lookingForJoint ) != mJointList.end() )
+ {
+ LLMatrix4 jointTransform = mJointList[lookingForJoint];
+ LLMatrix4 newInverse = model->mSkinInfo.mInvBindMatrix[i];
+ newInverse.setTranslation( mJointList[lookingForJoint].getTranslation() );
+ model->mSkinInfo.mAlternateBindMatrix.push_back( newInverse );
+ }
+ else
+ {
+ llwarns<<"Possibly misnamed/missing joint [" <<lookingForJoint.c_str()<<" ] "<<llendl;
+ }
+ }
+
+ //grab raw position array
+
+ domVertices* verts = mesh->getVertices();
+ if (verts)
+ {
+ domInputLocal_Array& inputs = verts->getInput_array();
+ for (size_t i = 0; i < inputs.getCount() && model->mPosition.empty(); ++i)
+ {
+ if (strcmp(inputs[i]->getSemantic(), COMMON_PROFILE_INPUT_POSITION) == 0)
+ {
+ domSource* pos_source = daeSafeCast<domSource>(inputs[i]->getSource().getElement());
+ if (pos_source)
+ {
+ domFloat_array* pos_array = pos_source->getFloat_array();
+ if (pos_array)
+ {
+ domListOfFloats& pos = pos_array->getValue();
+
+ for (size_t j = 0; j < pos.getCount(); j += 3)
+ {
+ if (pos.getCount() <= j+2)
+ {
+ llerrs << "Invalid position array size." << llendl;
+ }
+
+ LLVector3 v(pos[j], pos[j+1], pos[j+2]);
+
+ //transform from COLLADA space to volume space
+ v = v * inverse_normalized_transformation;
+
+ model->mPosition.push_back(v);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ //grab skin weights array
+ domSkin::domVertex_weights* weights = skin->getVertex_weights();
+ if (weights)
+ {
+ domInputLocalOffset_Array& inputs = weights->getInput_array();
+ domFloat_array* vertex_weights = NULL;
+ for (size_t i = 0; i < inputs.getCount(); ++i)
+ {
+ if (strcmp(inputs[i]->getSemantic(), COMMON_PROFILE_INPUT_WEIGHT) == 0)
+ {
+ domSource* weight_source = daeSafeCast<domSource>(inputs[i]->getSource().getElement());
+ if (weight_source)
+ {
+ vertex_weights = weight_source->getFloat_array();
+ }
+ }
+ }
+
+ if (vertex_weights)
+ {
+ domListOfFloats& w = vertex_weights->getValue();
+ domListOfUInts& vcount = weights->getVcount()->getValue();
+ domListOfInts& v = weights->getV()->getValue();
+
+ U32 c_idx = 0;
+ for (size_t vc_idx = 0; vc_idx < vcount.getCount(); ++vc_idx)
+ { //for each vertex
+ daeUInt count = vcount[vc_idx];
+
+ //create list of weights that influence this vertex
+ LLModel::weight_list weight_list;
+
+ for (daeUInt i = 0; i < count; ++i)
+ { //for each weight
+ daeInt joint_idx = v[c_idx++];
+ daeInt weight_idx = v[c_idx++];
+
+ if (joint_idx == -1)
+ {
+ //ignore bindings to bind_shape_matrix
+ continue;
+ }
+
+ F32 weight_value = w[weight_idx];
+
+ weight_list.push_back(LLModel::JointWeight(joint_idx, weight_value));
+ }
+
+ //sort by joint weight
+ std::sort(weight_list.begin(), weight_list.end(), LLModel::CompareWeightGreater());
+
+ std::vector<LLModel::JointWeight> wght;
+
+ F32 total = 0.f;
+
+ for (U32 i = 0; i < llmin((U32) 4, (U32) weight_list.size()); ++i)
+ { //take up to 4 most significant weights
+ if (weight_list[i].mWeight > 0.f)
+ {
+ wght.push_back( weight_list[i] );
+ total += weight_list[i].mWeight;
+ }
+ }
+
+ F32 scale = 1.f/total;
+ if (scale != 1.f)
+ { //normalize weights
+ for (U32 i = 0; i < wght.size(); ++i)
+ {
+ wght[i].mWeight *= scale;
+ }
+ }
+
+ model->mSkinWeights[model->mPosition[vc_idx]] = wght;
+ }
+ }
+
+ }
+
+ //add instance to scene for this model
+
+ LLMatrix4 transformation;
+ transformation.initScale(mesh_scale_vector);
+ transformation.setTranslation(mesh_translation_vector);
+ transformation *= mTransform;
+
+ std::map<std::string, LLImportMaterial> materials;
+ for (U32 i = 0; i < model->mMaterialList.size(); ++i)
+ {
+ materials[model->mMaterialList[i]] = LLImportMaterial();
+ }
+ mScene[transformation].push_back(LLModelInstance(model, model->mLabel, transformation, materials));
+ stretch_extents(model, transformation, mExtents[0], mExtents[1], mFirstTransform);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// buildJointToNodeMappingFromScene()
+//-----------------------------------------------------------------------------
+void LLDAELoader::buildJointToNodeMappingFromScene( daeElement* pRoot )
+{
+ daeElement* pScene = pRoot->getDescendant("visual_scene");
+ if ( pScene )
+ {
+ daeTArray< daeSmartRef<daeElement> > children = pScene->getChildren();
+ S32 childCount = children.getCount();
+ for (S32 i = 0; i < childCount; ++i)
+ {
+ domNode* pNode = daeSafeCast<domNode>(children[i]);
+ processJointToNodeMapping( pNode );
+ }
+ }
+}
+//-----------------------------------------------------------------------------
+// processJointToNodeMapping()
+//-----------------------------------------------------------------------------
+void LLDAELoader::processJointToNodeMapping( domNode* pNode )
+{
+ if ( isNodeAJoint( pNode ) )
+ {
+ //1.Store the parent
+ std::string nodeName = pNode->getName();
+ if ( !nodeName.empty() )
+ {
+ mJointsFromNode.push_front( pNode->getName() );
+ }
+ //2. Handle the kiddo's
+ processChildJoints( pNode );
+ }
+ else
+ {
+ //Determine whether there are any children of this non-joint node.
+ //This occurs when an armature is exported and ends up being what essentially
+ //amounts to the root of the visual_scene
+ if ( pNode )
+ {
+ processChildJoints( pNode );
+ }
+ else
+ {
+ llinfos<<"Node is NULL"<<llendl;
+ }
+
+ }
+}
+//-----------------------------------------------------------------------------
+// processChildJoints()
+//-----------------------------------------------------------------------------
+void LLDAELoader::processChildJoints( domNode* pParentNode )
+{
+ daeTArray< daeSmartRef<daeElement> > childOfChild = pParentNode->getChildren();
+ S32 childOfChildCount = childOfChild.getCount();
+ for (S32 i = 0; i < childOfChildCount; ++i)
+ {
+ domNode* pChildNode = daeSafeCast<domNode>( childOfChild[i] );
+ if ( pChildNode )
+ {
+ processJointToNodeMapping( pChildNode );
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+// isNodeAJoint()
+//-----------------------------------------------------------------------------
+bool LLDAELoader::isNodeAJoint( domNode* pNode )
+{
+ if ( !pNode )
+ {
+ llinfos<<"Created node is NULL"<<llendl;
+ return false;
+ }
+
+ return LLModelLoader::isNodeAJoint(pNode->getName());
+}
+//-----------------------------------------------------------------------------
+// verifyCount
+//-----------------------------------------------------------------------------
+bool LLDAELoader::verifyCount( int expected, int result )
+{
+ if ( expected != result )
+ {
+ llinfos<< "Error: (expected/got) "<<expected<<"/"<<result<<" verts"<<llendl;
+ return false;
+ }
+ return true;
+}
+//-----------------------------------------------------------------------------
+// verifyController
+//-----------------------------------------------------------------------------
+bool LLDAELoader::verifyController( domController* pController )
+{
+
+ bool result = true;
+
+ domSkin* pSkin = pController->getSkin();
+
+ if ( pSkin )
+ {
+ xsAnyURI & uri = pSkin->getSource();
+ domElement* pElement = uri.getElement();
+
+ if ( !pElement )
+ {
+ llinfos<<"Can't resolve skin source"<<llendl;
+ return false;
+ }
+
+ daeString type_str = pElement->getTypeName();
+ if ( stricmp(type_str, "geometry") == 0 )
+ {
+ //Skin is referenced directly by geometry; get the vertex count from the skin
+ domSkin::domVertex_weights* pVertexWeights = pSkin->getVertex_weights();
+ U32 vertexWeightsCount = pVertexWeights->getCount();
+ domGeometry* pGeometry = (domGeometry*) (domElement*) uri.getElement();
+ domMesh* pMesh = pGeometry->getMesh();
+
+ if ( pMesh )
+ {
+ //Get vertex count from geometry
+ domVertices* pVertices = pMesh->getVertices();
+ if ( !pVertices )
+ {
+ llinfos<<"No vertices!"<<llendl;
+ return false;
+ }
+
+ if ( pVertices )
+ {
+ xsAnyURI src = pVertices->getInput_array()[0]->getSource();
+ domSource* pSource = (domSource*) (domElement*) src.getElement();
+ U32 verticesCount = pSource->getTechnique_common()->getAccessor()->getCount();
+ result = verifyCount( verticesCount, vertexWeightsCount );
+ if ( !result )
+ {
+ return result;
+ }
+ }
+ }
+
+ U32 vcountCount = (U32) pVertexWeights->getVcount()->getValue().getCount();
+ result = verifyCount( vcountCount, vertexWeightsCount );
+ if ( !result )
+ {
+ return result;
+ }
+
+ domInputLocalOffset_Array& inputs = pVertexWeights->getInput_array();
+ U32 sum = 0;
+ for (size_t i=0; i<vcountCount; i++)
+ {
+ sum += pVertexWeights->getVcount()->getValue()[i];
+ }
+ result = verifyCount( sum * inputs.getCount(), (domInt) pVertexWeights->getV()->getValue().getCount() );
+ }
+ }
+
+ return result;
+}
+
+//-----------------------------------------------------------------------------
+// extractTranslation()
+//-----------------------------------------------------------------------------
+void LLDAELoader::extractTranslation( domTranslate* pTranslate, LLMatrix4& transform )
+{
+ domFloat3 jointTrans = pTranslate->getValue();
+ LLVector3 singleJointTranslation( jointTrans[0], jointTrans[1], jointTrans[2] );
+ transform.setTranslation( singleJointTranslation );
+}
+//-----------------------------------------------------------------------------
+// extractTranslationViaElement()
+//-----------------------------------------------------------------------------
+void LLDAELoader::extractTranslationViaElement( daeElement* pTranslateElement, LLMatrix4& transform )
+{
+ if ( pTranslateElement )
+ {
+ domTranslate* pTranslateChild = dynamic_cast<domTranslate*>( pTranslateElement );
+ domFloat3 translateChild = pTranslateChild->getValue();
+ LLVector3 singleJointTranslation( translateChild[0], translateChild[1], translateChild[2] );
+ transform.setTranslation( singleJointTranslation );
+ }
+}
+//-----------------------------------------------------------------------------
+// extractTranslationViaSID()
+//-----------------------------------------------------------------------------
+void LLDAELoader::extractTranslationViaSID( daeElement* pElement, LLMatrix4& transform )
+{
+ if ( pElement )
+ {
+ daeSIDResolver resolver( pElement, "./transform" );
+ domMatrix* pMatrix = daeSafeCast<domMatrix>( resolver.getElement() );
+ //We are only extracting out the translational component atm
+ LLMatrix4 workingTransform;
+ if ( pMatrix )
+ {
+ domFloat4x4 domArray = pMatrix->getValue();
+ for ( int i = 0; i < 4; i++ )
+ {
+ for( int j = 0; j < 4; j++ )
+ {
+ workingTransform.mMatrix[i][j] = domArray[i + j*4];
+ }
+ }
+ LLVector3 trans = workingTransform.getTranslation();
+ transform.setTranslation( trans );
+ }
+ }
+ else
+ {
+ llwarns<<"Element is nonexistent - empty/unsupported node."<<llendl;
+ }
+}
+//-----------------------------------------------------------------------------
+// processJointNode()
+//-----------------------------------------------------------------------------
+void LLDAELoader::processJointNode( domNode* pNode, JointTransformMap& jointTransforms )
+{
+ if (pNode->getName() == NULL)
+ {
+ llwarns << "nameless node, can't process" << llendl;
+ return;
+ }
+
+ //llwarns<<"ProcessJointNode# Node:" <<pNode->getName()<<llendl;
+
+ //1. handle the incoming node - extract out translation via SID or element
+
+ LLMatrix4 workingTransform;
+
+ //Pull out the translate id and store it in the jointTranslations map
+ daeSIDResolver jointResolverA( pNode, "./translate" );
+ domTranslate* pTranslateA = daeSafeCast<domTranslate>( jointResolverA.getElement() );
+ daeSIDResolver jointResolverB( pNode, "./location" );
+ domTranslate* pTranslateB = daeSafeCast<domTranslate>( jointResolverB.getElement() );
+
+ //Translation via SID was successful
+ if ( pTranslateA )
+ {
+ extractTranslation( pTranslateA, workingTransform );
+ }
+ else
+ if ( pTranslateB )
+ {
+ extractTranslation( pTranslateB, workingTransform );
+ }
+ else
+ {
+ //Translation via child from element
+ daeElement* pTranslateElement = getChildFromElement( pNode, "translate" );
+ if ( !pTranslateElement || pTranslateElement->typeID() != domTranslate::ID() )
+ {
+ //llwarns<< "The found element is not a translate node" <<llendl;
+ daeSIDResolver jointResolver( pNode, "./matrix" );
+ domMatrix* pMatrix = daeSafeCast<domMatrix>( jointResolver.getElement() );
+ if ( pMatrix )
+ {
+ //llinfos<<"A matrix SID was however found!"<<llendl;
+ domFloat4x4 domArray = pMatrix->getValue();
+ for ( int i = 0; i < 4; i++ )
+ {
+ for( int j = 0; j < 4; j++ )
+ {
+ workingTransform.mMatrix[i][j] = domArray[i + j*4];
+ }
+ }
+ }
+ else
+ {
+ llwarns<< "The found element is not translate or matrix node - most likely a corrupt export!" <<llendl;
+ }
+ }
+ else
+ {
+ extractTranslationViaElement( pTranslateElement, workingTransform );
+ }
+ }
+
+ //Store the working transform relative to the node's name.
+ jointTransforms[ pNode->getName() ] = workingTransform;
+
+ //2. handle the nodes children
+
+ //Gather and handle the incoming nodes children
+ daeTArray< daeSmartRef<daeElement> > childOfChild = pNode->getChildren();
+ S32 childOfChildCount = childOfChild.getCount();
+
+ for (S32 i = 0; i < childOfChildCount; ++i)
+ {
+ domNode* pChildNode = daeSafeCast<domNode>( childOfChild[i] );
+ if ( pChildNode )
+ {
+ processJointNode( pChildNode, jointTransforms );
+ }
+ }
+}
+//-----------------------------------------------------------------------------
+// getChildFromElement()
+//-----------------------------------------------------------------------------
+daeElement* LLDAELoader::getChildFromElement( daeElement* pElement, std::string const & name )
+{
+ daeElement* pChildOfElement = pElement->getChild( name.c_str() );
+ if ( pChildOfElement )
+ {
+ return pChildOfElement;
+ }
+ llwarns<< "Could not find a child [" << name << "] for the element: \"" << pElement->getAttribute("id") << "\"" << llendl;
+ return NULL;
+}
+
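+// Recursively walk a visual scene element, accumulating translate/rotate/scale/matrix
+// transforms into mTransform and instancing referenced geometry (and nested nodes)
+// into mScene; badElement is set when geometry cannot be resolved or a negative
+// scale is encountered.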
+void LLDAELoader::processElement( daeElement* element, bool& badElement, DAE* dae )
+{
+ LLMatrix4 saved_transform;
+ bool pushed_mat = false;
+
+ domNode* node = daeSafeCast<domNode>(element);
+ if (node)
+ {
+ pushed_mat = true;
+ saved_transform = mTransform;
+ }
+
+ domTranslate* translate = daeSafeCast<domTranslate>(element);
+ if (translate)
+ {
+ domFloat3 dom_value = translate->getValue();
+
+ LLMatrix4 translation;
+ translation.setTranslation(LLVector3(dom_value[0], dom_value[1], dom_value[2]));
+
+ translation *= mTransform;
+ mTransform = translation;
+ mTransform.condition();
+ }
+
+ domRotate* rotate = daeSafeCast<domRotate>(element);
+ if (rotate)
+ {
+ domFloat4 dom_value = rotate->getValue();
+
+ LLMatrix4 rotation;
+ rotation.initRotTrans(dom_value[3] * DEG_TO_RAD, LLVector3(dom_value[0], dom_value[1], dom_value[2]), LLVector3(0, 0, 0));
+
+ rotation *= mTransform;
+ mTransform = rotation;
+ mTransform.condition();
+ }
+
+ domScale* scale = daeSafeCast<domScale>(element);
+ if (scale)
+ {
+ domFloat3 dom_value = scale->getValue();
+
+
+ LLVector3 scale_vector = LLVector3(dom_value[0], dom_value[1], dom_value[2]);
+ scale_vector.abs(); // Set all values positive, since we don't currently support mirrored meshes
+ LLMatrix4 scaling;
+ scaling.initScale(scale_vector);
+
+ scaling *= mTransform;
+ mTransform = scaling;
+ mTransform.condition();
+ }
+
+ domMatrix* matrix = daeSafeCast<domMatrix>(element);
+ if (matrix)
+ {
+ domFloat4x4 dom_value = matrix->getValue();
+
+ LLMatrix4 matrix_transform;
+
+ for (int i = 0; i < 4; i++)
+ {
+ for(int j = 0; j < 4; j++)
+ {
+ matrix_transform.mMatrix[i][j] = dom_value[i + j*4];
+ }
+ }
+
+ mTransform *= matrix_transform;
+ mTransform.condition();
+ }
+
+ domInstance_geometry* instance_geo = daeSafeCast<domInstance_geometry>(element);
+ if (instance_geo)
+ {
+ domGeometry* geo = daeSafeCast<domGeometry>(instance_geo->getUrl().getElement());
+ if (geo)
+ {
+ domMesh* mesh = daeSafeCast<domMesh>(geo->getDescendant(daeElement::matchType(domMesh::ID())));
+ if (mesh)
+ {
+
+ std::vector< LLPointer< LLModel > >::iterator i = mModelsMap[mesh].begin();
+ while (i != mModelsMap[mesh].end())
+ {
+ LLModel* model = *i;
+
+ LLMatrix4 transformation = mTransform;
+
+ if (mTransform.determinant() < 0)
+ { //negative scales are not supported
+ llinfos << "Negative scale detected, unsupported transform. domInstance_geometry: " << getElementLabel(instance_geo) << llendl;
+ badElement = true;
+ }
+
+ LLModelLoader::material_map materials = getMaterials(model, instance_geo, dae);
+
+ // adjust the transformation to compensate for mesh normalization
+ LLVector3 mesh_scale_vector;
+ LLVector3 mesh_translation_vector;
+ model->getNormalizedScaleTranslation(mesh_scale_vector, mesh_translation_vector);
+
+ LLMatrix4 mesh_translation;
+ mesh_translation.setTranslation(mesh_translation_vector);
+ mesh_translation *= transformation;
+ transformation = mesh_translation;
+
+ LLMatrix4 mesh_scale;
+ mesh_scale.initScale(mesh_scale_vector);
+ mesh_scale *= transformation;
+ transformation = mesh_scale;
+
+ if (transformation.determinant() < 0)
+ { //negative scales are not supported
+ llinfos << "Negative scale detected, unsupported post-normalization transform. domInstance_geometry: " << getElementLabel(instance_geo) << llendl;
+ badElement = true;
+ }
+
+ std::string label = getElementLabel(instance_geo);
+
+ llassert(!label.empty());
+
+ if (model->mSubmodelID)
+ {
+ // CHECK FOR _LODX and _PHYS here to ensure we bolt the submodel 'salt' at the right loc
+ //
+ if ((label.find("_LOD") != std::string::npos) || (label.find("_PHYS") != std::string::npos))
+ {
+ std::string labelStart;
+ std::string markup;
+ size_t underscore_offset = label.rfind('_');
+ if (underscore_offset != std::string::npos)
+ {
+ markup = label.substr(underscore_offset + 1, 4);
+ label.erase(label.begin() + underscore_offset, label.end());
+ label +=(char)((int)'a' + model->mSubmodelID);
+ label += "_";
+ label += markup;
+ }
+ else
+ {
+ label +=(char)((int)'a' + model->mSubmodelID);
+ }
+ }
+ else
+ {
+ label += (char)((int)'a' + model->mSubmodelID);
+ }
+ }
+
+ model->mLabel = label;
+
+ mScene[transformation].push_back(LLModelInstance(model, label, transformation, materials));
+ stretch_extents(model, transformation, mExtents[0], mExtents[1], mFirstTransform);
+ i++;
+ }
+ }
+ }
+ else
+ {
+ llinfos<<"Unable to resolve geometry URL."<<llendl;
+ badElement = true;
+ }
+
+ }
+
+ domInstance_node* instance_node = daeSafeCast<domInstance_node>(element);
+ if (instance_node)
+ {
+ daeElement* instance = instance_node->getUrl().getElement();
+ if (instance)
+ {
+ processElement(instance,badElement, dae);
+ }
+ }
+
+ //process children
+ daeTArray< daeSmartRef<daeElement> > children = element->getChildren();
+ int childCount = children.getCount();
+ for (S32 i = 0; i < childCount; i++)
+ {
+ processElement(children[i],badElement, dae);
+ }
+
+ if (pushed_mat)
+ { //this element was a node, restore transform before processing siblings
+ mTransform = saved_transform;
+ }
+}
+
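+// Build the material-symbol to LLImportMaterial map for a geometry instance by
+// resolving each <instance_material> binding through its material and effect to a
+// COMMON profile.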
+std::map<std::string, LLImportMaterial> LLDAELoader::getMaterials(LLModel* model, domInstance_geometry* instance_geo, DAE* dae)
+{
+ std::map<std::string, LLImportMaterial> materials;
+ for (int i = 0; i < model->mMaterialList.size(); i++)
+ {
+ LLImportMaterial import_material;
+
+ domInstance_material* instance_mat = NULL;
+
+ domBind_material::domTechnique_common* technique =
+ daeSafeCast<domBind_material::domTechnique_common>(instance_geo->getDescendant(daeElement::matchType(domBind_material::domTechnique_common::ID())));
+
+ if (technique)
+ {
+ daeTArray< daeSmartRef<domInstance_material> > inst_materials = technique->getChildrenByType<domInstance_material>();
+ for (int j = 0; j < inst_materials.getCount(); j++)
+ {
+ std::string symbol(inst_materials[j]->getSymbol());
+
+ if (symbol == model->mMaterialList[i]) // found the binding
+ {
+ instance_mat = inst_materials[j];
+ break;
+ }
+ }
+ }
+
+ if (instance_mat)
+ {
+ domMaterial* material = daeSafeCast<domMaterial>(instance_mat->getTarget().getElement());
+ if (material)
+ {
+ domInstance_effect* instance_effect =
+ daeSafeCast<domInstance_effect>(material->getDescendant(daeElement::matchType(domInstance_effect::ID())));
+ if (instance_effect)
+ {
+ domEffect* effect = daeSafeCast<domEffect>(instance_effect->getUrl().getElement());
+ if (effect)
+ {
+ domProfile_COMMON* profile =
+ daeSafeCast<domProfile_COMMON>(effect->getDescendant(daeElement::matchType(domProfile_COMMON::ID())));
+ if (profile)
+ {
+ import_material = profileToMaterial(profile, dae);
+ }
+ }
+ }
+ }
+ }
+
+ import_material.mBinding = model->mMaterialList[i];
+ materials[model->mMaterialList[i]] = import_material;
+ }
+
+ return materials;
+}
+
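+// Convert a COMMON profile into an LLImportMaterial: pull the diffuse texture or
+// color, and mark the material fullbright when its average emission exceeds 0.25.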
+LLImportMaterial LLDAELoader::profileToMaterial(domProfile_COMMON* material, DAE* dae)
+{
+ LLImportMaterial mat;
+ mat.mFullbright = FALSE;
+
+ daeElement* diffuse = material->getDescendant("diffuse");
+ if (diffuse)
+ {
+ domCommon_color_or_texture_type_complexType::domTexture* texture =
+ daeSafeCast<domCommon_color_or_texture_type_complexType::domTexture>(diffuse->getDescendant("texture"));
+ if (texture)
+ {
+ domCommon_newparam_type_Array newparams = material->getNewparam_array();
+ if (newparams.getCount())
+ {
+
+ for (S32 i = 0; i < newparams.getCount(); i++)
+ {
+ domFx_surface_common* surface = newparams[i]->getSurface();
+ if (surface)
+ {
+ domFx_surface_init_common* init = surface->getFx_surface_init_common();
+ if (init)
+ {
+ domFx_surface_init_from_common_Array init_from = init->getInit_from_array();
+
+ if (init_from.getCount() > i)
+ {
+ domImage* image = daeSafeCast<domImage>(init_from[i]->getValue().getElement());
+ if (image)
+ {
+ // we only support init_from now - embedded data will come later
+ domImage::domInit_from* init = image->getInit_from();
+ if (init)
+ {
+ mat.mDiffuseMapFilename = cdom::uriToNativePath(init->getValue().str());
+ mat.mDiffuseMapLabel = getElementLabel(material);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ else if (texture->getTexture())
+ {
+ domImage* image = NULL;
+ dae->getDatabase()->getElement((daeElement**) &image, 0, texture->getTexture(), COLLADA_TYPE_IMAGE);
+ if (image)
+ {
+ // we only support init_from now - embedded data will come later
+ domImage::domInit_from* init = image->getInit_from();
+ if (init)
+ {
+ std::string image_path_value = cdom::uriToNativePath(init->getValue().str());
+
+#if LL_WINDOWS
+ // Work-around DOM tendency to resort to UNC names which are only confusing for downstream...
+ //
+ std::string::iterator i = image_path_value.begin();
+ while (i != image_path_value.end() && *i == '\\')
+ i++;
+ mat.mDiffuseMapFilename.assign(i, image_path_value.end());
+#else
+ mat.mDiffuseMapFilename = image_path_value;
+#endif
+ mat.mDiffuseMapLabel = getElementLabel(material);
+ }
+ }
+ }
+ }
+
+ domCommon_color_or_texture_type_complexType::domColor* color =
+ daeSafeCast<domCommon_color_or_texture_type_complexType::domColor>(diffuse->getDescendant("color"));
+ if (color)
+ {
+ domFx_color_common domfx_color = color->getValue();
+ LLColor4 value = LLColor4(domfx_color[0], domfx_color[1], domfx_color[2], domfx_color[3]);
+ mat.mDiffuseColor = value;
+ }
+ }
+
+ daeElement* emission = material->getDescendant("emission");
+ if (emission)
+ {
+ LLColor4 emission_color = getDaeColor(emission);
+ if (((emission_color[0] + emission_color[1] + emission_color[2]) / 3.0) > 0.25)
+ {
+ mat.mFullbright = TRUE;
+ }
+ }
+
+ return mat;
+}
+
+// try to get a decent label for this element
+std::string LLDAELoader::getElementLabel(daeElement *element)
+{
+ // if we have a name attribute, use it
+ std::string name = element->getAttribute("name");
+ if (name.length())
+ {
+ return name;
+ }
+
+ // if we have an ID attribute, use it
+ if (element->getID())
+ {
+ return std::string(element->getID());
+ }
+
+ // if we have a parent, use it
+ daeElement* parent = element->getParent();
+ if (parent)
+ {
+ // if parent has a name, use it
+ std::string name = parent->getAttribute("name");
+ if (name.length())
+ {
+ return name;
+ }
+
+ // if parent has an ID, use it
+ if (parent->getID())
+ {
+ return std::string(parent->getID());
+ }
+ }
+
+ // try to use our type
+ daeString element_name = element->getElementName();
+ if (element_name)
+ {
+ return std::string(element_name);
+ }
+
+ // if all else fails, use "object"
+ return std::string("object");
+}
+
+LLColor4 LLDAELoader::getDaeColor(daeElement* element)
+{
+ LLColor4 value;
+ domCommon_color_or_texture_type_complexType::domColor* color =
+ daeSafeCast<domCommon_color_or_texture_type_complexType::domColor>(element->getDescendant("color"));
+ if (color)
+ {
+ domFx_color_common domfx_color = color->getValue();
+ value = LLColor4(domfx_color[0], domfx_color[1], domfx_color[2], domfx_color[3]);
+ }
+
+ return value;
+}
+
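+// Load every <triangles>, <polylist> and <polygons> primitive of a mesh into the
+// model's volume faces; clears the model's faces and materials and returns false
+// on the first error.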
+bool LLDAELoader::addVolumeFacesFromDomMesh(LLModel* pModel,domMesh* mesh)
+{
+ LLModel::EModelStatus status = LLModel::NO_ERRORS;
+ domTriangles_Array& tris = mesh->getTriangles_array();
+
+ for (U32 i = 0; i < tris.getCount(); ++i)
+ {
+ domTrianglesRef& tri = tris.get(i);
+
+ status = load_face_from_dom_triangles(pModel->getVolumeFaces(), pModel->getMaterialList(), tri);
+
+ if(status != LLModel::NO_ERRORS)
+ {
+ pModel->ClearFacesAndMaterials();
+ return false;
+ }
+ }
+
+ domPolylist_Array& polys = mesh->getPolylist_array();
+ for (U32 i = 0; i < polys.getCount(); ++i)
+ {
+ domPolylistRef& poly = polys.get(i);
+ status = load_face_from_dom_polylist(pModel->getVolumeFaces(), pModel->getMaterialList(), poly);
+
+ if(status != LLModel::NO_ERRORS)
+ {
+ pModel->ClearFacesAndMaterials();
+ return false;
+ }
+ }
+
+ domPolygons_Array& polygons = mesh->getPolygons_array();
+
+ for (U32 i = 0; i < polygons.getCount(); ++i)
+ {
+ domPolygonsRef& poly = polygons.get(i);
+ status = load_face_from_dom_polygons(pModel->getVolumeFaces(), pModel->getMaterialList(), poly);
+
+ if(status != LLModel::NO_ERRORS)
+ {
+ pModel->ClearFacesAndMaterials();
+ return false;
+ }
+ }
+
+ return (status == LLModel::NO_ERRORS);
+}
+
+//static
+LLModel* LLDAELoader::loadModelFromDomMesh(domMesh *mesh)
+{
+ LLVolumeParams volume_params;
+ volume_params.setType(LL_PCODE_PROFILE_SQUARE, LL_PCODE_PATH_LINE);
+ LLModel* ret = new LLModel(volume_params, 0.f);
+ createVolumeFacesFromDomMesh(ret, mesh);
+ if (ret->mLabel.empty())
+ {
+ ret->mLabel = getElementLabel(mesh);
+ }
+ return ret;
+}
+
+//static - unlike loadModelFromDomMesh, this version supports creating multiple models
+// when material counts spill over the 8 face server-side limit
+//
+bool LLDAELoader::loadModelsFromDomMesh(domMesh* mesh, std::vector<LLModel*>& models_out)
+{
+
+ LLVolumeParams volume_params;
+ volume_params.setType(LL_PCODE_PROFILE_SQUARE, LL_PCODE_PATH_LINE);
+
+ models_out.clear();
+
+ LLModel* ret = new LLModel(volume_params, 0.f);
+
+ ret->mLabel = getElementLabel(mesh);
+
+ llassert(!ret->mLabel.empty());
+
+ // Like a monkey, ready to be shot into space
+ //
+ ret->ClearFacesAndMaterials();
+
+ // Get the whole set of volume faces
+ //
+ addVolumeFacesFromDomMesh(ret, mesh);
+
+ U32 volume_faces = ret->getNumVolumeFaces();
+
+ // Side-steps all manner of issues when splitting models
+ // and matching lower LOD materials to base models
+ //
+ ret->sortVolumeFacesByMaterialName();
+
+ bool normalized = false;
+
+ int submodelID = 0;
+ LLVolume::face_list_t remainder;
+ do
+ {
+ // Ensure we do this once with the whole gang and not per-model
+ //
+ if (!normalized && !mNoNormalize)
+ {
+ normalized = true;
+ ret->normalizeVolumeFaces();
+ }
+
+ ret->trimVolumeFacesToSize(LL_SCULPT_MESH_MAX_FACES, &remainder);
+
+ if (!mNoOptimize)
+ {
+ ret->optimizeVolumeFaces();
+ }
+
+ volume_faces = remainder.size();
+
+ models_out.push_back(ret);
+
+ // If we have left-over volume faces, create another model
+ // to absorb them...
+ //
+ if (volume_faces)
+ {
+ LLModel* next = new LLModel(volume_params, 0.f);
+ next->mSubmodelID = ++submodelID;
+ next->getVolumeFaces() = remainder;
+ next->mNormalizedScale = ret->mNormalizedScale;
+ next->mNormalizedTranslation = ret->mNormalizedTranslation;
+ next->mMaterialList.assign(ret->mMaterialList.begin() + LL_SCULPT_MESH_MAX_FACES, ret->mMaterialList.end());
+ ret = next;
+ }
+
+ remainder.clear();
+
+ } while (volume_faces);
+
+ return true;
+}
+
+bool LLDAELoader::createVolumeFacesFromDomMesh(LLModel* pModel, domMesh* mesh)
+{
+ if (mesh)
+ {
+ pModel->ClearFacesAndMaterials();
+
+ addVolumeFacesFromDomMesh(pModel, mesh);
+
+ if (pModel->getNumVolumeFaces() > 0)
+ {
+ pModel->normalizeVolumeFaces();
+ pModel->optimizeVolumeFaces();
+
+ if (pModel->getNumVolumeFaces() > 0)
+ {
+ return true;
+ }
+ }
+ }
+ else
+ {
+ llwarns << "no mesh found" << llendl;
+ }
+
+ return false;
+}