/**
 * @file lldaeloader.cpp
 * @brief LLDAELoader class implementation
 *
 * $LicenseInfo:firstyear=2013&license=viewerlgpl$
 * Second Life Viewer Source Code
 * Copyright (C) 2013, Linden Research, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License only.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
 * $/LicenseInfo$
 */

#if LL_MSVC
#pragma warning (disable : 4263)
#pragma warning (disable : 4264)
#endif

#include "dae.h"
#include "dom/domAsset.h"
#include "dom/domBind_material.h"
#include "dom/domCOLLADA.h"
#include "dom/domConstants.h"
#include "dom/domController.h"
#include "dom/domEffect.h"
#include "dom/domGeometry.h"
#include "dom/domInstance_geometry.h"
#include "dom/domInstance_material.h"
#include "dom/domInstance_node.h"
#include "dom/domInstance_effect.h"
#include "dom/domMaterial.h"
#include "dom/domMatrix.h"
#include "dom/domNode.h"
#include "dom/domProfile_COMMON.h"
#include "dom/domRotate.h"
#include "dom/domScale.h"
#include "dom/domTranslate.h"
#include "dom/domVisual_scene.h"

#if LL_MSVC
#pragma warning (default : 4263)
#pragma warning (default : 4264)
#endif

#include "lldaeloader.h"
#include "llsdserialize.h"
#include "lljoint.h"

#include "glh/glh_linear.h"
#include "llmatrix4a.h"

// NOTE: boost headers assumed here; preprocessDAE() relies on boost string/regex utilities.
#include <boost/regex.hpp>
#include <boost/algorithm/string/replace.hpp>

std::string colladaVersion[VERSIONTYPE_COUNT+1] =
{
    "1.4.0",
    "1.4.1",
    "Unsupported"
};

static const std::string lod_suffix[LLModel::NUM_LODS] =
{
    "_LOD0",
    "_LOD1",
    "_LOD2",
    "",
    "_PHYS",
};

const U32 LIMIT_MATERIALS_OUTPUT = 12;

bool get_dom_sources(const domInputLocalOffset_Array& inputs, S32& pos_offset, S32& tc_offset, S32& norm_offset, S32 &idx_stride,
                     domSource* &pos_source, domSource* &tc_source, domSource* &norm_source)
{
    idx_stride = 0;

    for (U32 j = 0; j < inputs.getCount(); ++j)
    {
        idx_stride = llmax((S32) inputs[j]->getOffset(), idx_stride);

        if (strcmp(COMMON_PROFILE_INPUT_VERTEX, inputs[j]->getSemantic()) == 0)
        { //found vertex array
            const domURIFragmentType& uri = inputs[j]->getSource();
            daeElementRef elem = uri.getElement();
            domVertices* vertices = (domVertices*) elem.cast();
            if ( !vertices )
            {
                return false;
            }

            domInputLocal_Array& v_inp = vertices->getInput_array();

            for (U32 k = 0; k < v_inp.getCount(); ++k)
            {
                if (strcmp(COMMON_PROFILE_INPUT_POSITION, v_inp[k]->getSemantic()) == 0)
                {
                    pos_offset = inputs[j]->getOffset();

                    const domURIFragmentType& uri = v_inp[k]->getSource();
                    daeElementRef elem = uri.getElement();
                    pos_source = (domSource*) elem.cast();
                }

                if (strcmp(COMMON_PROFILE_INPUT_NORMAL, v_inp[k]->getSemantic()) == 0)
                {
                    norm_offset = inputs[j]->getOffset();

                    const domURIFragmentType& uri = v_inp[k]->getSource();
                    daeElementRef elem = uri.getElement();
                    norm_source = (domSource*) elem.cast();
                }
            }
        }

        if (strcmp(COMMON_PROFILE_INPUT_NORMAL, inputs[j]->getSemantic()) == 0)
        { //found normal array for this triangle list
            norm_offset = inputs[j]->getOffset();

            const domURIFragmentType& uri = inputs[j]->getSource();
            daeElementRef elem = uri.getElement();
            norm_source = (domSource*) elem.cast();
        }
        else if (strcmp(COMMON_PROFILE_INPUT_TEXCOORD, inputs[j]->getSemantic()) == 0)
        { //found texCoords
            tc_offset = inputs[j]->getOffset();

            const domURIFragmentType& uri = inputs[j]->getSource();
            daeElementRef elem = uri.getElement();
            tc_source = (domSource*) elem.cast();
        }
    }

    idx_stride += 1;

    return true;
}

LLModel::EModelStatus load_face_from_dom_triangles(
    std::vector<LLVolumeFace>& face_list,
    std::vector<std::string>& materials,
    domTrianglesRef& tri,
    LLSD& log_msg)
{
    LLVolumeFace face;
    std::vector<LLVolumeFace::VertexData> verts;
    std::vector<U16> indices;

    const domInputLocalOffset_Array& inputs = tri->getInput_array();

    S32 pos_offset = -1;
    S32 tc_offset = -1;
    S32 norm_offset = -1;

    domSource* pos_source = NULL;
    domSource* tc_source = NULL;
    domSource* norm_source = NULL;

    S32 idx_stride = 0;

    if ( !get_dom_sources(inputs, pos_offset, tc_offset, norm_offset, idx_stride, pos_source, tc_source, norm_source))
    {
        LLSD args;
        args["Message"] = "ParsingErrorBadElement";
        log_msg.append(args);
        return LLModel::BAD_ELEMENT;
    }

    if (!pos_source || !pos_source->getFloat_array())
    {
        LL_WARNS() << "Unable to process mesh without position data; invalid model." << LL_ENDL;
        LLSD args;
        args["Message"] = "ParsingErrorPositionInvalidModel";
        log_msg.append(args);
        return LLModel::BAD_ELEMENT;
    }

    domPRef p = tri->getP();
    domListOfUInts& idx = p->getValue();

    domListOfFloats dummy ;
    domListOfFloats& v = pos_source ? pos_source->getFloat_array()->getValue() : dummy ;
    domListOfFloats& tc = tc_source ? tc_source->getFloat_array()->getValue() : dummy ;
    domListOfFloats& n = norm_source ? norm_source->getFloat_array()->getValue() : dummy ;

    if (pos_source)
    {
        if(v.getCount() == 0)
        {
            return LLModel::BAD_ELEMENT;
        }
        // VFExtents change
        face.mExtents[0].set(v[0], v[1], v[2]);
        face.mExtents[1].set(v[0], v[1], v[2]);
    }

    LLVolumeFace::VertexMapData::PointMap point_map;

    if (idx_stride <= 0
        || (pos_source && pos_offset >= idx_stride)
        || (tc_source && tc_offset >= idx_stride)
        || (norm_source && norm_offset >= idx_stride))
    {
        // Looks like these offsets should fit inside idx_stride
        // Might be good idea to also check idx.getCount()%idx_stride != 0
        LL_WARNS() << "Invalid pos_offset " << pos_offset << ", tc_offset " << tc_offset << " or norm_offset " << norm_offset << LL_ENDL;
        return LLModel::BAD_ELEMENT;
    }

    for (U32 i = 0; i < idx.getCount(); i += idx_stride)
    {
        LLVolumeFace::VertexData cv;
        if (pos_source)
        {
            cv.setPosition(LLVector4a(v[idx[i+pos_offset]*3+0],
                                      v[idx[i+pos_offset]*3+1],
                                      v[idx[i+pos_offset]*3+2]));
        }

        if (tc_source)
        {
            cv.mTexCoord.setVec(tc[idx[i+tc_offset]*2+0],
                                tc[idx[i+tc_offset]*2+1]);
        }

        if (norm_source)
        {
            cv.setNormal(LLVector4a(n[idx[i+norm_offset]*3+0],
                                    n[idx[i+norm_offset]*3+1],
                                    n[idx[i+norm_offset]*3+2]));
        }

        bool found = false;

        LLVolumeFace::VertexMapData::PointMap::iterator point_iter;
        point_iter = point_map.find(LLVector3(cv.getPosition().getF32ptr()));

        if (point_iter != point_map.end())
        {
            for (U32 j = 0; j < point_iter->second.size(); ++j)
            {
                // We have a matching loc
                //
                if ((point_iter->second)[j] == cv)
                {
                    U16 shared_index = (point_iter->second)[j].mIndex;

                    // Don't share verts within the same tri, degenerate
                    //
                    U32 indx_size = static_cast<U32>(indices.size());
                    U32 verts_new_tri = indx_size % 3;
                    if ((verts_new_tri < 1 || indices[indx_size - 1] != shared_index)
                        && (verts_new_tri < 2 || indices[indx_size - 2] != shared_index))
                    {
                        found = true;
                        indices.push_back(shared_index);
                    }
                    break;
                }
            }
        }

        if (!found)
        {
            // VFExtents change
            update_min_max(face.mExtents[0], face.mExtents[1], cv.getPosition());
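            // New unique vertex: append it and record it in the point map so that
            // later duplicates at this position can be welded to this index.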
            verts.push_back(cv);
            if (verts.size() >= 65535)
            {
                //llerrs << "Attempted to write model exceeding 16-bit index buffer limitation." << LL_ENDL;
                return LLModel::VERTEX_NUMBER_OVERFLOW ;
            }
            U16 index = (U16) (verts.size()-1);
            indices.push_back(index);

            LLVolumeFace::VertexMapData d;
            d.setPosition(cv.getPosition());
            d.mTexCoord = cv.mTexCoord;
            d.setNormal(cv.getNormal());
            d.mIndex = index;
            if (point_iter != point_map.end())
            {
                point_iter->second.push_back(d);
            }
            else
            {
                point_map[LLVector3(d.getPosition().getF32ptr())].push_back(d);
            }
        }

        if (indices.size()%3 == 0 && verts.size() >= 65532)
        {
            std::string material;

            if (tri->getMaterial())
            {
                material = std::string(tri->getMaterial());
            }

            materials.push_back(material);
            face_list.push_back(face);
            face_list.rbegin()->fillFromLegacyData(verts, indices);
            LLVolumeFace& new_face = *face_list.rbegin();
            if (!norm_source)
            {
                //ll_aligned_free_16(new_face.mNormals);
                new_face.mNormals = NULL;
            }

            if (!tc_source)
            {
                //ll_aligned_free_16(new_face.mTexCoords);
                new_face.mTexCoords = NULL;
            }

            face = LLVolumeFace();
            // VFExtents change
            face.mExtents[0].set(v[0], v[1], v[2]);
            face.mExtents[1].set(v[0], v[1], v[2]);
            verts.clear();
            indices.clear();
            point_map.clear();
        }
    }

    if (!verts.empty())
    {
        std::string material;

        if (tri->getMaterial())
        {
            material = std::string(tri->getMaterial());
        }

        materials.push_back(material);
        face_list.push_back(face);
        face_list.rbegin()->fillFromLegacyData(verts, indices);
        LLVolumeFace& new_face = *face_list.rbegin();
        if (!norm_source)
        {
            //ll_aligned_free_16(new_face.mNormals);
            new_face.mNormals = NULL;
        }

        if (!tc_source)
        {
            //ll_aligned_free_16(new_face.mTexCoords);
            new_face.mTexCoords = NULL;
        }
    }

    return LLModel::NO_ERRORS ;
}

LLModel::EModelStatus load_face_from_dom_polylist(
    std::vector<LLVolumeFace>& face_list,
    std::vector<std::string>& materials,
    domPolylistRef& poly,
    LLSD& log_msg)
{
    domPRef p = poly->getP();
    domListOfUInts& idx = p->getValue();

    if (idx.getCount() == 0)
    {
        return LLModel::NO_ERRORS ;
    }

    const domInputLocalOffset_Array& inputs = poly->getInput_array();

    domListOfUInts& vcount = poly->getVcount()->getValue();

    S32 pos_offset = -1;
    S32 tc_offset = -1;
    S32 norm_offset = -1;

    domSource* pos_source = NULL;
    domSource* tc_source = NULL;
    domSource* norm_source = NULL;

    S32 idx_stride = 0;

    if (!get_dom_sources(inputs, pos_offset, tc_offset, norm_offset, idx_stride, pos_source, tc_source, norm_source))
    {
        LL_WARNS() << "Bad element." << LL_ENDL;
        LLSD args;
        args["Message"] = "ParsingErrorBadElement";
        log_msg.append(args);
        return LLModel::BAD_ELEMENT;
    }

    LLVolumeFace face;

    std::vector<U16> indices;
    std::vector<LLVolumeFace::VertexData> verts;

    domListOfFloats v;
    domListOfFloats tc;
    domListOfFloats n;

    if (pos_source)
    {
        v = pos_source->getFloat_array()->getValue();
        // VFExtents change
        face.mExtents[0].set(v[0], v[1], v[2]);
        face.mExtents[1].set(v[0], v[1], v[2]);
    }

    if (tc_source)
    {
        tc = tc_source->getFloat_array()->getValue();
    }

    if (norm_source)
    {
        n = norm_source->getFloat_array()->getValue();
    }

    LLVolumeFace::VertexMapData::PointMap point_map;

    U32 cur_idx = 0;
    bool log_tc_msg = true;
    for (U32 i = 0; i < vcount.getCount(); ++i)
    { //for each polygon
        U32 first_index = 0;
        U32 last_index = 0;
        for (U32 j = 0; j < vcount[i]; ++j)
        { //for each vertex
            LLVolumeFace::VertexData cv;

            if (pos_source)
            {
                cv.getPosition().set(v[idx[cur_idx+pos_offset]*3+0],
                                     v[idx[cur_idx+pos_offset]*3+1],
                                     v[idx[cur_idx+pos_offset]*3+2]);
                if (!cv.getPosition().isFinite3())
                {
                    LL_WARNS() << "Found NaN while loading position data from DAE-Model, invalid model."
<< LL_ENDL; LLSD args; args["Message"] = "PositionNaN"; log_msg.append(args); return LLModel::BAD_ELEMENT; } } if (tc_source) { U64 idx_x = idx[cur_idx + tc_offset] * 2 + 0; U64 idx_y = idx[cur_idx + tc_offset] * 2 + 1; if (idx_y < tc.getCount()) { cv.mTexCoord.setVec(tc[idx_x], tc[idx_y]); } else if (log_tc_msg) { log_tc_msg = false; LL_WARNS() << "Texture coordinates data is not complete." << LL_ENDL; LLSD args; args["Message"] = "IncompleteTC"; log_msg.append(args); } } if (norm_source) { cv.getNormal().set(n[idx[cur_idx+norm_offset]*3+0], n[idx[cur_idx+norm_offset]*3+1], n[idx[cur_idx+norm_offset]*3+2]); if (!cv.getNormal().isFinite3()) { LL_WARNS() << "Found NaN while loading normals from DAE-Model, invalid model." << LL_ENDL; LLSD args; args["Message"] = "NormalsNaN"; log_msg.append(args); return LLModel::BAD_ELEMENT; } } cur_idx += idx_stride; bool found = false; LLVolumeFace::VertexMapData::PointMap::iterator point_iter; LLVector3 pos3(cv.getPosition().getF32ptr()); point_iter = point_map.find(pos3); if (point_iter != point_map.end()) { for (U32 k = 0; k < point_iter->second.size(); ++k) { if ((point_iter->second)[k] == cv) { found = true; U32 index = (point_iter->second)[k].mIndex; if (j == 0) { first_index = index; } else if (j == 1) { last_index = index; } else { // if these are the same, we have a very, very skinny triangle (coincident verts on one or more edges) // llassert((first_index != last_index) && (last_index != index) && (first_index != index)); indices.push_back(first_index); indices.push_back(last_index); indices.push_back(index); last_index = index; } break; } } } if (!found) { // VFExtents change update_min_max(face.mExtents[0], face.mExtents[1], cv.getPosition()); verts.push_back(cv); if (verts.size() >= 65535) { //llerrs << "Attempted to write model exceeding 16-bit index buffer limitation." 
<< LL_ENDL; return LLModel::VERTEX_NUMBER_OVERFLOW ; } U16 index = (U16) (verts.size()-1); if (j == 0) { first_index = index; } else if (j == 1) { last_index = index; } else { // detect very skinny degenerate triangles with collapsed edges // llassert((first_index != last_index) && (last_index != index) && (first_index != index)); indices.push_back(first_index); indices.push_back(last_index); indices.push_back(index); last_index = index; } LLVolumeFace::VertexMapData d; d.setPosition(cv.getPosition()); d.mTexCoord = cv.mTexCoord; d.setNormal(cv.getNormal()); d.mIndex = index; if (point_iter != point_map.end()) { point_iter->second.push_back(d); } else { point_map[pos3].push_back(d); } } if (indices.size()%3 == 0 && indices.size() >= 65532) { std::string material; if (poly->getMaterial()) { material = std::string(poly->getMaterial()); } materials.push_back(material); face_list.push_back(face); face_list.rbegin()->fillFromLegacyData(verts, indices); LLVolumeFace& new_face = *face_list.rbegin(); if (!norm_source) { //ll_aligned_free_16(new_face.mNormals); new_face.mNormals = NULL; } if (!tc_source) { //ll_aligned_free_16(new_face.mTexCoords); new_face.mTexCoords = NULL; } face = LLVolumeFace(); // VFExtents change face.mExtents[0].set(v[0], v[1], v[2]); face.mExtents[1].set(v[0], v[1], v[2]); verts.clear(); indices.clear(); point_map.clear(); } } } if (!verts.empty()) { std::string material; if (poly->getMaterial()) { material = std::string(poly->getMaterial()); } materials.push_back(material); face_list.push_back(face); face_list.rbegin()->fillFromLegacyData(verts, indices); LLVolumeFace& new_face = *face_list.rbegin(); if (!norm_source) { //ll_aligned_free_16(new_face.mNormals); new_face.mNormals = NULL; } if (!tc_source) { //ll_aligned_free_16(new_face.mTexCoords); new_face.mTexCoords = NULL; } } return LLModel::NO_ERRORS ; } LLModel::EModelStatus load_face_from_dom_polygons(std::vector& face_list, std::vector& materials, domPolygonsRef& poly) { LLVolumeFace face; std::vector indices; std::vector verts; const domInputLocalOffset_Array& inputs = poly->getInput_array(); S32 v_offset = -1; S32 n_offset = -1; S32 t_offset = -1; domListOfFloats* v = NULL; domListOfFloats* n = NULL; domListOfFloats* t = NULL; U32 stride = 0; for (U32 i = 0; i < inputs.getCount(); ++i) { stride = llmax((U32) inputs[i]->getOffset()+1, stride); if (strcmp(COMMON_PROFILE_INPUT_VERTEX, inputs[i]->getSemantic()) == 0) { //found vertex array v_offset = inputs[i]->getOffset(); const domURIFragmentType& uri = inputs[i]->getSource(); daeElementRef elem = uri.getElement(); domVertices* vertices = (domVertices*) elem.cast(); if (!vertices) { return LLModel::BAD_ELEMENT; } domInputLocal_Array& v_inp = vertices->getInput_array(); for (U32 k = 0; k < v_inp.getCount(); ++k) { if (strcmp(COMMON_PROFILE_INPUT_POSITION, v_inp[k]->getSemantic()) == 0) { const domURIFragmentType& uri = v_inp[k]->getSource(); daeElementRef elem = uri.getElement(); domSource* src = (domSource*) elem.cast(); if (!src) { return LLModel::BAD_ELEMENT; } v = &(src->getFloat_array()->getValue()); } } } else if (strcmp(COMMON_PROFILE_INPUT_NORMAL, inputs[i]->getSemantic()) == 0) { n_offset = inputs[i]->getOffset(); //found normal array for this triangle list const domURIFragmentType& uri = inputs[i]->getSource(); daeElementRef elem = uri.getElement(); domSource* src = (domSource*) elem.cast(); if (!src) { return LLModel::BAD_ELEMENT; } n = &(src->getFloat_array()->getValue()); } else if (strcmp(COMMON_PROFILE_INPUT_TEXCOORD, inputs[i]->getSemantic()) == 0 && 
inputs[i]->getSet() == 0) { //found texCoords t_offset = inputs[i]->getOffset(); const domURIFragmentType& uri = inputs[i]->getSource(); daeElementRef elem = uri.getElement(); domSource* src = (domSource*) elem.cast(); if (!src) { return LLModel::BAD_ELEMENT; } t = &(src->getFloat_array()->getValue()); } } domP_Array& ps = poly->getP_array(); //make a triangle list in for (U32 i = 0; i < ps.getCount(); ++i) { //for each polygon domListOfUInts& idx = ps[i]->getValue(); for (U32 j = 0; j < idx.getCount()/stride; ++j) { //for each vertex if (j > 2) { auto size = verts.size(); LLVolumeFace::VertexData v0 = verts[size-3]; LLVolumeFace::VertexData v1 = verts[size-1]; verts.push_back(v0); verts.push_back(v1); } LLVolumeFace::VertexData vert; if (v) { U32 v_idx = idx[j*stride+v_offset]*3; v_idx = llclamp(v_idx, (U32) 0, (U32) v->getCount()); vert.getPosition().set(v->get(v_idx), v->get(v_idx+1), v->get(v_idx+2)); } //bounds check n and t lookups because some FBX to DAE converters //use negative indices and empty arrays to indicate data does not exist //for a particular channel if (n && n->getCount() > 0) { U32 n_idx = idx[j*stride+n_offset]*3; n_idx = llclamp(n_idx, (U32) 0, (U32) n->getCount()); vert.getNormal().set(n->get(n_idx), n->get(n_idx+1), n->get(n_idx+2)); } else { vert.getNormal().clear(); } if (t && t->getCount() > 0) { U32 t_idx = idx[j*stride+t_offset]*2; t_idx = llclamp(t_idx, (U32) 0, (U32) t->getCount()); vert.mTexCoord.setVec(t->get(t_idx), t->get(t_idx+1)); } else { vert.mTexCoord.clear(); } verts.push_back(vert); } } if (verts.empty()) { return LLModel::NO_ERRORS; } // VFExtents change face.mExtents[0] = verts[0].getPosition(); face.mExtents[1] = verts[0].getPosition(); //create a map of unique vertices to indices std::map vert_idx; U32 cur_idx = 0; for (U32 i = 0; i < verts.size(); ++i) { std::map::iterator iter = vert_idx.find(verts[i]); if (iter == vert_idx.end()) { vert_idx[verts[i]] = cur_idx++; } } // Viewer can only fit U16 vertices, shouldn't we do some checks here and return overflow if result has more? 
llassert(vert_idx.size() < U16_MAX); //build vertex array from map std::vector new_verts; new_verts.resize(vert_idx.size()); for (std::map::iterator iter = vert_idx.begin(); iter != vert_idx.end(); ++iter) { new_verts[iter->second] = iter->first; // VFExtents change update_min_max(face.mExtents[0], face.mExtents[1], iter->first.getPosition()); } //build index array from map indices.resize(verts.size()); for (U32 i = 0; i < verts.size(); ++i) { indices[i] = vert_idx[verts[i]]; if (i % 3 != 0) // assumes GL_TRIANGLES, compare 0-1, 1-2, 3-4, 4-5 but not 2-3 or 5-6 { // A faulty degenerate triangle detection (triangle with 0 area), // probably should be a warning and not an assert llassert(!i || (indices[i-1] != indices[i])); } } // DEBUG just build an expanded triangle list /*for (U32 i = 0; i < verts.size(); ++i) { indices.push_back((U16) i); update_min_max(face.mExtents[0], face.mExtents[1], verts[i].getPosition()); }*/ if (!new_verts.empty()) { std::string material; if (poly->getMaterial()) { material = std::string(poly->getMaterial()); } materials.push_back(material); face_list.push_back(face); face_list.rbegin()->fillFromLegacyData(new_verts, indices); LLVolumeFace& new_face = *face_list.rbegin(); if (!n) { //ll_aligned_free_16(new_face.mNormals); new_face.mNormals = NULL; } if (!t) { //ll_aligned_free_16(new_face.mTexCoords); new_face.mTexCoords = NULL; } } return LLModel::NO_ERRORS ; } //----------------------------------------------------------------------------- // LLDAELoader //----------------------------------------------------------------------------- LLDAELoader::LLDAELoader( std::string filename, S32 lod, load_callback_t load_cb, joint_lookup_func_t joint_lookup_func, texture_load_func_t texture_load_func, state_callback_t state_cb, void* opaque_userdata, JointTransformMap& jointTransformMap, JointNameSet& jointsFromNodes, std::map& jointAliasMap, U32 maxJointsPerMesh, U32 modelLimit, bool preprocess) : LLModelLoader( filename, lod, load_cb, joint_lookup_func, texture_load_func, state_cb, opaque_userdata, jointTransformMap, jointsFromNodes, jointAliasMap, maxJointsPerMesh), mGeneratedModelLimit(modelLimit), mPreprocessDAE(preprocess) { } LLDAELoader::~LLDAELoader() { } struct ModelSort { bool operator()(const LLPointer< LLModel >& lhs, const LLPointer< LLModel >& rhs) { if (lhs->mSubmodelID < rhs->mSubmodelID) { return true; } return LLStringUtil::compareInsensitive(lhs->mLabel, rhs->mLabel) < 0; } }; bool LLDAELoader::OpenFile(const std::string& filename) { setLoadState( READING_FILE ); //no suitable slm exists, load from the .dae file // Collada expects file and folder names to be escaped // Note: cdom::nativePathToUri() const char* allowed = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789" "%-._~:\"|\\/"; std::string uri_filename = LLURI::escape(filename, allowed); DAE dae; domCOLLADA* dom; if (mPreprocessDAE) { #if __FreeBSD__ || (LL_USESYSTEMLIBS && LL_LINUX) dom = static_cast(dae.openFromMemory(uri_filename, preprocessDAE(filename).c_str())); #else dom = dae.openFromMemory(uri_filename, preprocessDAE(filename).c_str()); #endif } else { LL_INFOS() << "Skipping dae preprocessing" << LL_ENDL; #if __FreeBSD__ || (LL_USESYSTEMLIBS && LL_LINUX) dom = static_cast(dae.open(uri_filename)); #else dom = dae.open(uri_filename); #endif } if (!dom) { LL_INFOS() <<" Error with dae - traditionally indicates a corrupt file."<getVersion(); //0=1.4 //1=1.4.1 //2=Currently unsupported, however may work if (docVersion > 1 ) { docVersion = VERSIONTYPE_COUNT; } 
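    // docVersion now indexes colladaVersion[]: anything newer than 1.4.1 is reported
    // as "Unsupported", but parsing still continues on a best-effort basis.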
LL_INFOS()<<"Dae version "<getElementCount(NULL, COLLADA_TYPE_MESH); daeDocument* doc = dae.getDoc(uri_filename); if (!doc) { LL_WARNS() << "can't find internal doc" << LL_ENDL; LLSD args; args["Message"] = "ParsingErrorNoDoc"; mWarningsArray.append(args); return false; } daeElement* root = doc->getDomRoot(); if (!root) { LL_WARNS() << "document has no root" << LL_ENDL; LLSD args; args["Message"] = "ParsingErrorNoRoot"; mWarningsArray.append(args); return false; } //Verify some basic properties of the dae //1. Basic validity check on controller U32 controllerCount = db->getElementCount(NULL, "controller"); bool result = false; for (U32 i = 0; i < controllerCount; ++i) { domController* pController = NULL; db->getElement( (daeElement**) &pController, i , NULL, "controller" ); result = verifyController( pController ); if (!result) { LL_INFOS() << "Could not verify controller" << LL_ENDL; LLSD args; args["Message"] = "ParsingErrorBadElement"; mWarningsArray.append(args); setLoadState( ERROR_PARSING ); return true; } } //get unit scale mTransform.setIdentity(); domAsset::domUnit* unit = daeSafeCast(root->getDescendant(daeElement::matchType(domAsset::domUnit::ID()))); if (unit) { F32 meter = unit->getMeter(); mTransform.mMatrix[0][0] = meter; mTransform.mMatrix[1][1] = meter; mTransform.mMatrix[2][2] = meter; } //get up axis rotation LLMatrix4 rotation; domUpAxisType up = UPAXISTYPE_Y_UP; // default is Y_UP domAsset::domUp_axis* up_axis = daeSafeCast(root->getDescendant(daeElement::matchType(domAsset::domUp_axis::ID()))); if (up_axis) { up = up_axis->getValue(); } if (up == UPAXISTYPE_X_UP) { rotation.initRotation(0.0f, 90.0f * DEG_TO_RAD, 0.0f); } else if (up == UPAXISTYPE_Y_UP) { rotation.initRotation(90.0f * DEG_TO_RAD, 0.0f, 0.0f); } rotation *= mTransform; mTransform = rotation; mTransform.condition(); U32 submodel_limit = count > 0 ? mGeneratedModelLimit/count : 0; for (daeInt idx = 0; idx < count; ++idx) { //build map of domEntities to LLModel domMesh* mesh = NULL; db->getElement((daeElement**) &mesh, idx, NULL, COLLADA_TYPE_MESH); if (mesh) { std::vector models; loadModelsFromDomMesh(mesh, models, submodel_limit); std::vector::iterator i; i = models.begin(); while (i != models.end()) { LLModel* mdl = *i; if(mdl->getStatus() != LLModel::NO_ERRORS) { setLoadState(ERROR_MODEL + mdl->getStatus()) ; return false; //abort } if (mdl && validate_model(mdl)) { mModelList.push_back(mdl); mModelsMap[mesh].push_back(mdl); } i++; } } } std::sort(mModelList.begin(), mModelList.end(), ModelSort()); model_list::iterator model_iter = mModelList.begin(); while (model_iter != mModelList.end()) { LLModel* mdl = *model_iter; U32 material_count = static_cast(mdl->mMaterialList.size()); LL_INFOS() << "Importing " << mdl->mLabel << " model with " << material_count << " material references" << LL_ENDL; std::vector::iterator mat_iter = mdl->mMaterialList.begin(); std::vector::iterator end_iter = material_count > LIMIT_MATERIALS_OUTPUT ? 
mat_iter + LIMIT_MATERIALS_OUTPUT : mdl->mMaterialList.end(); while (mat_iter != end_iter) { LL_INFOS() << mdl->mLabel << " references " << (*mat_iter) << LL_ENDL; mat_iter++; } model_iter++; } count = db->getElementCount(NULL, COLLADA_TYPE_SKIN); for (daeInt idx = 0; idx < count; ++idx) { //add skinned meshes as instances domSkin* skin = NULL; db->getElement((daeElement**) &skin, idx, NULL, COLLADA_TYPE_SKIN); if (skin) { if (domGeometry* geom = daeSafeCast(skin->getSource().getElement())) { if (domMesh* mesh = geom->getMesh()) { dae_model_map::const_iterator it = mModelsMap.find(mesh); if (it != mModelsMap.end()) { for (const LLPointer& model : it->second) { LLDAELoader::processDomModel(model, &dae, root, mesh, skin); } } } } } } LL_INFOS()<< "Collada skins processed: " << count <getDescendant("visual_scene"); if (!scene) { LL_WARNS() << "document has no visual_scene" << LL_ENDL; LLSD args; args["Message"] = "ParsingErrorNoScene"; mWarningsArray.append(args); setLoadState( ERROR_PARSING ); return true; } setLoadState( DONE ); bool badElement = false; processElement( scene, badElement, &dae); if ( badElement ) { LL_INFOS()<<"Scene could not be parsed"<getNormalizedScaleTranslation(mesh_scale_vector, mesh_translation_vector); LLMatrix4 normalized_transformation; normalized_transformation.setTranslation(mesh_translation_vector); LLMatrix4 mesh_scale; mesh_scale.initScale(mesh_scale_vector); mesh_scale *= normalized_transformation; normalized_transformation = mesh_scale; glh::matrix4f inv_mat((F32*) normalized_transformation.mMatrix); inv_mat = inv_mat.inverse(); LLMatrix4 inverse_normalized_transformation(inv_mat.m); domSkin::domBind_shape_matrix* bind_mat = skin->getBind_shape_matrix(); if (bind_mat) { //get bind shape matrix domFloat4x4& dom_value = bind_mat->getValue(); LLMeshSkinInfo& skin_info = model->mSkinInfo; LLMatrix4 mat; for (int i = 0; i < 4; i++) { for(int j = 0; j < 4; j++) { mat.mMatrix[i][j] = dom_value[i + j*4]; } } skin_info.mBindShapeMatrix.loadu(mat); LLMatrix4a trans(normalized_transformation); matMul(trans, skin_info.mBindShapeMatrix, skin_info.mBindShapeMatrix); } //Some collada setup for accessing the skeleton U32 skeleton_count = dae->getDatabase()->getElementCount( NULL, "skeleton" ); std::vector skeletons; for (U32 i = 0; i < skeleton_count; i++) { daeElement* pElement = 0; dae->getDatabase()->getElement( &pElement, i, 0, "skeleton" ); //Try to get at the skeletal instance controller domInstance_controller::domSkeleton* pSkeleton = daeSafeCast( pElement ); daeElement* pSkeletonRootNode = NULL; if (pSkeleton) { pSkeletonRootNode = pSkeleton->getValue().getElement(); } if (pSkeleton && pSkeletonRootNode) { skeletons.push_back(pSkeleton); } } bool missingSkeletonOrScene = false; //If no skeleton, do a breadth-first search to get at specific joints if ( skeletons.size() == 0 ) { daeElement* pScene = root->getDescendant("visual_scene"); if ( !pScene ) { LL_WARNS()<<"No visual scene - unable to parse bone offsets "< > children = pScene->getChildren(); auto childCount = children.getCount(); //Process any children that are joints //Not all children are joints, some could be ambient lights, cameras, geometry etc.. 
for (size_t i = 0; i < childCount; ++i) { domNode* pNode = daeSafeCast(children[i]); if (pNode) { processJointNode( pNode, mJointList ); } } } } else { //Has one or more skeletons for (std::vector::iterator skel_it = skeletons.begin(); skel_it != skeletons.end(); ++skel_it) { domInstance_controller::domSkeleton* pSkeleton = *skel_it; //Get the root node of the skeleton daeElement* pSkeletonRootNode = pSkeleton->getValue().getElement(); if ( pSkeletonRootNode ) { //Once we have the root node - start acccessing it's joint components const int jointCnt = static_cast(mJointMap.size()); JointMap :: const_iterator jointIt = mJointMap.begin(); //Loop over all the possible joints within the .dae - using the allowed joint list in the ctor. for ( int i=0; i( resolver.getElement() ); if ( pJoint ) { // FIXME this has a lot of overlap with processJointNode(), would be nice to refactor. //Pull out the translate id and store it in the jointTranslations map daeSIDResolver jointResolverA( pJoint, "./translate" ); domTranslate* pTranslateA = daeSafeCast( jointResolverA.getElement() ); daeSIDResolver jointResolverB( pJoint, "./location" ); domTranslate* pTranslateB = daeSafeCast( jointResolverB.getElement() ); LLMatrix4 workingTransform; //Translation via SID if ( pTranslateA ) { extractTranslation( pTranslateA, workingTransform ); } else { if ( pTranslateB ) { extractTranslation( pTranslateB, workingTransform ); } else { //Translation via child from element daeElement* pTranslateElement = getChildFromElement( pJoint, "translate" ); if ( pTranslateElement && pTranslateElement->typeID() != domTranslate::ID() ) { LL_WARNS()<< "The found element is not a translate node" <getJoints(); domInputLocal_Array& joint_input = joints->getInput_array(); for (size_t i = 0; i < joint_input.getCount(); ++i) { domInputLocal* input = joint_input.get(i); xsNMTOKEN semantic = input->getSemantic(); if (strcmp(semantic, COMMON_PROFILE_INPUT_JOINT) == 0) { //found joint source, fill model->mJointMap and model->mSkinInfo.mJointNames daeElement* elem = input->getSource().getElement(); domSource* source = daeSafeCast(elem); if (source) { domName_array* names_source = source->getName_array(); if (names_source) { domListOfNames &names = names_source->getValue(); for (size_t j = 0; j < names.getCount(); ++j) { std::string name(names.get(j)); if (mJointMap.find(name) != mJointMap.end()) { name = mJointMap[name]; } model->mSkinInfo.mJointNames.push_back(name); model->mSkinInfo.mJointNums.push_back(-1); } } else { domIDREF_array* names_source = source->getIDREF_array(); if (names_source) { xsIDREFS& names = names_source->getValue(); for (size_t j = 0; j < names.getCount(); ++j) { std::string name(names.get(j).getID()); if (mJointMap.find(name) != mJointMap.end()) { name = mJointMap[name]; } model->mSkinInfo.mJointNames.push_back(name); model->mSkinInfo.mJointNums.push_back(-1); } } } } } else if (strcmp(semantic, COMMON_PROFILE_INPUT_INV_BIND_MATRIX) == 0) { //found inv_bind_matrix array, fill model->mInvBindMatrix domSource* source = daeSafeCast(input->getSource().getElement()); if (source) { domFloat_array* t = source->getFloat_array(); if (t) { domListOfFloats& transform = t->getValue(); auto count = transform.getCount()/16; for (size_t k = 0; k < count; ++k) { LLMatrix4 mat; for (int i = 0; i < 4; i++) { for(int j = 0; j < 4; j++) { mat.mMatrix[i][j] = transform[k*16 + i + j*4]; } } model->mSkinInfo.mInvBindMatrix.push_back(LLMatrix4a(mat)); } } } } } //Now that we've parsed the joint array, let's determine if we have a full rig 
//(which means we have all the joint sthat are required for an avatar versus //a skinned asset attached to a node in a file that contains an entire skeleton, //but does not use the skeleton). buildJointToNodeMappingFromScene( root ); critiqueRigForUploadApplicability( model->mSkinInfo.mJointNames ); if ( !missingSkeletonOrScene ) { // FIXME: mesh_id is used to determine which mesh gets to // set the joint offset, in the event of a conflict. Since // we don't know the mesh id yet, we can't guarantee that // joint offsets will be applied with the same priority as // in the uploaded model. If the file contains multiple // meshes with conflicting joint offsets, preview may be // incorrect. LLUUID fake_mesh_id; fake_mesh_id.generate(); //Set the joint translations on the avatar JointMap :: const_iterator masterJointIt = mJointMap.begin(); JointMap :: const_iterator masterJointItEnd = mJointMap.end(); for (;masterJointIt!=masterJointItEnd;++masterJointIt ) { std::string lookingForJoint = (*masterJointIt).first.c_str(); if ( mJointList.find( lookingForJoint ) != mJointList.end() ) { //LL_INFOS()<<"joint "<aboveJointPosThreshold(joint_pos)) { bool override_changed; // not used pJoint->addAttachmentPosOverride(joint_pos, fake_mesh_id, "", override_changed); if (model->mSkinInfo.mLockScaleIfJointPosition) { pJoint->addAttachmentScaleOverride(pJoint->getDefaultScale(), fake_mesh_id, ""); } } } else { //Most likely an error in the asset. LL_WARNS()<<"Tried to apply joint position from .dae, but it did not exist in the avatar rig." << LL_ENDL; } } } } //missingSkeletonOrScene //We need to construct the alternate bind matrix (which contains the new joint positions) //in the same order as they were stored in the joint buffer. The joints associated //with the skeleton are not stored in the same order as they are in the exported joint buffer. //This remaps the skeletal joints to be in the same order as the joints stored in the model. std::vector :: const_iterator jointIt = model->mSkinInfo.mJointNames.begin(); const int jointCnt = static_cast(model->mSkinInfo.mJointNames.size()); for ( int i=0; imSkinInfo.mInvBindMatrix.size() > i) { LLMatrix4 newInverse = LLMatrix4(model->mSkinInfo.mInvBindMatrix[i].getF32ptr()); newInverse.setTranslation( mJointList[lookingForJoint].getTranslation() ); model->mSkinInfo.mAlternateBindMatrix.push_back( LLMatrix4a(newInverse) ); } else { LL_DEBUGS("Mesh")<<"Possibly misnamed/missing joint [" <mSkinInfo.mAlternateBindMatrix.size(); if (bind_count > 0 && bind_count != jointCnt) { LL_WARNS("Mesh") << "Model " << model->mLabel << " has invalid joint bind matrix list." << LL_ENDL; } //grab raw position array domVertices* verts = mesh->getVertices(); if (verts) { domInputLocal_Array& inputs = verts->getInput_array(); for (size_t i = 0; i < inputs.getCount() && model->mPosition.empty(); ++i) { if (strcmp(inputs[i]->getSemantic(), COMMON_PROFILE_INPUT_POSITION) == 0) { domSource* pos_source = daeSafeCast(inputs[i]->getSource().getElement()); if (pos_source) { domFloat_array* pos_array = pos_source->getFloat_array(); if (pos_array) { domListOfFloats& pos = pos_array->getValue(); for (size_t j = 0; j < pos.getCount(); j += 3) { if (pos.getCount() <= j+2) { LL_ERRS() << "Invalid position array size." 
<< LL_ENDL; } LLVector3 v(pos[j], pos[j+1], pos[j+2]); //transform from COLLADA space to volume space v = v * inverse_normalized_transformation; model->mPosition.push_back(v); } } } } } } //grab skin weights array domSkin::domVertex_weights* weights = skin->getVertex_weights(); if (weights) { domInputLocalOffset_Array& inputs = weights->getInput_array(); domFloat_array* vertex_weights = NULL; for (size_t i = 0; i < inputs.getCount(); ++i) { if (strcmp(inputs[i]->getSemantic(), COMMON_PROFILE_INPUT_WEIGHT) == 0) { domSource* weight_source = daeSafeCast(inputs[i]->getSource().getElement()); if (weight_source) { vertex_weights = weight_source->getFloat_array(); } } } if (vertex_weights) { domListOfFloats& w = vertex_weights->getValue(); domListOfUInts& vcount = weights->getVcount()->getValue(); domListOfInts& v = weights->getV()->getValue(); U32 c_idx = 0; for (size_t vc_idx = 0; vc_idx < vcount.getCount(); ++vc_idx) { //for each vertex daeUInt count = vcount[vc_idx]; //create list of weights that influence this vertex LLModel::weight_list weight_list; for (daeUInt i = 0; i < count; ++i) { //for each weight daeInt joint_idx = v[c_idx++]; daeInt weight_idx = v[c_idx++]; if (joint_idx == -1) { //ignore bindings to bind_shape_matrix continue; } F32 weight_value = w[weight_idx]; weight_list.push_back(LLModel::JointWeight(joint_idx, weight_value)); } //sort by joint weight std::sort(weight_list.begin(), weight_list.end(), LLModel::CompareWeightGreater()); std::vector wght; F32 total = 0.f; for (U32 i = 0; i < llmin((U32) 4, (U32) weight_list.size()); ++i) { //take up to 4 most significant weights if (weight_list[i].mWeight > 0.f) { wght.push_back( weight_list[i] ); total += weight_list[i].mWeight; } } F32 scale = 1.f/total; if (scale != 1.f) { //normalize weights for (U32 i = 0; i < wght.size(); ++i) { wght[i].mWeight *= scale; } } model->mSkinWeights[model->mPosition[vc_idx]] = wght; } } } //add instance to scene for this model LLMatrix4 transformation; transformation.initScale(mesh_scale_vector); transformation.setTranslation(mesh_translation_vector); transformation *= mTransform; std::map materials; for (U32 i = 0; i < model->mMaterialList.size(); ++i) { materials[model->mMaterialList[i]] = LLImportMaterial(); } mScene[transformation].push_back(LLModelInstance(model, model->mLabel, transformation, materials)); stretch_extents(model, transformation); } } //----------------------------------------------------------------------------- // buildJointToNodeMappingFromScene() //----------------------------------------------------------------------------- void LLDAELoader::buildJointToNodeMappingFromScene( daeElement* pRoot ) { daeElement* pScene = pRoot->getDescendant("visual_scene"); if ( pScene ) { daeTArray< daeSmartRef > children = pScene->getChildren(); auto childCount = children.getCount(); for (size_t i = 0; i < childCount; ++i) { domNode* pNode = daeSafeCast(children[i]); processJointToNodeMapping( pNode ); } } } //----------------------------------------------------------------------------- // processJointToNodeMapping() //----------------------------------------------------------------------------- void LLDAELoader::processJointToNodeMapping( domNode* pNode ) { if ( isNodeAJoint( pNode ) ) { //1.Store the parent std::string nodeName = pNode->getName(); if ( !nodeName.empty() ) { mJointsFromNode.push_front( pNode->getName() ); } //2. Handle the kiddo's processChildJoints( pNode ); } else { //Determine if the're any children wrt to this failed node. 
//This occurs when an armature is exported and ends up being what essentially amounts to //as the root for the visual_scene if ( pNode ) { processChildJoints( pNode ); } else { LL_INFOS()<<"Node is NULL"< > childOfChild = pParentNode->getChildren(); auto childOfChildCount = childOfChild.getCount(); for (size_t i = 0; i < childOfChildCount; ++i) { domNode* pChildNode = daeSafeCast( childOfChild[i] ); if ( pChildNode ) { processJointToNodeMapping( pChildNode ); } } } //----------------------------------------------------------------------------- // isNodeAJoint() //----------------------------------------------------------------------------- bool LLDAELoader::isNodeAJoint( domNode* pNode ) { if ( !pNode || !pNode->getName() ) { LL_INFOS()<<"Created node is NULL or invalid"<getName()); } //----------------------------------------------------------------------------- // verifyCount //----------------------------------------------------------------------------- bool LLDAELoader::verifyCount( int expected, int result ) { if ( expected != result ) { LL_INFOS()<< "Error: (expected/got)"<getSkin(); if ( pSkin ) { xsAnyURI & uri = pSkin->getSource(); domElement* pElement = uri.getElement(); if ( !pElement ) { LL_INFOS()<<"Can't resolve skin source"<getTypeName(); if ( stricmp(type_str, "geometry") == 0 ) { //Skin is reference directly by geometry and get the vertex count from skin domSkin::domVertex_weights* pVertexWeights = pSkin->getVertex_weights(); U32 vertexWeightsCount = pVertexWeights->getCount(); domGeometry* pGeometry = (domGeometry*) (domElement*) uri.getElement(); domMesh* pMesh = pGeometry->getMesh(); if ( pMesh ) { //Get vertex count from geometry domVertices* pVertices = pMesh->getVertices(); if ( !pVertices ) { LL_INFOS()<<"No vertices!"<getInput_array()[0]->getSource(); domSource* pSource = (domSource*) (domElement*) src.getElement(); U32 verticesCount = pSource->getTechnique_common()->getAccessor()->getCount(); result = verifyCount( verticesCount, vertexWeightsCount ); if ( !result ) { return result; } } } U32 vcountCount = (U32) pVertexWeights->getVcount()->getValue().getCount(); result = verifyCount( vcountCount, vertexWeightsCount ); if ( !result ) { return result; } domInputLocalOffset_Array& inputs = pVertexWeights->getInput_array(); U32 sum = 0; for (size_t i=0; igetVcount()->getValue()[i]; } result = verifyCount( sum * static_cast(inputs.getCount()), (domInt) static_cast(pVertexWeights->getV()->getValue().getCount()) ); } } return result; } //----------------------------------------------------------------------------- // extractTranslation() //----------------------------------------------------------------------------- void LLDAELoader::extractTranslation( domTranslate* pTranslate, LLMatrix4& transform ) { domFloat3 jointTrans = pTranslate->getValue(); LLVector3 singleJointTranslation( jointTrans[0], jointTrans[1], jointTrans[2] ); transform.setTranslation( singleJointTranslation ); } //----------------------------------------------------------------------------- // extractTranslationViaElement() //----------------------------------------------------------------------------- void LLDAELoader::extractTranslationViaElement( daeElement* pTranslateElement, LLMatrix4& transform ) { if ( pTranslateElement ) { domTranslate* pTranslateChild = static_cast( pTranslateElement ); domFloat3 translateChild = pTranslateChild->getValue(); LLVector3 singleJointTranslation( translateChild[0], translateChild[1], translateChild[2] ); transform.setTranslation( singleJointTranslation ); } } 
//----------------------------------------------------------------------------- // extractTranslationViaSID() //----------------------------------------------------------------------------- void LLDAELoader::extractTranslationViaSID( daeElement* pElement, LLMatrix4& transform ) { if ( pElement ) { daeSIDResolver resolver( pElement, "./transform" ); domMatrix* pMatrix = daeSafeCast( resolver.getElement() ); //We are only extracting out the translational component atm LLMatrix4 workingTransform; if ( pMatrix ) { domFloat4x4 domArray = pMatrix->getValue(); for ( int i = 0; i < 4; i++ ) { for( int j = 0; j < 4; j++ ) { workingTransform.mMatrix[i][j] = domArray[i + j*4]; } } LLVector3 trans = workingTransform.getTranslation(); transform.setTranslation( trans ); } } else { LL_WARNS()<<"Element is nonexistent - empty/unsupported node."<getName() == NULL) { LL_WARNS() << "nameless node, can't process" << LL_ENDL; return; } //LL_WARNS()<<"ProcessJointNode# Node:" <getName()<(jointResolverA.getElement()); daeSIDResolver jointResolverB(pNode, "./location"); domTranslate* pTranslateB = daeSafeCast(jointResolverB.getElement()); //Translation via SID was successful if (pTranslateA) { extractTranslation(pTranslateA, workingTransform); } else if (pTranslateB) { extractTranslation(pTranslateB, workingTransform); } else { //Translation via child from element daeElement* pTranslateElement = getChildFromElement(pNode, "translate"); if (!pTranslateElement || pTranslateElement->typeID() != domTranslate::ID()) { //LL_WARNS()<< "The found element is not a translate node" <(jointResolver.getElement()); if (pMatrix) { //LL_INFOS()<<"A matrix SID was however found!"<getValue(); for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { workingTransform.mMatrix[i][j] = domArray[i + j * 4]; } } } else { LL_WARNS() << "The found element is not translate or matrix node - most likely a corrupt export!" << LL_ENDL; } } else { extractTranslationViaElement(pTranslateElement, workingTransform); } } //Store the working transform relative to the nodes name. jointTransforms[pNode->getName()] = workingTransform; } //2. 
handle the nodes children //Gather and handle the incoming nodes children daeTArray< daeSmartRef > childOfChild = pNode->getChildren(); auto childOfChildCount = childOfChild.getCount(); for (size_t i = 0; i < childOfChildCount; ++i) { domNode* pChildNode = daeSafeCast( childOfChild[i] ); if ( pChildNode ) { processJointNode( pChildNode, jointTransforms ); } } } //----------------------------------------------------------------------------- // getChildFromElement() //----------------------------------------------------------------------------- daeElement* LLDAELoader::getChildFromElement( daeElement* pElement, std::string const & name ) { daeElement* pChildOfElement = pElement->getChild( name.c_str() ); if ( pChildOfElement ) { return pChildOfElement; } LL_DEBUGS("Mesh")<< "Could not find a child [" << name << "] for the element: \"" << pElement->getAttribute("id") << "\"" << LL_ENDL; return NULL; } void LLDAELoader::processElement( daeElement* element, bool& badElement, DAE* dae) { LLMatrix4 saved_transform; bool pushed_mat = false; domNode* node = daeSafeCast(element); if (node) { pushed_mat = true; saved_transform = mTransform; } domTranslate* translate = daeSafeCast(element); if (translate) { domFloat3 dom_value = translate->getValue(); LLMatrix4 translation; translation.setTranslation(LLVector3(dom_value[0], dom_value[1], dom_value[2])); translation *= mTransform; mTransform = translation; mTransform.condition(); } domRotate* rotate = daeSafeCast(element); if (rotate) { domFloat4 dom_value = rotate->getValue(); LLMatrix4 rotation; rotation.initRotTrans(dom_value[3] * DEG_TO_RAD, LLVector3(dom_value[0], dom_value[1], dom_value[2]), LLVector3(0, 0, 0)); rotation *= mTransform; mTransform = rotation; mTransform.condition(); } domScale* scale = daeSafeCast(element); if (scale) { domFloat3 dom_value = scale->getValue(); LLVector3 scale_vector = LLVector3(dom_value[0], dom_value[1], dom_value[2]); scale_vector.abs(); // Set all values positive, since we don't currently support mirrored meshes LLMatrix4 scaling; scaling.initScale(scale_vector); scaling *= mTransform; mTransform = scaling; mTransform.condition(); } domMatrix* matrix = daeSafeCast(element); if (matrix) { domFloat4x4 dom_value = matrix->getValue(); LLMatrix4 matrix_transform; for (int i = 0; i < 4; i++) { for(int j = 0; j < 4; j++) { matrix_transform.mMatrix[i][j] = dom_value[i + j*4]; } } matrix_transform *= mTransform; mTransform = matrix_transform; mTransform.condition(); } if (domInstance_geometry* instance_geo = daeSafeCast(element)) { if (domGeometry* geo = daeSafeCast(instance_geo->getUrl().getElement())) { if (domMesh* mesh = daeSafeCast(geo->getDescendant(daeElement::matchType(domMesh::ID())))) { for (LLModel* model : mModelsMap.find(mesh)->second) { LLMatrix4 transformation = mTransform; if (mTransform.determinant() < 0) { //negative scales are not supported LL_INFOS() << "Negative scale detected, unsupported transform. 
domInstance_geometry: " << getElementLabel(instance_geo) << LL_ENDL; LLSD args; args["Message"] = "NegativeScaleTrans"; args["LABEL"] = getElementLabel(instance_geo); mWarningsArray.append(args); badElement = true; } LLModelLoader::material_map materials = getMaterials(model, instance_geo, dae); // adjust the transformation to compensate for mesh normalization LLVector3 mesh_scale_vector; LLVector3 mesh_translation_vector; model->getNormalizedScaleTranslation(mesh_scale_vector, mesh_translation_vector); LLMatrix4 mesh_translation; mesh_translation.setTranslation(mesh_translation_vector); mesh_translation *= transformation; transformation = mesh_translation; LLMatrix4 mesh_scale; mesh_scale.initScale(mesh_scale_vector); mesh_scale *= transformation; transformation = mesh_scale; if (transformation.determinant() < 0) { //negative scales are not supported LL_INFOS() << "Negative scale detected, unsupported post-normalization transform. domInstance_geometry: " << getElementLabel(instance_geo) << LL_ENDL; LLSD args; args["Message"] = "NegativeScaleNormTrans"; args["LABEL"] = getElementLabel(instance_geo); mWarningsArray.append(args); badElement = true; } std::string label; if (model->mLabel.empty()) { label = getLodlessLabel(instance_geo); llassert(!label.empty()); if (model->mSubmodelID) { label += (char)((int)'a' + model->mSubmodelID); } model->mLabel = label + lod_suffix[mLod]; } else { // Don't change model's name if possible, it will play havoc with scenes that already use said model. size_t ext_pos = getSuffixPosition(model->mLabel); if (ext_pos != -1) { label = model->mLabel.substr(0, ext_pos); } else { label = model->mLabel; } } mScene[transformation].push_back(LLModelInstance(model, label, transformation, materials)); stretch_extents(model, transformation); } } } else { LL_INFOS()<<"Unable to resolve geometry URL."<(element); if (instance_node) { daeElement* instance = instance_node->getUrl().getElement(); if (instance) { processElement(instance,badElement, dae); } } //process children daeTArray< daeSmartRef > children = element->getChildren(); auto childCount = children.getCount(); for (size_t i = 0; i < childCount; i++) { processElement(children[i],badElement, dae); } if (pushed_mat) { //this element was a node, restore transform before processiing siblings mTransform = saved_transform; } } std::map LLDAELoader::getMaterials(LLModel* model, domInstance_geometry* instance_geo, DAE* dae) { std::map materials; for (int i = 0; i < model->mMaterialList.size(); i++) { LLImportMaterial import_material; domInstance_material* instance_mat = NULL; domBind_material::domTechnique_common* technique = daeSafeCast(instance_geo->getDescendant(daeElement::matchType(domBind_material::domTechnique_common::ID()))); if (technique) { daeTArray< daeSmartRef > inst_materials = technique->getChildrenByType(); for (int j = 0; j < inst_materials.getCount(); j++) { std::string symbol(inst_materials[j]->getSymbol()); if (symbol == model->mMaterialList[i]) // found the binding { instance_mat = inst_materials[j]; break; } } } if (instance_mat) { domMaterial* material = daeSafeCast(instance_mat->getTarget().getElement()); if (material) { domInstance_effect* instance_effect = daeSafeCast(material->getDescendant(daeElement::matchType(domInstance_effect::ID()))); if (instance_effect) { domEffect* effect = daeSafeCast(instance_effect->getUrl().getElement()); if (effect) { domProfile_COMMON* profile = daeSafeCast(effect->getDescendant(daeElement::matchType(domProfile_COMMON::ID()))); if (profile) { import_material = 
profileToMaterial(profile, dae); } } } } } import_material.mBinding = model->mMaterialList[i]; materials[model->mMaterialList[i]] = import_material; } return materials; } LLImportMaterial LLDAELoader::profileToMaterial(domProfile_COMMON* material, DAE* dae) { LLImportMaterial mat; mat.mFullbright = false; daeElement* diffuse = material->getDescendant("diffuse"); if (diffuse) { domCommon_color_or_texture_type_complexType::domTexture* texture = daeSafeCast(diffuse->getDescendant("texture")); if (texture) { domCommon_newparam_type_Array newparams = material->getNewparam_array(); if (newparams.getCount()) { for (S32 i = 0; i < newparams.getCount(); i++) { domFx_surface_common* surface = newparams[i]->getSurface(); if (surface) { domFx_surface_init_common* init = surface->getFx_surface_init_common(); if (init) { domFx_surface_init_from_common_Array init_from = init->getInit_from_array(); if (init_from.getCount() > i) { domImage* image = daeSafeCast(init_from[i]->getValue().getElement()); if (image) { // we only support init_from now - embedded data will come later domImage::domInit_from* init = image->getInit_from(); if (init) { mat.mDiffuseMapFilename = cdom::uriToNativePath(init->getValue().str()); mat.mDiffuseMapLabel = getElementLabel(material); } } } } } } } else if (texture->getTexture()) { domImage* image = NULL; dae->getDatabase()->getElement((daeElement**) &image, 0, texture->getTexture(), COLLADA_TYPE_IMAGE); if (image) { // we only support init_from now - embedded data will come later domImage::domInit_from* init = image->getInit_from(); if (init) { std::string image_path_value = cdom::uriToNativePath(init->getValue().str()); #if LL_WINDOWS // Work-around DOM tendency to resort to UNC names which are only confusing for downstream... // std::string::iterator i = image_path_value.begin(); while (*i == '\\') i++; mat.mDiffuseMapFilename.assign(i, image_path_value.end()); #else mat.mDiffuseMapFilename = image_path_value; #endif mat.mDiffuseMapLabel = getElementLabel(material); } } } } domCommon_color_or_texture_type_complexType::domColor* color = daeSafeCast(diffuse->getDescendant("color")); if (color) { domFx_color_common domfx_color = color->getValue(); LLColor4 value = LLColor4(domfx_color[0], domfx_color[1], domfx_color[2], domfx_color[3]); mat.mDiffuseColor = value; } } daeElement* emission = material->getDescendant("emission"); if (emission) { LLColor4 emission_color = getDaeColor(emission); if (((emission_color[0] + emission_color[1] + emission_color[2]) / 3.0) > 0.25) { mat.mFullbright = true; } } return mat; } // try to get a decent label for this element std::string LLDAELoader::getElementLabel(daeElement *element) { // if we have a name attribute, use it std::string name = element->getAttribute("name"); if (name.length()) { return name; } // if we have an ID attribute, use it if (element->getID()) { return std::string(element->getID()); } // if we have a parent, use it daeElement* parent = element->getParent(); std::string index_string; if (parent) { // retrieve index to distinguish items inside same parent size_t ind = 0; parent->getChildren().find(element, ind); if (ind > 0) { index_string = "_" + std::to_string(ind); } // if parent has a name or ID, use it std::string name = parent->getAttribute("name"); if (!name.length()) { name = std::string(parent->getID()); } if (name.length()) { // make sure that index won't mix up with pre-named lod extensions size_t ext_pos = getSuffixPosition(name); if (ext_pos == -1) { return name + index_string; } else { return 
name.insert(ext_pos, index_string); } } } // try to use our type daeString element_name = element->getElementName(); if (element_name) { return std::string(element_name) + index_string; } // if all else fails, use "object" return std::string("object") + index_string; } // static size_t LLDAELoader::getSuffixPosition(std::string label) { if ((label.find("_LOD") != -1) || (label.find("_PHYS") != -1)) { return label.rfind('_'); } return -1; } // static std::string LLDAELoader::getLodlessLabel(daeElement *element) { std::string label = getElementLabel(element); size_t ext_pos = getSuffixPosition(label); if (ext_pos != -1) { return label.substr(0, ext_pos); } return label; } LLColor4 LLDAELoader::getDaeColor(daeElement* element) { LLColor4 value; domCommon_color_or_texture_type_complexType::domColor* color = daeSafeCast(element->getDescendant("color")); if (color) { domFx_color_common domfx_color = color->getValue(); value = LLColor4(domfx_color[0], domfx_color[1], domfx_color[2], domfx_color[3]); } return value; } bool LLDAELoader::addVolumeFacesFromDomMesh(LLModel* pModel,domMesh* mesh, LLSD& log_msg) { LLModel::EModelStatus status = LLModel::NO_ERRORS; domTriangles_Array& tris = mesh->getTriangles_array(); for (U32 i = 0; i < tris.getCount(); ++i) { domTrianglesRef& tri = tris.get(i); status = load_face_from_dom_triangles(pModel->getVolumeFaces(), pModel->getMaterialList(), tri, log_msg); pModel->mStatus = status; if(status != LLModel::NO_ERRORS) { pModel->ClearFacesAndMaterials(); return false; } } domPolylist_Array& polys = mesh->getPolylist_array(); for (U32 i = 0; i < polys.getCount(); ++i) { domPolylistRef& poly = polys.get(i); status = load_face_from_dom_polylist(pModel->getVolumeFaces(), pModel->getMaterialList(), poly, log_msg); if(status != LLModel::NO_ERRORS) { pModel->ClearFacesAndMaterials(); return false; } } domPolygons_Array& polygons = mesh->getPolygons_array(); for (U32 i = 0; i < polygons.getCount(); ++i) { domPolygonsRef& poly = polygons.get(i); status = load_face_from_dom_polygons(pModel->getVolumeFaces(), pModel->getMaterialList(), poly); if(status != LLModel::NO_ERRORS) { pModel->ClearFacesAndMaterials(); return false; } } return (status == LLModel::NO_ERRORS); } //static diff version supports creating multiple models when material counts spill // over the 8 face server-side limit // bool LLDAELoader::loadModelsFromDomMesh(domMesh* mesh, std::vector& models_out, U32 submodel_limit) { LLVolumeParams volume_params; volume_params.setType(LL_PCODE_PROFILE_SQUARE, LL_PCODE_PATH_LINE); models_out.clear(); LLModel* ret = new LLModel(volume_params, 0.f); std::string model_name = getLodlessLabel(mesh); ret->mLabel = model_name + lod_suffix[mLod]; llassert(!ret->mLabel.empty()); // Like a monkey, ready to be shot into space // ret->ClearFacesAndMaterials(); // Get the whole set of volume faces // addVolumeFacesFromDomMesh(ret, mesh, mWarningsArray); U32 volume_faces = ret->getNumVolumeFaces(); // Side-steps all manner of issues when splitting models // and matching lower LOD materials to base models // ret->sortVolumeFacesByMaterialName(); bool normalized = false; int submodelID = 0; // remove all faces that definitely won't fit into one model and submodel limit U32 face_limit = (submodel_limit + 1) * LL_SCULPT_MESH_MAX_FACES; if (face_limit < volume_faces) { ret->setNumVolumeFaces(face_limit); } LLVolume::face_list_t remainder; do { // Insure we do this once with the whole gang and not per-model // if (!normalized && !mNoNormalize) { normalized = true; 
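            // Normalize the full face set into unit-cube space exactly once; any submodels
            // split off below inherit the same mNormalizedScale/mNormalizedTranslation.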
ret->normalizeVolumeFaces(); } ret->trimVolumeFacesToSize(LL_SCULPT_MESH_MAX_FACES, &remainder); // remove unused/redundant vertices after normalizing if (!mNoOptimize) { ret->remapVolumeFaces(); } volume_faces = static_cast(remainder.size()); models_out.push_back(ret); // If we have left-over volume faces, create another model // to absorb them... // if (volume_faces) { LLModel* next = new LLModel(volume_params, 0.f); next->mSubmodelID = ++submodelID; next->mLabel = model_name + (char)((int)'a' + next->mSubmodelID) + lod_suffix[mLod]; next->getVolumeFaces() = remainder; next->mNormalizedScale = ret->mNormalizedScale; next->mNormalizedTranslation = ret->mNormalizedTranslation; if ( ret->mMaterialList.size() > LL_SCULPT_MESH_MAX_FACES) { next->mMaterialList.assign(ret->mMaterialList.begin() + LL_SCULPT_MESH_MAX_FACES, ret->mMaterialList.end()); } ret = next; } remainder.clear(); } while (volume_faces); return true; }