path: root/indra/newview/gltf/buffer_util.h
author     Dave Parks <davep@lindenlab.com>    2024-05-20 13:22:55 -0500
committer  GitHub <noreply@github.com>         2024-05-20 13:22:55 -0500
commit     03c4458bdcc6821a3047f93b729d412e274ab9af (patch)
tree       ecc314de3aa32161e8ac5b1a554e9e7d2a608dc3 /indra/newview/gltf/buffer_util.h
parent     eab232d3ed49bfb1f873e332ff57ec8c311c163b (diff)
#1392 GLTF Upload (#1394)
* #1392 WIP -- Functional texture upload, stubbed out .bin upload.
* #1392 GLTF Upload WIP -- Emulates successful upload
  Successfully uploads texture
  Emulates successful .gltf and .bin upload by injecting into local asset cache.
  Emulates rez from inventory by setting sculpt ID of selected object
  Currently fails in tinygltf parsing due to missing .bin
* Add missing notification
* Build fix
* #1392 Add boost::json .gltf reading support.
* #1392 boost::json GLTF writing prototype
* Create gltf/README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* Update README.md
* #1392 Add ability to render directly from LL::GLTF::Material
* Fix for mac build
* Mac build fix
* #1392 AssetType and Inventory Type plumbing
* #1392 More sane error handling and scheduling of uploads.
* #1392 Actually attempt to upload glbin
* Mac build fix, upload nudge
* Mac build fix
* Fix glTF asset uploads to server
* Mac build fix (inline not static)
* More consistent inline
* Add glm, mac nudge.
* #1392 For consistency with spec, start using glm over glh:: and LLFoo
* Another attempt at placating Mac builds
* Another Mac nudge
* Mac build take 23
* #1392 Prune LLMatrix4a from GLTF namespace.
* #1392 Fix for orientation being off (glm::quat is wxyz, not xyzw)
* #1392 WIP -- Actually send the sculpt type and id, nudge readme and alpha rendering
* #1392 Working download!
* #1394 Add support for GLTFEnabled SimulatorFeature
* #1392 Review feedback

---------

Co-authored-by: Pepper Linden <3782201+rohvani@users.noreply.github.com>
Diffstat (limited to 'indra/newview/gltf/buffer_util.h')
-rw-r--r--  indra/newview/gltf/buffer_util.h  608
1 file changed, 549 insertions(+), 59 deletions(-)
diff --git a/indra/newview/gltf/buffer_util.h b/indra/newview/gltf/buffer_util.h
index 4e6f5901e7..b0fbc8524d 100644
--- a/indra/newview/gltf/buffer_util.h
+++ b/indra/newview/gltf/buffer_util.h
@@ -36,55 +36,60 @@
#define LL_FUNCSIG __PRETTY_FUNCTION__
#endif
+#include "accessor.h"
+
namespace LL
{
namespace GLTF
{
+
+ using string_view = boost::json::string_view;
+
// copy one Scalar from src to dst
template<class S, class T>
- static void copyScalar(S* src, T& dst)
+ inline void copyScalar(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
// copy one vec2 from src to dst
template<class S, class T>
- static void copyVec2(S* src, T& dst)
+ inline void copyVec2(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
// copy one vec3 from src to dst
template<class S, class T>
- static void copyVec3(S* src, T& dst)
+ inline void copyVec3(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
// copy one vec4 from src to dst
template<class S, class T>
- static void copyVec4(S* src, T& dst)
+ inline void copyVec4(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
- // copy one vec2 from src to dst
+ // copy one mat2 from src to dst
template<class S, class T>
- static void copyMat2(S* src, T& dst)
+ inline void copyMat2(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
- // copy one vec3 from src to dst
+ // copy one mat3 from src to dst
template<class S, class T>
- static void copyMat3(S* src, T& dst)
+ inline void copyMat3(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
- // copy one vec4 from src to dst
+ // copy one mat4 from src to dst
template<class S, class T>
- static void copyMat4(S* src, T& dst)
+ inline void copyMat4(S* src, T& dst)
{
LL_ERRS() << "TODO: implement " << LL_FUNCSIG << LL_ENDL;
}
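These unspecialized templates fail loudly at runtime through LL_ERRS(), so any source/destination pairing that lacks an explicit specialization is caught the first time it executes. As a hedged sketch of how a new pairing would slot in (this S8 case is hypothetical, not part of the diff), a normalized signed-byte scalar following the glTF normalization rule max(c / 127.0, -1.0) might look like:

    // hypothetical specialization: normalized S8 -> F32
    // (std::max from <algorithm>)
    template<>
    inline void copyScalar<S8, F32>(S8* src, F32& dst)
    {
        dst = std::max((F32)*src / 127.f, -1.f);
    }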
@@ -93,135 +98,128 @@ namespace LL
// concrete implementations for different types of source and destination
//=========================================================================================================
-// suppress unused function warning -- clang complains here but these specializations are definitely used
-#if defined(__clang__)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wunused-function"
-#endif
-
template<>
- void copyScalar<F32, F32>(F32* src, F32& dst)
+ inline void copyScalar<F32, F32>(F32* src, F32& dst)
{
dst = *src;
}
template<>
- void copyScalar<U32, U32>(U32* src, U32& dst)
+ inline void copyScalar<U32, U32>(U32* src, U32& dst)
{
dst = *src;
}
template<>
- void copyScalar<U32, U16>(U32* src, U16& dst)
+ inline void copyScalar<U32, U16>(U32* src, U16& dst)
{
dst = *src;
}
template<>
- void copyScalar<U16, U16>(U16* src, U16& dst)
+ inline void copyScalar<U16, U16>(U16* src, U16& dst)
{
dst = *src;
}
template<>
- void copyScalar<U16, U32>(U16* src, U32& dst)
+ inline void copyScalar<U16, U32>(U16* src, U32& dst)
{
dst = *src;
}
template<>
- void copyScalar<U8, U16>(U8* src, U16& dst)
+ inline void copyScalar<U8, U16>(U8* src, U16& dst)
{
dst = *src;
}
template<>
- void copyScalar<U8, U32>(U8* src, U32& dst)
+ inline void copyScalar<U8, U32>(U8* src, U32& dst)
{
dst = *src;
}
template<>
- void copyVec2<F32, LLVector2>(F32* src, LLVector2& dst)
+ inline void copyVec2<F32, LLVector2>(F32* src, LLVector2& dst)
{
dst.set(src[0], src[1]);
}
template<>
- void copyVec3<F32, glh::vec3f>(F32* src, glh::vec3f& dst)
+ inline void copyVec3<F32, vec3>(F32* src, vec3& dst)
{
- dst.set_value(src[0], src[1], src[2]);
+ dst = vec3(src[0], src[1], src[2]);
}
template<>
- void copyVec3<F32, LLVector4a>(F32* src, LLVector4a& dst)
+ inline void copyVec3<F32, LLVector4a>(F32* src, LLVector4a& dst)
{
dst.load3(src);
}
template<>
- void copyVec3<U16, LLColor4U>(U16* src, LLColor4U& dst)
+ inline void copyVec3<U16, LLColor4U>(U16* src, LLColor4U& dst)
{
dst.set(src[0], src[1], src[2], 255);
}
template<>
- void copyVec4<U8, LLColor4U>(U8* src, LLColor4U& dst)
+ inline void copyVec4<U8, LLColor4U>(U8* src, LLColor4U& dst)
{
dst.set(src[0], src[1], src[2], src[3]);
}
template<>
- void copyVec4<U16, LLColor4U>(U16* src, LLColor4U& dst)
+ inline void copyVec4<U16, LLColor4U>(U16* src, LLColor4U& dst)
{
dst.set(src[0], src[1], src[2], src[3]);
}
template<>
- void copyVec4<F32, LLColor4U>(F32* src, LLColor4U& dst)
+ inline void copyVec4<F32, LLColor4U>(F32* src, LLColor4U& dst)
{
dst.set(src[0]*255, src[1]*255, src[2]*255, src[3]*255);
}
template<>
- void copyVec4<F32, LLVector4a>(F32* src, LLVector4a& dst)
+ inline void copyVec4<F32, LLVector4a>(F32* src, LLVector4a& dst)
{
dst.loadua(src);
}
template<>
- void copyVec4<U16, LLVector4a>(U16* src, LLVector4a& dst)
+ inline void copyVec4<U16, LLVector4a>(U16* src, LLVector4a& dst)
{
dst.set(src[0], src[1], src[2], src[3]);
}
template<>
- void copyVec4<U8, LLVector4a>(U8* src, LLVector4a& dst)
+ inline void copyVec4<U8, LLVector4a>(U8* src, LLVector4a& dst)
{
dst.set(src[0], src[1], src[2], src[3]);
}
template<>
- void copyVec4<F32, glh::quaternionf>(F32* src, glh::quaternionf& dst)
+ inline void copyVec4<F32, quat>(F32* src, quat& dst)
{
- dst.set_value(src);
+ dst.x = src[0];
+ dst.y = src[1];
+ dst.z = src[2];
+ dst.w = src[3];
}
template<>
- void copyMat4<F32, glh::matrix4f>(F32* src, glh::matrix4f& dst)
+ inline void copyMat4<F32, mat4>(F32* src, mat4& dst)
{
- dst.set_value(src);
+ dst = glm::make_mat4(src);
}
-#if defined(__clang__)
-#pragma clang diagnostic pop
-#endif
-
//=========================================================================================================
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyScalar(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyScalar(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
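The hunk context cuts off each of the strided loop bodies that follow; the conventional shape of such a copy, sketched here rather than quoted from the header, advances the source pointer by stride bytes per element, since glTF's byteStride counts bytes, not elements:

    for (S32 i = 0; i < count; ++i)
    {
        copyScalar(src, *dst);            // copy one element into the strider
        dst++;                            // next destination slot
        src = (S*)((U8*)src + stride);    // advance the source by bytes
    }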
@@ -233,7 +231,7 @@ namespace LL
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyVec2(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyVec2(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
@@ -245,7 +243,7 @@ namespace LL
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyVec3(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyVec3(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
@@ -257,7 +255,7 @@ namespace LL
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyVec4(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyVec4(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
@@ -269,7 +267,7 @@ namespace LL
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyMat2(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyMat2(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
@@ -281,7 +279,7 @@ namespace LL
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyMat3(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyMat3(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
@@ -293,7 +291,7 @@ namespace LL
// copy from src to dst, stride is the number of bytes between each element in src, count is number of elements to copy
template<class S, class T>
- static void copyMat4(S* src, LLStrider<T> dst, S32 stride, S32 count)
+ inline void copyMat4(S* src, LLStrider<T> dst, S32 stride, S32 count)
{
for (S32 i = 0; i < count; ++i)
{
@@ -304,39 +302,39 @@ namespace LL
}
template<class S, class T>
- static void copy(Asset& asset, Accessor& accessor, const S* src, LLStrider<T>& dst, S32 byteStride)
+ inline void copy(Asset& asset, Accessor& accessor, const S* src, LLStrider<T>& dst, S32 byteStride)
{
- if (accessor.mType == (S32)Accessor::Type::SCALAR)
+ if (accessor.mType == Accessor::Type::SCALAR)
{
S32 stride = byteStride == 0 ? sizeof(S) * 1 : byteStride;
copyScalar((S*)src, dst, stride, accessor.mCount);
}
- else if (accessor.mType == (S32)Accessor::Type::VEC2)
+ else if (accessor.mType == Accessor::Type::VEC2)
{
S32 stride = byteStride == 0 ? sizeof(S) * 2 : byteStride;
copyVec2((S*)src, dst, stride, accessor.mCount);
}
- else if (accessor.mType == (S32)Accessor::Type::VEC3)
+ else if (accessor.mType == Accessor::Type::VEC3)
{
S32 stride = byteStride == 0 ? sizeof(S) * 3 : byteStride;
copyVec3((S*)src, dst, stride, accessor.mCount);
}
- else if (accessor.mType == (S32)Accessor::Type::VEC4)
+ else if (accessor.mType == Accessor::Type::VEC4)
{
S32 stride = byteStride == 0 ? sizeof(S) * 4 : byteStride;
copyVec4((S*)src, dst, stride, accessor.mCount);
}
- else if (accessor.mType == (S32)Accessor::Type::MAT2)
+ else if (accessor.mType == Accessor::Type::MAT2)
{
S32 stride = byteStride == 0 ? sizeof(S) * 4 : byteStride;
copyMat2((S*)src, dst, stride, accessor.mCount);
}
- else if (accessor.mType == (S32)Accessor::Type::MAT3)
+ else if (accessor.mType == Accessor::Type::MAT3)
{
S32 stride = byteStride == 0 ? sizeof(S) * 9 : byteStride;
copyMat3((S*)src, dst, stride, accessor.mCount);
}
- else if (accessor.mType == (S32)Accessor::Type::MAT4)
+ else if (accessor.mType == Accessor::Type::MAT4)
{
S32 stride = byteStride == 0 ? sizeof(S) * 16 : byteStride;
copyMat4((S*)src, dst, stride, accessor.mCount);
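Per the glTF spec, a bufferView byteStride of zero means the attribute data is tightly packed, which is why each branch above falls back to sizeof(S) times the component count of the accessor type: a packed F32 VEC3 stream, for example, gets a stride of sizeof(F32) * 3 = 12 bytes.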
@@ -349,7 +347,7 @@ namespace LL
// copy data from accessor to strider
template<class T>
- static void copy(Asset& asset, Accessor& accessor, LLStrider<T>& dst)
+ inline void copy(Asset& asset, Accessor& accessor, LLStrider<T>& dst)
{
const BufferView& bufferView = asset.mBufferViews[accessor.mBufferView];
const Buffer& buffer = asset.mBuffers[bufferView.mBuffer];
@@ -391,12 +389,504 @@ namespace LL
// copy data from accessor to vector
template<class T>
- static void copy(Asset& asset, Accessor& accessor, std::vector<T>& dst)
+ inline void copy(Asset& asset, Accessor& accessor, std::vector<T>& dst)
{
dst.resize(accessor.mCount);
LLStrider<T> strider = dst.data();
copy(asset, accessor, strider);
}
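Putting the accessor plumbing together, a hedged usage sketch (assuming Asset exposes its accessor list as mAccessors, which this header does not itself show):

    // sketch: copy a position accessor into a contiguous vector;
    // the accessor index normally comes from a mesh primitive
    std::vector<vec3> readPositions(LL::GLTF::Asset& asset, S32 accessorIdx)
    {
        LL::GLTF::Accessor& accessor = asset.mAccessors[accessorIdx]; // mAccessors assumed
        std::vector<vec3> positions;
        copy(asset, accessor, positions); // resizes and fills positions
        return positions;
    }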
+
+
+ //=========================================================================================================
+ // boost::json copying utilities
+ // ========================================================================================================
+
+ //====================== unspecialized base template, single value ===========================
+
+ // to/from Value
+ template<typename T>
+ inline bool copy(const Value& src, T& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+ template<typename T>
+ inline bool write(const T& src, Value& dst)
+ {
+ dst = boost::json::object();
+ src.serialize(dst.as_object());
+ return true;
+ }
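The unspecialized write above assumes T exposes a serialize(boost::json::object&) const member, which the GLTF classes provide elsewhere in this directory. A hedged sketch of a hypothetical type honoring that contract:

    // hypothetical type satisfying the unspecialized write<T> contract
    struct NamedThing
    {
        std::string mName;
        void serialize(boost::json::object& obj) const
        {
            obj["name"] = mName;
        }
    };

    // write(NamedThing{"root"}, v) leaves v holding {"name":"root"}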
+
+ template<typename T>
+ inline bool copy(const Value& src, std::unordered_map<std::string, T>& dst)
+ {
+ if (src.is_object())
+ {
+ const boost::json::object& obj = src.as_object();
+ for (const auto& [key, value] : obj)
+ {
+ copy<T>(value, dst[key]);
+ }
+ return true;
+ }
+ return false;
+ }
+
+ template<typename T>
+ inline bool write(const std::unordered_map<std::string, T>& src, Value& dst)
+ {
+ boost::json::object obj;
+ for (const auto& [key, value] : src)
+ {
+ Value v;
+ if (write<T>(value, v))
+ {
+ obj[key] = v;
+ }
+ else
+ {
+ return false;
+ }
+ }
+ dst = obj;
+ return true;
+ }
+
+ // to/from array
+ template<typename T>
+ inline bool copy(const Value& src, std::vector<T>& dst)
+ {
+ if (src.is_array())
+ {
+ const boost::json::array& arr = src.get_array();
+ dst.resize(arr.size());
+ for (size_t i = 0; i < arr.size(); ++i)
+ {
+ copy(arr[i], dst[i]);
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ template<typename T>
+ inline bool write(const std::vector<T>& src, Value& dst)
+ {
+ boost::json::array arr;
+ for (const T& t : src)
+ {
+ Value v;
+ if (write(t, v))
+ {
+ arr.push_back(v);
+ }
+ else
+ {
+ return false;
+ }
+ }
+ dst = arr;
+ return true;
+ }
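A hedged round trip through the array overloads, relying on the F32 specializations that appear later in this file (usable from any code that includes the full header):

    std::vector<F32> weights { 0.25f, 0.75f };
    Value v;
    write(weights, v);   // v becomes [0.25, 0.75]
    std::vector<F32> out;
    copy(v, out);        // out == {0.25f, 0.75f}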
+
+ // to/from object member
+ template<typename T>
+ inline bool copy(const boost::json::object& src, string_view member, T& dst)
+ {
+ auto it = src.find(member);
+ if (it != src.end())
+ {
+ return copy(it->value(), dst);
+ }
+ return false;
+ }
+
+ // always write a member to an object without checking default
+ template<typename T>
+ inline bool write_always(const T& src, string_view member, boost::json::object& dst)
+ {
+ Value& v = dst[member];
+ if (!write(src, v))
+ {
+ dst.erase(member);
+ return false;
+ }
+ return true;
+ }
+
+ // conditionally write a member to an object if the member
+ // is not the default value
+ template<typename T>
+ inline bool write(const T& src, string_view member, boost::json::object& dst, const T& default_value = T())
+ {
+ if (src != default_value)
+ {
+ return write_always(src, member, dst);
+ }
+ return false;
+ }
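Because most glTF members have spec-defined defaults, this overload keeps the emitted JSON minimal: a value equal to its default is simply omitted. A sketch (the member name is illustrative):

    boost::json::object obj;
    write(1.0f, "scale", obj, 1.0f);   // equals default_value: skipped, returns false
    write(2.0f, "scale", obj, 1.0f);   // differs: writes obj["scale"] = 2.0, returns true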
+
+ template<typename T>
+ inline bool write(const std::unordered_map<std::string, T>& src, string_view member, boost::json::object& dst, const std::unordered_map<std::string, T>& default_value = std::unordered_map<std::string, T>())
+ {
+ if (!src.empty())
+ {
+ Value v;
+ if (write<T>(src, v))
+ {
+ dst[member] = v;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ template<typename T>
+    inline bool write(const std::vector<T>& src, string_view member, boost::json::object& dst, const std::vector<T>& default_value = std::vector<T>())
+ {
+ if (!src.empty())
+ {
+ Value v;
+ if (write(src, v))
+ {
+ dst[member] = v;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ template<typename T>
+ inline bool copy(const Value& src, string_view member, T& dst)
+ {
+ if (src.is_object())
+ {
+ const boost::json::object& obj = src.as_object();
+ return copy(obj, member, dst);
+ }
+
+ return false;
+ }
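Reading members back out through the object-aware overloads, a hedged sketch:

    Value v = boost::json::parse(R"({"name":"root","count":3})");
    std::string name;
    S32 count = 0;
    copy(v, "name", name);     // true; name == "root"
    copy(v, "count", count);   // true; count == 3 (parsed as int64)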
+
+ // vec4
+ template<>
+ inline bool copy(const Value& src, vec4& dst)
+ {
+ if (src.is_array())
+ {
+ const boost::json::array& arr = src.as_array();
+ if (arr.size() == 4)
+ {
+ if (arr[0].is_double() &&
+ arr[1].is_double() &&
+ arr[2].is_double() &&
+ arr[3].is_double())
+ {
+ dst = vec4(arr[0].get_double(), arr[1].get_double(), arr[2].get_double(), arr[3].get_double());
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const vec4& src, Value& dst)
+ {
+ dst = boost::json::array();
+ boost::json::array& arr = dst.get_array();
+ arr.resize(4);
+ arr[0] = src.x;
+ arr[1] = src.y;
+ arr[2] = src.z;
+ arr[3] = src.w;
+ return true;
+ }
+
+ // quat
+ template<>
+ inline bool copy(const Value& src, quat& dst)
+ {
+ if (src.is_array())
+ {
+ const boost::json::array& arr = src.as_array();
+ if (arr.size() == 4)
+ {
+ if (arr[0].is_double() &&
+ arr[1].is_double() &&
+ arr[2].is_double() &&
+ arr[3].is_double())
+ {
+ dst.x = arr[0].get_double();
+ dst.y = arr[1].get_double();
+ dst.z = arr[2].get_double();
+ dst.w = arr[3].get_double();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const quat& src, Value& dst)
+ {
+ dst = boost::json::array();
+ boost::json::array& arr = dst.get_array();
+ arr.resize(4);
+ arr[0] = src.x;
+ arr[1] = src.y;
+ arr[2] = src.z;
+ arr[3] = src.w;
+ return true;
+ }
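Note the component-wise assignment in both quat directions: glTF stores rotations as [x, y, z, w], while glm::quat's constructor takes (w, x, y, z), so copying by named member rather than by constructor argument position keeps the order straight; the commit message records exactly this ordering as an earlier orientation bug.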
+
+
+ // vec3
+ template<>
+ inline bool copy(const Value& src, vec3& dst)
+ {
+ if (src.is_array())
+ {
+ const boost::json::array& arr = src.as_array();
+ if (arr.size() == 3)
+ {
+ if (arr[0].is_double() &&
+ arr[1].is_double() &&
+ arr[2].is_double())
+ {
+                dst = vec3(arr[0].get_double(), arr[1].get_double(), arr[2].get_double());
+                return true;
+            }
+ }
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const vec3& src, Value& dst)
+ {
+ dst = boost::json::array();
+ boost::json::array& arr = dst.as_array();
+ arr.resize(3);
+ arr[0] = src.x;
+ arr[1] = src.y;
+ arr[2] = src.z;
+ return true;
+ }
+
+ // bool
+ template<>
+ inline bool copy(const Value& src, bool& dst)
+ {
+ if (src.is_bool())
+ {
+ dst = src.get_bool();
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const bool& src, Value& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+ // F32
+ template<>
+ inline bool copy(const Value& src, F32& dst)
+ {
+ if (src.is_double())
+ {
+ dst = src.get_double();
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const F32& src, Value& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+
+ // U32
+ template<>
+ inline bool copy(const Value& src, U32& dst)
+ {
+ if (src.is_int64())
+ {
+ dst = src.get_int64();
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const U32& src, Value& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+ // F64
+ template<>
+ inline bool copy(const Value& src, F64& dst)
+ {
+ if (src.is_double())
+ {
+ dst = src.get_double();
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const F64& src, Value& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+ // Accessor::Type
+ template<>
+ inline bool copy(const Value& src, Accessor::Type& dst)
+ {
+ if (src.is_string())
+ {
+ dst = gltf_type_to_enum(src.get_string().c_str());
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const Accessor::Type& src, Value& dst)
+ {
+ dst = enum_to_gltf_type(src);
+ return true;
+ }
+
+ // S32
+ template<>
+ inline bool copy(const Value& src, S32& dst)
+ {
+ if (src.is_int64())
+ {
+ dst = src.get_int64();
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const S32& src, Value& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+
+ // std::string
+ template<>
+ inline bool copy(const Value& src, std::string& dst)
+ {
+ if (src.is_string())
+ {
+ dst = src.get_string().c_str();
+ return true;
+ }
+ return false;
+ }
+
+ template<>
+ inline bool write(const std::string& src, Value& dst)
+ {
+ dst = src;
+ return true;
+ }
+
+ // mat4
+ template<>
+ inline bool copy(const Value& src, mat4& dst)
+ {
+ if (src.is_array())
+ {
+ const boost::json::array& arr = src.get_array();
+ if (arr.size() == 16)
+ {
+ // populate a temporary local in case
+ // we hit an error in the middle of the array
+ // (don't partially write a matrix)
+ mat4 t;
+ F32* p = glm::value_ptr(t);
+
+ for (U32 i = 0; i < arr.size(); ++i)
+ {
+ if (arr[i].is_double())
+ {
+ p[i] = arr[i].get_double();
+ }
+ else
+ {
+ return false;
+ }
+ }
+
+ dst = t;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ template<>
+ inline bool write(const mat4& src, Value& dst)
+ {
+ dst = boost::json::array();
+ boost::json::array& arr = dst.get_array();
+ arr.resize(16);
+ const F32* p = glm::value_ptr(src);
+ for (U32 i = 0; i < 16; ++i)
+ {
+ arr[i] = p[i];
+ }
+ return true;
+ }
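The temporary above makes the matrix copy all-or-nothing, which a hedged sketch can exercise:

    mat4 m(1.f);                                        // identity
    Value bad = boost::json::parse("[1.0, 2.0, 3.0]");  // wrong length
    copy(bad, m);                                       // returns false; m is still identity

One caveat worth knowing: boost::json parses a bare 1 as int64 rather than double, so matrix (and vector) elements written without a decimal point fail the is_double() checks here.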
+
+ // Material::AlphaMode
+ template<>
+ inline bool copy(const Value& src, Material::AlphaMode& dst)
+ {
+ if (src.is_string())
+ {
+ dst = gltf_alpha_mode_to_enum(src.get_string().c_str());
+ return true;
+ }
+        return false;
+ }
+
+ template<>
+ inline bool write(const Material::AlphaMode& src, Value& dst)
+ {
+ dst = enum_to_gltf_alpha_mode(src);
+ return true;
+ }
+
+ //
+ // ========================================================================================================
+
}
}