author    Andrey Lihatskiy <alihatskiy@productengine.com>  2024-05-15 12:47:27 +0300
committer Andrey Lihatskiy <alihatskiy@productengine.com>  2024-05-15 12:47:27 +0300
commit    b06a99f7c76950484972e25d9dbbee8660a6a6c3 (patch)
tree      58fb3b7d1c95cbd2eb938a9d0f252cdb3ab3b6a8 /indra/llimage
parent    bf1235b017b254ba989b156c73c4ce18ba4e6c23 (diff)
Post-merge spaces fix
Diffstat (limited to 'indra/llimage')
-rw-r--r--  indra/llimage/llimage.cpp  3408
-rw-r--r--  indra/llimage/llimage.h     408
2 files changed, 1908 insertions, 1908 deletions
diff --git a/indra/llimage/llimage.cpp b/indra/llimage/llimage.cpp
index ed8f36286f..864315d0ff 100644
--- a/indra/llimage/llimage.cpp
+++ b/indra/llimage/llimage.cpp
@@ -1,25 +1,25 @@
-/**
+/**
* @file llimage.cpp
* @brief Base class for images.
*
* $LicenseInfo:firstyear=2001&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
- *
+ *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
- *
+ *
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
- *
+ *
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
+ *
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
@@ -54,40 +54,40 @@
#define _UNROL_GEN_TPL_comma_1 BOOST_PP_COMMA()
//..................................................................................
#define _UNROL_GEN_TPL_ARGS_macro(z,n,seq) \
- BOOST_PP_CAT(_UNROL_GEN_TPL_arg_, BOOST_PP_MOD(n, 2))(BOOST_PP_SEQ_ELEM(n, seq)) BOOST_PP_CAT(_UNROL_GEN_TPL_comma_, BOOST_PP_AND(BOOST_PP_MOD(n, 2), BOOST_PP_NOT_EQUAL(BOOST_PP_INC(n), BOOST_PP_SEQ_SIZE(seq))))
+ BOOST_PP_CAT(_UNROL_GEN_TPL_arg_, BOOST_PP_MOD(n, 2))(BOOST_PP_SEQ_ELEM(n, seq)) BOOST_PP_CAT(_UNROL_GEN_TPL_comma_, BOOST_PP_AND(BOOST_PP_MOD(n, 2), BOOST_PP_NOT_EQUAL(BOOST_PP_INC(n), BOOST_PP_SEQ_SIZE(seq))))
#define _UNROL_GEN_TPL_ARGS(seq) \
- BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(seq), _UNROL_GEN_TPL_ARGS_macro, seq)
+ BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(seq), _UNROL_GEN_TPL_ARGS_macro, seq)
//..................................................................................
#define _UNROL_GEN_TPL_TYPE_ARGS_macro(z,n,seq) \
- BOOST_PP_SEQ_ELEM(n, seq) BOOST_PP_CAT(_UNROL_GEN_TPL_comma_, BOOST_PP_AND(BOOST_PP_MOD(n, 2), BOOST_PP_NOT_EQUAL(BOOST_PP_INC(n), BOOST_PP_SEQ_SIZE(seq))))
+ BOOST_PP_SEQ_ELEM(n, seq) BOOST_PP_CAT(_UNROL_GEN_TPL_comma_, BOOST_PP_AND(BOOST_PP_MOD(n, 2), BOOST_PP_NOT_EQUAL(BOOST_PP_INC(n), BOOST_PP_SEQ_SIZE(seq))))
#define _UNROL_GEN_TPL_TYPE_ARGS(seq) \
- BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(seq), _UNROL_GEN_TPL_TYPE_ARGS_macro, seq)
+ BOOST_PP_REPEAT(BOOST_PP_SEQ_SIZE(seq), _UNROL_GEN_TPL_TYPE_ARGS_macro, seq)
//..................................................................................
#define _UNROLL_GEN_TPL_foreach_ee(z, n, seq) \
- executor<n>(_UNROL_GEN_TPL_ARGS(seq));
+ executor<n>(_UNROL_GEN_TPL_ARGS(seq));
#define _UNROLL_GEN_TPL(name, args_seq, operation, spec) \
- template<> struct name<spec> { \
- private: \
- template<S32 _idx> inline void executor(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) { \
- BOOST_PP_SEQ_ENUM(operation) ; \
- } \
- public: \
- inline void operator()(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) { \
- BOOST_PP_REPEAT(spec, _UNROLL_GEN_TPL_foreach_ee, args_seq) \
- } \
+ template<> struct name<spec> { \
+ private: \
+ template<S32 _idx> inline void executor(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) { \
+ BOOST_PP_SEQ_ENUM(operation) ; \
+ } \
+ public: \
+ inline void operator()(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) { \
+ BOOST_PP_REPEAT(spec, _UNROLL_GEN_TPL_foreach_ee, args_seq) \
+ } \
};
//..................................................................................
#define _UNROLL_GEN_TPL_foreach_seq_macro(r, data, elem) \
- _UNROLL_GEN_TPL(BOOST_PP_SEQ_ELEM(0, data), BOOST_PP_SEQ_ELEM(1, data), BOOST_PP_SEQ_ELEM(2, data), elem)
+ _UNROLL_GEN_TPL(BOOST_PP_SEQ_ELEM(0, data), BOOST_PP_SEQ_ELEM(1, data), BOOST_PP_SEQ_ELEM(2, data), elem)
#define UNROLL_GEN_TPL(name, args_seq, operation, spec_seq) \
- /*general specialization - should not be implemented!*/ \
- template<U8> struct name { inline void operator()(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) { /*static_assert(!"Should not be instantiated.");*/ } }; \
- BOOST_PP_SEQ_FOR_EACH(_UNROLL_GEN_TPL_foreach_seq_macro, (name)(args_seq)(operation), spec_seq)
+ /*general specialization - should not be implemented!*/ \
+ template<U8> struct name { inline void operator()(_UNROL_GEN_TPL_TYPE_ARGS(args_seq)) { /*static_assert(!"Should not be instantiated.");*/ } }; \
+ BOOST_PP_SEQ_FOR_EACH(_UNROLL_GEN_TPL_foreach_seq_macro, (name)(args_seq)(operation), spec_seq)
//..................................................................................
//..................................................................................
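// The Boost.PP machinery above generates, for each channel count in spec_seq,
// a functor whose operator() repeats the operation once per channel with
// _idx = 0..N-1, i.e. it unrolls the per-channel loop at compile time. Below
// is a minimal sketch of the same idea without the preprocessor, using a
// C++17 fold expression; it is illustrative only, not the viewer's code.
#include <cstdint>
#include <utility>

template <uint8_t N>
struct unrolled_scale_add          // comp[c] += pix[c] * val, c = 0..N-1
{
    template <std::size_t... I>
    static void run(int32_t* comp, const uint8_t* pix, int32_t val,
                    std::index_sequence<I...>)
    {
        ((comp[I] += pix[I] * val), ...);   // expands to N statements
    }
    void operator()(int32_t* comp, const uint8_t* pix, int32_t val) const
    {
        run(comp, pix, val, std::make_index_sequence<N>{});
    }
};
// Usage sketch: unrolled_scale_add<4>{}(comp, pix, 256 - xap);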
@@ -123,460 +123,460 @@ UNROLL_GEN_TPL(uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff, (U8 *&)(dptr)(S3
template<U8 ch>
-struct scale_info
+struct scale_info
{
public:
- std::vector<S32> xpoints;
- std::vector<const U8*> ystrides;
- std::vector<S32> xapoints, yapoints;
- S32 xup_yup;
+ std::vector<S32> xpoints;
+ std::vector<const U8*> ystrides;
+ std::vector<S32> xapoints, yapoints;
+ S32 xup_yup;
public:
- //unrolling loop types declaration
- typedef uroll_zeroze_cx_comp<ch> uroll_zeroze_cx_comp_t;
- typedef uroll_comp_rshftasgn_constval<ch> uroll_comp_rshftasgn_constval_t;
- typedef uroll_comp_asgn_cx_rshft_cval_all_mul_val<ch> uroll_comp_asgn_cx_rshft_cval_all_mul_val_t;
- typedef uroll_comp_plusasgn_cx_rshft_cval_all_mul_val<ch> uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t;
- typedef uroll_inp_plusasgn_pix_mul_val<ch> uroll_inp_plusasgn_pix_mul_val_t;
- typedef uroll_inp_asgn_pix_mul_val<ch> uroll_inp_asgn_pix_mul_val_t;
- typedef uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r<ch> uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r_t;
- typedef uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r<ch> uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t;
- typedef uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r<ch> uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t;
- typedef uroll_uref_dptr_inc_asgn_comp_and_ff<ch> uroll_uref_dptr_inc_asgn_comp_and_ff_t;
- typedef uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff<ch> uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff_t;
- typedef uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff<ch> uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t;
+ //unrolling loop types declaration
+ typedef uroll_zeroze_cx_comp<ch> uroll_zeroze_cx_comp_t;
+ typedef uroll_comp_rshftasgn_constval<ch> uroll_comp_rshftasgn_constval_t;
+ typedef uroll_comp_asgn_cx_rshft_cval_all_mul_val<ch> uroll_comp_asgn_cx_rshft_cval_all_mul_val_t;
+ typedef uroll_comp_plusasgn_cx_rshft_cval_all_mul_val<ch> uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t;
+ typedef uroll_inp_plusasgn_pix_mul_val<ch> uroll_inp_plusasgn_pix_mul_val_t;
+ typedef uroll_inp_asgn_pix_mul_val<ch> uroll_inp_asgn_pix_mul_val_t;
+ typedef uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r<ch> uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r_t;
+ typedef uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r<ch> uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t;
+ typedef uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r<ch> uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t;
+ typedef uroll_uref_dptr_inc_asgn_comp_and_ff<ch> uroll_uref_dptr_inc_asgn_comp_and_ff_t;
+ typedef uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff<ch> uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff_t;
+ typedef uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff<ch> uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t;
public:
- scale_info(const U8 *src, U32 srcW, U32 srcH, U32 dstW, U32 dstH, U32 srcStride)
- : xup_yup((dstW >= srcW) + ((dstH >= srcH) << 1))
- {
- calc_x_points(srcW, dstW);
- calc_y_strides(src, srcStride, srcH, dstH);
- calc_aa_points(srcW, dstW, xup_yup&1, xapoints);
- calc_aa_points(srcH, dstH, xup_yup&2, yapoints);
- }
+ scale_info(const U8 *src, U32 srcW, U32 srcH, U32 dstW, U32 dstH, U32 srcStride)
+ : xup_yup((dstW >= srcW) + ((dstH >= srcH) << 1))
+ {
+ calc_x_points(srcW, dstW);
+ calc_y_strides(src, srcStride, srcH, dstH);
+ calc_aa_points(srcW, dstW, xup_yup&1, xapoints);
+ calc_aa_points(srcH, dstH, xup_yup&2, yapoints);
+ }
private:
- //...........................................................................................
- void calc_x_points(U32 srcW, U32 dstW)
- {
- xpoints.resize(dstW+1);
-
- S32 val = dstW >= srcW ? 0x8000 * srcW / dstW - 0x8000 : 0;
- S32 inc = (srcW << 16) / dstW;
-
- for(U32 i = 0, j = 0; i < dstW; ++i, ++j, val += inc)
- {
- xpoints[j] = llmax(0, val >> 16);
- }
- }
- //...........................................................................................
- void calc_y_strides(const U8 *src, U32 srcStride, U32 srcH, U32 dstH)
- {
- ystrides.resize(dstH+1);
-
- S32 val = dstH >= srcH ? 0x8000 * srcH / dstH - 0x8000 : 0;
- S32 inc = (srcH << 16) / dstH;
-
- for(U32 i = 0, j = 0; i < dstH; ++i, ++j, val += inc)
- {
- ystrides[j] = src + llmax(0, val >> 16) * srcStride;
- }
- }
- //...........................................................................................
- void calc_aa_points(U32 srcSz, U32 dstSz, bool scale_up, std::vector<S32> &vp)
- {
- vp.resize(dstSz);
-
- if(scale_up)
- {
- S32 val = 0x8000 * srcSz / dstSz - 0x8000;
- S32 inc = (srcSz << 16) / dstSz;
- U32 pos;
-
- for(U32 i = 0, j = 0; i < dstSz; ++i, ++j, val += inc)
- {
- pos = val >> 16;
-
- if (pos >= (srcSz - 1))
- vp[j] = 0;
- else
- vp[j] = (val >> 8) - ((val >> 8) & 0xffffff00);
- }
- }
- else
- {
- S32 inc = (srcSz << 16) / dstSz;
- S32 Cp = ((dstSz << 14) / srcSz) + 1;
- S32 ap;
-
- for(U32 i = 0, j = 0, val = 0; i < dstSz; ++i, ++j, val += inc)
- {
- ap = ((0x100 - ((val >> 8) & 0xff)) * Cp) >> 8;
- vp[j] = ap | (Cp << 16);
- }
- }
- }
+ //...........................................................................................
+ void calc_x_points(U32 srcW, U32 dstW)
+ {
+ xpoints.resize(dstW+1);
+
+ S32 val = dstW >= srcW ? 0x8000 * srcW / dstW - 0x8000 : 0;
+ S32 inc = (srcW << 16) / dstW;
+
+ for(U32 i = 0, j = 0; i < dstW; ++i, ++j, val += inc)
+ {
+ xpoints[j] = llmax(0, val >> 16);
+ }
+ }
+ //...........................................................................................
+ void calc_y_strides(const U8 *src, U32 srcStride, U32 srcH, U32 dstH)
+ {
+ ystrides.resize(dstH+1);
+
+ S32 val = dstH >= srcH ? 0x8000 * srcH / dstH - 0x8000 : 0;
+ S32 inc = (srcH << 16) / dstH;
+
+ for(U32 i = 0, j = 0; i < dstH; ++i, ++j, val += inc)
+ {
+ ystrides[j] = src + llmax(0, val >> 16) * srcStride;
+ }
+ }
+ //...........................................................................................
+ void calc_aa_points(U32 srcSz, U32 dstSz, bool scale_up, std::vector<S32> &vp)
+ {
+ vp.resize(dstSz);
+
+ if(scale_up)
+ {
+ S32 val = 0x8000 * srcSz / dstSz - 0x8000;
+ S32 inc = (srcSz << 16) / dstSz;
+ U32 pos;
+
+ for(U32 i = 0, j = 0; i < dstSz; ++i, ++j, val += inc)
+ {
+ pos = val >> 16;
+
+ if (pos >= (srcSz - 1))
+ vp[j] = 0;
+ else
+ vp[j] = (val >> 8) - ((val >> 8) & 0xffffff00);
+ }
+ }
+ else
+ {
+ S32 inc = (srcSz << 16) / dstSz;
+ S32 Cp = ((dstSz << 14) / srcSz) + 1;
+ S32 ap;
+
+ for(U32 i = 0, j = 0, val = 0; i < dstSz; ++i, ++j, val += inc)
+ {
+ ap = ((0x100 - ((val >> 8) & 0xff)) * Cp) >> 8;
+ vp[j] = ap | (Cp << 16);
+ }
+ }
+ }
};
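// scale_info precomputes one source column per destination column in 16.16
// fixed point: val starts half a source texel back (when upscaling) and
// advances by srcW/dstW per output pixel. A standalone sketch of
// calc_x_points' arithmetic with plain standard types (assumed semantics,
// not the viewer's S32/U32/llmax):
#include <algorithm>
#include <cstdint>
#include <vector>

std::vector<int32_t> calc_x_points_sketch(uint32_t srcW, uint32_t dstW)
{
    std::vector<int32_t> xpoints(dstW + 1);
    int32_t val = dstW >= srcW ? int32_t(0x8000 * srcW / dstW) - 0x8000 : 0;
    int32_t inc = int32_t((srcW << 16) / dstW);
    for (uint32_t j = 0; j < dstW; ++j, val += inc)
        xpoints[j] = std::max(0, val >> 16);    // integer source column
    return xpoints;
}
// e.g. calc_x_points_sketch(4, 8) yields source columns {0,0,0,1,1,2,2,3}
// for the eight output columns.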
template<U8 ch>
inline void bilinear_scale(
- const U8 *src, U32 srcW, U32 srcH, U32 srcStride
- , U8 *dst, U32 dstW, U32 dstH, U32 dstStride
- )
-{
- typedef scale_info<ch> scale_info_t;
-
- scale_info_t info(src, srcW, srcH, dstW, dstH, srcStride);
-
- const U8 *sptr;
- U8 *dptr;
- U32 x, y;
- const U8 *pix;
-
- S32 cx[ch], comp[ch];
-
-
- if(3 == info.xup_yup)
- { //scale x/y - up
- for(y = 0; y < dstH; ++y)
- {
- dptr = dst + (y * dstStride);
- sptr = info.ystrides[y];
-
- if(0 < info.yapoints[y])
- {
- for(x = 0; x < dstW; ++x)
- {
- //for(c = 0; c < ch; ++c) cx[c] = comp[c] = 0;
- typename scale_info_t::uroll_zeroze_cx_comp_t()(cx, comp);
-
- if(0 < info.xapoints[x])
- {
- pix = info.ystrides[y] + info.xpoints[x] * ch;
-
- //for(c = 0; c < ch; ++c) comp[c] = pix[c] * (256 - info.xapoints[x]);
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.xapoints[x]);
-
- pix += ch;
-
- //for(c = 0; c < ch; ++c) comp[c] += pix[c] * info.xapoints[x];
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, info.xapoints[x]);
-
- pix += srcStride;
-
- //for(c = 0; c < ch; ++c) cx[c] = pix[c] * info.xapoints[x];
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, info.xapoints[x]);
-
- pix -= ch;
-
- //for(c = 0; c < ch; ++c) {
- // cx[c] += pix[c] * (256 - info.xapoints[x]);
- // comp[c] = ((cx[c] * info.yapoints[y]) + (comp[c] * (256 - info.yapoints[y]))) >> 16;
- // *dptr++ = comp[c]&0xff;
- //}
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, 256 - info.xapoints[x]);
- typename scale_info_t::uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r_t()(comp, cx, info.yapoints[y]);
- typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
- }
- else
- {
- pix = info.ystrides[y] + info.xpoints[x] * ch;
-
- //for(c = 0; c < ch; ++c) comp[c] = pix[c] * (256 - info.yapoints[y]);
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256-info.yapoints[y]);
-
- pix += srcStride;
-
- //for(c = 0; c < ch; ++c) {
- // comp[c] = (comp[c] + pix[c] * info.yapoints[y]) >> 8;
- // *dptr++ = comp[c]&0xff;
- //}
- typename scale_info_t::uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t()(comp, pix, info.yapoints[y]);
- typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
- }
- }
- }
- else
- {
- for(x = 0; x < dstW; ++x)
- {
- if(0 < info.xapoints[x])
- {
- pix = info.ystrides[y] + info.xpoints[x] * ch;
-
- //for(c = 0; c < ch; ++c) {
- // comp[c] = pix[c] * (256 - info.xapoints[x]);
- // comp[c] = (comp[c] + pix[c] * info.xapoints[x]) >> 8;
- // *dptr++ = comp[c]&0xff;
- //}
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.xapoints[x]);
- typename scale_info_t::uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t()(comp, pix, info.xapoints[x]);
- typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
- }
- else
- {
- //for(c = 0; c < ch; ++c) *dptr++ = (sptr[info.xpoints[x]*ch + c])&0xff;
- typename scale_info_t::uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff_t()(dptr, sptr, info.xpoints[x]*ch);
- }
- }
- }
- }
- }
- else if(info.xup_yup == 1)
- { //scaling down vertically
- S32 Cy, j;
- S32 yap;
-
- for(y = 0; y < dstH; y++)
- {
- Cy = info.yapoints[y] >> 16;
- yap = info.yapoints[y] & 0xffff;
-
- dptr = dst + (y * dstStride);
-
- for(x = 0; x < dstW; x++)
- {
- pix = info.ystrides[y] + info.xpoints[x] * ch;
-
- //for(c = 0; c < ch; ++c) comp[c] = pix[c] * yap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, yap);
-
- pix += srcStride;
-
- for(j = (1 << 14) - yap; j > Cy; j -= Cy, pix += srcStride)
- {
- //for(c = 0; c < ch; ++c) comp[c] += pix[c] * Cy;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, Cy);
- }
-
- if(j > 0)
- {
- //for(c = 0; c < ch; ++c) comp[c] += pix[c] * j;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, j);
- }
-
- if(info.xapoints[x] > 0)
- {
- pix = info.ystrides[y] + info.xpoints[x]*ch + ch;
- //for(c = 0; c < ch; ++c) cx[c] = pix[c] * yap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, yap);
-
- pix += srcStride;
- for(j = (1 << 14) - yap; j > Cy; j -= Cy)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cy;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cy);
- pix += srcStride;
- }
-
- if(j > 0)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * j;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, j);
- }
-
- //for(c = 0; c < ch; ++c) comp[c] = ((comp[c]*(256 - info.xapoints[x])) + ((cx[c] * info.xapoints[x]))) >> 12;
- typename scale_info_t::uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t()(comp, info.xapoints[x], cx);
- }
- else
- {
- //for(c = 0; c < ch; ++c) comp[c] >>= 4;
- typename scale_info_t::uroll_comp_rshftasgn_constval_t()(comp, 4);
- }
-
- //for(c = 0; c < ch; ++c) *dptr++ = (comp[c]>>10)&0xff;
- typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 10);
- }
- }
- }
- else if(info.xup_yup == 2)
- { // scaling down horizontally
- S32 Cx, j;
- S32 xap;
-
- for(y = 0; y < dstH; y++)
- {
- dptr = dst + (y * dstStride);
-
- for(x = 0; x < dstW; x++)
- {
- Cx = info.xapoints[x] >> 16;
- xap = info.xapoints[x] & 0xffff;
-
- pix = info.ystrides[y] + info.xpoints[x] * ch;
-
- //for(c = 0; c < ch; ++c) comp[c] = pix[c] * xap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, xap);
-
- pix+=ch;
- for(j = (1 << 14) - xap; j > Cx; j -= Cx)
- {
- //for(c = 0; c < ch; ++c) comp[c] += pix[c] * Cx;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, Cx);
- pix+=ch;
- }
-
- if(j > 0)
- {
- //for(c = 0; c < ch; ++c) comp[c] += pix[c] * j;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, j);
- }
-
- if(info.yapoints[y] > 0)
- {
- pix = info.ystrides[y] + info.xpoints[x]*ch + srcStride;
- //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
-
- pix+=ch;
- for(j = (1 << 14) - xap; j > Cx; j -= Cx)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
- pix+=ch;
- }
-
- if(j > 0)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * j;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, j);
- }
-
- //for(c = 0; c < ch; ++c) comp[c] = ((comp[c] * (256 - info.yapoints[y])) + ((cx[c] * info.yapoints[y]))) >> 12;
- typename scale_info_t::uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t()(comp, info.yapoints[y], cx);
- }
- else
- {
- //for(c = 0; c < ch; ++c) comp[c] >>= 4;
- typename scale_info_t::uroll_comp_rshftasgn_constval_t()(comp, 4);
- }
-
- //for(c = 0; c < ch; ++c) *dptr++ = (comp[c]>>10)&0xff;
- typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 10);
- }
- }
- }
- else
- { //scale x/y - down
- S32 Cx, Cy, i, j;
- S32 xap, yap;
-
- for(y = 0; y < dstH; y++)
- {
- Cy = info.yapoints[y] >> 16;
- yap = info.yapoints[y] & 0xffff;
-
- dptr = dst + (y * dstStride);
- for(x = 0; x < dstW; x++)
- {
- Cx = info.xapoints[x] >> 16;
- xap = info.xapoints[x] & 0xffff;
-
- sptr = info.ystrides[y] + info.xpoints[x] * ch;
- pix = sptr;
- sptr += srcStride;
-
- //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
-
- pix+=ch;
- for(i = (1 << 14) - xap; i > Cx; i -= Cx)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
- pix+=ch;
- }
-
- if(i > 0)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * i;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
- }
-
- //for(c = 0; c < ch; ++c) comp[c] = (cx[c] >> 5) * yap;
- typename scale_info_t::uroll_comp_asgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, yap);
-
- for(j = (1 << 14) - yap; j > Cy; j -= Cy)
- {
- pix = sptr;
- sptr += srcStride;
-
- //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
-
- pix+=ch;
- for(i = (1 << 14) - xap; i > Cx; i -= Cx)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
- pix+=ch;
- }
-
- if(i > 0)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * i;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
- }
-
- //for(c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * Cy;
- typename scale_info_t::uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, Cy);
- }
-
- if(j > 0)
- {
- pix = sptr;
- sptr += srcStride;
-
- //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
- typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
-
- pix+=ch;
- for(i = (1 << 14) - xap; i > Cx; i -= Cx)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
- pix+=ch;
- }
-
- if(i > 0)
- {
- //for(c = 0; c < ch; ++c) cx[c] += pix[c] * i;
- typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
- }
-
- //for(c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * j;
- typename scale_info_t::uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, j);
- }
-
- //for(c = 0; c < ch; ++c) *dptr++ = (comp[c]>>23)&0xff;
- typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 23);
- }
- }
- } //else
+ const U8 *src, U32 srcW, U32 srcH, U32 srcStride
+ , U8 *dst, U32 dstW, U32 dstH, U32 dstStride
+ )
+{
+ typedef scale_info<ch> scale_info_t;
+
+ scale_info_t info(src, srcW, srcH, dstW, dstH, srcStride);
+
+ const U8 *sptr;
+ U8 *dptr;
+ U32 x, y;
+ const U8 *pix;
+
+ S32 cx[ch], comp[ch];
+
+
+ if(3 == info.xup_yup)
+ { //scale x/y - up
+ for(y = 0; y < dstH; ++y)
+ {
+ dptr = dst + (y * dstStride);
+ sptr = info.ystrides[y];
+
+ if(0 < info.yapoints[y])
+ {
+ for(x = 0; x < dstW; ++x)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] = comp[c] = 0;
+ typename scale_info_t::uroll_zeroze_cx_comp_t()(cx, comp);
+
+ if(0 < info.xapoints[x])
+ {
+ pix = info.ystrides[y] + info.xpoints[x] * ch;
+
+ //for(c = 0; c < ch; ++c) comp[c] = pix[c] * (256 - info.xapoints[x]);
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.xapoints[x]);
+
+ pix += ch;
+
+ //for(c = 0; c < ch; ++c) comp[c] += pix[c] * info.xapoints[x];
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, info.xapoints[x]);
+
+ pix += srcStride;
+
+ //for(c = 0; c < ch; ++c) cx[c] = pix[c] * info.xapoints[x];
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, info.xapoints[x]);
+
+ pix -= ch;
+
+ //for(c = 0; c < ch; ++c) {
+ // cx[c] += pix[c] * (256 - info.xapoints[x]);
+ // comp[c] = ((cx[c] * info.yapoints[y]) + (comp[c] * (256 - info.yapoints[y]))) >> 16;
+ // *dptr++ = comp[c]&0xff;
+ //}
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, 256 - info.xapoints[x]);
+ typename scale_info_t::uroll_comp_asgn_cx_mul_apoint_plus_comp_mul_inv_apoint_allshifted_16_r_t()(comp, cx, info.yapoints[y]);
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
+ }
+ else
+ {
+ pix = info.ystrides[y] + info.xpoints[x] * ch;
+
+ //for(c = 0; c < ch; ++c) comp[c] = pix[c] * (256 - info.yapoints[y]);
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256-info.yapoints[y]);
+
+ pix += srcStride;
+
+ //for(c = 0; c < ch; ++c) {
+ // comp[c] = (comp[c] + pix[c] * info.yapoints[y]) >> 8;
+ // *dptr++ = comp[c]&0xff;
+ //}
+ typename scale_info_t::uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t()(comp, pix, info.yapoints[y]);
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
+ }
+ }
+ }
+ else
+ {
+ for(x = 0; x < dstW; ++x)
+ {
+ if(0 < info.xapoints[x])
+ {
+ pix = info.ystrides[y] + info.xpoints[x] * ch;
+
+ //for(c = 0; c < ch; ++c) {
+ // comp[c] = pix[c] * (256 - info.xapoints[x]);
+ // comp[c] = (comp[c] + pix[c] * info.xapoints[x]) >> 8;
+ // *dptr++ = comp[c]&0xff;
+ //}
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, 256 - info.xapoints[x]);
+ typename scale_info_t::uroll_comp_asgn_comp_plus_pix_mul_apoint_allshifted_8_r_t()(comp, pix, info.xapoints[x]);
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_and_ff_t()(dptr, comp);
+ }
+ else
+ {
+ //for(c = 0; c < ch; ++c) *dptr++ = (sptr[info.xpoints[x]*ch + c])&0xff;
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_sptr_apoint_plus_idx_alland_ff_t()(dptr, sptr, info.xpoints[x]*ch);
+ }
+ }
+ }
+ }
+ }
+ else if(info.xup_yup == 1)
+ { //scaling down vertically
+ S32 Cy, j;
+ S32 yap;
+
+ for(y = 0; y < dstH; y++)
+ {
+ Cy = info.yapoints[y] >> 16;
+ yap = info.yapoints[y] & 0xffff;
+
+ dptr = dst + (y * dstStride);
+
+ for(x = 0; x < dstW; x++)
+ {
+ pix = info.ystrides[y] + info.xpoints[x] * ch;
+
+ //for(c = 0; c < ch; ++c) comp[c] = pix[c] * yap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, yap);
+
+ pix += srcStride;
+
+ for(j = (1 << 14) - yap; j > Cy; j -= Cy, pix += srcStride)
+ {
+ //for(c = 0; c < ch; ++c) comp[c] += pix[c] * Cy;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, Cy);
+ }
+
+ if(j > 0)
+ {
+ //for(c = 0; c < ch; ++c) comp[c] += pix[c] * j;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, j);
+ }
+
+ if(info.xapoints[x] > 0)
+ {
+ pix = info.ystrides[y] + info.xpoints[x]*ch + ch;
+ //for(c = 0; c < ch; ++c) cx[c] = pix[c] * yap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, yap);
+
+ pix += srcStride;
+ for(j = (1 << 14) - yap; j > Cy; j -= Cy)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cy;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cy);
+ pix += srcStride;
+ }
+
+ if(j > 0)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * j;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, j);
+ }
+
+ //for(c = 0; c < ch; ++c) comp[c] = ((comp[c]*(256 - info.xapoints[x])) + ((cx[c] * info.xapoints[x]))) >> 12;
+ typename scale_info_t::uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t()(comp, info.xapoints[x], cx);
+ }
+ else
+ {
+ //for(c = 0; c < ch; ++c) comp[c] >>= 4;
+ typename scale_info_t::uroll_comp_rshftasgn_constval_t()(comp, 4);
+ }
+
+ //for(c = 0; c < ch; ++c) *dptr++ = (comp[c]>>10)&0xff;
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 10);
+ }
+ }
+ }
+ else if(info.xup_yup == 2)
+ { // scaling down horizontally
+ S32 Cx, j;
+ S32 xap;
+
+ for(y = 0; y < dstH; y++)
+ {
+ dptr = dst + (y * dstStride);
+
+ for(x = 0; x < dstW; x++)
+ {
+ Cx = info.xapoints[x] >> 16;
+ xap = info.xapoints[x] & 0xffff;
+
+ pix = info.ystrides[y] + info.xpoints[x] * ch;
+
+ //for(c = 0; c < ch; ++c) comp[c] = pix[c] * xap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(comp, pix, xap);
+
+ pix+=ch;
+ for(j = (1 << 14) - xap; j > Cx; j -= Cx)
+ {
+ //for(c = 0; c < ch; ++c) comp[c] += pix[c] * Cx;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, Cx);
+ pix+=ch;
+ }
+
+ if(j > 0)
+ {
+ //for(c = 0; c < ch; ++c) comp[c] += pix[c] * j;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(comp, pix, j);
+ }
+
+ if(info.yapoints[y] > 0)
+ {
+ pix = info.ystrides[y] + info.xpoints[x]*ch + srcStride;
+ //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
+
+ pix+=ch;
+ for(j = (1 << 14) - xap; j > Cx; j -= Cx)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
+ pix+=ch;
+ }
+
+ if(j > 0)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * j;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, j);
+ }
+
+ //for(c = 0; c < ch; ++c) comp[c] = ((comp[c] * (256 - info.yapoints[y])) + ((cx[c] * info.yapoints[y]))) >> 12;
+ typename scale_info_t::uroll_comp_asgn_comp_mul_inv_apoint_plus_cx_mul_apoint_allshifted_12_r_t()(comp, info.yapoints[y], cx);
+ }
+ else
+ {
+ //for(c = 0; c < ch; ++c) comp[c] >>= 4;
+ typename scale_info_t::uroll_comp_rshftasgn_constval_t()(comp, 4);
+ }
+
+ //for(c = 0; c < ch; ++c) *dptr++ = (comp[c]>>10)&0xff;
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 10);
+ }
+ }
+ }
+ else
+ { //scale x/y - down
+ S32 Cx, Cy, i, j;
+ S32 xap, yap;
+
+ for(y = 0; y < dstH; y++)
+ {
+ Cy = info.yapoints[y] >> 16;
+ yap = info.yapoints[y] & 0xffff;
+
+ dptr = dst + (y * dstStride);
+ for(x = 0; x < dstW; x++)
+ {
+ Cx = info.xapoints[x] >> 16;
+ xap = info.xapoints[x] & 0xffff;
+
+ sptr = info.ystrides[y] + info.xpoints[x] * ch;
+ pix = sptr;
+ sptr += srcStride;
+
+ //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
+
+ pix+=ch;
+ for(i = (1 << 14) - xap; i > Cx; i -= Cx)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
+ pix+=ch;
+ }
+
+ if(i > 0)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * i;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
+ }
+
+ //for(c = 0; c < ch; ++c) comp[c] = (cx[c] >> 5) * yap;
+ typename scale_info_t::uroll_comp_asgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, yap);
+
+ for(j = (1 << 14) - yap; j > Cy; j -= Cy)
+ {
+ pix = sptr;
+ sptr += srcStride;
+
+ //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
+
+ pix+=ch;
+ for(i = (1 << 14) - xap; i > Cx; i -= Cx)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
+ pix+=ch;
+ }
+
+ if(i > 0)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * i;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
+ }
+
+ //for(c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * Cy;
+ typename scale_info_t::uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, Cy);
+ }
+
+ if(j > 0)
+ {
+ pix = sptr;
+ sptr += srcStride;
+
+ //for(c = 0; c < ch; ++c) cx[c] = pix[c] * xap;
+ typename scale_info_t::uroll_inp_asgn_pix_mul_val_t()(cx, pix, xap);
+
+ pix+=ch;
+ for(i = (1 << 14) - xap; i > Cx; i -= Cx)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * Cx;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, Cx);
+ pix+=ch;
+ }
+
+ if(i > 0)
+ {
+ //for(c = 0; c < ch; ++c) cx[c] += pix[c] * i;
+ typename scale_info_t::uroll_inp_plusasgn_pix_mul_val_t()(cx, pix, i);
+ }
+
+ //for(c = 0; c < ch; ++c) comp[c] += (cx[c] >> 5) * j;
+ typename scale_info_t::uroll_comp_plusasgn_cx_rshft_cval_all_mul_val_t()(comp, cx, 5, j);
+ }
+
+ //for(c = 0; c < ch; ++c) *dptr++ = (comp[c]>>23)&0xff;
+ typename scale_info_t::uroll_uref_dptr_inc_asgn_comp_rshft_cval_and_ff_t()(dptr, comp, 23);
+ }
+ }
+ } //else
}
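// In the x/y-up branch every output channel is a bilinear blend carried out
// entirely in integers: the horizontal pass weights two texels by xap and
// 256 - xap (8 fractional bits), the vertical pass blends those two sums by
// yap and 256 - yap, and the final >> 16 drops both sets of fractional bits
// at once. A worked standalone check of that arithmetic (the weights are
// made-up examples):
int bilerp_fixed(int p00, int p10, int p01, int p11, int xap, int yap)
{
    int top = p00 * (256 - xap) + p10 * xap;        // 8 fractional bits
    int bot = p01 * (256 - xap) + p11 * xap;        // 8 fractional bits
    return (bot * yap + top * (256 - yap)) >> 16;   // drop 16 fractional bits
}
// At the exact midpoint (xap = yap = 128),
// bilerp_fixed(0, 255, 255, 0, 128, 128) == 127, vs. 127.5 exact.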
//wrapper
static void bilinear_scale(const U8 *src, U32 srcW, U32 srcH, U32 srcCh, U32 srcStride, U8 *dst, U32 dstW, U32 dstH, U32 dstCh, U32 dstStride)
{
- llassert(srcCh == dstCh);
+ llassert(srcCh == dstCh);
- switch(srcCh)
- {
- case 1:
- bilinear_scale<1>(src, srcW, srcH, srcStride, dst, dstW, dstH, dstStride);
- break;
- case 3:
- bilinear_scale<3>(src, srcW, srcH, srcStride, dst, dstW, dstH, dstStride);
- break;
- case 4:
- bilinear_scale<4>(src, srcW, srcH, srcStride, dst, dstW, dstH, dstStride);
- break;
- default:
- llassert(!"Implement if need");
- break;
- }
+ switch(srcCh)
+ {
+ case 1:
+ bilinear_scale<1>(src, srcW, srcH, srcStride, dst, dstW, dstH, dstStride);
+ break;
+ case 3:
+ bilinear_scale<3>(src, srcW, srcH, srcStride, dst, dstW, dstH, dstStride);
+ break;
+ case 4:
+ bilinear_scale<4>(src, srcW, srcH, srcStride, dst, dstW, dstH, dstStride);
+ break;
+ default:
+ llassert(!"Implement if need");
+ break;
+ }
}
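// Hypothetical call site for the wrapper above (buffer names and sizes are
// invented for illustration): scaling a tightly packed 64x64 RGBA image down
// to 32x32, with stride = width * channels on both sides.
//
//     std::vector<U8> src(64 * 64 * 4), dst(32 * 32 * 4);
//     bilinear_scale(src.data(), 64, 64, 4, 64 * 4,
//                    dst.data(), 32, 32, 4, 32 * 4);
//
// Note the llassert(srcCh == dstCh): the wrapper never converts between
// channel counts, so callers convert components first and scale second.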
@@ -593,30 +593,30 @@ S32 LLImage::sMinimalReverseByteRangePercent = 75;
//static
void LLImage::initClass(bool use_new_byte_range, S32 minimal_reverse_byte_range_percent)
{
- sUseNewByteRange = use_new_byte_range;
+ sUseNewByteRange = use_new_byte_range;
sMinimalReverseByteRangePercent = minimal_reverse_byte_range_percent;
- sMutex = new LLMutex();
+ sMutex = new LLMutex();
}
//static
void LLImage::cleanupClass()
{
- delete sMutex;
- sMutex = NULL;
+ delete sMutex;
+ sMutex = NULL;
}
//static
const std::string& LLImage::getLastError()
{
- static const std::string noerr("No Error");
- return sLastErrorMessage.empty() ? noerr : sLastErrorMessage;
+ static const std::string noerr("No Error");
+ return sLastErrorMessage.empty() ? noerr : sLastErrorMessage;
}
//static
void LLImage::setLastError(const std::string& message)
{
- LLMutexLock m(sMutex);
- sLastErrorMessage = message;
+ LLMutexLock m(sMutex);
+ sLastErrorMessage = message;
}
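// setLastError() is a lock-on-write accessor around a static string. A
// self-contained equivalent with standard types (a sketch; the viewer uses
// its own LLMutex/LLMutexLock wrappers):
#include <mutex>
#include <string>

class LastError
{
    std::mutex mMutex;
    std::string mMessage;
public:
    void set(const std::string& message)
    {
        std::lock_guard<std::mutex> lock(mMutex);  // like LLMutexLock m(sMutex)
        mMessage = message;
    }
    std::string get()
    {
        std::lock_guard<std::mutex> lock(mMutex);
        return mMessage.empty() ? std::string("No Error") : mMessage;
    }
};
// Unlike this sketch, LLImage::getLastError() above reads the string without
// taking the lock, so a concurrent setLastError() can race with a reader.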
//---------------------------------------------------------------------------
@@ -624,175 +624,175 @@ void LLImage::setLastError(const std::string& message)
//---------------------------------------------------------------------------
LLImageBase::LLImageBase()
-: mData(NULL),
- mDataSize(0),
- mWidth(0),
- mHeight(0),
- mComponents(0),
- mBadBufferAllocation(false),
- mAllowOverSize(false)
+: mData(NULL),
+ mDataSize(0),
+ mWidth(0),
+ mHeight(0),
+ mComponents(0),
+ mBadBufferAllocation(false),
+ mAllowOverSize(false)
{}
// virtual
LLImageBase::~LLImageBase()
{
- deleteData(); // virtual
+ deleteData(); // virtual
}
// virtual
void LLImageBase::dump()
{
- LL_INFOS() << "LLImageBase mComponents " << mComponents
- << " mData " << mData
- << " mDataSize " << mDataSize
- << " mWidth " << mWidth
- << " mHeight " << mHeight
- << LL_ENDL;
+ LL_INFOS() << "LLImageBase mComponents " << mComponents
+ << " mData " << mData
+ << " mDataSize " << mDataSize
+ << " mWidth " << mWidth
+ << " mHeight " << mHeight
+ << LL_ENDL;
}
// virtual
void LLImageBase::sanityCheck()
{
- if (mWidth > MAX_IMAGE_SIZE
- || mHeight > MAX_IMAGE_SIZE
- || mDataSize > (S32)MAX_IMAGE_DATA_SIZE
- || mComponents > (S8)MAX_IMAGE_COMPONENTS
- )
- {
- LL_ERRS() << "Failed LLImageBase::sanityCheck "
- << "width " << mWidth
- << "height " << mHeight
- << "datasize " << mDataSize
- << "components " << mComponents
- << "data " << mData
- << LL_ENDL;
- }
+ if (mWidth > MAX_IMAGE_SIZE
+ || mHeight > MAX_IMAGE_SIZE
+ || mDataSize > (S32)MAX_IMAGE_DATA_SIZE
+ || mComponents > (S8)MAX_IMAGE_COMPONENTS
+ )
+ {
+ LL_ERRS() << "Failed LLImageBase::sanityCheck "
+ << "width " << mWidth
+ << "height " << mHeight
+ << "datasize " << mDataSize
+ << "components " << mComponents
+ << "data " << mData
+ << LL_ENDL;
+ }
}
// virtual
void LLImageBase::deleteData()
{
- ll_aligned_free_16(mData);
- mDataSize = 0;
- mData = NULL;
+ ll_aligned_free_16(mData);
+ mDataSize = 0;
+ mData = NULL;
}
// virtual
U8* LLImageBase::allocateData(S32 size)
{
- //make this function thread-safe.
- static const U32 MAX_BUFFER_SIZE = 4096 * 4096 * 16; //256 MB
- mBadBufferAllocation = false;
-
- if (size < 0)
- {
- size = mWidth * mHeight * mComponents;
- if (size <= 0)
- {
- LL_WARNS() << llformat("LLImageBase::allocateData called with bad dimensions: %dx%dx%d",mWidth,mHeight,(S32)mComponents) << LL_ENDL;
- mBadBufferAllocation = true;
- }
- }
-
- if (!mBadBufferAllocation && (size < 1 || size > MAX_BUFFER_SIZE))
- {
- LL_INFOS() << "width: " << mWidth << " height: " << mHeight << " components: " << mComponents << LL_ENDL ;
- if(mAllowOverSize)
- {
- LL_INFOS() << "Oversize: " << size << LL_ENDL ;
- }
- else
- {
- LL_WARNS() << "LLImageBase::allocateData: bad size: " << size << LL_ENDL;
- mBadBufferAllocation = true;
- }
- }
-
- if (!mBadBufferAllocation && (!mData || size != mDataSize))
- {
- deleteData(); // virtual
- mData = (U8*)ll_aligned_malloc_16(size);
- if (!mData)
- {
- LL_WARNS() << "Failed to allocate image data size [" << size << "]" << LL_ENDL;
- mBadBufferAllocation = true;
- }
- }
-
- if (mBadBufferAllocation)
- {
- size = 0;
- mWidth = mHeight = 0;
- if (mData)
- {
- deleteData(); // virtual
- mData = NULL;
- }
- }
- mDataSize = size;
-
- return mData;
+ //make this function thread-safe.
+ static const U32 MAX_BUFFER_SIZE = 4096 * 4096 * 16; //256 MB
+ mBadBufferAllocation = false;
+
+ if (size < 0)
+ {
+ size = mWidth * mHeight * mComponents;
+ if (size <= 0)
+ {
+ LL_WARNS() << llformat("LLImageBase::allocateData called with bad dimensions: %dx%dx%d",mWidth,mHeight,(S32)mComponents) << LL_ENDL;
+ mBadBufferAllocation = true;
+ }
+ }
+
+ if (!mBadBufferAllocation && (size < 1 || size > MAX_BUFFER_SIZE))
+ {
+ LL_INFOS() << "width: " << mWidth << " height: " << mHeight << " components: " << mComponents << LL_ENDL ;
+ if(mAllowOverSize)
+ {
+ LL_INFOS() << "Oversize: " << size << LL_ENDL ;
+ }
+ else
+ {
+ LL_WARNS() << "LLImageBase::allocateData: bad size: " << size << LL_ENDL;
+ mBadBufferAllocation = true;
+ }
+ }
+
+ if (!mBadBufferAllocation && (!mData || size != mDataSize))
+ {
+ deleteData(); // virtual
+ mData = (U8*)ll_aligned_malloc_16(size);
+ if (!mData)
+ {
+ LL_WARNS() << "Failed to allocate image data size [" << size << "]" << LL_ENDL;
+ mBadBufferAllocation = true;
+ }
+ }
+
+ if (mBadBufferAllocation)
+ {
+ size = 0;
+ mWidth = mHeight = 0;
+ if (mData)
+ {
+ deleteData(); // virtual
+ mData = NULL;
+ }
+ }
+ mDataSize = size;
+
+ return mData;
}
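// allocateData() enforces three invariants before returning a buffer: a size
// derived from the dimensions must be positive, the total must stay at or
// under MAX_BUFFER_SIZE (4096 * 4096 * 16 bytes = 256 MB) unless
// mAllowOverSize, and the 16-byte-aligned allocation must succeed; on any
// failure the image is zeroed out rather than left half-valid. A condensed
// sketch of that control flow, with standard C++17 aligned new standing in
// for ll_aligned_malloc_16:
#include <cstdint>
#include <new>

uint8_t* allocate_checked(int64_t size, int width, int height, int comps)
{
    const int64_t kMaxBuffer = 4096LL * 4096LL * 16LL;   // 256 MB
    if (size < 0)
        size = int64_t(width) * height * comps;          // derive from dims
    if (size < 1 || size > kMaxBuffer)
        return nullptr;                                  // reject bad sizes
    return static_cast<uint8_t*>(
        ::operator new(size_t(size), std::align_val_t(16), std::nothrow));
}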
// virtual
U8* LLImageBase::reallocateData(S32 size)
{
- U8 *new_datap = (U8*)ll_aligned_malloc_16(size);
- if (!new_datap)
- {
- LL_WARNS() << "Out of memory in LLImageBase::reallocateData" << LL_ENDL;
- return 0;
- }
- if (mData)
- {
- S32 bytes = llmin(mDataSize, size);
- memcpy(new_datap, mData, bytes); /* Flawfinder: ignore */
- ll_aligned_free_16(mData) ;
- }
- mData = new_datap;
- mDataSize = size;
- mBadBufferAllocation = false;
- return mData;
-}
-
-const U8* LLImageBase::getData() const
-{
- if(mBadBufferAllocation)
- {
- LL_WARNS() << "Bad memory allocation for the image buffer!" << LL_ENDL ;
- return NULL;
- }
-
- return mData;
+ U8 *new_datap = (U8*)ll_aligned_malloc_16(size);
+ if (!new_datap)
+ {
+ LL_WARNS() << "Out of memory in LLImageBase::reallocateData" << LL_ENDL;
+ return 0;
+ }
+ if (mData)
+ {
+ S32 bytes = llmin(mDataSize, size);
+ memcpy(new_datap, mData, bytes); /* Flawfinder: ignore */
+ ll_aligned_free_16(mData) ;
+ }
+ mData = new_datap;
+ mDataSize = size;
+ mBadBufferAllocation = false;
+ return mData;
+}
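// Aligned buffers cannot be grown with realloc(), so reallocateData() does
// the allocate-copy-free dance, preserving min(old, new) bytes and leaving
// the old buffer untouched on failure. Standalone sketch (standard C++17
// aligned new/delete in place of ll_aligned_malloc_16/ll_aligned_free_16):
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <new>

uint8_t* realloc_aligned16(uint8_t* old_data, int32_t old_size, int32_t new_size)
{
    uint8_t* fresh = static_cast<uint8_t*>(
        ::operator new(size_t(new_size), std::align_val_t(16), std::nothrow));
    if (!fresh)
        return nullptr;                 // old buffer stays intact
    if (old_data)
    {
        std::memcpy(fresh, old_data, size_t(std::min(old_size, new_size)));
        ::operator delete(old_data, std::align_val_t(16));
    }
    return fresh;
}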
+
+const U8* LLImageBase::getData() const
+{
+ if(mBadBufferAllocation)
+ {
+ LL_WARNS() << "Bad memory allocation for the image buffer!" << LL_ENDL ;
+ return NULL;
+ }
+
+ return mData;
} // read only
-U8* LLImageBase::getData()
-{
- if(mBadBufferAllocation)
- {
- LL_WARNS() << "Bad memory allocation for the image buffer!" << LL_ENDL;
- return NULL;
- }
+U8* LLImageBase::getData()
+{
+ if(mBadBufferAllocation)
+ {
+ LL_WARNS() << "Bad memory allocation for the image buffer!" << LL_ENDL;
+ return NULL;
+ }
- return mData;
+ return mData;
}
bool LLImageBase::isBufferInvalid() const
{
- return mBadBufferAllocation || mData == NULL ;
+ return mBadBufferAllocation || mData == NULL ;
}
void LLImageBase::setSize(S32 width, S32 height, S32 ncomponents)
{
- mWidth = width;
- mHeight = height;
- mComponents = ncomponents;
+ mWidth = width;
+ mHeight = height;
+ mComponents = ncomponents;
}
U8* LLImageBase::allocateDataSize(S32 width, S32 height, S32 ncomponents, S32 size)
{
- setSize(width, height, ncomponents);
- return allocateData(size); // virtual
+ setSize(width, height, ncomponents);
+ return allocateData(size); // virtual
}
//---------------------------------------------------------------------------
@@ -802,17 +802,17 @@ U8* LLImageBase::allocateDataSize(S32 width, S32 height, S32 ncomponents, S32 si
S32 LLImageRaw::sRawImageCount = 0;
LLImageRaw::LLImageRaw()
- : LLImageBase()
+ : LLImageBase()
{
- ++sRawImageCount;
+ ++sRawImageCount;
}
LLImageRaw::LLImageRaw(U16 width, U16 height, S8 components)
- : LLImageBase()
+ : LLImageBase()
{
- //llassert( S32(width) * S32(height) * S32(components) <= MAX_IMAGE_DATA_SIZE );
- allocateDataSize(width, height, components);
- ++sRawImageCount;
+ //llassert( S32(width) * S32(height) * S32(components) <= MAX_IMAGE_DATA_SIZE );
+ allocateDataSize(width, height, components);
+ ++sRawImageCount;
}
LLImageRaw::LLImageRaw(const U8* data, U16 width, U16 height, S8 components)
@@ -825,45 +825,45 @@ LLImageRaw::LLImageRaw(const U8* data, U16 width, U16 height, S8 components)
}
LLImageRaw::LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy)
- : LLImageBase()
+ : LLImageBase()
{
- if(no_copy)
- {
- setDataAndSize(data, width, height, components);
- }
- else if(allocateDataSize(width, height, components))
- {
- memcpy(getData(), data, width*height*components);
- }
- ++sRawImageCount;
+ if(no_copy)
+ {
+ setDataAndSize(data, width, height, components);
+ }
+ else if(allocateDataSize(width, height, components))
+ {
+ memcpy(getData(), data, width*height*components);
+ }
+ ++sRawImageCount;
}
//LLImageRaw::LLImageRaw(const std::string& filename, bool j2c_lowest_mip_only)
-// : LLImageBase()
+// : LLImageBase()
//{
-// createFromFile(filename, j2c_lowest_mip_only);
+// createFromFile(filename, j2c_lowest_mip_only);
//}
LLImageRaw::~LLImageRaw()
{
- // NOTE: ~LLImageBase()'s call to deleteData() invokes LLImageBase::deleteData(),
- // NOT LLImageRaw::deleteData()
- deleteData();
- --sRawImageCount;
+ // NOTE: ~LLImageBase()'s call to deleteData() invokes LLImageBase::deleteData(),
+ // NOT LLImageRaw::deleteData()
+ deleteData();
+ --sRawImageCount;
}
// virtual
U8* LLImageRaw::allocateData(S32 size)
{
- U8* res = LLImageBase::allocateData(size);
- return res;
+ U8* res = LLImageBase::allocateData(size);
+ return res;
}
// virtual
U8* LLImageRaw::reallocateData(S32 size)
{
- U8* res = LLImageBase::reallocateData(size);
- return res;
+ U8* res = LLImageBase::reallocateData(size);
+ return res;
}
void LLImageRaw::releaseData()
@@ -875,118 +875,118 @@ void LLImageRaw::releaseData()
// virtual
void LLImageRaw::deleteData()
{
- LLImageBase::deleteData();
+ LLImageBase::deleteData();
}
-void LLImageRaw::setDataAndSize(U8 *data, S32 width, S32 height, S8 components)
-{
- if(data == getData())
- {
- return ;
- }
+void LLImageRaw::setDataAndSize(U8 *data, S32 width, S32 height, S8 components)
+{
+ if(data == getData())
+ {
+ return ;
+ }
- deleteData();
+ deleteData();
- LLImageBase::setSize(width, height, components) ;
- LLImageBase::setDataAndSize(data, width * height * components) ;
+ LLImageBase::setSize(width, height, components) ;
+ LLImageBase::setDataAndSize(data, width * height * components) ;
}
bool LLImageRaw::resize(U16 width, U16 height, S8 components)
{
- if ((getWidth() == width) && (getHeight() == height) && (getComponents() == components) && !isBufferInvalid())
- {
- return true;
- }
- // Reallocate the data buffer.
- deleteData();
+ if ((getWidth() == width) && (getHeight() == height) && (getComponents() == components) && !isBufferInvalid())
+ {
+ return true;
+ }
+ // Reallocate the data buffer.
+ deleteData();
- allocateDataSize(width,height,components);
+ allocateDataSize(width,height,components);
- return !isBufferInvalid();
+ return !isBufferInvalid();
}
bool LLImageRaw::setSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height,
- const U8 *data, U32 stride, bool reverse_y)
+ const U8 *data, U32 stride, bool reverse_y)
{
- if (!getData())
- {
- return false;
- }
- if (!data)
- {
- return false;
- }
+ if (!getData())
+ {
+ return false;
+ }
+ if (!data)
+ {
+ return false;
+ }
- // Should do some simple bounds checking
+ // Should do some simple bounds checking
- U32 i;
- for (i = 0; i < height; i++)
- {
- const U32 row = reverse_y ? height - 1 - i : i;
- const U32 from_offset = row * ((stride == 0) ? width*getComponents() : stride);
- const U32 to_offset = (y_pos + i)*getWidth() + x_pos;
- memcpy(getData() + to_offset*getComponents(), /* Flawfinder: ignore */
- data + from_offset, getComponents()*width);
- }
+ U32 i;
+ for (i = 0; i < height; i++)
+ {
+ const U32 row = reverse_y ? height - 1 - i : i;
+ const U32 from_offset = row * ((stride == 0) ? width*getComponents() : stride);
+ const U32 to_offset = (y_pos + i)*getWidth() + x_pos;
+ memcpy(getData() + to_offset*getComponents(), /* Flawfinder: ignore */
+ data + from_offset, getComponents()*width);
+ }
- return true;
+ return true;
}
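// setSubImage() copies one row per iteration so it can honour an arbitrary
// source stride and an optional bottom-up source (reverse_y). A minimal
// standalone sketch of the row addressing above (plain types, no bounds
// checks):
#include <cstdint>
#include <cstring>

void blit_rows(uint8_t* dst, uint32_t dst_w, uint32_t comps,
               uint32_t x_pos, uint32_t y_pos,
               const uint8_t* src, uint32_t w, uint32_t h,
               uint32_t stride, bool reverse_y)
{
    if (stride == 0) stride = w * comps;            // tightly packed default
    for (uint32_t i = 0; i < h; ++i)
    {
        const uint32_t row  = reverse_y ? h - 1 - i : i;
        const uint8_t* from = src + row * stride;
        uint8_t* to = dst + ((y_pos + i) * dst_w + x_pos) * comps;
        std::memcpy(to, from, w * comps);
    }
}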
void LLImageRaw::clear(U8 r, U8 g, U8 b, U8 a)
{
- llassert( getComponents() <= 4 );
- // This is fairly bogus, but it'll do for now.
- if (isBufferInvalid())
- {
- LL_WARNS() << "Invalid image buffer" << LL_ENDL;
- return;
- }
-
- U8 *pos = getData();
- U32 x, y;
- for (x = 0; x < getWidth(); x++)
- {
- for (y = 0; y < getHeight(); y++)
- {
- *pos = r;
- pos++;
- if (getComponents() == 1)
- {
- continue;
- }
- *pos = g;
- pos++;
- if (getComponents() == 2)
- {
- continue;
- }
- *pos = b;
- pos++;
- if (getComponents() == 3)
- {
- continue;
- }
- *pos = a;
- pos++;
- }
- }
+ llassert( getComponents() <= 4 );
+ // This is fairly bogus, but it'll do for now.
+ if (isBufferInvalid())
+ {
+ LL_WARNS() << "Invalid image buffer" << LL_ENDL;
+ return;
+ }
+
+ U8 *pos = getData();
+ U32 x, y;
+ for (x = 0; x < getWidth(); x++)
+ {
+ for (y = 0; y < getHeight(); y++)
+ {
+ *pos = r;
+ pos++;
+ if (getComponents() == 1)
+ {
+ continue;
+ }
+ *pos = g;
+ pos++;
+ if (getComponents() == 2)
+ {
+ continue;
+ }
+ *pos = b;
+ pos++;
+ if (getComponents() == 3)
+ {
+ continue;
+ }
+ *pos = a;
+ pos++;
+ }
+ }
}
// Reverses the order of the rows in the image
void LLImageRaw::verticalFlip()
{
- S32 row_bytes = getWidth() * getComponents();
- llassert(row_bytes > 0);
- std::vector<U8> line_buffer(row_bytes);
- S32 mid_row = getHeight() / 2;
- for( S32 row = 0; row < mid_row; row++ )
- {
- U8* row_a_data = getData() + row * row_bytes;
- U8* row_b_data = getData() + (getHeight() - 1 - row) * row_bytes;
- memcpy( &line_buffer[0], row_a_data, row_bytes );
- memcpy( row_a_data, row_b_data, row_bytes );
- memcpy( row_b_data, &line_buffer[0], row_bytes );
- }
+ S32 row_bytes = getWidth() * getComponents();
+ llassert(row_bytes > 0);
+ std::vector<U8> line_buffer(row_bytes);
+ S32 mid_row = getHeight() / 2;
+ for( S32 row = 0; row < mid_row; row++ )
+ {
+ U8* row_a_data = getData() + row * row_bytes;
+ U8* row_b_data = getData() + (getHeight() - 1 - row) * row_bytes;
+ memcpy( &line_buffer[0], row_a_data, row_bytes );
+ memcpy( row_a_data, row_b_data, row_bytes );
+ memcpy( row_b_data, &line_buffer[0], row_bytes );
+ }
}
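// The same row swap can be written without the explicit scratch line by
// using std::swap_ranges (equivalent behaviour, sketch only):
#include <algorithm>
#include <cstdint>

void vertical_flip(uint8_t* data, int width, int height, int comps)
{
    const int row_bytes = width * comps;
    for (int row = 0; row < height / 2; ++row)
    {
        uint8_t* a = data + row * row_bytes;
        uint8_t* b = data + (height - 1 - row) * row_bytes;
        std::swap_ranges(a, a + row_bytes, b);      // element-wise row swap
    }
}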
@@ -1079,256 +1079,256 @@ bool LLImageRaw::makeAlpha()
void LLImageRaw::expandToPowerOfTwo(S32 max_dim, bool scale_image)
{
- // Find new sizes
- S32 new_width = expandDimToPowerOfTwo(getWidth(), max_dim);
- S32 new_height = expandDimToPowerOfTwo(getHeight(), max_dim);
+ // Find new sizes
+ S32 new_width = expandDimToPowerOfTwo(getWidth(), max_dim);
+ S32 new_height = expandDimToPowerOfTwo(getHeight(), max_dim);
- scale( new_width, new_height, scale_image );
+ scale( new_width, new_height, scale_image );
}
void LLImageRaw::contractToPowerOfTwo(S32 max_dim, bool scale_image)
{
- // Find new sizes
- S32 new_width = contractDimToPowerOfTwo(getWidth(), MIN_IMAGE_SIZE);
- S32 new_height = contractDimToPowerOfTwo(getHeight(), MIN_IMAGE_SIZE);
+ // Find new sizes
+ S32 new_width = contractDimToPowerOfTwo(getWidth(), MIN_IMAGE_SIZE);
+ S32 new_height = contractDimToPowerOfTwo(getHeight(), MIN_IMAGE_SIZE);
- scale( new_width, new_height, scale_image );
+ scale( new_width, new_height, scale_image );
}
// static
S32 LLImageRaw::biasedDimToPowerOfTwo(S32 curr_dim, S32 max_dim)
{
- // Strong bias towards rounding down (to save bandwidth)
- // No bias would mean THRESHOLD == 1.5f;
- const F32 THRESHOLD = 1.75f;
-
- // Find new sizes
- S32 larger_dim = max_dim; // 2^n >= curr_dim
- S32 smaller_dim = max_dim; // 2^(n-1) <= curr_dim
- while( (smaller_dim > curr_dim) && (smaller_dim > MIN_IMAGE_SIZE) )
- {
- larger_dim = smaller_dim;
- smaller_dim >>= 1;
- }
- return ( ((F32)curr_dim / (F32)smaller_dim) > THRESHOLD ) ? larger_dim : smaller_dim;
+ // Strong bias towards rounding down (to save bandwidth)
+ // No bias would mean THRESHOLD == 1.5f;
+ const F32 THRESHOLD = 1.75f;
+
+ // Find new sizes
+ S32 larger_dim = max_dim; // 2^n >= curr_dim
+ S32 smaller_dim = max_dim; // 2^(n-1) <= curr_dim
+ while( (smaller_dim > curr_dim) && (smaller_dim > MIN_IMAGE_SIZE) )
+ {
+ larger_dim = smaller_dim;
+ smaller_dim >>= 1;
+ }
+ return ( ((F32)curr_dim / (F32)smaller_dim) > THRESHOLD ) ? larger_dim : smaller_dim;
}
// static
S32 LLImageRaw::expandDimToPowerOfTwo(S32 curr_dim, S32 max_dim)
{
- S32 new_dim = MIN_IMAGE_SIZE;
- while( (new_dim < curr_dim) && (new_dim < max_dim) )
- {
- new_dim <<= 1;
- }
+ S32 new_dim = MIN_IMAGE_SIZE;
+ while( (new_dim < curr_dim) && (new_dim < max_dim) )
+ {
+ new_dim <<= 1;
+ }
return new_dim;
}
// static
S32 LLImageRaw::contractDimToPowerOfTwo(S32 curr_dim, S32 min_dim)
{
- S32 new_dim = MAX_IMAGE_SIZE;
- while( (new_dim > curr_dim) && (new_dim > min_dim) )
- {
- new_dim >>= 1;
- }
+ S32 new_dim = MAX_IMAGE_SIZE;
+ while( (new_dim > curr_dim) && (new_dim > min_dim) )
+ {
+ new_dim >>= 1;
+ }
return new_dim;
}
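// The three helpers differ only in rounding policy: expandDimToPowerOfTwo
// rounds up, contractDimToPowerOfTwo rounds down, and biasedDimToPowerOfTwo
// picks between the two neighbouring powers with a 1.75x threshold that
// favours shrinking (saving bandwidth). Worked examples, assuming
// MIN_IMAGE_SIZE = 8 and max_dim = 2048 (the constants are assumptions for
// illustration):
//
//   expandDimToPowerOfTwo(100, 2048)   -> 128  (first 2^n >= 100)
//   contractDimToPowerOfTwo(100, 8)    -> 64   (first 2^n <= 100)
//   biasedDimToPowerOfTwo(100, 2048)   -> 64   (100/64 = 1.5625 <= 1.75)
//   biasedDimToPowerOfTwo(120, 2048)   -> 128  (120/64 = 1.875  >  1.75)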
void LLImageRaw::biasedScaleToPowerOfTwo(S32 max_dim)
{
- // Find new sizes
- S32 new_width = biasedDimToPowerOfTwo(getWidth(),max_dim);
- S32 new_height = biasedDimToPowerOfTwo(getHeight(),max_dim);
+ // Find new sizes
+ S32 new_width = biasedDimToPowerOfTwo(getWidth(),max_dim);
+ S32 new_height = biasedDimToPowerOfTwo(getHeight(),max_dim);
- scale( new_width, new_height );
+ scale( new_width, new_height );
}
// Calculates (U8)(255*(a/255.f)*(b/255.f) + 0.5f). Thanks, Jim Blinn!
inline U8 LLImageRaw::fastFractionalMult( U8 a, U8 b )
{
- U32 i = a * b + 128;
- return U8((i + (i>>8)) >> 8);
+ U32 i = a * b + 128;
+ return U8((i + (i>>8)) >> 8);
}
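// The add-128 pre-rounds and (i + (i >> 8)) >> 8 is an exact substitute for
// i / 255 over this input range, so the function returns round(a*b/255) with
// no division. An exhaustive standalone check of that identity against the
// floating-point definition quoted above:
#include <cassert>
#include <cstdint>

void verify_fast_fractional_mult()
{
    for (int a = 0; a <= 255; ++a)
    {
        for (int b = 0; b <= 255; ++b)
        {
            uint32_t i = uint32_t(a * b) + 128;
            uint8_t fast  = uint8_t((i + (i >> 8)) >> 8);
            uint8_t exact = uint8_t(255 * (a / 255.f) * (b / 255.f) + 0.5f);
            assert(fast == exact);   // holds for all 65536 input pairs
        }
    }
}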
void LLImageRaw::composite( LLImageRaw* src )
{
- LLImageRaw* dst = this; // Just for clarity.
-
- if (!validateSrcAndDst("LLImageRaw::composite", src, dst))
- {
- return;
- }
-
- llassert((3 == src->getComponents()) || (4 == src->getComponents()));
- llassert(3 == dst->getComponents());
-
- if( 3 == dst->getComponents() )
- {
- if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
- {
- // No scaling needed
- if( 3 == src->getComponents() )
- {
- copyUnscaled( src ); // alpha is one so just copy the data.
- }
- else
- {
- compositeUnscaled4onto3( src );
- }
- }
- else
- {
- if( 3 == src->getComponents() )
- {
- copyScaled( src ); // alpha is one so just copy the data.
- }
- else
- {
- compositeScaled4onto3( src );
- }
- }
- }
+ LLImageRaw* dst = this; // Just for clarity.
+
+ if (!validateSrcAndDst("LLImageRaw::composite", src, dst))
+ {
+ return;
+ }
+
+ llassert((3 == src->getComponents()) || (4 == src->getComponents()));
+ llassert(3 == dst->getComponents());
+
+ if( 3 == dst->getComponents() )
+ {
+ if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
+ {
+ // No scaling needed
+ if( 3 == src->getComponents() )
+ {
+ copyUnscaled( src ); // alpha is one so just copy the data.
+ }
+ else
+ {
+ compositeUnscaled4onto3( src );
+ }
+ }
+ else
+ {
+ if( 3 == src->getComponents() )
+ {
+ copyScaled( src ); // alpha is one so just copy the data.
+ }
+ else
+ {
+ compositeScaled4onto3( src );
+ }
+ }
+ }
}
// Src and dst can be any size. Src has 4 components. Dst has 3 components.
void LLImageRaw::compositeScaled4onto3(LLImageRaw* src)
{
- LL_INFOS() << "compositeScaled4onto3" << LL_ENDL;
+ LL_INFOS() << "compositeScaled4onto3" << LL_ENDL;
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- llassert( (4 == src->getComponents()) && (3 == dst->getComponents()) );
+ llassert( (4 == src->getComponents()) && (3 == dst->getComponents()) );
- S32 temp_data_size = src->getWidth() * dst->getHeight() * src->getComponents();
- llassert_always(temp_data_size > 0);
- std::vector<U8> temp_buffer(temp_data_size);
+ S32 temp_data_size = src->getWidth() * dst->getHeight() * src->getComponents();
+ llassert_always(temp_data_size > 0);
+ std::vector<U8> temp_buffer(temp_data_size);
- // Vertical: scale but no composite
- for( S32 col = 0; col < src->getWidth(); col++ )
- {
- copyLineScaled( src->getData() + (src->getComponents() * col), &temp_buffer[0] + (src->getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
- }
+ // Vertical: scale but no composite
+ for( S32 col = 0; col < src->getWidth(); col++ )
+ {
+ copyLineScaled( src->getData() + (src->getComponents() * col), &temp_buffer[0] + (src->getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
+ }
- // Horizontal: scale and composite
- for( S32 row = 0; row < dst->getHeight(); row++ )
- {
- compositeRowScaled4onto3( &temp_buffer[0] + (src->getComponents() * src->getWidth() * row), dst->getData() + (dst->getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth() );
- }
+ // Horizontal: scale and composite
+ for( S32 row = 0; row < dst->getHeight(); row++ )
+ {
+ compositeRowScaled4onto3( &temp_buffer[0] + (src->getComponents() * src->getWidth() * row), dst->getData() + (dst->getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth() );
+ }
}
// Src and dst are same size. Src has 4 components. Dst has 3 components.
void LLImageRaw::compositeUnscaled4onto3( LLImageRaw* src )
{
- LLImageRaw* dst = this; // Just for clarity.
-
- llassert( (3 == src->getComponents()) || (4 == src->getComponents()) );
- llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
-
- U8* src_data = src->getData();
- U8* dst_data = dst->getData();
- S32 pixels = getWidth() * getHeight();
- while( pixels-- )
- {
- U8 alpha = src_data[3];
- if( alpha )
- {
- if( 255 == alpha )
- {
- dst_data[0] = src_data[0];
- dst_data[1] = src_data[1];
- dst_data[2] = src_data[2];
- }
- else
- {
-
- U8 transparency = 255 - alpha;
- dst_data[0] = fastFractionalMult( dst_data[0], transparency ) + fastFractionalMult( src_data[0], alpha );
- dst_data[1] = fastFractionalMult( dst_data[1], transparency ) + fastFractionalMult( src_data[1], alpha );
- dst_data[2] = fastFractionalMult( dst_data[2], transparency ) + fastFractionalMult( src_data[2], alpha );
- }
- }
-
- src_data += 4;
- dst_data += 3;
- }
+ LLImageRaw* dst = this; // Just for clarity.
+
+ llassert( (3 == src->getComponents()) || (4 == src->getComponents()) );
+ llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
+
+ U8* src_data = src->getData();
+ U8* dst_data = dst->getData();
+ S32 pixels = getWidth() * getHeight();
+ while( pixels-- )
+ {
+ U8 alpha = src_data[3];
+ if( alpha )
+ {
+ if( 255 == alpha )
+ {
+ dst_data[0] = src_data[0];
+ dst_data[1] = src_data[1];
+ dst_data[2] = src_data[2];
+ }
+ else
+ {
+
+ U8 transparency = 255 - alpha;
+ dst_data[0] = fastFractionalMult( dst_data[0], transparency ) + fastFractionalMult( src_data[0], alpha );
+ dst_data[1] = fastFractionalMult( dst_data[1], transparency ) + fastFractionalMult( src_data[1], alpha );
+ dst_data[2] = fastFractionalMult( dst_data[2], transparency ) + fastFractionalMult( src_data[2], alpha );
+ }
+ }
+
+ src_data += 4;
+ dst_data += 3;
+ }
}
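// Per channel this is the classic Porter-Duff "source over" blend against an
// opaque destination, dst = (dst*(255-alpha) + src*alpha) / 255, with the
// alpha == 0 and alpha == 255 cases short-circuited. One-channel sketch
// using the helper defined above:
//
//     U8 out_r = fastFractionalMult(dst_r, 255 - alpha)
//              + fastFractionalMult(src_r, alpha);
//
// The two weights sum to 255 and fastFractionalMult(x, w) <= w, so the U8
// addition cannot overflow.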
void LLImageRaw::copyUnscaledAlphaMask( LLImageRaw* src, const LLColor4U& fill)
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- if (!validateSrcAndDst("LLImageRaw::copyUnscaledAlphaMask", src, dst))
- {
- return;
- }
+ if (!validateSrcAndDst("LLImageRaw::copyUnscaledAlphaMask", src, dst))
+ {
+ return;
+ }
- llassert( 1 == src->getComponents() );
- llassert( 4 == dst->getComponents() );
- llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
+ llassert( 1 == src->getComponents() );
+ llassert( 4 == dst->getComponents() );
+ llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
- S32 pixels = getWidth() * getHeight();
- U8* src_data = src->getData();
- U8* dst_data = dst->getData();
- for ( S32 i = 0; i < pixels; i++ )
- {
- dst_data[0] = fill.mV[0];
- dst_data[1] = fill.mV[1];
- dst_data[2] = fill.mV[2];
- dst_data[3] = src_data[0];
- src_data += 1;
- dst_data += 4;
- }
+ S32 pixels = getWidth() * getHeight();
+ U8* src_data = src->getData();
+ U8* dst_data = dst->getData();
+ for ( S32 i = 0; i < pixels; i++ )
+ {
+ dst_data[0] = fill.mV[0];
+ dst_data[1] = fill.mV[1];
+ dst_data[2] = fill.mV[2];
+ dst_data[3] = src_data[0];
+ src_data += 1;
+ dst_data += 4;
+ }
}
// Fill the buffer with a constant color
void LLImageRaw::fill( const LLColor4U& color )
{
- if (isBufferInvalid())
- {
- LL_WARNS() << "Invalid image buffer" << LL_ENDL;
- return;
- }
-
- S32 pixels = getWidth() * getHeight();
- if( 4 == getComponents() )
- {
- U32* data = (U32*) getData();
- U32 rgbaColor = color.asRGBA();
- for( S32 i = 0; i < pixels; i++ )
- {
- data[ i ] = rgbaColor;
- }
- }
- else
- if( 3 == getComponents() )
- {
- U8* data = getData();
- for( S32 i = 0; i < pixels; i++ )
- {
- data[0] = color.mV[0];
- data[1] = color.mV[1];
- data[2] = color.mV[2];
- data += 3;
- }
- }
+ if (isBufferInvalid())
+ {
+ LL_WARNS() << "Invalid image buffer" << LL_ENDL;
+ return;
+ }
+
+ S32 pixels = getWidth() * getHeight();
+ if( 4 == getComponents() )
+ {
+ U32* data = (U32*) getData();
+ U32 rgbaColor = color.asRGBA();
+ for( S32 i = 0; i < pixels; i++ )
+ {
+ data[ i ] = rgbaColor;
+ }
+ }
+ else
+ if( 3 == getComponents() )
+ {
+ U8* data = getData();
+ for( S32 i = 0; i < pixels; i++ )
+ {
+ data[0] = color.mV[0];
+ data[1] = color.mV[1];
+ data[2] = color.mV[2];
+ data += 3;
+ }
+ }
}
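Hypothetical usage of fill() on a freshly allocated RGBA buffer (a sketch; assumes the LLColor4U byte constructor):

    LLPointer<LLImageRaw> img = new LLImageRaw(256, 256, 4);
    img->fill(LLColor4U(255, 0, 0, 255));   // opaque red, written as packed U32s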
void LLImageRaw::tint( const LLColor3& color )
{
- llassert( (3 == getComponents()) || (4 == getComponents()) );
- if (isBufferInvalid())
- {
- LL_WARNS() << "Invalid image buffer" << LL_ENDL;
- return;
- }
+ llassert( (3 == getComponents()) || (4 == getComponents()) );
+ if (isBufferInvalid())
+ {
+ LL_WARNS() << "Invalid image buffer" << LL_ENDL;
+ return;
+ }
- S32 pixels = getWidth() * getHeight();
+ S32 pixels = getWidth() * getHeight();
const S32 components = getComponents();
U8* data = getData();
for( S32 i = 0; i < pixels; i++ )
@@ -1345,188 +1345,188 @@ void LLImageRaw::tint( const LLColor3& color )
LLPointer<LLImageRaw> LLImageRaw::duplicate()
{
- if(getNumRefs() < 2)
- {
- return this; //nobody else refences to this image, no need to duplicate.
- }
+ if(getNumRefs() < 2)
+ {
+    return this; // nobody else references this image, no need to duplicate.
+ }
- //make a duplicate
- LLPointer<LLImageRaw> dup = new LLImageRaw(getData(), getWidth(), getHeight(), getComponents());
- return dup;
+ //make a duplicate
+ LLPointer<LLImageRaw> dup = new LLImageRaw(getData(), getWidth(), getHeight(), getComponents());
+ return dup;
}
// Src and dst can be any size. Src and dst can each have 3 or 4 components.
void LLImageRaw::copy(LLImageRaw* src)
{
- LLImageRaw* dst = this; // Just for clarity.
-
- if (!validateSrcAndDst("LLImageRaw::copy", src, dst))
- {
- return;
- }
-
- if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
- {
- // No scaling needed
- if( src->getComponents() == dst->getComponents() )
- {
- copyUnscaled( src );
- }
- else
- if( 3 == src->getComponents() )
- {
- copyUnscaled3onto4( src );
- }
- else
- {
- // 4 == src->getComponents()
- copyUnscaled4onto3( src );
- }
- }
- else
- {
- // Scaling needed
- // No scaling needed
- if( src->getComponents() == dst->getComponents() )
- {
- copyScaled( src );
- }
- else
- if( 3 == src->getComponents() )
- {
- copyScaled3onto4( src );
- }
- else
- {
- // 4 == src->getComponents()
- copyScaled4onto3( src );
- }
- }
+ LLImageRaw* dst = this; // Just for clarity.
+
+ if (!validateSrcAndDst("LLImageRaw::copy", src, dst))
+ {
+ return;
+ }
+
+ if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
+ {
+ // No scaling needed
+ if( src->getComponents() == dst->getComponents() )
+ {
+ copyUnscaled( src );
+ }
+ else
+ if( 3 == src->getComponents() )
+ {
+ copyUnscaled3onto4( src );
+ }
+ else
+ {
+ // 4 == src->getComponents()
+ copyUnscaled4onto3( src );
+ }
+ }
+ else
+ {
+        // Scaling needed
+ if( src->getComponents() == dst->getComponents() )
+ {
+ copyScaled( src );
+ }
+ else
+ if( 3 == src->getComponents() )
+ {
+ copyScaled3onto4( src );
+ }
+ else
+ {
+ // 4 == src->getComponents()
+ copyScaled4onto3( src );
+ }
+ }
}
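A hypothetical round trip through the dispatch above: differing sizes select the scaled path, and the 4-onto-3 component mismatch then selects copyScaled4onto3().

    LLPointer<LLImageRaw> rgba = new LLImageRaw(128, 128, 4);
    LLPointer<LLImageRaw> rgb  = new LLImageRaw(64, 64, 3);
    rgb->copy(rgba);   // scaled path, ends up in copyScaled4onto3()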
// Src and dst are same size. Src and dst have same number of components.
void LLImageRaw::copyUnscaled(LLImageRaw* src)
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- llassert( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
- llassert( src->getComponents() == dst->getComponents() );
- llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
+ llassert( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
+ llassert( src->getComponents() == dst->getComponents() );
+ llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
- memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() ); /* Flawfinder: ignore */
+ memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() ); /* Flawfinder: ignore */
}
// Src and dst can be any size. Src has 3 components. Dst has 4 components.
void LLImageRaw::copyScaled3onto4(LLImageRaw* src)
{
- llassert( (3 == src->getComponents()) && (4 == getComponents()) );
+ llassert( (3 == src->getComponents()) && (4 == getComponents()) );
- // Slow, but simple. Optimize later if needed.
- LLImageRaw temp( src->getWidth(), src->getHeight(), 4);
- temp.copyUnscaled3onto4( src );
- copyScaled( &temp );
+ // Slow, but simple. Optimize later if needed.
+ LLImageRaw temp( src->getWidth(), src->getHeight(), 4);
+ temp.copyUnscaled3onto4( src );
+ copyScaled( &temp );
}
// Src and dst can be any size. Src has 4 components. Dst has 3 components.
void LLImageRaw::copyScaled4onto3(LLImageRaw* src)
{
- llassert( (4 == src->getComponents()) && (3 == getComponents()) );
+ llassert( (4 == src->getComponents()) && (3 == getComponents()) );
- // Slow, but simple. Optimize later if needed.
- LLImageRaw temp( src->getWidth(), src->getHeight(), 3);
- temp.copyUnscaled4onto3( src );
- copyScaled( &temp );
+ // Slow, but simple. Optimize later if needed.
+ LLImageRaw temp( src->getWidth(), src->getHeight(), 3);
+ temp.copyUnscaled4onto3( src );
+ copyScaled( &temp );
}
// Src and dst are same size. Src has 4 components. Dst has 3 components.
void LLImageRaw::copyUnscaled4onto3( LLImageRaw* src )
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- llassert( (3 == dst->getComponents()) && (4 == src->getComponents()) );
- llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
+ llassert( (3 == dst->getComponents()) && (4 == src->getComponents()) );
+ llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
- S32 pixels = getWidth() * getHeight();
- U8* src_data = src->getData();
- U8* dst_data = dst->getData();
- for( S32 i=0; i<pixels; i++ )
- {
- dst_data[0] = src_data[0];
- dst_data[1] = src_data[1];
- dst_data[2] = src_data[2];
- src_data += 4;
- dst_data += 3;
- }
+ S32 pixels = getWidth() * getHeight();
+ U8* src_data = src->getData();
+ U8* dst_data = dst->getData();
+ for( S32 i=0; i<pixels; i++ )
+ {
+ dst_data[0] = src_data[0];
+ dst_data[1] = src_data[1];
+ dst_data[2] = src_data[2];
+ src_data += 4;
+ dst_data += 3;
+ }
}
// Src and dst are same size. Src has 3 components. Dst has 4 components.
void LLImageRaw::copyUnscaled3onto4( LLImageRaw* src )
{
- LLImageRaw* dst = this; // Just for clarity.
- llassert( 3 == src->getComponents() );
- llassert( 4 == dst->getComponents() );
- llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
+ LLImageRaw* dst = this; // Just for clarity.
+ llassert( 3 == src->getComponents() );
+ llassert( 4 == dst->getComponents() );
+ llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
- S32 pixels = getWidth() * getHeight();
- U8* src_data = src->getData();
- U8* dst_data = dst->getData();
- for( S32 i=0; i<pixels; i++ )
- {
- dst_data[0] = src_data[0];
- dst_data[1] = src_data[1];
- dst_data[2] = src_data[2];
- dst_data[3] = 255;
- src_data += 3;
- dst_data += 4;
- }
+ S32 pixels = getWidth() * getHeight();
+ U8* src_data = src->getData();
+ U8* dst_data = dst->getData();
+ for( S32 i=0; i<pixels; i++ )
+ {
+ dst_data[0] = src_data[0];
+ dst_data[1] = src_data[1];
+ dst_data[2] = src_data[2];
+ dst_data[3] = 255;
+ src_data += 3;
+ dst_data += 4;
+ }
}
// Src and dst can be any size. Src and dst have same number of components.
void LLImageRaw::copyScaled( LLImageRaw* src )
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- if (!validateSrcAndDst("LLImageRaw::copyScaled", src, dst))
- {
- return;
- }
+ if (!validateSrcAndDst("LLImageRaw::copyScaled", src, dst))
+ {
+ return;
+ }
- llassert_always( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
- llassert_always( src->getComponents() == dst->getComponents() );
+ llassert_always( (1 == src->getComponents()) || (3 == src->getComponents()) || (4 == src->getComponents()) );
+ llassert_always( src->getComponents() == dst->getComponents() );
- if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
- {
- memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() ); /* Flawfinder: ignore */
- return;
- }
+ if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
+ {
+ memcpy( dst->getData(), src->getData(), getWidth() * getHeight() * getComponents() ); /* Flawfinder: ignore */
+ return;
+ }
- bilinear_scale(
- src->getData(), src->getWidth(), src->getHeight(), src->getComponents(), src->getWidth()*src->getComponents()
- , dst->getData(), dst->getWidth(), dst->getHeight(), dst->getComponents(), dst->getWidth()*dst->getComponents()
- );
+ bilinear_scale(
+ src->getData(), src->getWidth(), src->getHeight(), src->getComponents(), src->getWidth()*src->getComponents()
+ , dst->getData(), dst->getWidth(), dst->getHeight(), dst->getComponents(), dst->getWidth()*dst->getComponents()
+ );
- /*
- S32 temp_data_size = src->getWidth() * dst->getHeight() * getComponents();
- llassert_always(temp_data_size > 0);
- std::vector<U8> temp_buffer(temp_data_size);
+ /*
+ S32 temp_data_size = src->getWidth() * dst->getHeight() * getComponents();
+ llassert_always(temp_data_size > 0);
+ std::vector<U8> temp_buffer(temp_data_size);
- // Vertical
- for( S32 col = 0; col < src->getWidth(); col++ )
- {
- copyLineScaled( src->getData() + (getComponents() * col), &temp_buffer[0] + (getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
- }
+ // Vertical
+ for( S32 col = 0; col < src->getWidth(); col++ )
+ {
+ copyLineScaled( src->getData() + (getComponents() * col), &temp_buffer[0] + (getComponents() * col), src->getHeight(), dst->getHeight(), src->getWidth(), src->getWidth() );
+ }
- // Horizontal
- for( S32 row = 0; row < dst->getHeight(); row++ )
- {
- copyLineScaled( &temp_buffer[0] + (getComponents() * src->getWidth() * row), dst->getData() + (getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth(), 1, 1 );
- }
- */
+ // Horizontal
+ for( S32 row = 0; row < dst->getHeight(); row++ )
+ {
+ copyLineScaled( &temp_buffer[0] + (getComponents() * src->getWidth() * row), dst->getData() + (getComponents() * dst->getWidth() * row), src->getWidth(), dst->getWidth(), 1, 1 );
+ }
+ */
}
@@ -1539,79 +1539,79 @@ bool LLImageRaw::scale( S32 new_width, S32 new_height, bool scale_image_data )
return false;
}
- if (isBufferInvalid())
- {
- LL_WARNS() << "Invalid image buffer" << LL_ENDL;
- return false;
- }
+ if (isBufferInvalid())
+ {
+ LL_WARNS() << "Invalid image buffer" << LL_ENDL;
+ return false;
+ }
- S32 old_width = getWidth();
- S32 old_height = getHeight();
-
- if( (old_width == new_width) && (old_height == new_height) )
- {
- return true; // Nothing to do.
- }
+ S32 old_width = getWidth();
+ S32 old_height = getHeight();
+
+ if( (old_width == new_width) && (old_height == new_height) )
+ {
+ return true; // Nothing to do.
+ }
- // Reallocate the data buffer.
+ // Reallocate the data buffer.
- if (scale_image_data)
- {
- S32 new_data_size = new_width * new_height * components;
+ if (scale_image_data)
+ {
+ S32 new_data_size = new_width * new_height * components;
- if (new_data_size > 0)
+ if (new_data_size > 0)
{
- U8 *new_data = (U8*)ll_aligned_malloc_16(new_data_size);
- if(NULL == new_data)
+ U8 *new_data = (U8*)ll_aligned_malloc_16(new_data_size);
+ if(NULL == new_data)
{
- return false;
+ return false;
}
bilinear_scale(getData(), old_width, old_height, components, old_width*components, new_data, new_width, new_height, components, new_width*components);
- setDataAndSize(new_data, new_width, new_height, components);
- }
- }
- else try
- {
- // copy out existing image data
- S32 temp_data_size = old_width * old_height * components;
- std::vector<U8> temp_buffer(temp_data_size);
- memcpy(&temp_buffer[0], getData(), temp_data_size);
-
- // allocate new image data, will delete old data
- U8* new_buffer = allocateDataSize(new_width, new_height, components);
+ setDataAndSize(new_data, new_width, new_height, components);
+ }
+ }
+ else try
+ {
+ // copy out existing image data
+ S32 temp_data_size = old_width * old_height * components;
+ std::vector<U8> temp_buffer(temp_data_size);
+ memcpy(&temp_buffer[0], getData(), temp_data_size);
+
+ // allocate new image data, will delete old data
+ U8* new_buffer = allocateDataSize(new_width, new_height, components);
if (!new_buffer)
{
LL_WARNS() << "Failed to allocate new image data buffer" << LL_ENDL;
return false;
}
-
- for( S32 row = 0; row < new_height; row++ )
+
+ for( S32 row = 0; row < new_height; row++ )
{
- if (row < old_height)
+ if (row < old_height)
{
- memcpy(new_buffer + (new_width * row * components), &temp_buffer[0] + (old_width * row * components), components * llmin(old_width, new_width));
- if (old_width < new_width)
+ memcpy(new_buffer + (new_width * row * components), &temp_buffer[0] + (old_width * row * components), components * llmin(old_width, new_width));
+ if (old_width < new_width)
{
- // pad out rest of row with black
- memset(new_buffer + (components * ((new_width * row) + old_width)), 0, components * (new_width - old_width));
+ // pad out rest of row with black
+ memset(new_buffer + (components * ((new_width * row) + old_width)), 0, components * (new_width - old_width));
}
}
else
{
// pad remaining rows with black
- memset(new_buffer + (new_width * row * components), 0, new_width * components);
+ memset(new_buffer + (new_width * row * components), 0, new_width * components);
}
}
- }
+ }
catch (std::bad_alloc&) // for temp_buffer
{
LL_WARNS() << "Failed to allocate temporary image buffer" << LL_ENDL;
return false;
}
- return true ;
+    return true;
}
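The two modes of scale() behave quite differently, as this hedged usage sketch shows: with scale_image_data true the pixels are resampled, with it false the canvas is resized in place and any new area is padded with black.

    LLPointer<LLImageRaw> img = new LLImageRaw(64, 64, 3);
    img->scale(32, 32, true);    // bilinear resample down to 32x32
    img->scale(64, 64, false);   // grow the canvas; added rows/columns are black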
LLPointer<LLImageRaw> LLImageRaw::scaled(S32 new_width, S32 new_height)
@@ -1665,253 +1665,253 @@ LLPointer<LLImageRaw> LLImageRaw::scaled(S32 new_width, S32 new_height)
void LLImageRaw::copyLineScaled( U8* in, U8* out, S32 in_pixel_len, S32 out_pixel_len, S32 in_pixel_step, S32 out_pixel_step )
{
- const S32 components = getComponents();
- llassert( components >= 1 && components <= 4 );
-
- const F32 ratio = F32(in_pixel_len) / out_pixel_len; // ratio of old to new
- const F32 norm_factor = 1.f / ratio;
-
- S32 goff = components >= 2 ? 1 : 0;
- S32 boff = components >= 3 ? 2 : 0;
- for( S32 x = 0; x < out_pixel_len; x++ )
- {
- // Sample input pixels in range from sample0 to sample1.
- // Avoid floating point accumulation error... don't just add ratio each time. JC
- const F32 sample0 = x * ratio;
- const F32 sample1 = (x+1) * ratio;
- const S32 index0 = llfloor(sample0); // left integer (floor)
- const S32 index1 = llfloor(sample1); // right integer (floor)
- const F32 fract0 = 1.f - (sample0 - F32(index0)); // spill over on left
- const F32 fract1 = sample1 - F32(index1); // spill-over on right
-
- if( index0 == index1 )
- {
- // Interval is embedded in one input pixel
- S32 t0 = x * out_pixel_step * components;
- S32 t1 = index0 * in_pixel_step * components;
- U8* outp = out + t0;
- U8* inp = in + t1;
- for (S32 i = 0; i < components; ++i)
- {
- *outp = *inp;
- ++outp;
- ++inp;
- }
- }
- else
- {
- // Left straddle
- S32 t1 = index0 * in_pixel_step * components;
- F32 r = in[t1 + 0] * fract0;
- F32 g = in[t1 + goff] * fract0;
- F32 b = in[t1 + boff] * fract0;
- F32 a = 0;
- if( components == 4)
- {
- a = in[t1 + 3] * fract0;
- }
-
- // Central interval
- if (components < 4)
- {
- for( S32 u = index0 + 1; u < index1; u++ )
- {
- S32 t2 = u * in_pixel_step * components;
- r += in[t2 + 0];
- g += in[t2 + goff];
- b += in[t2 + boff];
- }
- }
- else
- {
- for( S32 u = index0 + 1; u < index1; u++ )
- {
- S32 t2 = u * in_pixel_step * components;
- r += in[t2 + 0];
- g += in[t2 + 1];
- b += in[t2 + 2];
- a += in[t2 + 3];
- }
- }
-
- // right straddle
- // Watch out for reading off of end of input array.
- if( fract1 && index1 < in_pixel_len )
- {
- S32 t3 = index1 * in_pixel_step * components;
- if (components < 4)
- {
- U8 in0 = in[t3 + 0];
- U8 in1 = in[t3 + goff];
- U8 in2 = in[t3 + boff];
- r += in0 * fract1;
- g += in1 * fract1;
- b += in2 * fract1;
- }
- else
- {
- U8 in0 = in[t3 + 0];
- U8 in1 = in[t3 + 1];
- U8 in2 = in[t3 + 2];
- U8 in3 = in[t3 + 3];
- r += in0 * fract1;
- g += in1 * fract1;
- b += in2 * fract1;
- a += in3 * fract1;
- }
- }
-
- r *= norm_factor;
- g *= norm_factor;
- b *= norm_factor;
- a *= norm_factor; // skip conditional
-
- S32 t4 = x * out_pixel_step * components;
- out[t4 + 0] = U8(ll_round(r));
- if (components >= 2)
- out[t4 + 1] = U8(ll_round(g));
- if (components >= 3)
- out[t4 + 2] = U8(ll_round(b));
- if( components == 4)
- out[t4 + 3] = U8(ll_round(a));
- }
- }
+ const S32 components = getComponents();
+ llassert( components >= 1 && components <= 4 );
+
+ const F32 ratio = F32(in_pixel_len) / out_pixel_len; // ratio of old to new
+ const F32 norm_factor = 1.f / ratio;
+
+ S32 goff = components >= 2 ? 1 : 0;
+ S32 boff = components >= 3 ? 2 : 0;
+ for( S32 x = 0; x < out_pixel_len; x++ )
+ {
+ // Sample input pixels in range from sample0 to sample1.
+ // Avoid floating point accumulation error... don't just add ratio each time. JC
+ const F32 sample0 = x * ratio;
+ const F32 sample1 = (x+1) * ratio;
+ const S32 index0 = llfloor(sample0); // left integer (floor)
+ const S32 index1 = llfloor(sample1); // right integer (floor)
+ const F32 fract0 = 1.f - (sample0 - F32(index0)); // spill over on left
+ const F32 fract1 = sample1 - F32(index1); // spill-over on right
+
+ if( index0 == index1 )
+ {
+ // Interval is embedded in one input pixel
+ S32 t0 = x * out_pixel_step * components;
+ S32 t1 = index0 * in_pixel_step * components;
+ U8* outp = out + t0;
+ U8* inp = in + t1;
+ for (S32 i = 0; i < components; ++i)
+ {
+ *outp = *inp;
+ ++outp;
+ ++inp;
+ }
+ }
+ else
+ {
+ // Left straddle
+ S32 t1 = index0 * in_pixel_step * components;
+ F32 r = in[t1 + 0] * fract0;
+ F32 g = in[t1 + goff] * fract0;
+ F32 b = in[t1 + boff] * fract0;
+ F32 a = 0;
+ if( components == 4)
+ {
+ a = in[t1 + 3] * fract0;
+ }
+
+ // Central interval
+ if (components < 4)
+ {
+ for( S32 u = index0 + 1; u < index1; u++ )
+ {
+ S32 t2 = u * in_pixel_step * components;
+ r += in[t2 + 0];
+ g += in[t2 + goff];
+ b += in[t2 + boff];
+ }
+ }
+ else
+ {
+ for( S32 u = index0 + 1; u < index1; u++ )
+ {
+ S32 t2 = u * in_pixel_step * components;
+ r += in[t2 + 0];
+ g += in[t2 + 1];
+ b += in[t2 + 2];
+ a += in[t2 + 3];
+ }
+ }
+
+ // right straddle
+ // Watch out for reading off of end of input array.
+ if( fract1 && index1 < in_pixel_len )
+ {
+ S32 t3 = index1 * in_pixel_step * components;
+ if (components < 4)
+ {
+ U8 in0 = in[t3 + 0];
+ U8 in1 = in[t3 + goff];
+ U8 in2 = in[t3 + boff];
+ r += in0 * fract1;
+ g += in1 * fract1;
+ b += in2 * fract1;
+ }
+ else
+ {
+ U8 in0 = in[t3 + 0];
+ U8 in1 = in[t3 + 1];
+ U8 in2 = in[t3 + 2];
+ U8 in3 = in[t3 + 3];
+ r += in0 * fract1;
+ g += in1 * fract1;
+ b += in2 * fract1;
+ a += in3 * fract1;
+ }
+ }
+
+ r *= norm_factor;
+ g *= norm_factor;
+ b *= norm_factor;
+ a *= norm_factor; // skip conditional
+
+ S32 t4 = x * out_pixel_step * components;
+ out[t4 + 0] = U8(ll_round(r));
+ if (components >= 2)
+ out[t4 + 1] = U8(ll_round(g));
+ if (components >= 3)
+ out[t4 + 2] = U8(ll_round(b));
+ if( components == 4)
+ out[t4 + 3] = U8(ll_round(a));
+ }
+ }
}
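A worked example of the interval sampling above: shrinking 3 input pixels to 2 outputs gives ratio = 3/2 = 1.5 and norm_factor = 1/1.5.

    output 0 covers [0.0, 1.5):  fract0 = 1.0, fract1 = 0.5
        out[0] = (in[0]*1.0 + in[1]*0.5) / 1.5
    output 1 covers [1.5, 3.0):  fract0 = 0.5, fract1 = 0.0
        out[1] = (in[1]*0.5 + in[2]*1.0) / 1.5

Each output is the exact average of the input span it covers, which is why norm_factor can be applied unconditionally.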
void LLImageRaw::compositeRowScaled4onto3( U8* in, U8* out, S32 in_pixel_len, S32 out_pixel_len )
{
- llassert( getComponents() == 3 );
-
- const S32 IN_COMPONENTS = 4;
- const S32 OUT_COMPONENTS = 3;
-
- const F32 ratio = F32(in_pixel_len) / out_pixel_len; // ratio of old to new
- const F32 norm_factor = 1.f / ratio;
-
- for( S32 x = 0; x < out_pixel_len; x++ )
- {
- // Sample input pixels in range from sample0 to sample1.
- // Avoid floating point accumulation error... don't just add ratio each time. JC
- const F32 sample0 = x * ratio;
- const F32 sample1 = (x+1) * ratio;
- const S32 index0 = S32(sample0); // left integer (floor)
- const S32 index1 = S32(sample1); // right integer (floor)
- const F32 fract0 = 1.f - (sample0 - F32(index0)); // spill over on left
- const F32 fract1 = sample1 - F32(index1); // spill-over on right
-
- U8 in_scaled_r;
- U8 in_scaled_g;
- U8 in_scaled_b;
- U8 in_scaled_a;
-
- if( index0 == index1 )
- {
- // Interval is embedded in one input pixel
- S32 t1 = index0 * IN_COMPONENTS;
- in_scaled_r = in[t1 + 0];
- in_scaled_g = in[t1 + 0];
- in_scaled_b = in[t1 + 0];
- in_scaled_a = in[t1 + 0];
- }
- else
- {
- // Left straddle
- S32 t1 = index0 * IN_COMPONENTS;
- F32 r = in[t1 + 0] * fract0;
- F32 g = in[t1 + 1] * fract0;
- F32 b = in[t1 + 2] * fract0;
- F32 a = in[t1 + 3] * fract0;
-
- // Central interval
- for( S32 u = index0 + 1; u < index1; u++ )
- {
- S32 t2 = u * IN_COMPONENTS;
- r += in[t2 + 0];
- g += in[t2 + 1];
- b += in[t2 + 2];
- a += in[t2 + 3];
- }
-
- // right straddle
- // Watch out for reading off of end of input array.
- if( fract1 && index1 < in_pixel_len )
- {
- S32 t3 = index1 * IN_COMPONENTS;
- r += in[t3 + 0] * fract1;
- g += in[t3 + 1] * fract1;
- b += in[t3 + 2] * fract1;
- a += in[t3 + 3] * fract1;
- }
-
- r *= norm_factor;
- g *= norm_factor;
- b *= norm_factor;
- a *= norm_factor;
-
- in_scaled_r = U8(ll_round(r));
- in_scaled_g = U8(ll_round(g));
- in_scaled_b = U8(ll_round(b));
- in_scaled_a = U8(ll_round(a));
- }
-
- if( in_scaled_a )
- {
- if( 255 == in_scaled_a )
- {
- out[0] = in_scaled_r;
- out[1] = in_scaled_g;
- out[2] = in_scaled_b;
- }
- else
- {
- U8 transparency = 255 - in_scaled_a;
- out[0] = fastFractionalMult( out[0], transparency ) + fastFractionalMult( in_scaled_r, in_scaled_a );
- out[1] = fastFractionalMult( out[1], transparency ) + fastFractionalMult( in_scaled_g, in_scaled_a );
- out[2] = fastFractionalMult( out[2], transparency ) + fastFractionalMult( in_scaled_b, in_scaled_a );
- }
- }
- out += OUT_COMPONENTS;
- }
+ llassert( getComponents() == 3 );
+
+ const S32 IN_COMPONENTS = 4;
+ const S32 OUT_COMPONENTS = 3;
+
+ const F32 ratio = F32(in_pixel_len) / out_pixel_len; // ratio of old to new
+ const F32 norm_factor = 1.f / ratio;
+
+ for( S32 x = 0; x < out_pixel_len; x++ )
+ {
+ // Sample input pixels in range from sample0 to sample1.
+ // Avoid floating point accumulation error... don't just add ratio each time. JC
+ const F32 sample0 = x * ratio;
+ const F32 sample1 = (x+1) * ratio;
+ const S32 index0 = S32(sample0); // left integer (floor)
+ const S32 index1 = S32(sample1); // right integer (floor)
+ const F32 fract0 = 1.f - (sample0 - F32(index0)); // spill over on left
+ const F32 fract1 = sample1 - F32(index1); // spill-over on right
+
+ U8 in_scaled_r;
+ U8 in_scaled_g;
+ U8 in_scaled_b;
+ U8 in_scaled_a;
+
+ if( index0 == index1 )
+ {
+ // Interval is embedded in one input pixel
+ S32 t1 = index0 * IN_COMPONENTS;
+            in_scaled_r = in[t1 + 0];
+            in_scaled_g = in[t1 + 1];
+            in_scaled_b = in[t1 + 2];
+            in_scaled_a = in[t1 + 3];
+ }
+ else
+ {
+ // Left straddle
+ S32 t1 = index0 * IN_COMPONENTS;
+ F32 r = in[t1 + 0] * fract0;
+ F32 g = in[t1 + 1] * fract0;
+ F32 b = in[t1 + 2] * fract0;
+ F32 a = in[t1 + 3] * fract0;
+
+ // Central interval
+ for( S32 u = index0 + 1; u < index1; u++ )
+ {
+ S32 t2 = u * IN_COMPONENTS;
+ r += in[t2 + 0];
+ g += in[t2 + 1];
+ b += in[t2 + 2];
+ a += in[t2 + 3];
+ }
+
+ // right straddle
+ // Watch out for reading off of end of input array.
+ if( fract1 && index1 < in_pixel_len )
+ {
+ S32 t3 = index1 * IN_COMPONENTS;
+ r += in[t3 + 0] * fract1;
+ g += in[t3 + 1] * fract1;
+ b += in[t3 + 2] * fract1;
+ a += in[t3 + 3] * fract1;
+ }
+
+ r *= norm_factor;
+ g *= norm_factor;
+ b *= norm_factor;
+ a *= norm_factor;
+
+ in_scaled_r = U8(ll_round(r));
+ in_scaled_g = U8(ll_round(g));
+ in_scaled_b = U8(ll_round(b));
+ in_scaled_a = U8(ll_round(a));
+ }
+
+ if( in_scaled_a )
+ {
+ if( 255 == in_scaled_a )
+ {
+ out[0] = in_scaled_r;
+ out[1] = in_scaled_g;
+ out[2] = in_scaled_b;
+ }
+ else
+ {
+ U8 transparency = 255 - in_scaled_a;
+ out[0] = fastFractionalMult( out[0], transparency ) + fastFractionalMult( in_scaled_r, in_scaled_a );
+ out[1] = fastFractionalMult( out[1], transparency ) + fastFractionalMult( in_scaled_g, in_scaled_a );
+ out[2] = fastFractionalMult( out[2], transparency ) + fastFractionalMult( in_scaled_b, in_scaled_a );
+ }
+ }
+ out += OUT_COMPONENTS;
+ }
}
void LLImageRaw::addEmissive(LLImageRaw* src)
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- if (!validateSrcAndDst(__FUNCTION__, src, dst))
- {
- return;
- }
+ if (!validateSrcAndDst(__FUNCTION__, src, dst))
+ {
+ return;
+ }
- llassert((3 == src->getComponents()) || (4 == src->getComponents()));
- llassert(3 == dst->getComponents());
+ llassert((3 == src->getComponents()) || (4 == src->getComponents()));
+ llassert(3 == dst->getComponents());
- if( 3 == dst->getComponents() )
- {
- if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
- {
+ if( 3 == dst->getComponents() )
+ {
+ if( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) )
+ {
addEmissiveUnscaled(src);
- }
- else
- {
+ }
+ else
+ {
addEmissiveScaled(src);
- }
- }
+ }
+ }
}
void LLImageRaw::addEmissiveUnscaled(LLImageRaw* src)
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- llassert((3 == src->getComponents()) || (4 == src->getComponents()));
- llassert((3 == dst->getComponents()) || (4 == dst->getComponents()));
- llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
+ llassert((3 == src->getComponents()) || (4 == src->getComponents()));
+ llassert((3 == dst->getComponents()) || (4 == dst->getComponents()));
+ llassert( (src->getWidth() == dst->getWidth()) && (src->getHeight() == dst->getHeight()) );
U8* const src_data = src->getData();
U8* const dst_data = dst->getData();
- for(S32 y = 0; y < dst->getHeight(); ++y)
- {
+ for(S32 y = 0; y < dst->getHeight(); ++y)
+ {
const S32 src_row_offset = src->getComponents() * src->getWidth() * y;
const S32 dst_row_offset = dst->getComponents() * dst->getWidth() * y;
for (S32 x = 0; x < dst->getWidth(); ++x)
@@ -1924,17 +1924,17 @@ void LLImageRaw::addEmissiveUnscaled(LLImageRaw* src)
dst_pixel[1] = llmin(255, dst_pixel[1] + src_pixel[1]);
dst_pixel[2] = llmin(255, dst_pixel[2] + src_pixel[2]);
}
- }
+ }
}
void LLImageRaw::addEmissiveScaled(LLImageRaw* src)
{
- LLImageRaw* dst = this; // Just for clarity.
+ LLImageRaw* dst = this; // Just for clarity.
- llassert( (4 == src->getComponents()) && (3 == dst->getComponents()) );
+ llassert( (4 == src->getComponents()) && (3 == dst->getComponents()) );
LLImageRaw temp(dst->getWidth(), dst->getHeight(), dst->getComponents());
- llassert_always(temp.getDataSize() > 0);
+ llassert_always(temp.getDataSize() > 0);
temp.copyScaled(src);
dst->addEmissiveUnscaled(&temp);
@@ -1943,161 +1943,161 @@ void LLImageRaw::addEmissiveScaled(LLImageRaw* src)
bool LLImageRaw::validateSrcAndDst(std::string func, LLImageRaw* src, LLImageRaw* dst)
{
- if (!src || !dst || src->isBufferInvalid() || dst->isBufferInvalid())
- {
- LL_WARNS() << func << ": Source: ";
- if (!src) LL_CONT << "Null pointer";
- else if (src->isBufferInvalid()) LL_CONT << "Invalid buffer";
- else LL_CONT << "OK";
+ if (!src || !dst || src->isBufferInvalid() || dst->isBufferInvalid())
+ {
+ LL_WARNS() << func << ": Source: ";
+ if (!src) LL_CONT << "Null pointer";
+ else if (src->isBufferInvalid()) LL_CONT << "Invalid buffer";
+ else LL_CONT << "OK";
- LL_CONT << "; Destination: ";
- if (!dst) LL_CONT << "Null pointer";
- else if (dst->isBufferInvalid()) LL_CONT << "Invalid buffer";
- else LL_CONT << "OK";
- LL_CONT << "." << LL_ENDL;
+ LL_CONT << "; Destination: ";
+ if (!dst) LL_CONT << "Null pointer";
+ else if (dst->isBufferInvalid()) LL_CONT << "Invalid buffer";
+ else LL_CONT << "OK";
+ LL_CONT << "." << LL_ENDL;
- return false;
- }
- return true;
+ return false;
+ }
+ return true;
}
//----------------------------------------------------------------------------
static struct
{
- const char* exten;
- EImageCodec codec;
+ const char* exten;
+ EImageCodec codec;
}
file_extensions[] =
{
- { "bmp", IMG_CODEC_BMP },
- { "tga", IMG_CODEC_TGA },
- { "j2c", IMG_CODEC_J2C },
- { "jp2", IMG_CODEC_J2C },
- { "texture", IMG_CODEC_J2C },
- { "jpg", IMG_CODEC_JPEG },
- { "jpeg", IMG_CODEC_JPEG },
- { "mip", IMG_CODEC_DXT },
- { "dxt", IMG_CODEC_DXT },
- { "png", IMG_CODEC_PNG }
+ { "bmp", IMG_CODEC_BMP },
+ { "tga", IMG_CODEC_TGA },
+ { "j2c", IMG_CODEC_J2C },
+ { "jp2", IMG_CODEC_J2C },
+ { "texture", IMG_CODEC_J2C },
+ { "jpg", IMG_CODEC_JPEG },
+ { "jpeg", IMG_CODEC_JPEG },
+ { "mip", IMG_CODEC_DXT },
+ { "dxt", IMG_CODEC_DXT },
+ { "png", IMG_CODEC_PNG }
};
#define NUM_FILE_EXTENSIONS LL_ARRAY_SIZE(file_extensions)
#if 0
static std::string find_file(std::string &name, S8 *codec)
{
- std::string tname;
- for (int i=0; i<(int)(NUM_FILE_EXTENSIONS); i++)
- {
- tname = name + "." + std::string(file_extensions[i].exten);
- llifstream ifs(tname.c_str(), llifstream::binary);
- if (ifs.is_open())
- {
- ifs.close();
- if (codec)
- *codec = file_extensions[i].codec;
- return std::string(file_extensions[i].exten);
- }
- }
- return std::string("");
+ std::string tname;
+ for (int i=0; i<(int)(NUM_FILE_EXTENSIONS); i++)
+ {
+ tname = name + "." + std::string(file_extensions[i].exten);
+ llifstream ifs(tname.c_str(), llifstream::binary);
+ if (ifs.is_open())
+ {
+ ifs.close();
+ if (codec)
+ *codec = file_extensions[i].codec;
+ return std::string(file_extensions[i].exten);
+ }
+ }
+ return std::string("");
}
#endif
EImageCodec LLImageBase::getCodecFromExtension(const std::string& exten)
{
- if (!exten.empty())
- {
- for (int i = 0; i < (int)(NUM_FILE_EXTENSIONS); i++)
- {
- if (exten == file_extensions[i].exten)
- return file_extensions[i].codec;
- }
- }
- return IMG_CODEC_INVALID;
+ if (!exten.empty())
+ {
+ for (int i = 0; i < (int)(NUM_FILE_EXTENSIONS); i++)
+ {
+ if (exten == file_extensions[i].exten)
+ return file_extensions[i].codec;
+ }
+ }
+ return IMG_CODEC_INVALID;
}
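Note the comparison is an exact string match, so callers are expected to lower-case the extension first (as createFromFile() below does). Hypothetical lookups against the table:

    EImageCodec a = LLImageBase::getCodecFromExtension("jp2");   // IMG_CODEC_J2C
    EImageCodec b = LLImageBase::getCodecFromExtension("gif");   // IMG_CODEC_INVALID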
#if 0
bool LLImageRaw::createFromFile(const std::string &filename, bool j2c_lowest_mip_only)
{
- std::string name = filename;
- size_t dotidx = name.rfind('.');
- S8 codec = IMG_CODEC_INVALID;
- std::string exten;
-
- deleteData(); // delete any existing data
-
- if (dotidx != std::string::npos)
- {
- exten = name.substr(dotidx+1);
- LLStringUtil::toLower(exten);
- codec = getCodecFromExtension(exten);
- }
- else
- {
- exten = find_file(name, &codec);
- name = name + "." + exten;
- }
- if (codec == IMG_CODEC_INVALID)
- {
- return false; // format not recognized
- }
-
- llifstream ifs(name.c_str(), llifstream::binary);
- if (!ifs.is_open())
- {
- // SJB: changed from LL_INFOS() to LL_DEBUGS() to reduce spam
- LL_DEBUGS() << "Unable to open image file: " << name << LL_ENDL;
- return false;
- }
-
- ifs.seekg (0, std::ios::end);
- int length = ifs.tellg();
- if (j2c_lowest_mip_only && length > 2048)
- {
- length = 2048;
- }
- ifs.seekg (0, std::ios::beg);
-
- if (!length)
- {
- LL_INFOS() << "Zero length file file: " << name << LL_ENDL;
- return false;
- }
-
- LLPointer<LLImageFormatted> image = LLImageFormatted::createFromType(codec);
- llassert(image.notNull());
-
- U8 *buffer = image->allocateData(length);
- ifs.read ((char*)buffer, length);
- ifs.close();
-
- bool success;
-
- success = image->updateData();
- if (success)
- {
- if (j2c_lowest_mip_only && codec == IMG_CODEC_J2C)
- {
- S32 width = image->getWidth();
- S32 height = image->getHeight();
- S32 discard_level = 0;
- while (width > 1 && height > 1 && discard_level < MAX_DISCARD_LEVEL)
- {
- width >>= 1;
- height >>= 1;
- discard_level++;
- }
- ((LLImageJ2C *)((LLImageFormatted*)image))->setDiscardLevel(discard_level);
- }
- success = image->decode(this, 100000.0f);
- }
-
- image = NULL; // deletes image
- if (!success)
- {
- deleteData();
- LL_WARNS() << "Unable to decode image" << name << LL_ENDL;
- return false;
- }
-
- return true;
+ std::string name = filename;
+ size_t dotidx = name.rfind('.');
+ S8 codec = IMG_CODEC_INVALID;
+ std::string exten;
+
+ deleteData(); // delete any existing data
+
+ if (dotidx != std::string::npos)
+ {
+ exten = name.substr(dotidx+1);
+ LLStringUtil::toLower(exten);
+ codec = getCodecFromExtension(exten);
+ }
+ else
+ {
+ exten = find_file(name, &codec);
+ name = name + "." + exten;
+ }
+ if (codec == IMG_CODEC_INVALID)
+ {
+ return false; // format not recognized
+ }
+
+ llifstream ifs(name.c_str(), llifstream::binary);
+ if (!ifs.is_open())
+ {
+ // SJB: changed from LL_INFOS() to LL_DEBUGS() to reduce spam
+ LL_DEBUGS() << "Unable to open image file: " << name << LL_ENDL;
+ return false;
+ }
+
+ ifs.seekg (0, std::ios::end);
+ int length = ifs.tellg();
+ if (j2c_lowest_mip_only && length > 2048)
+ {
+ length = 2048;
+ }
+ ifs.seekg (0, std::ios::beg);
+
+ if (!length)
+ {
+ LL_INFOS() << "Zero length file file: " << name << LL_ENDL;
+ return false;
+ }
+
+ LLPointer<LLImageFormatted> image = LLImageFormatted::createFromType(codec);
+ llassert(image.notNull());
+
+ U8 *buffer = image->allocateData(length);
+ ifs.read ((char*)buffer, length);
+ ifs.close();
+
+ bool success;
+
+ success = image->updateData();
+ if (success)
+ {
+ if (j2c_lowest_mip_only && codec == IMG_CODEC_J2C)
+ {
+ S32 width = image->getWidth();
+ S32 height = image->getHeight();
+ S32 discard_level = 0;
+ while (width > 1 && height > 1 && discard_level < MAX_DISCARD_LEVEL)
+ {
+ width >>= 1;
+ height >>= 1;
+ discard_level++;
+ }
+ ((LLImageJ2C *)((LLImageFormatted*)image))->setDiscardLevel(discard_level);
+ }
+ success = image->decode(this, 100000.0f);
+ }
+
+ image = NULL; // deletes image
+ if (!success)
+ {
+ deleteData();
+ LL_WARNS() << "Unable to decode image" << name << LL_ENDL;
+ return false;
+ }
+
+ return true;
}
#endif
//---------------------------------------------------------------------------
@@ -2108,21 +2108,21 @@ bool LLImageRaw::createFromFile(const std::string &filename, bool j2c_lowest_mip
S32 LLImageFormatted::sGlobalFormattedMemory = 0;
LLImageFormatted::LLImageFormatted(S8 codec)
- : LLImageBase(),
- mCodec(codec),
- mDecoding(0),
- mDecoded(0),
- mDiscardLevel(-1),
- mLevels(0)
+ : LLImageBase(),
+ mCodec(codec),
+ mDecoding(0),
+ mDecoded(0),
+ mDiscardLevel(-1),
+ mLevels(0)
{
}
// virtual
LLImageFormatted::~LLImageFormatted()
{
- // NOTE: ~LLimageBase() call to deleteData() calls LLImageBase::deleteData()
- // NOT LLImageFormatted::deleteData()
- deleteData();
+    // NOTE: the deleteData() call in ~LLImageBase() resolves to LLImageBase::deleteData(),
+    // NOT LLImageFormatted::deleteData(), because virtual dispatch is gone by then.
+ deleteData();
}
//----------------------------------------------------------------------------
@@ -2130,16 +2130,16 @@ LLImageFormatted::~LLImageFormatted()
//virtual
void LLImageFormatted::resetLastError()
{
- LLImage::setLastError("");
+ LLImage::setLastError("");
}
//virtual
void LLImageFormatted::setLastError(const std::string& message, const std::string& filename)
{
- std::string error = message;
- if (!filename.empty())
- error += std::string(" FILE: ") + filename;
- LLImage::setLastError(error);
+ std::string error = message;
+ if (!filename.empty())
+ error += std::string(" FILE: ") + filename;
+ LLImage::setLastError(error);
}
//----------------------------------------------------------------------------
@@ -2147,97 +2147,97 @@ void LLImageFormatted::setLastError(const std::string& message, const std::strin
// static
LLImageFormatted* LLImageFormatted::createFromType(S8 codec)
{
- LLImageFormatted* image;
- switch(codec)
- {
- case IMG_CODEC_BMP:
- image = new LLImageBMP();
- break;
- case IMG_CODEC_TGA:
- image = new LLImageTGA();
- break;
- case IMG_CODEC_JPEG:
- image = new LLImageJPEG();
- break;
- case IMG_CODEC_PNG:
- image = new LLImagePNG();
- break;
- case IMG_CODEC_J2C:
- image = new LLImageJ2C();
- break;
- case IMG_CODEC_DXT:
- image = new LLImageDXT();
- break;
- default:
- image = NULL;
- break;
- }
- return image;
+ LLImageFormatted* image;
+ switch(codec)
+ {
+ case IMG_CODEC_BMP:
+ image = new LLImageBMP();
+ break;
+ case IMG_CODEC_TGA:
+ image = new LLImageTGA();
+ break;
+ case IMG_CODEC_JPEG:
+ image = new LLImageJPEG();
+ break;
+ case IMG_CODEC_PNG:
+ image = new LLImagePNG();
+ break;
+ case IMG_CODEC_J2C:
+ image = new LLImageJ2C();
+ break;
+ case IMG_CODEC_DXT:
+ image = new LLImageDXT();
+ break;
+ default:
+ image = NULL;
+ break;
+ }
+ return image;
}
// static
LLImageFormatted* LLImageFormatted::createFromExtension(const std::string& instring)
{
- std::string exten;
- size_t dotidx = instring.rfind('.');
- if (dotidx != std::string::npos)
- {
- exten = instring.substr(dotidx+1);
- }
- else
- {
- exten = instring;
- }
- S8 codec = getCodecFromExtension(exten);
- return createFromType(codec);
+ std::string exten;
+ size_t dotidx = instring.rfind('.');
+ if (dotidx != std::string::npos)
+ {
+ exten = instring.substr(dotidx+1);
+ }
+ else
+ {
+ exten = instring;
+ }
+ S8 codec = getCodecFromExtension(exten);
+ return createFromType(codec);
}
//----------------------------------------------------------------------------
// virtual
void LLImageFormatted::dump()
{
- LLImageBase::dump();
+ LLImageBase::dump();
- LL_INFOS() << "LLImageFormatted"
- << " mDecoding " << mDecoding
- << " mCodec " << S32(mCodec)
- << " mDecoded " << mDecoded
- << LL_ENDL;
+ LL_INFOS() << "LLImageFormatted"
+ << " mDecoding " << mDecoding
+ << " mCodec " << S32(mCodec)
+ << " mDecoded " << mDecoded
+ << LL_ENDL;
}
//----------------------------------------------------------------------------
S32 LLImageFormatted::calcDataSize(S32 discard_level)
{
- if (discard_level < 0)
- {
- discard_level = mDiscardLevel;
- }
- S32 w = getWidth() >> discard_level;
- S32 h = getHeight() >> discard_level;
- w = llmax(w, 1);
- h = llmax(h, 1);
- return w * h * getComponents();
+ if (discard_level < 0)
+ {
+ discard_level = mDiscardLevel;
+ }
+ S32 w = getWidth() >> discard_level;
+ S32 h = getHeight() >> discard_level;
+ w = llmax(w, 1);
+ h = llmax(h, 1);
+ return w * h * getComponents();
}
S32 LLImageFormatted::calcDiscardLevelBytes(S32 bytes)
{
- llassert(bytes >= 0);
- S32 discard_level = 0;
- while (1)
- {
- S32 bytes_needed = calcDataSize(discard_level); // virtual
- if (bytes_needed <= bytes)
- {
- break;
- }
- discard_level++;
- if (discard_level > MAX_IMAGE_MIP)
- {
- return -1;
- }
- }
- return discard_level;
+ llassert(bytes >= 0);
+ S32 discard_level = 0;
+ while (1)
+ {
+ S32 bytes_needed = calcDataSize(discard_level); // virtual
+ if (bytes_needed <= bytes)
+ {
+ break;
+ }
+ discard_level++;
+ if (discard_level > MAX_IMAGE_MIP)
+ {
+ return -1;
+ }
+ }
+ return discard_level;
}
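A worked example of the two functions above for a 512x512 RGBA image (getComponents() == 4): calcDataSize(0) = 512*512*4 = 1,048,576; calcDataSize(1) = 256*256*4 = 262,144; calcDataSize(2) = 128*128*4 = 65,536. So calcDiscardLevelBytes(100000) returns 2, the first discard level whose data fits the byte budget.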
@@ -2246,34 +2246,34 @@ S32 LLImageFormatted::calcDiscardLevelBytes(S32 bytes)
// Subclasses that can handle more than 4 channels should override this function.
bool LLImageFormatted::decodeChannels(LLImageRaw* raw_image,F32 decode_time, S32 first_channel, S32 max_channel)
{
- llassert( (first_channel == 0) && (max_channel == 4) );
- return decode( raw_image, decode_time ); // Loads first 4 channels by default.
-}
+ llassert( (first_channel == 0) && (max_channel == 4) );
+ return decode( raw_image, decode_time ); // Loads first 4 channels by default.
+}
//----------------------------------------------------------------------------
// virtual
U8* LLImageFormatted::allocateData(S32 size)
{
- U8* res = LLImageBase::allocateData(size); // calls deleteData()
- sGlobalFormattedMemory += getDataSize();
- return res;
+ U8* res = LLImageBase::allocateData(size); // calls deleteData()
+ sGlobalFormattedMemory += getDataSize();
+ return res;
}
// virtual
U8* LLImageFormatted::reallocateData(S32 size)
{
- sGlobalFormattedMemory -= getDataSize();
- U8* res = LLImageBase::reallocateData(size);
- sGlobalFormattedMemory += getDataSize();
- return res;
+ sGlobalFormattedMemory -= getDataSize();
+ U8* res = LLImageBase::reallocateData(size);
+ sGlobalFormattedMemory += getDataSize();
+ return res;
}
// virtual
void LLImageFormatted::deleteData()
{
- sGlobalFormattedMemory -= getDataSize();
- LLImageBase::deleteData();
+ sGlobalFormattedMemory -= getDataSize();
+ LLImageBase::deleteData();
}
//----------------------------------------------------------------------------
@@ -2281,196 +2281,196 @@ void LLImageFormatted::deleteData()
// virtual
void LLImageFormatted::sanityCheck()
{
- LLImageBase::sanityCheck();
+ LLImageBase::sanityCheck();
- if (mCodec >= IMG_CODEC_EOF)
- {
- LL_ERRS() << "Failed LLImageFormatted::sanityCheck "
- << "decoding " << S32(mDecoding)
- << "decoded " << S32(mDecoded)
- << "codec " << S32(mCodec)
- << LL_ENDL;
- }
+ if (mCodec >= IMG_CODEC_EOF)
+ {
+ LL_ERRS() << "Failed LLImageFormatted::sanityCheck "
+ << "decoding " << S32(mDecoding)
+ << "decoded " << S32(mDecoded)
+ << "codec " << S32(mCodec)
+ << LL_ENDL;
+ }
}
//----------------------------------------------------------------------------
bool LLImageFormatted::copyData(U8 *data, S32 size)
{
- if ( data && ((data != getData()) || (size != getDataSize())) )
- {
- deleteData();
- allocateData(size);
- memcpy(getData(), data, size); /* Flawfinder: ignore */
- }
- return true;
+ if ( data && ((data != getData()) || (size != getDataSize())) )
+ {
+ deleteData();
+ allocateData(size);
+ memcpy(getData(), data, size); /* Flawfinder: ignore */
+ }
+ return true;
}
// LLImageFormatted becomes the owner of data
void LLImageFormatted::setData(U8 *data, S32 size)
{
- if (data && data != getData())
- {
- deleteData();
- setDataAndSize(data, size); // Access private LLImageBase members
+ if (data && data != getData())
+ {
+ deleteData();
+ setDataAndSize(data, size); // Access private LLImageBase members
- sGlobalFormattedMemory += getDataSize();
- }
+ sGlobalFormattedMemory += getDataSize();
+ }
}
void LLImageFormatted::appendData(U8 *data, S32 size)
{
- if (data)
- {
- if (!getData())
- {
- setData(data, size);
- }
- else
- {
- S32 cursize = getDataSize();
- S32 newsize = cursize + size;
- reallocateData(newsize);
- memcpy(getData() + cursize, data, size);
- ll_aligned_free_16(data);
- }
- }
+ if (data)
+ {
+ if (!getData())
+ {
+ setData(data, size);
+ }
+ else
+ {
+ S32 cursize = getDataSize();
+ S32 newsize = cursize + size;
+ reallocateData(newsize);
+ memcpy(getData() + cursize, data, size);
+ ll_aligned_free_16(data);
+ }
+ }
}
//----------------------------------------------------------------------------
bool LLImageFormatted::load(const std::string &filename, int load_size)
{
- resetLastError();
-
- S32 file_size = 0;
- LLAPRFile infile ;
- infile.open(filename, LL_APR_RB, NULL, &file_size);
- apr_file_t* apr_file = infile.getFileHandle();
- if (!apr_file)
- {
- setLastError("Unable to open file for reading", filename);
- return false;
- }
- if (file_size == 0)
- {
- setLastError("File is empty",filename);
- return false;
- }
-
- // Constrain the load size to acceptable values
- if ((load_size == 0) || (load_size > file_size))
- {
- load_size = file_size;
- }
- bool res;
- U8 *data = allocateData(load_size);
- if (data)
- {
- apr_size_t bytes_read = load_size;
- apr_status_t s = apr_file_read(apr_file, data, &bytes_read); // modifies bytes_read
- if (s != APR_SUCCESS || (S32) bytes_read != load_size)
- {
- deleteData();
- setLastError("Unable to read file",filename);
- res = false;
- }
- else
- {
- res = updateData();
- }
- }
- else
- {
- setLastError("Allocation failure", filename);
- res = false;
- }
-
- return res;
+ resetLastError();
+
+ S32 file_size = 0;
+    LLAPRFile infile;
+ infile.open(filename, LL_APR_RB, NULL, &file_size);
+ apr_file_t* apr_file = infile.getFileHandle();
+ if (!apr_file)
+ {
+ setLastError("Unable to open file for reading", filename);
+ return false;
+ }
+ if (file_size == 0)
+ {
+ setLastError("File is empty",filename);
+ return false;
+ }
+
+ // Constrain the load size to acceptable values
+ if ((load_size == 0) || (load_size > file_size))
+ {
+ load_size = file_size;
+ }
+ bool res;
+ U8 *data = allocateData(load_size);
+ if (data)
+ {
+ apr_size_t bytes_read = load_size;
+ apr_status_t s = apr_file_read(apr_file, data, &bytes_read); // modifies bytes_read
+ if (s != APR_SUCCESS || (S32) bytes_read != load_size)
+ {
+ deleteData();
+ setLastError("Unable to read file",filename);
+ res = false;
+ }
+ else
+ {
+ res = updateData();
+ }
+ }
+ else
+ {
+ setLastError("Allocation failure", filename);
+ res = false;
+ }
+
+ return res;
}
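A hedged sketch of the typical load-then-decode flow built from the routines in this file (error handling elided; assumes LLImageRaw's default constructor):

    LLPointer<LLImageFormatted> img = LLImageFormatted::createFromExtension("snapshot.png");
    if (img.notNull() && img->load("snapshot.png"))   // reads the file, calls updateData()
    {
        LLPointer<LLImageRaw> raw = new LLImageRaw;
        img->decode(raw, 0.0f);                       // decode into raw pixel data
    }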
bool LLImageFormatted::save(const std::string &filename)
{
- resetLastError();
-
- LLAPRFile outfile ;
- outfile.open(filename, LL_APR_WB);
- if (!outfile.getFileHandle())
- {
- setLastError("Unable to open file for writing", filename);
- return false;
- }
-
- S32 result = outfile.write(getData(), getDataSize());
- outfile.close() ;
+ resetLastError();
+
+    LLAPRFile outfile;
+ outfile.open(filename, LL_APR_WB);
+ if (!outfile.getFileHandle())
+ {
+ setLastError("Unable to open file for writing", filename);
+ return false;
+ }
+
+ S32 result = outfile.write(getData(), getDataSize());
+    outfile.close();
return (result != 0);
}
S8 LLImageFormatted::getCodec() const
{
- return mCodec;
+ return mCodec;
}
static void avg4_colors4(const U8* a, const U8* b, const U8* c, const U8* d, U8* dst)
{
- dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0])>>2);
- dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1])>>2);
- dst[2] = (U8)(((U32)(a[2]) + b[2] + c[2] + d[2])>>2);
- dst[3] = (U8)(((U32)(a[3]) + b[3] + c[3] + d[3])>>2);
+ dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0])>>2);
+ dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1])>>2);
+ dst[2] = (U8)(((U32)(a[2]) + b[2] + c[2] + d[2])>>2);
+ dst[3] = (U8)(((U32)(a[3]) + b[3] + c[3] + d[3])>>2);
}
static void avg4_colors3(const U8* a, const U8* b, const U8* c, const U8* d, U8* dst)
{
- dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0])>>2);
- dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1])>>2);
- dst[2] = (U8)(((U32)(a[2]) + b[2] + c[2] + d[2])>>2);
+ dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0])>>2);
+ dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1])>>2);
+ dst[2] = (U8)(((U32)(a[2]) + b[2] + c[2] + d[2])>>2);
}
static void avg4_colors2(const U8* a, const U8* b, const U8* c, const U8* d, U8* dst)
{
- dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0])>>2);
- dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1])>>2);
+ dst[0] = (U8)(((U32)(a[0]) + b[0] + c[0] + d[0])>>2);
+ dst[1] = (U8)(((U32)(a[1]) + b[1] + c[1] + d[1])>>2);
}
void LLImageBase::setDataAndSize(U8 *data, S32 size)
-{
- ll_assert_aligned(data, 16);
- mData = data;
- mDataSize = size;
-}
+{
+ ll_assert_aligned(data, 16);
+ mData = data;
+ mDataSize = size;
+}
//static
void LLImageBase::generateMip(const U8* indata, U8* mipdata, S32 width, S32 height, S32 nchannels)
{
- llassert(width > 0 && height > 0);
- U8* data = mipdata;
- S32 in_width = width*2;
- for (S32 h=0; h<height; h++)
- {
- for (S32 w=0; w<width; w++)
- {
- switch(nchannels)
- {
- case 4:
- avg4_colors4(indata, indata+4, indata+4*in_width, indata+4*in_width+4, data);
- break;
- case 3:
- avg4_colors3(indata, indata+3, indata+3*in_width, indata+3*in_width+3, data);
- break;
- case 2:
- avg4_colors2(indata, indata+2, indata+2*in_width, indata+2*in_width+2, data);
- break;
- case 1:
- *(U8*)data = (U8)(((U32)(indata[0]) + indata[1] + indata[in_width] + indata[in_width+1])>>2);
- break;
- default:
- LL_ERRS() << "generateMmip called with bad num channels" << LL_ENDL;
- }
- indata += nchannels*2;
- data += nchannels;
- }
- indata += nchannels*in_width; // skip odd lines
- }
+ llassert(width > 0 && height > 0);
+ U8* data = mipdata;
+ S32 in_width = width*2;
+ for (S32 h=0; h<height; h++)
+ {
+ for (S32 w=0; w<width; w++)
+ {
+ switch(nchannels)
+ {
+ case 4:
+ avg4_colors4(indata, indata+4, indata+4*in_width, indata+4*in_width+4, data);
+ break;
+ case 3:
+ avg4_colors3(indata, indata+3, indata+3*in_width, indata+3*in_width+3, data);
+ break;
+ case 2:
+ avg4_colors2(indata, indata+2, indata+2*in_width, indata+2*in_width+2, data);
+ break;
+ case 1:
+ *(U8*)data = (U8)(((U32)(indata[0]) + indata[1] + indata[in_width] + indata[in_width+1])>>2);
+ break;
+ default:
+ LL_ERRS() << "generateMmip called with bad num channels" << LL_ENDL;
+ }
+ indata += nchannels*2;
+ data += nchannels;
+ }
+ indata += nchannels*in_width; // skip odd lines
+ }
}
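generateMip() is a straight 2x2 box filter: each output texel is the truncating average of a 2x2 input block. For one channel with inputs 10, 20 (first row) and 30, 40 (second row), the output is (10 + 20 + 30 + 40) >> 2 = 25. Note that width and height here are the dimensions of the mip being produced; the input is read at twice those dimensions, and odd input lines are skipped by the final pointer bump.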
@@ -2479,69 +2479,69 @@ void LLImageBase::generateMip(const U8* indata, U8* mipdata, S32 width, S32 heig
//static
F32 LLImageBase::calc_download_priority(F32 virtual_size, F32 visible_pixels, S32 bytes_sent)
{
- F32 w_priority;
-
- F32 bytes_weight = 1.f;
- if (!bytes_sent)
- {
- bytes_weight = 20.f;
- }
- else if (bytes_sent < 1000)
- {
- bytes_weight = 1.f;
- }
- else if (bytes_sent < 2000)
- {
- bytes_weight = 1.f/1.5f;
- }
- else if (bytes_sent < 4000)
- {
- bytes_weight = 1.f/3.f;
- }
- else if (bytes_sent < 8000)
- {
- bytes_weight = 1.f/6.f;
- }
- else if (bytes_sent < 16000)
- {
- bytes_weight = 1.f/12.f;
- }
- else if (bytes_sent < 32000)
- {
- bytes_weight = 1.f/20.f;
- }
- else if (bytes_sent < 64000)
- {
- bytes_weight = 1.f/32.f;
- }
- else
- {
- bytes_weight = 1.f/64.f;
- }
- bytes_weight *= bytes_weight;
-
-
- //LL_INFOS() << "VS: " << virtual_size << LL_ENDL;
- F32 virtual_size_factor = virtual_size / (10.f*10.f);
-
- // The goal is for weighted priority to be <= 0 when we've reached a point where
- // we've sent enough data.
- //LL_INFOS() << "BytesSent: " << bytes_sent << LL_ENDL;
- //LL_INFOS() << "BytesWeight: " << bytes_weight << LL_ENDL;
- //LL_INFOS() << "PreLog: " << bytes_weight * virtual_size_factor << LL_ENDL;
- w_priority = (F32)log10(bytes_weight * virtual_size_factor);
-
- //LL_INFOS() << "PreScale: " << w_priority << LL_ENDL;
-
- // We don't want to affect how MANY bytes we send based on the visible pixels, but the order
- // in which they're sent. We post-multiply so we don't change the zero point.
- if (w_priority > 0.f)
- {
- F32 pixel_weight = (F32)log10(visible_pixels + 1)*3.0f;
- w_priority *= pixel_weight;
- }
-
- return w_priority;
+ F32 w_priority;
+
+ F32 bytes_weight = 1.f;
+ if (!bytes_sent)
+ {
+ bytes_weight = 20.f;
+ }
+ else if (bytes_sent < 1000)
+ {
+ bytes_weight = 1.f;
+ }
+ else if (bytes_sent < 2000)
+ {
+ bytes_weight = 1.f/1.5f;
+ }
+ else if (bytes_sent < 4000)
+ {
+ bytes_weight = 1.f/3.f;
+ }
+ else if (bytes_sent < 8000)
+ {
+ bytes_weight = 1.f/6.f;
+ }
+ else if (bytes_sent < 16000)
+ {
+ bytes_weight = 1.f/12.f;
+ }
+ else if (bytes_sent < 32000)
+ {
+ bytes_weight = 1.f/20.f;
+ }
+ else if (bytes_sent < 64000)
+ {
+ bytes_weight = 1.f/32.f;
+ }
+ else
+ {
+ bytes_weight = 1.f/64.f;
+ }
+ bytes_weight *= bytes_weight;
+
+
+ //LL_INFOS() << "VS: " << virtual_size << LL_ENDL;
+ F32 virtual_size_factor = virtual_size / (10.f*10.f);
+
+ // The goal is for weighted priority to be <= 0 when we've reached a point where
+ // we've sent enough data.
+ //LL_INFOS() << "BytesSent: " << bytes_sent << LL_ENDL;
+ //LL_INFOS() << "BytesWeight: " << bytes_weight << LL_ENDL;
+ //LL_INFOS() << "PreLog: " << bytes_weight * virtual_size_factor << LL_ENDL;
+ w_priority = (F32)log10(bytes_weight * virtual_size_factor);
+
+ //LL_INFOS() << "PreScale: " << w_priority << LL_ENDL;
+
+ // We don't want to affect how MANY bytes we send based on the visible pixels, but the order
+ // in which they're sent. We post-multiply so we don't change the zero point.
+ if (w_priority > 0.f)
+ {
+ F32 pixel_weight = (F32)log10(visible_pixels + 1)*3.0f;
+ w_priority *= pixel_weight;
+ }
+
+ return w_priority;
}
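A worked example of the priority formula: with bytes_sent == 0, bytes_weight is 20 and squares to 400; a virtual_size of 10000 gives a size factor of 100; so w_priority = log10(400 * 100), about 4.60. With 1000 visible pixels the post-multiplier is log10(1001) * 3, about 9.00, for a final priority of roughly 41.4. Once enough bytes have been sent that the squared weight times the size factor drops below 1, the log goes non-positive and the texture stops requesting data.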
//============================================================================
diff --git a/indra/llimage/llimage.h b/indra/llimage/llimage.h
index c49184e338..a5f05d4380 100644
--- a/indra/llimage/llimage.h
+++ b/indra/llimage/llimage.h
@@ -1,25 +1,25 @@
-/**
+/**
* @file llimage.h
* @brief Object for managing images and their textures.
*
* $LicenseInfo:firstyear=2000&license=viewerlgpl$
* Second Life Viewer Source Code
* Copyright (C) 2010, Linden Research, Inc.
- *
+ *
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License only.
- *
+ *
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
- *
+ *
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
+ *
* Linden Research, Inc., 945 Battery Street, San Francisco, CA 94111 USA
* $/LicenseInfo$
*/
@@ -35,21 +35,21 @@
const S32 MIN_IMAGE_MIP = 2; // 4x4, only used for expand/contract power of 2
const S32 MAX_IMAGE_MIP = 12; // 4096x4096
-// *TODO : Use MAX_IMAGE_MIP as max discard level and modify j2c management so that the number
+// *TODO : Use MAX_IMAGE_MIP as max discard level and modify j2c management so that the number
// of levels is read from the file's header, not inferred from its size.
const S32 MAX_DISCARD_LEVEL = 5;
// JPEG2000 size constraints
// Those are declared here as they are germane to other image constraints used in the viewer
// and declared right here. Some come from the JPEG2000 spec, some conventions specific to SL.
-const S32 MAX_DECOMPOSITION_LEVELS = 32; // Number of decomposition levels cannot exceed 32 according to jpeg2000 spec
-const S32 MIN_DECOMPOSITION_LEVELS = 5; // the SL viewer will *crash* trying to decode images with fewer than 5 decomposition levels (unless image is small that is)
-const S32 MAX_PRECINCT_SIZE = 4096; // No reason to be bigger than MAX_IMAGE_SIZE
-const S32 MIN_PRECINCT_SIZE = 4; // Can't be smaller than MIN_BLOCK_SIZE
-const S32 MAX_BLOCK_SIZE = 64; // Max total block size is 4096, hence 64x64 when using square blocks
-const S32 MIN_BLOCK_SIZE = 4; // Min block dim is 4 according to jpeg2000 spec
-const S32 MIN_LAYER_SIZE = 2000; // Size of the first quality layer (after header). Must be > to FIRST_PACKET_SIZE!!
-const S32 MAX_NB_LAYERS = 64; // Max number of layers we'll entertain in SL (practical limit)
+const S32 MAX_DECOMPOSITION_LEVELS = 32; // Number of decomposition levels cannot exceed 32 according to jpeg2000 spec
+const S32 MIN_DECOMPOSITION_LEVELS = 5;     // the SL viewer will *crash* decoding images with fewer than 5 decomposition levels (unless the image is small)
+const S32 MAX_PRECINCT_SIZE = 4096; // No reason to be bigger than MAX_IMAGE_SIZE
+const S32 MIN_PRECINCT_SIZE = 4; // Can't be smaller than MIN_BLOCK_SIZE
+const S32 MAX_BLOCK_SIZE = 64; // Max total block size is 4096, hence 64x64 when using square blocks
+const S32 MIN_BLOCK_SIZE = 4; // Min block dim is 4 according to jpeg2000 spec
+const S32 MIN_LAYER_SIZE = 2000;            // Size of the first quality layer (after header). Must be greater than FIRST_PACKET_SIZE!
+const S32 MAX_NB_LAYERS = 64; // Max number of layers we'll entertain in SL (practical limit)
const S32 MIN_IMAGE_SIZE = (1<<MIN_IMAGE_MIP); // 4, only used for expand/contract power of 2
const S32 MAX_IMAGE_SIZE = (1<<MAX_IMAGE_MIP); // 4096
@@ -75,15 +75,15 @@ class LLColor3;
typedef enum e_image_codec
{
- IMG_CODEC_INVALID = 0,
- IMG_CODEC_RGB = 1,
- IMG_CODEC_J2C = 2,
- IMG_CODEC_BMP = 3,
- IMG_CODEC_TGA = 4,
- IMG_CODEC_JPEG = 5,
- IMG_CODEC_DXT = 6,
- IMG_CODEC_PNG = 7,
- IMG_CODEC_EOF = 8
+ IMG_CODEC_INVALID = 0,
+ IMG_CODEC_RGB = 1,
+ IMG_CODEC_J2C = 2,
+ IMG_CODEC_BMP = 3,
+ IMG_CODEC_TGA = 4,
+ IMG_CODEC_JPEG = 5,
+ IMG_CODEC_DXT = 6,
+ IMG_CODEC_PNG = 7,
+ IMG_CODEC_EOF = 8
} EImageCodec;
//============================================================================
@@ -93,123 +93,123 @@ typedef enum e_image_codec
class LLImage
{
public:
- static void initClass(bool use_new_byte_range = false, S32 minimal_reverse_byte_range_percent = 75);
- static void cleanupClass();
-
- static const std::string& getLastError();
- static void setLastError(const std::string& message);
-
- static bool useNewByteRange() { return sUseNewByteRange; }
- static S32 getReverseByteRangePercent() { return sMinimalReverseByteRangePercent; }
-
+ static void initClass(bool use_new_byte_range = false, S32 minimal_reverse_byte_range_percent = 75);
+ static void cleanupClass();
+
+ static const std::string& getLastError();
+ static void setLastError(const std::string& message);
+
+ static bool useNewByteRange() { return sUseNewByteRange; }
+ static S32 getReverseByteRangePercent() { return sMinimalReverseByteRangePercent; }
+
protected:
- static LLMutex* sMutex;
- static std::string sLastErrorMessage;
- static bool sUseNewByteRange;
+ static LLMutex* sMutex;
+ static std::string sLastErrorMessage;
+ static bool sUseNewByteRange;
static S32 sMinimalReverseByteRangePercent;
};
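// Typical lifecycle for the static LLImage facilities above (a usage sketch, not
// code from this diff): initialize once at startup, query settings and the last
// error anywhere, tear down at shutdown.
inline void sketch_imageSubsystemLifecycle()
{
    LLImage::initClass(true /*use_new_byte_range*/, 75 /*minimal_reverse_byte_range_percent*/);
    if (LLImage::useNewByteRange())
    {
        S32 percent = LLImage::getReverseByteRangePercent(); // 75 with the call above
        (void)percent;
    }
    const std::string& err = LLImage::getLastError(); // last recorded error, if any
    (void)err;
    LLImage::cleanupClass();
}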
//============================================================================
// Image base class
-class LLImageBase
-: public LLThreadSafeRefCount
+class LLImageBase
+: public LLThreadSafeRefCount
{
protected:
- virtual ~LLImageBase();
-
+ virtual ~LLImageBase();
+
public:
- LLImageBase();
+ LLImageBase();
- enum
- {
- TYPE_NORMAL = 0,
- TYPE_AVATAR_BAKE = 1,
- };
+ enum
+ {
+ TYPE_NORMAL = 0,
+ TYPE_AVATAR_BAKE = 1,
+ };
- virtual void deleteData();
- virtual U8* allocateData(S32 size = -1);
- virtual U8* reallocateData(S32 size = -1);
+ virtual void deleteData();
+ virtual U8* allocateData(S32 size = -1);
+ virtual U8* reallocateData(S32 size = -1);
- virtual void dump();
- virtual void sanityCheck();
+ virtual void dump();
+ virtual void sanityCheck();
- U16 getWidth() const { return mWidth; }
- U16 getHeight() const { return mHeight; }
- S8 getComponents() const { return mComponents; }
- S32 getDataSize() const { return mDataSize; }
+ U16 getWidth() const { return mWidth; }
+ U16 getHeight() const { return mHeight; }
+ S8 getComponents() const { return mComponents; }
+ S32 getDataSize() const { return mDataSize; }
- const U8 *getData() const ;
- U8 *getData() ;
- bool isBufferInvalid() const;
+ const U8 *getData() const ;
+ U8 *getData() ;
+ bool isBufferInvalid() const;
- void setSize(S32 width, S32 height, S32 ncomponents);
- U8* allocateDataSize(S32 width, S32 height, S32 ncomponents, S32 size = -1); // setSize() + allocateData()
- void enableOverSize() {mAllowOverSize = true ;}
- void disableOverSize() {mAllowOverSize = false; }
+ void setSize(S32 width, S32 height, S32 ncomponents);
+ U8* allocateDataSize(S32 width, S32 height, S32 ncomponents, S32 size = -1); // setSize() + allocateData()
+ void enableOverSize() {mAllowOverSize = true ;}
+ void disableOverSize() {mAllowOverSize = false; }
protected:
- // special accessor to allow direct setting of mData and mDataSize by LLImageFormatted
- void setDataAndSize(U8 *data, S32 size);
-
+ // special accessor to allow direct setting of mData and mDataSize by LLImageFormatted
+ void setDataAndSize(U8 *data, S32 size);
+
public:
- static void generateMip(const U8 *indata, U8* mipdata, int width, int height, S32 nchannels);
-
- // Function for calculating the download priority for textures
- // <= 0 priority means that there's no need for more data.
- static F32 calc_download_priority(F32 virtual_size, F32 visible_area, S32 bytes_sent);
+ static void generateMip(const U8 *indata, U8* mipdata, int width, int height, S32 nchannels);
+
+ // Function for calculating the download priority for textures
+ // <= 0 priority means that there's no need for more data.
+ static F32 calc_download_priority(F32 virtual_size, F32 visible_area, S32 bytes_sent);
- static EImageCodec getCodecFromExtension(const std::string& exten);
+ static EImageCodec getCodecFromExtension(const std::string& exten);
- //static LLTrace::MemStatHandle sMemStat;
+ //static LLTrace::MemStatHandle sMemStat;
private:
- U8 *mData;
- S32 mDataSize;
+ U8 *mData;
+ S32 mDataSize;
- U16 mWidth;
- U16 mHeight;
+ U16 mWidth;
+ U16 mHeight;
- S8 mComponents;
+ S8 mComponents;
- bool mBadBufferAllocation ;
- bool mAllowOverSize ;
+ bool mBadBufferAllocation ;
+ bool mAllowOverSize ;
};
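// A usage sketch for the buffer management above (illustrative only):
// allocateDataSize() combines setSize() and allocateData(), and isBufferInvalid()
// reports a failed allocation.
inline void sketch_allocatePixels(LLImageBase* img)
{
    U8* data = img->allocateDataSize(256, 256, 4); // 256x256 RGBA -> 262144 bytes
    if (!data || img->isBufferInvalid())
    {
        return; // allocation failed
    }
    // img->getDataSize() now returns 256 * 256 * 4.
}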
// Raw representation of an image (used for textures and other uncompressed formats)
class LLImageRaw : public LLImageBase
{
protected:
- /*virtual*/ ~LLImageRaw();
-
+ /*virtual*/ ~LLImageRaw();
+
public:
- LLImageRaw();
- LLImageRaw(U16 width, U16 height, S8 components);
+ LLImageRaw();
+ LLImageRaw(U16 width, U16 height, S8 components);
LLImageRaw(const U8* data, U16 width, U16 height, S8 components);
- LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy = false);
- // Construct using createFromFile (used by tools)
- //LLImageRaw(const std::string& filename, bool j2c_lowest_mip_only = false);
+ LLImageRaw(U8 *data, U16 width, U16 height, S8 components, bool no_copy = false);
+ // Construct using createFromFile (used by tools)
+ //LLImageRaw(const std::string& filename, bool j2c_lowest_mip_only = false);
- /*virtual*/ void deleteData();
- /*virtual*/ U8* allocateData(S32 size = -1);
- /*virtual*/ U8* reallocateData(S32 size);
+ /*virtual*/ void deleteData();
+ /*virtual*/ U8* allocateData(S32 size = -1);
+ /*virtual*/ U8* reallocateData(S32 size);
// use in conjunction with "no_copy" constructor to release data pointer before deleting
- // so that deletion of this LLImageRaw will not free the memory at the "data" parameter
+ // so that deletion of this LLImageRaw will not free the memory at the "data" parameter
// provided to "no_copy" constructor
void releaseData();
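    // A hedged example of the no_copy/releaseData() pairing described above
    // (illustrative only; the caller keeps ownership of the buffer):
    //
    //   U8* pixels = new U8[64 * 64 * 3];
    //   {
    //       LLPointer<LLImageRaw> wrapper = new LLImageRaw(pixels, 64, 64, 3, true /*no_copy*/);
    //       // ... use wrapper ...
    //       wrapper->releaseData(); // detach so ~LLImageRaw() does not free pixels
    //   }
    //   delete[] pixels; // still owned by the caller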
-
- bool resize(U16 width, U16 height, S8 components);
- //U8 * getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const;
- bool setSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height,
- const U8 *data, U32 stride = 0, bool reverse_y = false);
+ bool resize(U16 width, U16 height, S8 components);
+
+ //U8 * getSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height) const;
+ bool setSubImage(U32 x_pos, U32 y_pos, U32 width, U32 height,
+ const U8 *data, U32 stride = 0, bool reverse_y = false);
+
+ void clear(U8 r=0, U8 g=0, U8 b=0, U8 a=255);
- void clear(U8 r=0, U8 g=0, U8 b=0, U8 a=255);
+ void verticalFlip();
- void verticalFlip();
-
// Returns true if the image is not fully opaque
bool checkHasTransparentPixels();
// if the alpha channel is all 100% opaque, delete it
@@ -221,60 +221,60 @@ public:
static S32 biasedDimToPowerOfTwo(S32 curr_dim, S32 max_dim = MAX_IMAGE_SIZE);
static S32 expandDimToPowerOfTwo(S32 curr_dim, S32 max_dim = MAX_IMAGE_SIZE);
static S32 contractDimToPowerOfTwo(S32 curr_dim, S32 min_dim = MIN_IMAGE_SIZE);
- void expandToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE, bool scale_image = true);
- void contractToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE, bool scale_image = true);
- void biasedScaleToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE);
- bool scale(S32 new_width, S32 new_height, bool scale_image = true);
+ void expandToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE, bool scale_image = true);
+ void contractToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE, bool scale_image = true);
+ void biasedScaleToPowerOfTwo(S32 max_dim = MAX_IMAGE_SIZE);
+ bool scale(S32 new_width, S32 new_height, bool scale_image = true);
LLPointer<LLImageRaw> scaled(S32 new_width, S32 new_height);
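    // Sketch of what the power-of-two helpers above are assumed to compute
    // (the implementations live in llimage.cpp):
    //   expandDimToPowerOfTwo(100)   -> 128 (next power of two, clamped to max_dim)
    //   contractDimToPowerOfTwo(100) -> 64  (previous power of two, clamped to min_dim)
    //   biasedDimToPowerOfTwo(100)   -> nearest power of two of the two choices (assumed)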
-
- // Fill the buffer with a constant color
- void fill( const LLColor4U& color );
+
+ // Fill the buffer with a constant color
+ void fill( const LLColor4U& color );
// Multiply this raw image by the given color
void tint( const LLColor3& color );
- // Copy operations
-
- //duplicate this raw image if refCount > 1.
- LLPointer<LLImageRaw> duplicate();
+ // Copy operations
+
+ //duplicate this raw image if refCount > 1.
+ LLPointer<LLImageRaw> duplicate();
- // Src and dst can be any size. Src and dst can each have 3 or 4 components.
- void copy( LLImageRaw* src );
+ // Src and dst can be any size. Src and dst can each have 3 or 4 components.
+ void copy( LLImageRaw* src );
- // Src and dst are same size. Src and dst have same number of components.
- void copyUnscaled( LLImageRaw* src );
-
- // Src and dst are same size. Src has 4 components. Dst has 3 components.
- void copyUnscaled4onto3( LLImageRaw* src );
+ // Src and dst are same size. Src and dst have same number of components.
+ void copyUnscaled( LLImageRaw* src );
- // Src and dst are same size. Src has 3 components. Dst has 4 components.
- void copyUnscaled3onto4( LLImageRaw* src );
+ // Src and dst are same size. Src has 4 components. Dst has 3 components.
+ void copyUnscaled4onto3( LLImageRaw* src );
- // Src and dst are same size. Src has 1 component. Dst has 4 components.
- // Alpha component is set to source alpha mask component.
- // RGB components are set to fill color.
- void copyUnscaledAlphaMask( LLImageRaw* src, const LLColor4U& fill);
+ // Src and dst are same size. Src has 3 components. Dst has 4 components.
+ void copyUnscaled3onto4( LLImageRaw* src );
- // Src and dst can be any size. Src and dst have same number of components.
- void copyScaled( LLImageRaw* src );
+ // Src and dst are same size. Src has 1 component. Dst has 4 components.
+ // Alpha component is set to source alpha mask component.
+ // RGB components are set to fill color.
+ void copyUnscaledAlphaMask( LLImageRaw* src, const LLColor4U& fill);
- // Src and dst can be any size. Src has 3 components. Dst has 4 components.
- void copyScaled3onto4( LLImageRaw* src );
+ // Src and dst can be any size. Src and dst have same number of components.
+ void copyScaled( LLImageRaw* src );
- // Src and dst can be any size. Src has 4 components. Dst has 3 components.
- void copyScaled4onto3( LLImageRaw* src );
+ // Src and dst can be any size. Src has 3 components. Dst has 4 components.
+ void copyScaled3onto4( LLImageRaw* src );
+ // Src and dst can be any size. Src has 4 components. Dst has 3 components.
+ void copyScaled4onto3( LLImageRaw* src );
- // Composite operations
- // Src and dst can be any size. Src and dst can each have 3 or 4 components.
- void composite( LLImageRaw* src );
+ // Composite operations
- // Src and dst can be any size. Src has 4 components. Dst has 3 components.
- void compositeScaled4onto3( LLImageRaw* src );
+ // Src and dst can be any size. Src and dst can each have 3 or 4 components.
+ void composite( LLImageRaw* src );
- // Src and dst are same size. Src has 4 components. Dst has 3 components.
- void compositeUnscaled4onto3( LLImageRaw* src );
+ // Src and dst can be any size. Src has 4 components. Dst has 3 components.
+ void compositeScaled4onto3( LLImageRaw* src );
+
+ // Src and dst are same size. Src has 4 components. Dst has 3 components.
+ void compositeUnscaled4onto3( LLImageRaw* src );
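    // Usage sketch for the copy/composite family above (illustrative, not part of
    // this diff): copy() and composite() examine sizes and component counts and
    // are assumed to dispatch to the scaled/unscaled 3<->4 variants:
    //
    //   LLPointer<LLImageRaw> dst = new LLImageRaw(128, 128, 3); // RGB
    //   LLPointer<LLImageRaw> src = new LLImageRaw(64, 64, 4);   // RGBA
    //   dst->copy(src);      // scales 64x64 up to 128x128, dropping alpha (4 onto 3)
    //   dst->composite(src); // alpha-blends src over dst, scaling as needed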
// Emissive operations used by the minimap
// Roughly emulates GLTF emissive texture, but is not GLTF-compliant
@@ -283,21 +283,21 @@ public:
void addEmissiveScaled(LLImageRaw* src);
void addEmissiveUnscaled(LLImageRaw* src);
protected:
- // Create an image from a local file (generally used in tools)
- //bool createFromFile(const std::string& filename, bool j2c_lowest_mip_only = false);
+ // Create an image from a local file (generally used in tools)
+ //bool createFromFile(const std::string& filename, bool j2c_lowest_mip_only = false);
- void copyLineScaled( U8* in, U8* out, S32 in_pixel_len, S32 out_pixel_len, S32 in_pixel_step, S32 out_pixel_step );
- void compositeRowScaled4onto3( U8* in, U8* out, S32 in_pixel_len, S32 out_pixel_len );
+ void copyLineScaled( U8* in, U8* out, S32 in_pixel_len, S32 out_pixel_len, S32 in_pixel_step, S32 out_pixel_step );
+ void compositeRowScaled4onto3( U8* in, U8* out, S32 in_pixel_len, S32 out_pixel_len );
- U8 fastFractionalMult(U8 a,U8 b);
+ U8 fastFractionalMult(U8 a,U8 b);
- void setDataAndSize(U8 *data, S32 width, S32 height, S8 components) ;
+ void setDataAndSize(U8 *data, S32 width, S32 height, S8 components) ;
public:
- static S32 sRawImageCount;
+ static S32 sRawImageCount;
private:
- bool validateSrcAndDst(std::string func, LLImageRaw* src, LLImageRaw* dst);
+ bool validateSrcAndDst(std::string func, LLImageRaw* src, LLImageRaw* dst);
};
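// fastFractionalMult() above is only declared in this header; a common
// implementation of the technique (a sketch, not necessarily what llimage.cpp
// does) computes round(a*b/255) without a division:
inline U8 sketch_fastFractionalMult(U8 a, U8 b)
{
    U32 i = a * b + 128;              // +128 rounds instead of truncating
    return (U8)((i + (i >> 8)) >> 8); // classic exact divide-by-255 for this range
}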
// Compressed representation of an image.
@@ -305,74 +305,74 @@ private:
class LLImageFormatted : public LLImageBase
{
public:
- static LLImageFormatted* createFromType(S8 codec);
- static LLImageFormatted* createFromExtension(const std::string& instring);
+ static LLImageFormatted* createFromType(S8 codec);
+ static LLImageFormatted* createFromExtension(const std::string& instring);
protected:
- /*virtual*/ ~LLImageFormatted();
-
+ /*virtual*/ ~LLImageFormatted();
+
public:
- LLImageFormatted(S8 codec);
-
- // LLImageBase
- /*virtual*/ void deleteData();
- /*virtual*/ U8* allocateData(S32 size = -1);
- /*virtual*/ U8* reallocateData(S32 size);
-
- /*virtual*/ void dump();
- /*virtual*/ void sanityCheck();
-
- // New methods
- // subclasses must return a prefered file extension (lowercase without a leading dot)
- virtual std::string getExtension() = 0;
- // calcHeaderSize() returns the maximum size of header;
- // 0 indicates we don't have a header and have to read the entire file
- virtual S32 calcHeaderSize() { return 0; };
- // calcDataSize() returns how many bytes to read to load discard_level (including header)
- virtual S32 calcDataSize(S32 discard_level);
- // calcDiscardLevelBytes() returns the smallest valid discard level based on the number of input bytes
- virtual S32 calcDiscardLevelBytes(S32 bytes);
- // getRawDiscardLevel() by default returns mDiscardLevel, but may be overridden (LLImageJ2C)
- virtual S8 getRawDiscardLevel() { return mDiscardLevel; }
-
- bool load(const std::string& filename, int load_size = 0);
- bool save(const std::string& filename);
-
- virtual bool updateData() = 0; // pure virtual
- void setData(U8 *data, S32 size);
- void appendData(U8 *data, S32 size);
-
- // Loads first 4 channels.
- virtual bool decode(LLImageRaw* raw_image, F32 decode_time) = 0;
- // Subclasses that can handle more than 4 channels should override this function.
- virtual bool decodeChannels(LLImageRaw* raw_image, F32 decode_time, S32 first_channel, S32 max_channel);
-
- virtual bool encode(const LLImageRaw* raw_image, F32 encode_time) = 0;
-
- S8 getCodec() const;
- bool isDecoding() const { return mDecoding; }
- bool isDecoded() const { return mDecoded; }
- void setDiscardLevel(S8 discard_level) { mDiscardLevel = discard_level; }
- S8 getDiscardLevel() const { return mDiscardLevel; }
- S8 getLevels() const { return mLevels; }
- void setLevels(S8 nlevels) { mLevels = nlevels; }
-
- // setLastError needs to be deferred for J2C images since it may be called from a DLL
- virtual void resetLastError();
- virtual void setLastError(const std::string& message, const std::string& filename = std::string());
-
+ LLImageFormatted(S8 codec);
+
+ // LLImageBase
+ /*virtual*/ void deleteData();
+ /*virtual*/ U8* allocateData(S32 size = -1);
+ /*virtual*/ U8* reallocateData(S32 size);
+
+ /*virtual*/ void dump();
+ /*virtual*/ void sanityCheck();
+
+ // New methods
+    // subclasses must return a preferred file extension (lowercase, without a leading dot)
+ virtual std::string getExtension() = 0;
+ // calcHeaderSize() returns the maximum size of header;
+ // 0 indicates we don't have a header and have to read the entire file
+ virtual S32 calcHeaderSize() { return 0; };
+ // calcDataSize() returns how many bytes to read to load discard_level (including header)
+ virtual S32 calcDataSize(S32 discard_level);
+ // calcDiscardLevelBytes() returns the smallest valid discard level based on the number of input bytes
+ virtual S32 calcDiscardLevelBytes(S32 bytes);
+ // getRawDiscardLevel() by default returns mDiscardLevel, but may be overridden (LLImageJ2C)
+ virtual S8 getRawDiscardLevel() { return mDiscardLevel; }
+
+ bool load(const std::string& filename, int load_size = 0);
+ bool save(const std::string& filename);
+
+ virtual bool updateData() = 0; // pure virtual
+ void setData(U8 *data, S32 size);
+ void appendData(U8 *data, S32 size);
+
+ // Loads first 4 channels.
+ virtual bool decode(LLImageRaw* raw_image, F32 decode_time) = 0;
+ // Subclasses that can handle more than 4 channels should override this function.
+ virtual bool decodeChannels(LLImageRaw* raw_image, F32 decode_time, S32 first_channel, S32 max_channel);
+
+ virtual bool encode(const LLImageRaw* raw_image, F32 encode_time) = 0;
+
+ S8 getCodec() const;
+ bool isDecoding() const { return mDecoding; }
+ bool isDecoded() const { return mDecoded; }
+ void setDiscardLevel(S8 discard_level) { mDiscardLevel = discard_level; }
+ S8 getDiscardLevel() const { return mDiscardLevel; }
+ S8 getLevels() const { return mLevels; }
+ void setLevels(S8 nlevels) { mLevels = nlevels; }
+
+ // setLastError needs to be deferred for J2C images since it may be called from a DLL
+ virtual void resetLastError();
+ virtual void setLastError(const std::string& message, const std::string& filename = std::string());
+
protected:
- bool copyData(U8 *data, S32 size); // calls updateData()
-
+ bool copyData(U8 *data, S32 size); // calls updateData()
+
protected:
- S8 mCodec;
- S8 mDecoding;
- S8 mDecoded; // unused, but changing LLImage layout requires recompiling static Mac/Linux libs. 2009-01-30 JC
- S8 mDiscardLevel; // Current resolution level worked on. 0 = full res, 1 = half res, 2 = quarter res, etc...
- S8 mLevels; // Number of resolution levels in that image. Min is 1. 0 means unknown.
-
+ S8 mCodec;
+ S8 mDecoding;
+ S8 mDecoded; // unused, but changing LLImage layout requires recompiling static Mac/Linux libs. 2009-01-30 JC
+ S8 mDiscardLevel; // Current resolution level worked on. 0 = full res, 1 = half res, 2 = quarter res, etc...
+ S8 mLevels; // Number of resolution levels in that image. Min is 1. 0 means unknown.
+
public:
- static S32 sGlobalFormattedMemory;
+ static S32 sGlobalFormattedMemory;
};
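// End-to-end sketch of the LLImageFormatted workflow declared above (hedged:
// illustrative driver code, not from this diff; treating 0.0f as "no decode time
// limit" is an assumption). A concrete subclass is produced by the factory, fed
// bytes via load()/updateData(), then decoded into an LLImageRaw.
inline void sketch_decodeFile(const std::string& filename)
{
    LLPointer<LLImageFormatted> image = LLImageFormatted::createFromExtension(filename);
    if (image.isNull())
    {
        return; // unrecognized extension
    }
    if (!image->load(filename)) // reads the file and calls updateData()
    {
        return;
    }
    LLPointer<LLImageRaw> raw = new LLImageRaw;
    if (image->decode(raw, 0.0f))
    {
        // raw now holds up to 4 channels at image->getRawDiscardLevel() resolution.
    }
}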
#endif