/** 
 * @file llsimdtypes.inl
 * @brief Inlined definitions of basic SIMD math related types
 *
 * $LicenseInfo:firstyear=2010&license=viewergpl$
 * 
 * Copyright (c) 2007-2010, Linden Research, Inc.
 * 
 * Second Life Viewer Source Code
 * The source code in this file ("Source Code") is provided by Linden Lab
 * to you under the terms of the GNU General Public License, version 2.0
 * ("GPL"), unless you have obtained a separate licensing agreement
 * ("Other License"), formally executed by you and Linden Lab.  Terms of
 * the GPL can be found in doc/GPL-license.txt in this distribution, or
 * online at http://secondlifegrid.net/programs/open_source/licensing/gplv2
 * 
 * There are special exceptions to the terms and conditions of the GPL as
 * it is applied to this Source Code. View the full text of the exception
 * in the file doc/FLOSS-exception.txt in this software distribution, or
 * online at
 * http://secondlifegrid.net/programs/open_source/licensing/flossexception
 * 
 * By copying, modifying or distributing this software, you acknowledge
 * that you have read and understood your obligations described above,
 * and agree to abide by those obligations.
 * 
 * ALL LINDEN LAB SOURCE CODE IS PROVIDED "AS IS." LINDEN LAB MAKES NO
 * WARRANTIES, EXPRESS, IMPLIED OR OTHERWISE, REGARDING ITS ACCURACY,
 * COMPLETENESS OR PERFORMANCE.
 * $/LicenseInfo$
 */




//////////////////
// LLSimdScalar
//////////////////
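// Inline operator and helper definitions for LLSimdScalar, the single-float
// wrapper around an SSE register; all of the arithmetic below stays in the
// low lane of that register.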

inline LLSimdScalar operator+(const LLSimdScalar& a, const LLSimdScalar& b)
{
	LLSimdScalar t(a);
	t += b;
	return t;
}

inline LLSimdScalar operator-(const LLSimdScalar& a, const LLSimdScalar& b)
{
	LLSimdScalar t(a);
	t -= b;
	return t;
}

inline LLSimdScalar operator*(const LLSimdScalar& a, const LLSimdScalar& b)
{
	LLSimdScalar t(a);
	t *= b;
	return t;
}

inline LLSimdScalar operator/(const LLSimdScalar& a, const LLSimdScalar& b)
{
	LLSimdScalar t(a);
	t /= b;
	return t;
}

// Unary negation: flip the IEEE-754 sign bit by XORing with a mask that has
// only the sign bit set in each lane.
inline LLSimdScalar operator-(const LLSimdScalar& a)
{
	static LL_ALIGN_16(const U32 signMask[4]) = {0x80000000, 0x80000000, 0x80000000, 0x80000000 };
	return _mm_xor_ps(*reinterpret_cast<const LLQuad*>(signMask), a);
}

// The comparison operators use the scalar COMISS intrinsics, which compare
// only the lowest element of each operand and return an integer 0/1 as LLBool32.
inline LLBool32 operator==(const LLSimdScalar& a, const LLSimdScalar& b)
{
	return _mm_comieq_ss(a, b);
}

inline LLBool32 operator!=(const LLSimdScalar& a, const LLSimdScalar& b)
{
	return _mm_comineq_ss(a, b);
}

inline LLBool32 operator<(const LLSimdScalar& a, const LLSimdScalar& b)
{
	return _mm_comilt_ss(a, b);
}

inline LLBool32 operator<=(const LLSimdScalar& a, const LLSimdScalar& b)
{
	return _mm_comile_ss(a, b);
}

inline LLBool32 operator>(const LLSimdScalar& a, const LLSimdScalar& b)
{
	return _mm_comigt_ss(a, b);
}

inline LLBool32 operator>=(const LLSimdScalar& a, const LLSimdScalar& b)
{
	return _mm_comige_ss(a, b);
}

// Returns nonzero when |*this - rhs| <= tolerance; the subtraction, absolute
// value, and comparison all operate on the low lane only.
inline LLBool32 LLSimdScalar::isApproximatelyEqual(const LLSimdScalar& rhs, F32 tolerance /* = F_APPROXIMATELY_ZERO */) const
{
	const LLSimdScalar tol( tolerance );
	const LLSimdScalar diff = _mm_sub_ss( mQ, rhs.mQ );
	const LLSimdScalar absDiff = diff.getAbs();
	return absDiff <= tol;
}

// setMax/setMin compute the result in the low lane only; the upper lanes of
// the result are copied from a.
inline void LLSimdScalar::setMax( const LLSimdScalar& a, const LLSimdScalar& b )
{
	mQ = _mm_max_ss( a, b );
}

inline void LLSimdScalar::setMin( const LLSimdScalar& a, const LLSimdScalar& b )
{
	mQ = _mm_min_ss( a, b );
}

inline LLSimdScalar& LLSimdScalar::operator=(F32 rhs) 
{ 
	mQ = _mm_set_ss(rhs); 
	return *this; 
}

inline LLSimdScalar& LLSimdScalar::operator+=(const LLSimdScalar& rhs) 
{
	mQ = _mm_add_ss( mQ, rhs );
	return *this;
}

inline LLSimdScalar& LLSimdScalar::operator-=(const LLSimdScalar& rhs)
{
	mQ = _mm_sub_ss( mQ, rhs );
	return *this;
}

inline LLSimdScalar& LLSimdScalar::operator*=(const LLSimdScalar& rhs)
{
	mQ = _mm_mul_ss( mQ, rhs );
	return *this;
}

inline LLSimdScalar& LLSimdScalar::operator/=(const LLSimdScalar& rhs)
{
	mQ = _mm_div_ss( mQ, rhs );
	return *this;
}

// Absolute value: clear the sign bit by ANDing with a 0x7FFFFFFF mask per lane.
inline LLSimdScalar LLSimdScalar::getAbs() const
{
	static const LL_ALIGN_16(U32 F_ABS_MASK_4A[4]) = { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
	return _mm_and_ps( mQ, *reinterpret_cast<const LLQuad*>(F_ABS_MASK_4A));
}

// Copy the low lane back out to a plain F32.
inline F32 LLSimdScalar::getF32() const
{ 
	F32 ret; 
	_mm_store_ss(&ret, mQ); 
	return ret; 
}
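
// Usage sketch (illustrative only; assumes LLSimdScalar is constructible from
// an F32, as the tolerance constructor in isApproximatelyEqual above suggests):
//
//   LLSimdScalar a(2.f), b(3.f);
//   LLSimdScalar c = a * b - (a / b);      // arithmetic stays in the SSE register
//   if (c.getAbs() >= LLSimdScalar(5.f))   // comparisons use the scalar COMISS family
//   {
//       F32 plain = c.getF32();            // copy the low lane back to a plain F32
//   }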