Diffstat (limited to 'depedencies/include/glm/simd')
-rw-r--r--  depedencies/include/glm/simd/common.h            |  240 ----
-rw-r--r--  depedencies/include/glm/simd/exponential.h       |   20 ----
-rw-r--r--  depedencies/include/glm/simd/geometric.h         |  124 ----
-rw-r--r--  depedencies/include/glm/simd/integer.h           |  115 ----
-rw-r--r--  depedencies/include/glm/simd/matrix.h            | 1028 ----
-rw-r--r--  depedencies/include/glm/simd/packing.h           |    8 ----
-rw-r--r--  depedencies/include/glm/simd/platform.h          |  452 ----
-rw-r--r--  depedencies/include/glm/simd/trigonometric.h     |    9 ----
-rw-r--r--  depedencies/include/glm/simd/vector_relational.h |    8 ----
9 files changed, 0 insertions(+), 2004 deletions(-)
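
Note (editorial, not part of the diff): the nine headers removed below formed GLM's C-style SIMD fallback layer — SSE wrappers for common, exponential, geometric, integer, and matrix operations, gated by the GLM_ARCH / GLM_COMPILER detection macros in platform.h. As one concrete anchor, the sketch below reproduces the pre-SSE3 horizontal-add pattern from glm_vec4_dot in geometric.h as a standalone program. It is a minimal sketch, assuming an x86 target compiled with SSE2 enabled (e.g. g++ -msse2); the function name vec4_dot_sse2 and the test values are illustrative, not names from the deleted files.

    #include <immintrin.h>
    #include <cstdio>

    // Broadcast dot product: every lane of the result holds dot(v1, v2).
    static __m128 vec4_dot_sse2(__m128 v1, __m128 v2)
    {
        __m128 const mul0 = _mm_mul_ps(v1, v2);                                  // (x1*x2, y1*y2, z1*z2, w1*w2)
        __m128 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); // swap adjacent lane pairs
        __m128 const add0 = _mm_add_ps(mul0, swp0);                              // (x+y, x+y, z+w, z+w)
        __m128 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); // reverse lane order
        return _mm_add_ps(add0, swp1);                                           // all lanes: x+y+z+w
    }

    int main()
    {
        __m128 const a = _mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
        __m128 const b = _mm_setr_ps(5.0f, 6.0f, 7.0f, 8.0f);
        float out[4];
        _mm_storeu_ps(out, vec4_dot_sse2(a, b));
        std::printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]); // 70 in every lane
        return 0;
    }

In the deleted header this chain is the bottom tier of a three-way dispatch: _mm_dp_ps when the AVX bit is set, _mm_hadd_ps under SSE3, and the shuffle/add sequence above as the plain SSE2 fallback.
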
diff --git a/depedencies/include/glm/simd/common.h b/depedencies/include/glm/simd/common.h deleted file mode 100644 index d8c212d..0000000 --- a/depedencies/include/glm/simd/common.h +++ /dev/null @@ -1,240 +0,0 @@ -/// @ref simd -/// @file glm/simd/common.h - -#pragma once - -#include "platform.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_add(glm_vec4 a, glm_vec4 b) -{ - return _mm_add_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_add(glm_vec4 a, glm_vec4 b) -{ - return _mm_add_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sub(glm_vec4 a, glm_vec4 b) -{ - return _mm_sub_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_sub(glm_vec4 a, glm_vec4 b) -{ - return _mm_sub_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mul(glm_vec4 a, glm_vec4 b) -{ - return _mm_mul_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_mul(glm_vec4 a, glm_vec4 b) -{ - return _mm_mul_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_div(glm_vec4 a, glm_vec4 b) -{ - return _mm_div_ps(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_div(glm_vec4 a, glm_vec4 b) -{ - return _mm_div_ss(a, b); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_div_lowp(glm_vec4 a, glm_vec4 b) -{ - return glm_vec4_mul(a, _mm_rcp_ps(b)); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_swizzle_xyzw(glm_vec4 a) -{ -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0)); -# else - return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0)); -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_fma(glm_vec4 a, glm_vec4 b, glm_vec4 c) -{ -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - return _mm_fmadd_ss(a, b, c); -# else - return _mm_add_ss(_mm_mul_ss(a, b), c); -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fma(glm_vec4 a, glm_vec4 b, glm_vec4 c) -{ -# if GLM_ARCH & GLM_ARCH_AVX2_BIT - return _mm_fmadd_ps(a, b, c); -# else - return glm_vec4_add(glm_vec4_mul(a, b), c); -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_abs(glm_vec4 x) -{ - return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF))); -} - -GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSSE3_BIT - return _mm_sign_epi32(x, x); -# else - glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31); - glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0); - glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0); - return sub0; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x) -{ - glm_vec4 const zro0 = _mm_setzero_ps(); - glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0); - glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0); - glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f)); - glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f)); - glm_vec4 const or0 = _mm_or_ps(and0, and1);; - return or0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT); -# else - glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000)); - glm_vec4 const and0 = _mm_and_ps(sgn0, x); - glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); - glm_vec4 const add0 = glm_vec4_add(x, or0); - glm_vec4 const sub0 = glm_vec4_sub(add0, or0); - return sub0; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - return _mm_floor_ps(x); -# else - glm_vec4 const rnd0 = glm_vec4_round(x); - glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0); - glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); - glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0); - return 
sub0; -# endif -} - -/* trunc TODO -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x) -{ - return glm_vec4(); -} -*/ - -//roundEven -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x) -{ - glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(0x80000000)); - glm_vec4 const and0 = _mm_and_ps(sgn0, x); - glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f)); - glm_vec4 const add0 = glm_vec4_add(x, or0); - glm_vec4 const sub0 = glm_vec4_sub(add0, or0); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x) -{ -# if GLM_ARCH & GLM_ARCH_SSE41_BIT - return _mm_ceil_ps(x); -# else - glm_vec4 const rnd0 = glm_vec4_round(x); - glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0); - glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); - glm_vec4 const add0 = glm_vec4_add(rnd0, and0); - return add0; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x) -{ - glm_vec4 const flr0 = glm_vec4_floor(x); - glm_vec4 const sub0 = glm_vec4_sub(x, flr0); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y) -{ - glm_vec4 const div0 = glm_vec4_div(x, y); - glm_vec4 const flr0 = glm_vec4_floor(div0); - glm_vec4 const mul0 = glm_vec4_mul(y, flr0); - glm_vec4 const sub0 = glm_vec4_sub(x, mul0); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal) -{ - glm_vec4 const min0 = _mm_min_ps(v, maxVal); - glm_vec4 const max0 = _mm_max_ps(min0, minVal); - return max0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a) -{ - glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a); - glm_vec4 const mul0 = glm_vec4_mul(v1, sub0); - glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0); - return mad0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x) -{ - glm_vec4 const cmp = _mm_cmple_ps(x, edge); - return _mm_movemask_ps(cmp) == 0 ? 
_mm_set1_ps(1.0f) : _mm_setzero_ps(); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x) -{ - glm_vec4 const sub0 = glm_vec4_sub(x, edge0); - glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0); - glm_vec4 const div0 = glm_vec4_sub(sub0, sub1); - glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f)); - glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0); - glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0); - glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0); - glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2); - return mul2; -} - -// Agner Fog method -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x) -{ - glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer - glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit - glm_ivec4 const t3 = _mm_set1_epi32(0xFF000000); // exponent mask - glm_ivec4 const t4 = _mm_and_si128(t2, t3); // exponent - glm_ivec4 const t5 = _mm_andnot_si128(t3, t2); // fraction - glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4); - glm_ivec4 const Nequal = _mm_cmpeq_epi32(t5, _mm_setzero_si128()); - glm_ivec4 const And = _mm_and_si128(Equal, Nequal); - return _mm_castsi128_ps(And); // exponent = all 1s and fraction != 0 -} - -// Agner Fog method -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x) -{ - glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer - glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit - return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(0xFF000000))); // exponent is all 1s, fraction is 0 -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/depedencies/include/glm/simd/exponential.h b/depedencies/include/glm/simd/exponential.h deleted file mode 100644 index 4eb0fb7..0000000 --- a/depedencies/include/glm/simd/exponential.h +++ /dev/null @@ -1,20 +0,0 @@ -/// @ref simd -/// @file glm/simd/experimental.h - -#pragma once - -#include "platform.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_sqrt_lowp(glm_vec4 x) -{ - return _mm_mul_ss(_mm_rsqrt_ss(x), x); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sqrt_lowp(glm_vec4 x) -{ - return _mm_mul_ps(_mm_rsqrt_ps(x), x); -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/depedencies/include/glm/simd/geometric.h b/depedencies/include/glm/simd/geometric.h deleted file mode 100644 index ca53387..0000000 --- a/depedencies/include/glm/simd/geometric.h +++ /dev/null @@ -1,124 +0,0 @@ -/// @ref simd -/// @file glm/simd/geometric.h - -#pragma once - -#include "common.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2); -GLM_FUNC_DECL glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2); - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x) -{ - glm_vec4 const dot0 = glm_vec4_dot(x, x); - glm_vec4 const sqt0 = _mm_sqrt_ps(dot0); - return sqt0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1) -{ - glm_vec4 const sub0 = _mm_sub_ps(p0, p1); - glm_vec4 const len0 = glm_vec4_length(sub0); - return len0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2) -{ -# if GLM_ARCH & GLM_ARCH_AVX_BIT - return _mm_dp_ps(v1, v2, 0xff); -# elif GLM_ARCH & GLM_ARCH_SSE3_BIT - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0); - glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0); - return hadd1; -# else - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const swp0 = 
_mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1)); - glm_vec4 const add0 = _mm_add_ps(mul0, swp0); - glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3)); - glm_vec4 const add1 = _mm_add_ps(add0, swp1); - return add1; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2) -{ -# if GLM_ARCH & GLM_ARCH_AVX_BIT - return _mm_dp_ps(v1, v2, 0xff); -# elif GLM_ARCH & GLM_ARCH_SSE3_BIT - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0); - glm_vec4 const had1 = _mm_hadd_ps(had0, had0); - return had1; -# else - glm_vec4 const mul0 = _mm_mul_ps(v1, v2); - glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0); - glm_vec4 const add0 = _mm_add_ps(mov0, mul0); - glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1); - glm_vec4 const add1 = _mm_add_ss(add0, swp1); - return add1; -# endif -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2) -{ - glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1)); - glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2)); - glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1)); - glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2)); - glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3); - glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2); - glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1); - return sub0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v) -{ - glm_vec4 const dot0 = glm_vec4_dot(v, v); - glm_vec4 const isr0 = _mm_rsqrt_ps(dot0); - glm_vec4 const mul0 = _mm_mul_ps(v, isr0); - return mul0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref) -{ - glm_vec4 const dot0 = glm_vec4_dot(Nref, I); - glm_vec4 const sgn0 = glm_vec4_sign(dot0); - glm_vec4 const mul0 = _mm_mul_ps(sgn0, _mm_set1_ps(-1.0f)); - glm_vec4 const mul1 = _mm_mul_ps(N, mul0); - return mul1; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N) -{ - glm_vec4 const dot0 = glm_vec4_dot(N, I); - glm_vec4 const mul0 = _mm_mul_ps(N, dot0); - glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f)); - glm_vec4 const sub0 = _mm_sub_ps(I, mul1); - return sub0; -} - -GLM_FUNC_QUALIFIER __m128 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta) -{ - glm_vec4 const dot0 = glm_vec4_dot(N, I); - glm_vec4 const mul0 = _mm_mul_ps(eta, eta); - glm_vec4 const mul1 = _mm_mul_ps(dot0, dot0); - glm_vec4 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), mul0); - glm_vec4 const sub1 = _mm_sub_ps(_mm_set1_ps(1.0f), mul1); - glm_vec4 const mul2 = _mm_mul_ps(sub0, sub1); - - if(_mm_movemask_ps(_mm_cmplt_ss(mul2, _mm_set1_ps(0.0f))) == 0) - return _mm_set1_ps(0.0f); - - glm_vec4 const sqt0 = _mm_sqrt_ps(mul2); - glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0); - glm_vec4 const mul4 = _mm_mul_ps(mad0, N); - glm_vec4 const mul5 = _mm_mul_ps(eta, I); - glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4); - - return sub2; -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/depedencies/include/glm/simd/integer.h b/depedencies/include/glm/simd/integer.h deleted file mode 100644 index 50fd824..0000000 --- a/depedencies/include/glm/simd/integer.h +++ /dev/null @@ -1,115 +0,0 @@ -/// @ref simd -/// @file glm/simd/integer.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x) -{ - glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); - glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); - glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); - 
glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); - glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); - - glm_uvec4 Reg1; - glm_uvec4 Reg2; - - // REG1 = x; - // REG2 = y; - //Reg1 = _mm_unpacklo_epi64(x, y); - Reg1 = x; - - //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); - //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); - Reg2 = _mm_slli_si128(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask4); - - //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); - //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); - Reg2 = _mm_slli_si128(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask3); - - //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); - //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); - Reg2 = _mm_slli_epi32(Reg1, 4); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask2); - - //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); - //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); - Reg2 = _mm_slli_epi32(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask1); - - //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); - //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask0); - - //return REG1 | (REG2 << 1); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg2 = _mm_srli_si128(Reg2, 8); - Reg1 = _mm_or_si128(Reg1, Reg2); - - return Reg1; -} - -GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y) -{ - glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF); - glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF); - glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F); - glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333); - glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555); - - glm_uvec4 Reg1; - glm_uvec4 Reg2; - - // REG1 = x; - // REG2 = y; - Reg1 = _mm_unpacklo_epi64(x, y); - - //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF); - //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF); - Reg2 = _mm_slli_si128(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask4); - - //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF); - //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF); - Reg2 = _mm_slli_si128(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask3); - - //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F); - //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F); - Reg2 = _mm_slli_epi32(Reg1, 4); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask2); - - //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333); - //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333); - Reg2 = _mm_slli_epi32(Reg1, 2); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask1); - - //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555); - //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg1 = _mm_or_si128(Reg2, Reg1); - Reg1 = _mm_and_si128(Reg1, Mask0); - - //return REG1 | (REG2 << 1); - Reg2 = _mm_slli_epi32(Reg1, 1); - Reg2 = _mm_srli_si128(Reg2, 8); - Reg1 = _mm_or_si128(Reg1, Reg2); - - return Reg1; -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/depedencies/include/glm/simd/matrix.h b/depedencies/include/glm/simd/matrix.h 
deleted file mode 100644 index 549d40c..0000000 --- a/depedencies/include/glm/simd/matrix.h +++ /dev/null @@ -1,1028 +0,0 @@ -/// @ref simd -/// @file glm/simd/matrix.h - -#pragma once - -#include "geometric.h" - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - out[0] = _mm_mul_ps(in1[0], in2[0]); - out[1] = _mm_mul_ps(in1[1], in2[1]); - out[2] = _mm_mul_ps(in1[2], in2[2]); - out[3] = _mm_mul_ps(in1[3], in2[3]); -} - -GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - out[0] = _mm_add_ps(in1[0], in2[0]); - out[1] = _mm_add_ps(in1[1], in2[1]); - out[2] = _mm_add_ps(in1[2], in2[2]); - out[3] = _mm_add_ps(in1[3], in2[3]); -} - -GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - out[0] = _mm_sub_ps(in1[0], in2[0]); - out[1] = _mm_sub_ps(in1[1], in2[1]); - out[2] = _mm_sub_ps(in1[2], in2[2]); - out[3] = _mm_sub_ps(in1[3], in2[3]); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v) -{ - __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(m[0], v0); - __m128 m1 = _mm_mul_ps(m[1], v1); - __m128 m2 = _mm_mul_ps(m[2], v2); - __m128 m3 = _mm_mul_ps(m[3], v3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - return a2; -} - -GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4]) -{ - __m128 i0 = m[0]; - __m128 i1 = m[1]; - __m128 i2 = m[2]; - __m128 i3 = m[3]; - - __m128 m0 = _mm_mul_ps(v, i0); - __m128 m1 = _mm_mul_ps(v, i1); - __m128 m2 = _mm_mul_ps(v, i2); - __m128 m3 = _mm_mul_ps(v, i3); - - __m128 u0 = _mm_unpacklo_ps(m0, m1); - __m128 u1 = _mm_unpackhi_ps(m0, m1); - __m128 a0 = _mm_add_ps(u0, u1); - - __m128 u2 = _mm_unpacklo_ps(m2, m3); - __m128 u3 = _mm_unpackhi_ps(m2, m3); - __m128 a1 = _mm_add_ps(u2, u3); - - __m128 f0 = _mm_movelh_ps(a0, a1); - __m128 f1 = _mm_movehl_ps(a1, a0); - __m128 f2 = _mm_add_ps(f0, f1); - - return f2; -} - -GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4]) -{ - { - __m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[0] = a2; - } - - { - __m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[1] = a2; - } - - { - 
__m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[2] = a2; - } - - { - //(__m128&)_mm_shuffle_epi32(__m128i&)in2[0], _MM_SHUFFLE(3, 3, 3, 3)) - __m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 m0 = _mm_mul_ps(in1[0], e0); - __m128 m1 = _mm_mul_ps(in1[1], e1); - __m128 m2 = _mm_mul_ps(in1[2], e2); - __m128 m3 = _mm_mul_ps(in1[3], e3); - - __m128 a0 = _mm_add_ps(m0, m1); - __m128 a1 = _mm_add_ps(m2, m3); - __m128 a2 = _mm_add_ps(a0, a1); - - out[3] = a2; - } -} - -GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44); - __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE); - __m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44); - __m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE); - - out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88); - out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD); - out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88); - out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - 
// valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - // m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = 
_mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - return Det0; -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4]) -{ - // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128( - - //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - //T SubFactor03 = m[2][0] * 
m[3][3] - m[3][0] * m[2][3]; - //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - // First 2 columns - __m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2))); - __m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3))); - __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); - - // Second 2 columns - __m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3))); - __m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2))); - __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); - - // Columns subtraction - __m128 SubE = _mm_sub_ps(MulA, MulB); - - // Last 2 rows - __m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2))); - __m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0))); - __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); - __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); - - //tvec4<T, P> DetCof( - // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - __m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0))); - __m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1))); - __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); - - __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); - __m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1]; - __m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2))); - __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); - - __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); - - __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); - __m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0))); - __m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3))); - __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); - - __m128 AddRes = _mm_add_ps(SubRes, MulFacC); - __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); - - //return m[0][0] * DetCof[0] - // + m[0][1] * DetCof[1] - // + m[0][2] * DetCof[2] - // + m[0][3] * DetCof[3]; - - return glm_vec4_dot(m[0], DetCof); -} - -GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4]) -{ - // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add) - - //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - - // First 2 columns - __m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2)); - __m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3)); - __m128 MulA = _mm_mul_ps(Swp2A, Swp3A); - - // Second 2 columns - __m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3)); - __m128 Swp3B = 
_mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2)); - __m128 MulB = _mm_mul_ps(Swp2B, Swp3B); - - // Columns subtraction - __m128 SubE = _mm_sub_ps(MulA, MulB); - - // Last 2 rows - __m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2)); - __m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0)); - __m128 MulC = _mm_mul_ps(Swp2C, Swp3C); - __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC); - - //tvec4<T, P> DetCof( - // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02), - // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04), - // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05), - // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05)); - - __m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0)); - __m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1)); - __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA); - - __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1)); - __m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1]; - __m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2)); - __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB); - - __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB); - - __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2)); - __m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0)); - __m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3)); - __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC); - - __m128 AddRes = _mm_add_ps(SubRes, MulFacC); - __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f)); - - //return m[0][0] * DetCof[0] - // + m[0][1] * DetCof[1] - // + m[0][2] * DetCof[2] - // + m[0][3] * DetCof[3]; - - return glm_vec4_dot(m[0], DetCof); -} - -GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = 
_mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 
1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - // m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = _mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - __m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0); - //__m128 Rcp0 = _mm_rcp_ps(Det0); - - // Inverse /= Determinant; - out[0] = _mm_mul_ps(Inv0, Rcp0); - 
out[1] = _mm_mul_ps(Inv1, Rcp0); - out[2] = _mm_mul_ps(Inv2, Rcp0); - out[3] = _mm_mul_ps(Inv3, Rcp0); -} - -GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4]) -{ - __m128 Fac0; - { - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3]; - // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3]; - // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac0 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac1; - { - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3]; - // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3]; - // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac1 = _mm_sub_ps(Mul00, Mul01); - } - - - __m128 Fac2; - { - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2]; - // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2]; - // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac2 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac3; - { - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3]; - // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3]; - // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - 
Fac3 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac4; - { - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2]; - // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2]; - // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac4 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 Fac5; - { - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1]; - // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1]; - // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1]; - - __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0)); - - __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0)); - __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1)); - - __m128 Mul00 = _mm_mul_ps(Swp00, Swp01); - __m128 Mul01 = _mm_mul_ps(Swp02, Swp03); - Fac5 = _mm_sub_ps(Mul00, Mul01); - } - - __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f); - __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f); - - // m[1][0] - // m[0][0] - // m[0][0] - // m[0][0] - __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][1] - // m[0][1] - // m[0][1] - // m[0][1] - __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1)); - __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][2] - // m[0][2] - // m[0][2] - // m[0][2] - __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2)); - __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0)); - - // m[1][3] - // m[0][3] - // m[0][3] - // m[0][3] - __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3)); - __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0)); - - // col0 - // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]), - // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]), - // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]), - // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]), - __m128 Mul00 = _mm_mul_ps(Vec1, Fac0); - __m128 Mul01 = _mm_mul_ps(Vec2, Fac1); - __m128 Mul02 = _mm_mul_ps(Vec3, Fac2); - __m128 Sub00 = _mm_sub_ps(Mul00, Mul01); - __m128 Add00 = _mm_add_ps(Sub00, Mul02); - __m128 Inv0 = _mm_mul_ps(SignB, Add00); - - // col1 - // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]), - // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]), - // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]), - // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]), - __m128 Mul03 = _mm_mul_ps(Vec0, Fac0); - __m128 Mul04 = _mm_mul_ps(Vec2, Fac3); - __m128 Mul05 = 
_mm_mul_ps(Vec3, Fac4); - __m128 Sub01 = _mm_sub_ps(Mul03, Mul04); - __m128 Add01 = _mm_add_ps(Sub01, Mul05); - __m128 Inv1 = _mm_mul_ps(SignA, Add01); - - // col2 - // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]), - // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]), - // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]), - // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]), - __m128 Mul06 = _mm_mul_ps(Vec0, Fac1); - __m128 Mul07 = _mm_mul_ps(Vec1, Fac3); - __m128 Mul08 = _mm_mul_ps(Vec3, Fac5); - __m128 Sub02 = _mm_sub_ps(Mul06, Mul07); - __m128 Add02 = _mm_add_ps(Sub02, Mul08); - __m128 Inv2 = _mm_mul_ps(SignB, Add02); - - // col3 - // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]), - // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]), - // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]), - // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3])); - __m128 Mul09 = _mm_mul_ps(Vec0, Fac2); - __m128 Mul10 = _mm_mul_ps(Vec1, Fac4); - __m128 Mul11 = _mm_mul_ps(Vec2, Fac5); - __m128 Sub03 = _mm_sub_ps(Mul09, Mul10); - __m128 Add03 = _mm_add_ps(Sub03, Mul11); - __m128 Inv3 = _mm_mul_ps(SignA, Add03); - - __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0)); - - // valType Determinant = m[0][0] * Inverse[0][0] - // + m[0][1] * Inverse[1][0] - // + m[0][2] * Inverse[2][0] - // + m[0][3] * Inverse[3][0]; - __m128 Det0 = glm_vec4_dot(in[0], Row2); - __m128 Rcp0 = _mm_rcp_ps(Det0); - //__m128 Rcp0 = _mm_div_ps(one, Det0); - // Inverse /= Determinant; - out[0] = _mm_mul_ps(Inv0, Rcp0); - out[1] = _mm_mul_ps(Inv1, Rcp0); - out[2] = _mm_mul_ps(Inv2, Rcp0); - out[3] = _mm_mul_ps(Inv3, Rcp0); -} -/* -GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4]) -{ - float a = glm::radians(Angle); - float c = cos(a); - float s = sin(a); - - glm::vec4 AxisA(v[0], v[1], v[2], float(0)); - __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x); - __m128 AxisC = detail::sse_nrm_ps(AxisB); - - __m128 Cos0 = _mm_set_ss(c); - __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 Sin0 = _mm_set_ss(s); - __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0)); - - // tvec3<T, P> temp = (valType(1) - c) * axis; - __m128 Temp0 = _mm_sub_ps(one, CosA); - __m128 Temp1 = _mm_mul_ps(Temp0, AxisC); - - //Rotate[0][0] = c + temp[0] * axis[0]; - //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2]; - //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1]; - __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0)); - __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC); - __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0)); - __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0); - __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f); - __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3)); - __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2); - __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3); - - //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2]; - //Rotate[1][1] = c + temp[1] * axis[1]; - //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0]; - __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1)); - __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC); - __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1)); - __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0); - __m128 SinB0 = 
SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f); - __m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2)); - __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2); - __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3); - - //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1]; - //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0]; - //Rotate[2][2] = c + temp[2] * axis[2]; - __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2)); - __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC); - __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1)); - __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0); - __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f); - __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1)); - __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2); - __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3); - - __m128 Result[4]; - Result[0] = TmpA4; - Result[1] = TmpB4; - Result[2] = TmpC4; - Result[3] = _mm_set_ps(1, 0, 0, 0); - - //tmat4x4<valType> Result(uninitialize); - //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2]; - //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2]; - //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2]; - //Result[3] = m[3]; - //return Result; - sse_mul_ps(in, Result, out); -} -*/ -GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const & c, __m128 const & r, __m128 out[4]) -{ - out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0))); - out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1))); - out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2))); - out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3))); -} - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/depedencies/include/glm/simd/packing.h b/depedencies/include/glm/simd/packing.h deleted file mode 100644 index 609163e..0000000 --- a/depedencies/include/glm/simd/packing.h +++ /dev/null @@ -1,8 +0,0 @@ -/// @ref simd -/// @file glm/simd/packing.h - -#pragma once - -#if GLM_ARCH & GLM_ARCH_SSE2_BIT - -#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT diff --git a/depedencies/include/glm/simd/platform.h b/depedencies/include/glm/simd/platform.h deleted file mode 100644 index f779390..0000000 --- a/depedencies/include/glm/simd/platform.h +++ /dev/null @@ -1,452 +0,0 @@ -/// @ref simd -/// @file glm/simd/platform.h - -#pragma once - -/////////////////////////////////////////////////////////////////////////////////// -// Platform - -#define GLM_PLATFORM_UNKNOWN 0x00000000 -#define GLM_PLATFORM_WINDOWS 0x00010000 -#define GLM_PLATFORM_LINUX 0x00020000 -#define GLM_PLATFORM_APPLE 0x00040000 -//#define GLM_PLATFORM_IOS 0x00080000 -#define GLM_PLATFORM_ANDROID 0x00100000 -#define GLM_PLATFORM_CHROME_NACL 0x00200000 -#define GLM_PLATFORM_UNIX 0x00400000 -#define GLM_PLATFORM_QNXNTO 0x00800000 -#define GLM_PLATFORM_WINCE 0x01000000 -#define GLM_PLATFORM_CYGWIN 0x02000000 - -#ifdef GLM_FORCE_PLATFORM_UNKNOWN -# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN -#elif defined(__CYGWIN__) -# define GLM_PLATFORM GLM_PLATFORM_CYGWIN -#elif defined(__QNXNTO__) -# define GLM_PLATFORM GLM_PLATFORM_QNXNTO -#elif defined(__APPLE__) -# define GLM_PLATFORM GLM_PLATFORM_APPLE -#elif defined(WINCE) -# define GLM_PLATFORM GLM_PLATFORM_WINCE -#elif defined(_WIN32) -# define GLM_PLATFORM GLM_PLATFORM_WINDOWS -#elif defined(__native_client__) -# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL -#elif defined(__ANDROID__) -# define GLM_PLATFORM GLM_PLATFORM_ANDROID -#elif defined(__linux) -# define GLM_PLATFORM GLM_PLATFORM_LINUX -#elif 
diff --git a/depedencies/include/glm/simd/packing.h b/depedencies/include/glm/simd/packing.h
deleted file mode 100644
index 609163e..0000000
--- a/depedencies/include/glm/simd/packing.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/// @ref simd
-/// @file glm/simd/packing.h
-
-#pragma once
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/depedencies/include/glm/simd/platform.h b/depedencies/include/glm/simd/platform.h
deleted file mode 100644
index f779390..0000000
--- a/depedencies/include/glm/simd/platform.h
+++ /dev/null
@@ -1,452 +0,0 @@
-/// @ref simd
-/// @file glm/simd/platform.h
-
-#pragma once
-
-///////////////////////////////////////////////////////////////////////////////////
-// Platform
-
-#define GLM_PLATFORM_UNKNOWN		0x00000000
-#define GLM_PLATFORM_WINDOWS		0x00010000
-#define GLM_PLATFORM_LINUX			0x00020000
-#define GLM_PLATFORM_APPLE			0x00040000
-//#define GLM_PLATFORM_IOS			0x00080000
-#define GLM_PLATFORM_ANDROID		0x00100000
-#define GLM_PLATFORM_CHROME_NACL	0x00200000
-#define GLM_PLATFORM_UNIX			0x00400000
-#define GLM_PLATFORM_QNXNTO			0x00800000
-#define GLM_PLATFORM_WINCE			0x01000000
-#define GLM_PLATFORM_CYGWIN			0x02000000
-
-#ifdef GLM_FORCE_PLATFORM_UNKNOWN
-#	define GLM_PLATFORM GLM_PLATFORM_UNKNOWN
-#elif defined(__CYGWIN__)
-#	define GLM_PLATFORM GLM_PLATFORM_CYGWIN
-#elif defined(__QNXNTO__)
-#	define GLM_PLATFORM GLM_PLATFORM_QNXNTO
-#elif defined(__APPLE__)
-#	define GLM_PLATFORM GLM_PLATFORM_APPLE
-#elif defined(WINCE)
-#	define GLM_PLATFORM GLM_PLATFORM_WINCE
-#elif defined(_WIN32)
-#	define GLM_PLATFORM GLM_PLATFORM_WINDOWS
-#elif defined(__native_client__)
-#	define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL
-#elif defined(__ANDROID__)
-#	define GLM_PLATFORM GLM_PLATFORM_ANDROID
-#elif defined(__linux)
-#	define GLM_PLATFORM GLM_PLATFORM_LINUX
-#elif defined(__unix)
-#	define GLM_PLATFORM GLM_PLATFORM_UNIX
-#else
-#	define GLM_PLATFORM GLM_PLATFORM_UNKNOWN
-#endif//GLM_PLATFORM
-
-// Report platform detection
-#if GLM_MESSAGES == GLM_MESSAGES_ENABLED && !defined(GLM_MESSAGE_PLATFORM_DISPLAYED)
-#	define GLM_MESSAGE_PLATFORM_DISPLAYED
-#	if(GLM_PLATFORM & GLM_PLATFORM_QNXNTO)
-#		pragma message("GLM: QNX platform detected")
-//#	elif(GLM_PLATFORM & GLM_PLATFORM_IOS)
-//#		pragma message("GLM: iOS platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_APPLE)
-#		pragma message("GLM: Apple platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_WINCE)
-#		pragma message("GLM: WinCE platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_WINDOWS)
-#		pragma message("GLM: Windows platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_CHROME_NACL)
-#		pragma message("GLM: Native Client detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_ANDROID)
-#		pragma message("GLM: Android platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_LINUX)
-#		pragma message("GLM: Linux platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_UNIX)
-#		pragma message("GLM: UNIX platform detected")
-#	elif(GLM_PLATFORM & GLM_PLATFORM_UNKNOWN)
-#		pragma message("GLM: platform unknown")
-#	else
-#		pragma message("GLM: platform not detected")
-#	endif
-#endif//GLM_MESSAGES
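Every GLM_PLATFORM_* constant occupies a distinct bit, so platform checks are bitwise AND rather than equality, and a single test can cover several platforms at once. An illustrative sketch of the client-side pattern, not taken from the diff:

    #if GLM_PLATFORM & GLM_PLATFORM_WINDOWS
        // Windows-only path
    #endif

    #if GLM_PLATFORM & (GLM_PLATFORM_LINUX | GLM_PLATFORM_ANDROID | GLM_PLATFORM_UNIX)
        // any of the POSIX-like targets in one test
    #endif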
-///////////////////////////////////////////////////////////////////////////////////
-// Compiler
-
-#define GLM_COMPILER_UNKNOWN		0x00000000
-
-// Intel
-#define GLM_COMPILER_INTEL			0x00100000
-#define GLM_COMPILER_INTEL12		0x00100010
-#define GLM_COMPILER_INTEL12_1		0x00100020
-#define GLM_COMPILER_INTEL13		0x00100030
-#define GLM_COMPILER_INTEL14		0x00100040
-#define GLM_COMPILER_INTEL15		0x00100050
-#define GLM_COMPILER_INTEL16		0x00100060
-
-// Visual C++ defines
-#define GLM_COMPILER_VC				0x01000000
-#define GLM_COMPILER_VC10			0x01000090
-#define GLM_COMPILER_VC11			0x010000A0
-#define GLM_COMPILER_VC12			0x010000B0
-#define GLM_COMPILER_VC14			0x010000C0
-#define GLM_COMPILER_VC15			0x010000D0
-
-// GCC defines
-#define GLM_COMPILER_GCC			0x02000000
-#define GLM_COMPILER_GCC42			0x02000090 // used by the versioned detection below
-#define GLM_COMPILER_GCC43			0x020000A0 // used by the versioned detection below
-#define GLM_COMPILER_GCC44			0x020000B0
-#define GLM_COMPILER_GCC45			0x020000C0
-#define GLM_COMPILER_GCC46			0x020000D0
-#define GLM_COMPILER_GCC47			0x020000E0
-#define GLM_COMPILER_GCC48			0x020000F0
-#define GLM_COMPILER_GCC49			0x02000100
-#define GLM_COMPILER_GCC50			0x02000200
-#define GLM_COMPILER_GCC51			0x02000300
-#define GLM_COMPILER_GCC52			0x02000400
-#define GLM_COMPILER_GCC53			0x02000500
-#define GLM_COMPILER_GCC54			0x02000600
-#define GLM_COMPILER_GCC60			0x02000700
-#define GLM_COMPILER_GCC61			0x02000800
-#define GLM_COMPILER_GCC62			0x02000900
-#define GLM_COMPILER_GCC70			0x02000A00
-#define GLM_COMPILER_GCC71			0x02000B00
-#define GLM_COMPILER_GCC72			0x02000C00
-#define GLM_COMPILER_GCC80			0x02000D00
-
-// CUDA
-#define GLM_COMPILER_CUDA			0x10000000
-#define GLM_COMPILER_CUDA40			0x10000040
-#define GLM_COMPILER_CUDA41			0x10000050
-#define GLM_COMPILER_CUDA42			0x10000060
-#define GLM_COMPILER_CUDA50			0x10000070
-#define GLM_COMPILER_CUDA60			0x10000080
-#define GLM_COMPILER_CUDA65			0x10000090
-#define GLM_COMPILER_CUDA70			0x100000A0
-#define GLM_COMPILER_CUDA75			0x100000B0
-#define GLM_COMPILER_CUDA80			0x100000C0
-
-// Clang
-#define GLM_COMPILER_CLANG			0x20000000
-#define GLM_COMPILER_CLANG30		0x20000010 // used by the versioned detection below
-#define GLM_COMPILER_CLANG31		0x20000020 // used by the versioned detection below
-#define GLM_COMPILER_CLANG32		0x20000030
-#define GLM_COMPILER_CLANG33		0x20000040
-#define GLM_COMPILER_CLANG34		0x20000050
-#define GLM_COMPILER_CLANG35		0x20000060
-#define GLM_COMPILER_CLANG36		0x20000070
-#define GLM_COMPILER_CLANG37		0x20000080
-#define GLM_COMPILER_CLANG38		0x20000090
-#define GLM_COMPILER_CLANG39		0x200000A0
-#define GLM_COMPILER_CLANG40		0x200000B0
-#define GLM_COMPILER_CLANG41		0x200000C0
-#define GLM_COMPILER_CLANG42		0x200000D0
-
-// Build model
-#define GLM_MODEL_32				0x00000010
-#define GLM_MODEL_64				0x00000020
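These constants encode the vendor in the high-order bits and the release in the low-order bits, so a mask answers "which compiler?" while an ordinary comparison answers "how new?". Illustrative only, assuming nothing beyond the defines above:

    #if (GLM_COMPILER & GLM_COMPILER_GCC) && (GLM_COMPILER >= GLM_COMPILER_GCC49)
        // GCC, release 4.9 or newer
    #endif

    #if GLM_COMPILER & GLM_COMPILER_CLANG
        // any detected Clang release
    #endif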
-// Force generic C++ compiler
-#ifdef GLM_FORCE_COMPILER_UNKNOWN
-#	define GLM_COMPILER GLM_COMPILER_UNKNOWN
-
-#elif defined(__INTEL_COMPILER)
-#	if __INTEL_COMPILER == 1200
-#		define GLM_COMPILER GLM_COMPILER_INTEL12
-#	elif __INTEL_COMPILER == 1210
-#		define GLM_COMPILER GLM_COMPILER_INTEL12_1
-#	elif __INTEL_COMPILER == 1300
-#		define GLM_COMPILER GLM_COMPILER_INTEL13
-#	elif __INTEL_COMPILER == 1400
-#		define GLM_COMPILER GLM_COMPILER_INTEL14
-#	elif __INTEL_COMPILER == 1500
-#		define GLM_COMPILER GLM_COMPILER_INTEL15
-#	elif __INTEL_COMPILER >= 1600
-#		define GLM_COMPILER GLM_COMPILER_INTEL16
-#	else
-#		define GLM_COMPILER GLM_COMPILER_INTEL
-#	endif
-
-// CUDA
-#elif defined(__CUDACC__)
-#	if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA)
-#		include <cuda.h> // make sure version is defined since nvcc does not define it itself!
-#	endif
-#	if CUDA_VERSION < 3000
-#		error "GLM requires CUDA 3.0 or higher"
-#	else
-#		define GLM_COMPILER GLM_COMPILER_CUDA
-#	endif
-
-// Clang
-#elif defined(__clang__)
-#	if GLM_PLATFORM & GLM_PLATFORM_APPLE // Apple's clang version numbers run ahead of upstream LLVM's
-#		if __clang_major__ == 5 && __clang_minor__ == 0
-#			define GLM_COMPILER GLM_COMPILER_CLANG33
-#		elif __clang_major__ == 5 && __clang_minor__ == 1
-#			define GLM_COMPILER GLM_COMPILER_CLANG34
-#		elif __clang_major__ == 6 && __clang_minor__ == 0
-#			define GLM_COMPILER GLM_COMPILER_CLANG35
-#		elif __clang_major__ == 6 && __clang_minor__ >= 1
-#			define GLM_COMPILER GLM_COMPILER_CLANG36
-#		elif __clang_major__ >= 7
-#			define GLM_COMPILER GLM_COMPILER_CLANG37
-#		else
-#			define GLM_COMPILER GLM_COMPILER_CLANG
-#		endif
-#	else
-#		if __clang_major__ == 3 && __clang_minor__ == 0
-#			define GLM_COMPILER GLM_COMPILER_CLANG30
-#		elif __clang_major__ == 3 && __clang_minor__ == 1
-#			define GLM_COMPILER GLM_COMPILER_CLANG31
-#		elif __clang_major__ == 3 && __clang_minor__ == 2
-#			define GLM_COMPILER GLM_COMPILER_CLANG32
-#		elif __clang_major__ == 3 && __clang_minor__ == 3
-#			define GLM_COMPILER GLM_COMPILER_CLANG33
-#		elif __clang_major__ == 3 && __clang_minor__ == 4
-#			define GLM_COMPILER GLM_COMPILER_CLANG34
-#		elif __clang_major__ == 3 && __clang_minor__ == 5
-#			define GLM_COMPILER GLM_COMPILER_CLANG35
-#		elif __clang_major__ == 3 && __clang_minor__ == 6
-#			define GLM_COMPILER GLM_COMPILER_CLANG36
-#		elif __clang_major__ == 3 && __clang_minor__ == 7
-#			define GLM_COMPILER GLM_COMPILER_CLANG37
-#		elif __clang_major__ == 3 && __clang_minor__ == 8
-#			define GLM_COMPILER GLM_COMPILER_CLANG38
-#		elif __clang_major__ == 3 && __clang_minor__ >= 9
-#			define GLM_COMPILER GLM_COMPILER_CLANG39
-#		elif __clang_major__ == 4 && __clang_minor__ == 0
-#			define GLM_COMPILER GLM_COMPILER_CLANG40
-#		elif __clang_major__ == 4 && __clang_minor__ == 1
-#			define GLM_COMPILER GLM_COMPILER_CLANG41
-#		elif __clang_major__ == 4 && __clang_minor__ >= 2
-#			define GLM_COMPILER GLM_COMPILER_CLANG42
-#		elif __clang_major__ >= 4
-#			define GLM_COMPILER GLM_COMPILER_CLANG42
-#		else
-#			define GLM_COMPILER GLM_COMPILER_CLANG
-#		endif
-#	endif
-
-// Visual C++
-#elif defined(_MSC_VER)
-#	if _MSC_VER < 1600
-#		error "GLM requires Visual C++ 10 - 2010 or higher"
-#	elif _MSC_VER == 1600
-#		define GLM_COMPILER GLM_COMPILER_VC10
-#	elif _MSC_VER == 1700
-#		define GLM_COMPILER GLM_COMPILER_VC11
-#	elif _MSC_VER == 1800
-#		define GLM_COMPILER GLM_COMPILER_VC12
-#	elif _MSC_VER == 1900
-#		define GLM_COMPILER GLM_COMPILER_VC14
-#	elif _MSC_VER >= 1910
-#		define GLM_COMPILER GLM_COMPILER_VC15
-#	else//_MSC_VER
-#		define GLM_COMPILER GLM_COMPILER_VC
-#	endif//_MSC_VER
-
-// G++
-#elif defined(__GNUC__) || defined(__MINGW32__)
-#	if (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
-#		define GLM_COMPILER (GLM_COMPILER_GCC42)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ == 3)
-#		define GLM_COMPILER (GLM_COMPILER_GCC43)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ == 4)
-#		define GLM_COMPILER (GLM_COMPILER_GCC44)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ == 5)
-#		define GLM_COMPILER (GLM_COMPILER_GCC45)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ == 6)
-#		define GLM_COMPILER (GLM_COMPILER_GCC46)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ == 7)
-#		define GLM_COMPILER (GLM_COMPILER_GCC47)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ == 8)
-#		define GLM_COMPILER (GLM_COMPILER_GCC48)
-#	elif (__GNUC__ == 4) && (__GNUC_MINOR__ >= 9)
-#		define GLM_COMPILER (GLM_COMPILER_GCC49)
-#	elif (__GNUC__ == 5) && (__GNUC_MINOR__ == 0)
-#		define GLM_COMPILER (GLM_COMPILER_GCC50)
-#	elif (__GNUC__ == 5) && (__GNUC_MINOR__ == 1)
-#		define GLM_COMPILER (GLM_COMPILER_GCC51)
-#	elif (__GNUC__ == 5) && (__GNUC_MINOR__ == 2)
-#		define GLM_COMPILER (GLM_COMPILER_GCC52)
-#	elif (__GNUC__ == 5) && (__GNUC_MINOR__ == 3)
-#		define GLM_COMPILER (GLM_COMPILER_GCC53)
-#	elif (__GNUC__ == 5) && (__GNUC_MINOR__ >= 4)
-#		define GLM_COMPILER (GLM_COMPILER_GCC54)
-#	elif (__GNUC__ == 6) && (__GNUC_MINOR__ == 0)
-#		define GLM_COMPILER (GLM_COMPILER_GCC60)
-#	elif (__GNUC__ == 6) && (__GNUC_MINOR__ == 1)
-#		define GLM_COMPILER (GLM_COMPILER_GCC61)
-#	elif (__GNUC__ == 6) && (__GNUC_MINOR__ >= 2)
-#		define GLM_COMPILER (GLM_COMPILER_GCC62)
-#	elif (__GNUC__ == 7) && (__GNUC_MINOR__ == 0)
-#		define GLM_COMPILER (GLM_COMPILER_GCC70)
-#	elif (__GNUC__ == 7) && (__GNUC_MINOR__ == 1)
-#		define GLM_COMPILER (GLM_COMPILER_GCC71)
-#	elif (__GNUC__ == 7) && (__GNUC_MINOR__ == 2)
-#		define GLM_COMPILER (GLM_COMPILER_GCC72)
-#	elif (__GNUC__ >= 8)
-#		define GLM_COMPILER (GLM_COMPILER_GCC80)
-#	else
-#		define GLM_COMPILER (GLM_COMPILER_GCC)
-#	endif
-
-#else
-#	define GLM_COMPILER GLM_COMPILER_UNKNOWN
-#endif
-
-#ifndef GLM_COMPILER
-#	error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message."
-#endif//GLM_COMPILER
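The whole chain can also be bypassed: defining GLM_FORCE_COMPILER_UNKNOWN before the first GLM include short-circuits detection, while the trailing #ifndef turns an unrecognized compiler into an explicit error rather than a silent misconfiguration. Illustrative usage:

    // in client code, before any GLM header
    #define GLM_FORCE_COMPILER_UNKNOWN
    #include <glm/glm.hpp>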
-
-///////////////////////////////////////////////////////////////////////////////////
-// Instruction sets
-
-// User defines: GLM_FORCE_PURE GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_AVX GLM_FORCE_AVX2 GLM_FORCE_AVX512
-
-#define GLM_ARCH_X86_BIT		0x00000001
-#define GLM_ARCH_SSE2_BIT		0x00000002
-#define GLM_ARCH_SSE3_BIT		0x00000004
-#define GLM_ARCH_SSSE3_BIT		0x00000008
-#define GLM_ARCH_SSE41_BIT		0x00000010
-#define GLM_ARCH_SSE42_BIT		0x00000020
-#define GLM_ARCH_AVX_BIT		0x00000040
-#define GLM_ARCH_AVX2_BIT		0x00000080
-#define GLM_ARCH_AVX512_BIT		0x00000400 // Skylake subset; must not share a bit with GLM_ARCH_ARM_BIT
-#define GLM_ARCH_ARM_BIT		0x00000100
-#define GLM_ARCH_NEON_BIT		0x00000200
-#define GLM_ARCH_MIPS_BIT		0x00010000
-#define GLM_ARCH_PPC_BIT		0x01000000
-
-#define GLM_ARCH_PURE		(0x00000000)
-#define GLM_ARCH_X86		(GLM_ARCH_X86_BIT)
-#define GLM_ARCH_SSE2		(GLM_ARCH_SSE2_BIT | GLM_ARCH_X86)
-#define GLM_ARCH_SSE3		(GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2)
-#define GLM_ARCH_SSSE3		(GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3)
-#define GLM_ARCH_SSE41		(GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3)
-#define GLM_ARCH_SSE42		(GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41)
-#define GLM_ARCH_AVX		(GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42)
-#define GLM_ARCH_AVX2		(GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX)
-#define GLM_ARCH_AVX512		(GLM_ARCH_AVX512_BIT | GLM_ARCH_AVX2) // Skylake subset
-#define GLM_ARCH_ARM		(GLM_ARCH_ARM_BIT)
-#define GLM_ARCH_NEON		(GLM_ARCH_NEON_BIT | GLM_ARCH_ARM)
-#define GLM_ARCH_MIPS		(GLM_ARCH_MIPS_BIT)
-#define GLM_ARCH_PPC		(GLM_ARCH_PPC_BIT)
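Each composite GLM_ARCH_* set ORs its own bit together with every weaker x86 level, so testing a single low bit matches that level and everything above it. This cumulative layout is exactly what lets the deleted headers guard SSE2 code with one test:

    #if GLM_ARCH & GLM_ARCH_SSE2_BIT
        // reached for SSE2, SSE3, SSSE3, SSE4.x, AVX, AVX2 and AVX512 builds
    #endif

    #if GLM_ARCH & GLM_ARCH_AVX_BIT
        // additionally reached once 256-bit AVX registers are available
    #endif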
-#if defined(GLM_FORCE_PURE)
-#	define GLM_ARCH GLM_ARCH_PURE
-#elif defined(GLM_FORCE_MIPS)
-#	define GLM_ARCH (GLM_ARCH_MIPS)
-#elif defined(GLM_FORCE_PPC)
-#	define GLM_ARCH (GLM_ARCH_PPC)
-#elif defined(GLM_FORCE_NEON)
-#	define GLM_ARCH (GLM_ARCH_NEON)
-#elif defined(GLM_FORCE_AVX512)
-#	define GLM_ARCH (GLM_ARCH_AVX512)
-#elif defined(GLM_FORCE_AVX2)
-#	define GLM_ARCH (GLM_ARCH_AVX2)
-#elif defined(GLM_FORCE_AVX)
-#	define GLM_ARCH (GLM_ARCH_AVX)
-#elif defined(GLM_FORCE_SSE42)
-#	define GLM_ARCH (GLM_ARCH_SSE42)
-#elif defined(GLM_FORCE_SSE41)
-#	define GLM_ARCH (GLM_ARCH_SSE41)
-#elif defined(GLM_FORCE_SSSE3)
-#	define GLM_ARCH (GLM_ARCH_SSSE3)
-#elif defined(GLM_FORCE_SSE3)
-#	define GLM_ARCH (GLM_ARCH_SSE3)
-#elif defined(GLM_FORCE_SSE2)
-#	define GLM_ARCH (GLM_ARCH_SSE2)
-#elif (GLM_COMPILER & (GLM_COMPILER_CLANG | GLM_COMPILER_GCC)) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_LINUX))
-	// These five feature macros together identify the Skylake subset of AVX-512
-#	if defined(__AVX512BW__) && defined(__AVX512F__) && defined(__AVX512CD__) && defined(__AVX512VL__) && defined(__AVX512DQ__)
-#		define GLM_ARCH (GLM_ARCH_AVX512)
-#	elif defined(__AVX2__)
-#		define GLM_ARCH (GLM_ARCH_AVX2)
-#	elif defined(__AVX__)
-#		define GLM_ARCH (GLM_ARCH_AVX)
-#	elif defined(__SSE4_2__)
-#		define GLM_ARCH (GLM_ARCH_SSE42)
-#	elif defined(__SSE4_1__)
-#		define GLM_ARCH (GLM_ARCH_SSE41)
-#	elif defined(__SSSE3__)
-#		define GLM_ARCH (GLM_ARCH_SSSE3)
-#	elif defined(__SSE3__)
-#		define GLM_ARCH (GLM_ARCH_SSE3)
-#	elif defined(__SSE2__)
-#		define GLM_ARCH (GLM_ARCH_SSE2)
-#	elif defined(__i386__) || defined(__x86_64__)
-#		define GLM_ARCH (GLM_ARCH_X86)
-#	elif defined(__ARM_NEON)
-#		define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON)
-#	elif defined(__arm__)
-#		define GLM_ARCH (GLM_ARCH_ARM)
-#	elif defined(__mips__)
-#		define GLM_ARCH (GLM_ARCH_MIPS)
-#	elif defined(__powerpc__)
-#		define GLM_ARCH (GLM_ARCH_PPC)
-#	else
-#		define GLM_ARCH (GLM_ARCH_PURE)
-#	endif
-#elif (GLM_COMPILER & GLM_COMPILER_VC) || ((GLM_COMPILER & GLM_COMPILER_INTEL) && (GLM_PLATFORM & GLM_PLATFORM_WINDOWS))
-#	if defined(_M_ARM)
-#		define GLM_ARCH (GLM_ARCH_ARM)
-#	elif defined(__AVX2__)
-#		define GLM_ARCH (GLM_ARCH_AVX2)
-#	elif defined(__AVX__)
-#		define GLM_ARCH (GLM_ARCH_AVX)
-#	elif defined(_M_X64)
-#		define GLM_ARCH (GLM_ARCH_SSE2)
-#	elif defined(_M_IX86_FP)
-#		if _M_IX86_FP >= 2
-#			define GLM_ARCH (GLM_ARCH_SSE2)
-#		else
-#			define GLM_ARCH (GLM_ARCH_PURE)
-#		endif
-#	elif defined(_M_PPC)
-#		define GLM_ARCH (GLM_ARCH_PPC)
-#	else
-#		define GLM_ARCH (GLM_ARCH_PURE)
-#	endif
-#else
-#	define GLM_ARCH GLM_ARCH_PURE
-#endif
-
-// With MinGW-W64, including intrinsic headers before intrin.h will produce some errors. The problem is
-// that windows.h (and maybe other headers) will silently include intrin.h, which of course causes problems.
-// To fix it, we just explicitly include intrin.h here.
-#if defined(__MINGW64__) && (GLM_ARCH != GLM_ARCH_PURE)
-#	include <intrin.h>
-#endif
-
-#if GLM_ARCH & GLM_ARCH_AVX2_BIT
-#	include <immintrin.h>
-#elif GLM_ARCH & GLM_ARCH_AVX_BIT
-#	include <immintrin.h>
-#elif GLM_ARCH & GLM_ARCH_SSE42_BIT
-#	if GLM_COMPILER & GLM_COMPILER_CLANG
-#		include <popcntintrin.h>
-#	endif
-#	include <nmmintrin.h>
-#elif GLM_ARCH & GLM_ARCH_SSE41_BIT
-#	include <smmintrin.h>
-#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT
-#	include <tmmintrin.h>
-#elif GLM_ARCH & GLM_ARCH_SSE3_BIT
-#	include <pmmintrin.h>
-#elif GLM_ARCH & GLM_ARCH_SSE2_BIT
-#	include <emmintrin.h>
-#endif//GLM_ARCH
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-	typedef __m128			glm_vec4;
-	typedef __m128i			glm_ivec4;
-	typedef __m128i			glm_uvec4;
-#endif
-
-#if GLM_ARCH & GLM_ARCH_AVX_BIT
-	typedef __m256d			glm_dvec4;
-#endif
-
-#if GLM_ARCH & GLM_ARCH_AVX2_BIT
-	typedef __m256i			glm_i64vec4;
-	typedef __m256i			glm_u64vec4;
-#endif
diff --git a/depedencies/include/glm/simd/trigonometric.h b/depedencies/include/glm/simd/trigonometric.h
deleted file mode 100644
index 739b796..0000000
--- a/depedencies/include/glm/simd/trigonometric.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/// @ref simd
-/// @file glm/simd/trigonometric.h
-
-#pragma once
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
-
diff --git a/depedencies/include/glm/simd/vector_relational.h b/depedencies/include/glm/simd/vector_relational.h
deleted file mode 100644
index f7385e9..0000000
--- a/depedencies/include/glm/simd/vector_relational.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/// @ref simd
-/// @file glm/simd/vector_relational.h
-
-#pragma once
-
-#if GLM_ARCH & GLM_ARCH_SSE2_BIT
-
-#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
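Looking back at the typedefs at the end of platform.h: glm_vec4 and the integer variants are plain aliases of the raw intrinsic types, so the deleted SIMD layer mixes freely with hand-written intrinsics. A minimal sketch, assuming an SSE2 target and the GLM headers on the include path:

    #include <glm/simd/platform.h>

    #if GLM_ARCH & GLM_ARCH_SSE2_BIT
    void raw_layer_demo()
    {
        glm_vec4 a = _mm_set_ps(4.f, 3.f, 2.f, 1.f); // an __m128 under another name
        glm_ivec4 b = _mm_set1_epi32(7);             // likewise an __m128i
        float lanes[4];
        _mm_storeu_ps(lanes, a);                     // lanes = {1, 2, 3, 4}
        (void)b;
    }
    #endif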