Update Files

2025-01-22 16:18:30 +01:00
parent ed4603cf95
commit a36294b518
16718 changed files with 2960346 additions and 0 deletions


@@ -0,0 +1,719 @@
#pragma once
#include "types.h"
#include <string.h>
/*! \file float32x4.h
    \brief Provides 128-bit, four-element floating-point SIMD operations, mapped to their SSE or Neon equivalents.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE)
static inline kinc_float32x4_t kinc_float32x4_intrin_load(const float *values) {
return _mm_load_ps(values);
}
static inline kinc_float32x4_t kinc_float32x4_intrin_load_unaligned(const float *values) {
return _mm_loadu_ps(values);
}
static inline kinc_float32x4_t kinc_float32x4_load(float a, float b, float c, float d) {
return _mm_set_ps(d, c, b, a);
}
static inline kinc_float32x4_t kinc_float32x4_load_all(float t) {
return _mm_set_ps1(t);
}
static inline void kinc_float32x4_store(float *destination, kinc_float32x4_t value) {
_mm_store_ps(destination, value);
}
static inline void kinc_float32x4_store_unaligned(float *destination, kinc_float32x4_t value) {
_mm_storeu_ps(destination, value);
}
static inline float kinc_float32x4_get(kinc_float32x4_t t, int index) {
union {
__m128 value;
float elements[4];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_float32x4_t kinc_float32x4_abs(kinc_float32x4_t t) {
__m128 mask = _mm_set_ps1(-0.f);
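	// -0.0f is just the sign bit; andnot clears that bit in every lane, leaving |t|.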
return _mm_andnot_ps(mask, t);
}
static inline kinc_float32x4_t kinc_float32x4_add(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_add_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_div(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_div_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_mul(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_mul_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_neg(kinc_float32x4_t t) {
__m128 negative = _mm_set_ps1(-1.0f);
return _mm_mul_ps(t, negative);
}
static inline kinc_float32x4_t kinc_float32x4_reciprocal_approximation(kinc_float32x4_t t) {
return _mm_rcp_ps(t);
}
static inline kinc_float32x4_t kinc_float32x4_reciprocal_sqrt_approximation(kinc_float32x4_t t) {
return _mm_rsqrt_ps(t);
}
static inline kinc_float32x4_t kinc_float32x4_sub(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_sub_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_sqrt(kinc_float32x4_t t) {
return _mm_sqrt_ps(t);
}
static inline kinc_float32x4_t kinc_float32x4_max(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_max_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_min(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_min_ps(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpeq(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_cmpeq_ps(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpge(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_cmpge_ps(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpgt(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_cmpgt_ps(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmple(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_cmple_ps(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmplt(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_cmplt_ps(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpneq(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_cmpneq_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_sel(kinc_float32x4_t a, kinc_float32x4_t b, kinc_float32x4_mask_t mask) {
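	// Branchless blend: b ^ (mask & (a ^ b)) takes a's bits where the mask is all-ones and b's bits where it is zero.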
return _mm_xor_ps(b, _mm_and_ps(mask, _mm_xor_ps(a, b)));
}
static inline kinc_float32x4_t kinc_float32x4_or(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_or_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_and(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_and_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_xor(kinc_float32x4_t a, kinc_float32x4_t b) {
return _mm_xor_ps(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_not(kinc_float32x4_t t) {
__m128 zeroes = _mm_setzero_ps();
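	// Comparing zero with itself yields an all-ones mask; xor against it flips every bit of t.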
return _mm_xor_ps(t, _mm_cmpeq_ps(zeroes, zeroes));
}
#define kinc_float32x4_shuffle_custom(abcd, efgh, left_1, left_2, right_1, right_2) \
_mm_shuffle_ps((abcd), (efgh), KINC_SHUFFLE_TABLE((left_1), (left_2), (right_1), (right_2)))
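/* Illustrative sketch (not part of the original header): lanes 0-1 of the result come from
   the first argument and lanes 2-3 from the second, so passing the same vector twice with
   indices 3, 2, 1, 0 reverses it. kinc_float32x4_reverse_example is a hypothetical name. */
static inline kinc_float32x4_t kinc_float32x4_reverse_example(kinc_float32x4_t abcd) {
	return kinc_float32x4_shuffle_custom(abcd, abcd, 3, 2, 1, 0); // (a, b, c, d) -> (d, c, b, a)
}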
static inline kinc_float32x4_t kinc_float32x4_shuffle_aebf(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
// aka unpacklo aka zip1 aka interleave low
return _mm_unpacklo_ps(abcd, efgh);
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_cgdh(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
// aka unpackhi aka zip2 aka interleave high
return _mm_unpackhi_ps(abcd, efgh);
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_abef(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
// aka movelh
return _mm_movelh_ps(abcd, efgh);
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_ghcd(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
// aka movehl
return _mm_movehl_ps(abcd, efgh);
}
#elif defined(KINC_NEON)
static inline kinc_float32x4_t kinc_float32x4_intrin_load(const float *values) {
return vld1q_f32(values);
}
static inline kinc_float32x4_t kinc_float32x4_intrin_load_unaligned(const float *values) {
return kinc_float32x4_intrin_load(values);
}
static inline kinc_float32x4_t kinc_float32x4_load(float a, float b, float c, float d) {
return (kinc_float32x4_t){a, b, c, d};
}
static inline kinc_float32x4_t kinc_float32x4_load_all(float t) {
return (kinc_float32x4_t){t, t, t, t};
}
static inline void kinc_float32x4_store(float *destination, kinc_float32x4_t value) {
vst1q_f32(destination, value);
}
static inline void kinc_float32x4_store_unaligned(float *destination, kinc_float32x4_t value) {
kinc_float32x4_store(destination, value);
}
static inline float kinc_float32x4_get(kinc_float32x4_t t, int index) {
return t[index];
}
static inline kinc_float32x4_t kinc_float32x4_abs(kinc_float32x4_t t) {
return vabsq_f32(t);
}
static inline kinc_float32x4_t kinc_float32x4_add(kinc_float32x4_t a, kinc_float32x4_t b) {
return vaddq_f32(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_div(kinc_float32x4_t a, kinc_float32x4_t b) {
#if defined(__aarch64__)
return vdivq_f32(a, b);
#else
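	// ARMv7 NEON has no float division: refine the reciprocal estimate of b with one
	// Newton-Raphson step (vrecpsq_f32 computes 2 - b * inv), then multiply by a.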
float32x4_t inv = vrecpeq_f32(b);
float32x4_t restep = vrecpsq_f32(b, inv);
inv = vmulq_f32(restep, inv);
return vmulq_f32(a, inv);
#endif
}
static inline kinc_float32x4_t kinc_float32x4_mul(kinc_float32x4_t a, kinc_float32x4_t b) {
return vmulq_f32(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_neg(kinc_float32x4_t t) {
return vnegq_f32(t);
}
static inline kinc_float32x4_t kinc_float32x4_reciprocal_approximation(kinc_float32x4_t t) {
return vrecpeq_f32(t);
}
static inline kinc_float32x4_t kinc_float32x4_reciprocal_sqrt_approximation(kinc_float32x4_t t) {
return vrsqrteq_f32(t);
}
static inline kinc_float32x4_t kinc_float32x4_sub(kinc_float32x4_t a, kinc_float32x4_t b) {
return vsubq_f32(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_sqrt(kinc_float32x4_t t) {
#if defined(__aarch64__)
return vsqrtq_f32(t);
#else
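	// ARMv7 approximation: sqrt(t) is computed as t * rsqrt(t); a zero lane yields 0 * inf = NaN.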
return vmulq_f32(t, vrsqrteq_f32(t));
#endif
}
static inline kinc_float32x4_t kinc_float32x4_max(kinc_float32x4_t a, kinc_float32x4_t b) {
return vmaxq_f32(a, b);
}
static inline kinc_float32x4_t kinc_float32x4_min(kinc_float32x4_t a, kinc_float32x4_t b) {
return vminq_f32(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpeq(kinc_float32x4_t a, kinc_float32x4_t b) {
return vceqq_f32(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpge(kinc_float32x4_t a, kinc_float32x4_t b) {
return vcgeq_f32(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpgt(kinc_float32x4_t a, kinc_float32x4_t b) {
return vcgtq_f32(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmple(kinc_float32x4_t a, kinc_float32x4_t b) {
return vcleq_f32(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmplt(kinc_float32x4_t a, kinc_float32x4_t b) {
return vcltq_f32(a, b);
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpneq(kinc_float32x4_t a, kinc_float32x4_t b) {
return vmvnq_u32(vceqq_f32(a, b));
}
static inline kinc_float32x4_t kinc_float32x4_sel(kinc_float32x4_t a, kinc_float32x4_t b, kinc_float32x4_mask_t mask) {
return vbslq_f32(mask, a, b);
}
static inline kinc_float32x4_t kinc_float32x4_or(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32x4_t acvt = vreinterpretq_u32_f32(a);
uint32x4_t bcvt = vreinterpretq_u32_f32(b);
return vreinterpretq_f32_u32(vorrq_u32(acvt, bcvt));
}
static inline kinc_float32x4_t kinc_float32x4_and(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32x4_t acvt = vreinterpretq_u32_f32(a);
uint32x4_t bcvt = vreinterpretq_u32_f32(b);
return vreinterpretq_f32_u32(vandq_u32(acvt, bcvt));
}
static inline kinc_float32x4_t kinc_float32x4_xor(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32x4_t acvt = vreinterpretq_u32_f32(a);
uint32x4_t bcvt = vreinterpretq_u32_f32(b);
return vreinterpretq_f32_u32(veorq_u32(acvt, bcvt));
}
static inline kinc_float32x4_t kinc_float32x4_not(kinc_float32x4_t t) {
uint32x4_t tcvt = vreinterpretq_u32_f32(t);
return vreinterpretq_f32_u32(vmvnq_u32(tcvt));
}
#define kinc_float32x4_shuffle_custom(abcd, efgh, left_1, left_2, right_1, right_2) \
(kinc_float32x4_t) { \
vgetq_lane_f32((abcd), ((left_1)&0x3)), vgetq_lane_f32((abcd), ((left_2)&0x3)), vgetq_lane_f32((efgh), ((right_1)&0x3)), \
vgetq_lane_f32((efgh), ((right_2)&0x3)) \
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_aebf(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
#if defined(__aarch64__)
return vzip1q_f32(abcd, efgh);
#else
float a = vgetq_lane_f32(abcd, 0);
float b = vgetq_lane_f32(abcd, 1);
float e = vgetq_lane_f32(efgh, 0);
float f = vgetq_lane_f32(efgh, 1);
return (kinc_float32x4_t){a, e, b, f};
#endif
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_cgdh(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
#if defined(__aarch64__)
return vzip2q_f32(abcd, efgh);
#else
float c = vgetq_lane_f32(abcd, 2);
float d = vgetq_lane_f32(abcd, 3);
float g = vgetq_lane_f32(efgh, 2);
float h = vgetq_lane_f32(efgh, 3);
return (kinc_float32x4_t){c, g, d, h};
#endif
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_abef(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
float32x2_t ab = vget_low_f32(abcd);
float32x2_t ef = vget_low_f32(efgh);
return vcombine_f32(ab, ef);
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_ghcd(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
float32x2_t cd = vget_high_f32(abcd);
float32x2_t gh = vget_high_f32(efgh);
return vcombine_f32(gh, cd);
}
#else
#include <math.h>
static inline kinc_float32x4_t kinc_float32x4_intrin_load(const float *values) {
kinc_float32x4_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_intrin_load_unaligned(const float *values) {
return kinc_float32x4_intrin_load(values);
}
static inline kinc_float32x4_t kinc_float32x4_load(float a, float b, float c, float d) {
kinc_float32x4_t value;
value.values[0] = a;
value.values[1] = b;
value.values[2] = c;
value.values[3] = d;
return value;
}
static inline kinc_float32x4_t kinc_float32x4_load_all(float t) {
kinc_float32x4_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
return value;
}
static inline void kinc_float32x4_store(float *destination, kinc_float32x4_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
}
static inline void kinc_float32x4_store_unaligned(float *destination, kinc_float32x4_t value) {
kinc_float32x4_store(destination, value);
}
static inline float kinc_float32x4_get(kinc_float32x4_t t, int index) {
return t.values[index];
}
static inline kinc_float32x4_t kinc_float32x4_abs(kinc_float32x4_t t) {
kinc_float32x4_t value;
value.values[0] = kinc_abs(t.values[0]);
value.values[1] = kinc_abs(t.values[1]);
value.values[2] = kinc_abs(t.values[2]);
value.values[3] = kinc_abs(t.values[3]);
return value;
}
static inline kinc_float32x4_t kinc_float32x4_add(kinc_float32x4_t a, kinc_float32x4_t b) {
kinc_float32x4_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_div(kinc_float32x4_t a, kinc_float32x4_t b) {
kinc_float32x4_t value;
value.values[0] = a.values[0] / b.values[0];
value.values[1] = a.values[1] / b.values[1];
value.values[2] = a.values[2] / b.values[2];
value.values[3] = a.values[3] / b.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_mul(kinc_float32x4_t a, kinc_float32x4_t b) {
kinc_float32x4_t value;
value.values[0] = a.values[0] * b.values[0];
value.values[1] = a.values[1] * b.values[1];
value.values[2] = a.values[2] * b.values[2];
value.values[3] = a.values[3] * b.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_neg(kinc_float32x4_t t) {
kinc_float32x4_t value;
value.values[0] = -t.values[0];
value.values[1] = -t.values[1];
value.values[2] = -t.values[2];
value.values[3] = -t.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_reciprocal_approximation(kinc_float32x4_t t) {
kinc_float32x4_t value;
value.values[0] = 1.0f / t.values[0];
value.values[1] = 1.0f / t.values[1];
value.values[2] = 1.0f / t.values[2];
value.values[3] = 1.0f / t.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_reciprocal_sqrt_approximation(kinc_float32x4_t t) {
kinc_float32x4_t value;
value.values[0] = 1.0f / sqrtf(t.values[0]);
value.values[1] = 1.0f / sqrtf(t.values[1]);
value.values[2] = 1.0f / sqrtf(t.values[2]);
value.values[3] = 1.0f / sqrtf(t.values[3]);
return value;
}
static inline kinc_float32x4_t kinc_float32x4_sub(kinc_float32x4_t a, kinc_float32x4_t b) {
kinc_float32x4_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_sqrt(kinc_float32x4_t t) {
kinc_float32x4_t value;
value.values[0] = sqrtf(t.values[0]);
value.values[1] = sqrtf(t.values[1]);
value.values[2] = sqrtf(t.values[2]);
value.values[3] = sqrtf(t.values[3]);
return value;
}
static inline kinc_float32x4_t kinc_float32x4_max(kinc_float32x4_t a, kinc_float32x4_t b) {
kinc_float32x4_t value;
value.values[0] = kinc_max(a.values[0], b.values[0]);
value.values[1] = kinc_max(a.values[1], b.values[1]);
value.values[2] = kinc_max(a.values[2], b.values[2]);
value.values[3] = kinc_max(a.values[3], b.values[3]);
return value;
}
static inline kinc_float32x4_t kinc_float32x4_min(kinc_float32x4_t a, kinc_float32x4_t b) {
kinc_float32x4_t value;
value.values[0] = kinc_min(a.values[0], b.values[0]);
value.values[1] = kinc_min(a.values[1], b.values[1]);
value.values[2] = kinc_min(a.values[2], b.values[2]);
value.values[3] = kinc_min(a.values[3], b.values[3]);
return value;
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpeq(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t mask_cvt[4];
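	/* The all-ones / all-zero patterns are built as integers and copied into the
	   mask with memcpy, the portable way to move raw bit patterns between types. */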
mask_cvt[0] = a.values[0] == b.values[0] ? 0xffffffff : 0;
mask_cvt[1] = a.values[1] == b.values[1] ? 0xffffffff : 0;
mask_cvt[2] = a.values[2] == b.values[2] ? 0xffffffff : 0;
mask_cvt[3] = a.values[3] == b.values[3] ? 0xffffffff : 0;
kinc_float32x4_mask_t mask;
memcpy(&mask.values[0], &mask_cvt[0], sizeof(mask_cvt));
return mask;
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpge(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t mask_cvt[4];
mask_cvt[0] = a.values[0] >= b.values[0] ? 0xffffffff : 0;
mask_cvt[1] = a.values[1] >= b.values[1] ? 0xffffffff : 0;
mask_cvt[2] = a.values[2] >= b.values[2] ? 0xffffffff : 0;
mask_cvt[3] = a.values[3] >= b.values[3] ? 0xffffffff : 0;
kinc_float32x4_mask_t mask;
memcpy(&mask.values[0], &mask_cvt[0], sizeof(mask_cvt));
return mask;
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpgt(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t mask_cvt[4];
mask_cvt[0] = a.values[0] > b.values[0] ? 0xffffffff : 0;
mask_cvt[1] = a.values[1] > b.values[1] ? 0xffffffff : 0;
mask_cvt[2] = a.values[2] > b.values[2] ? 0xffffffff : 0;
mask_cvt[3] = a.values[3] > b.values[3] ? 0xffffffff : 0;
kinc_float32x4_mask_t mask;
memcpy(&mask.values[0], &mask_cvt[0], sizeof(mask_cvt));
return mask;
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmple(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t mask_cvt[4];
mask_cvt[0] = a.values[0] <= b.values[0] ? 0xffffffff : 0;
mask_cvt[1] = a.values[1] <= b.values[1] ? 0xffffffff : 0;
mask_cvt[2] = a.values[2] <= b.values[2] ? 0xffffffff : 0;
mask_cvt[3] = a.values[3] <= b.values[3] ? 0xffffffff : 0;
kinc_float32x4_mask_t mask;
memcpy(&mask.values[0], &mask_cvt[0], sizeof(mask_cvt));
return mask;
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmplt(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t mask_cvt[4];
mask_cvt[0] = a.values[0] < b.values[0] ? 0xffffffff : 0;
mask_cvt[1] = a.values[1] < b.values[1] ? 0xffffffff : 0;
mask_cvt[2] = a.values[2] < b.values[2] ? 0xffffffff : 0;
mask_cvt[3] = a.values[3] < b.values[3] ? 0xffffffff : 0;
kinc_float32x4_mask_t mask;
memcpy(&mask.values[0], &mask_cvt[0], sizeof(mask_cvt));
return mask;
}
static inline kinc_float32x4_mask_t kinc_float32x4_cmpneq(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t mask_cvt[4];
mask_cvt[0] = a.values[0] != b.values[0] ? 0xffffffff : 0;
mask_cvt[1] = a.values[1] != b.values[1] ? 0xffffffff : 0;
mask_cvt[2] = a.values[2] != b.values[2] ? 0xffffffff : 0;
mask_cvt[3] = a.values[3] != b.values[3] ? 0xffffffff : 0;
kinc_float32x4_mask_t mask;
memcpy(&mask.values[0], &mask_cvt[0], sizeof(mask_cvt));
return mask;
}
static inline kinc_float32x4_t kinc_float32x4_sel(kinc_float32x4_t a, kinc_float32x4_t b, kinc_float32x4_mask_t mask) {
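	/* The fallback mask stores 0xffffffff bit patterns in floats; any such pattern
	   (a NaN) compares unequal to 0.0f, so a is picked exactly where the mask is set. */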
kinc_float32x4_t value;
value.values[0] = mask.values[0] != 0.0f ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0.0f ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0.0f ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0.0f ? a.values[3] : b.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_or(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t acvt[4];
uint32_t bcvt[4];
memcpy(&acvt[0], &a.values[0], sizeof(a));
memcpy(&bcvt[0], &b.values[0], sizeof(b));
acvt[0] |= bcvt[0];
acvt[1] |= bcvt[1];
acvt[2] |= bcvt[2];
acvt[3] |= bcvt[3];
kinc_float32x4_t value;
memcpy(&value.values[0], &acvt[0], sizeof(acvt));
return value;
}
static inline kinc_float32x4_t kinc_float32x4_and(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t acvt[4];
uint32_t bcvt[4];
memcpy(&acvt[0], &a.values[0], sizeof(a));
memcpy(&bcvt[0], &b.values[0], sizeof(b));
acvt[0] &= bcvt[0];
acvt[1] &= bcvt[1];
acvt[2] &= bcvt[2];
acvt[3] &= bcvt[3];
kinc_float32x4_t value;
memcpy(&value.values[0], &acvt[0], sizeof(acvt));
return value;
}
static inline kinc_float32x4_t kinc_float32x4_xor(kinc_float32x4_t a, kinc_float32x4_t b) {
uint32_t acvt[4];
uint32_t bcvt[4];
memcpy(&acvt[0], &a.values[0], sizeof(a));
memcpy(&bcvt[0], &b.values[0], sizeof(b));
acvt[0] ^= bcvt[0];
acvt[1] ^= bcvt[1];
acvt[2] ^= bcvt[2];
acvt[3] ^= bcvt[3];
kinc_float32x4_t value;
memcpy(&value.values[0], &acvt[0], sizeof(acvt));
return value;
}
static inline kinc_float32x4_t kinc_float32x4_not(kinc_float32x4_t t) {
uint32_t tcvt[4];
memcpy(&tcvt[0], &t.values[0], sizeof(t));
tcvt[0] = ~tcvt[0];
tcvt[1] = ~tcvt[1];
tcvt[2] = ~tcvt[2];
tcvt[3] = ~tcvt[3];
kinc_float32x4_t value;
memcpy(&value.values[0], &tcvt[0], sizeof(tcvt));
return value;
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_custom(kinc_float32x4_t abcd, kinc_float32x4_t efgh, const uint32_t left_1, const uint32_t left_2,
const uint32_t right_1, const uint32_t right_2) {
kinc_float32x4_t value;
value.values[0] = abcd.values[left_1 & 0x3];
value.values[1] = abcd.values[left_2 & 0x3];
value.values[2] = efgh.values[right_1 & 0x3];
value.values[3] = efgh.values[right_2 & 0x3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_aebf(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
kinc_float32x4_t value;
value.values[0] = abcd.values[0];
value.values[1] = efgh.values[0];
value.values[2] = abcd.values[1];
value.values[3] = efgh.values[1];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_cgdh(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
kinc_float32x4_t value;
value.values[0] = abcd.values[2];
value.values[1] = efgh.values[2];
value.values[2] = abcd.values[3];
value.values[3] = efgh.values[3];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_abef(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
kinc_float32x4_t value;
value.values[0] = abcd.values[0];
value.values[1] = abcd.values[1];
value.values[2] = efgh.values[0];
value.values[3] = efgh.values[1];
return value;
}
static inline kinc_float32x4_t kinc_float32x4_shuffle_ghcd(kinc_float32x4_t abcd, kinc_float32x4_t efgh) {
kinc_float32x4_t value;
value.values[0] = efgh.values[2];
value.values[1] = efgh.values[3];
value.values[2] = abcd.values[2];
value.values[3] = abcd.values[3];
return value;
}
#endif
#ifdef __cplusplus
}
#endif
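
/* Illustrative usage sketch (not part of the original header; assumes this header is
   included): clamping a float buffer to [lo, hi] four lanes at a time. clamp4 is a
   hypothetical helper; count is assumed to be a multiple of 4 and the pointers may be
   unaligned. */
static void clamp4(float *data, int count, float lo, float hi) {
	kinc_float32x4_t vlo = kinc_float32x4_load_all(lo);
	kinc_float32x4_t vhi = kinc_float32x4_load_all(hi);
	for (int i = 0; i < count; i += 4) {
		kinc_float32x4_t v = kinc_float32x4_intrin_load_unaligned(&data[i]);
		v = kinc_float32x4_min(kinc_float32x4_max(v, vlo), vhi);
		kinc_float32x4_store_unaligned(&data[i], v);
	}
}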


@@ -0,0 +1,463 @@
#pragma once
#include "types.h"
/*! \file int16x8.h
    \brief Provides 128-bit, eight-element signed 16-bit integer SIMD operations, mapped to their SSE2 or Neon equivalents.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
static inline kinc_int16x8_t kinc_int16x8_intrin_load(const int16_t *values) {
return _mm_load_si128((const kinc_int16x8_t *)values);
}
static inline kinc_int16x8_t kinc_int16x8_intrin_load_unaligned(const int16_t *values) {
return _mm_loadu_si128((const kinc_int16x8_t *)values);
}
static inline kinc_int16x8_t kinc_int16x8_load(const int16_t values[8]) {
return _mm_set_epi16(values[7], values[6], values[5], values[4], values[3], values[2], values[1], values[0]);
}
static inline kinc_int16x8_t kinc_int16x8_load_all(int16_t t) {
return _mm_set1_epi16(t);
}
static inline void kinc_int16x8_store(int16_t *destination, kinc_int16x8_t value) {
_mm_store_si128((kinc_int16x8_t *)destination, value);
}
static inline void kinc_int16x8_store_unaligned(int16_t *destination, kinc_int16x8_t value) {
_mm_storeu_si128((kinc_int16x8_t *)destination, value);
}
static inline int16_t kinc_int16x8_get(kinc_int16x8_t t, int index) {
union {
__m128i value;
int16_t elements[8];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_int16x8_t kinc_int16x8_add(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_add_epi16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_sub(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_sub_epi16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_max(kinc_int16x8_t a, kinc_int16x8_t b) {
__m128i mask = _mm_cmpgt_epi16(a, b);
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int16x8_t kinc_int16x8_min(kinc_int16x8_t a, kinc_int16x8_t b) {
__m128i mask = _mm_cmplt_epi16(a, b);
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpeq(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_cmpeq_epi16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpge(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_or_si128(_mm_cmpgt_epi16(a, b), _mm_cmpeq_epi16(a, b));
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpgt(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_cmpgt_epi16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmple(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_or_si128(_mm_cmplt_epi16(a, b), _mm_cmpeq_epi16(a, b));
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmplt(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_cmplt_epi16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpneq(kinc_int16x8_t a, kinc_int16x8_t b) {
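	// andnot with an all-ones vector computes the complement of the equality mask.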
return _mm_andnot_si128(_mm_cmpeq_epi16(a, b), _mm_set1_epi32(0xffffffff));
}
static inline kinc_int16x8_t kinc_int16x8_sel(kinc_int16x8_t a, kinc_int16x8_t b, kinc_int16x8_mask_t mask) {
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int16x8_t kinc_int16x8_or(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_or_si128(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_and(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_and_si128(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_xor(kinc_int16x8_t a, kinc_int16x8_t b) {
return _mm_xor_si128(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_not(kinc_int16x8_t t) {
return _mm_xor_si128(t, _mm_set1_epi32(0xffffffff));
}
#elif defined(KINC_NEON)
static inline kinc_int16x8_t kinc_int16x8_intrin_load(const int16_t *values) {
return vld1q_s16(values);
}
static inline kinc_int16x8_t kinc_int16x8_intrin_load_unaligned(const int16_t *values) {
return kinc_int16x8_intrin_load(values);
}
static inline kinc_int16x8_t kinc_int16x8_load(const int16_t values[8]) {
return (kinc_int16x8_t){values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7]};
}
static inline kinc_int16x8_t kinc_int16x8_load_all(int16_t t) {
return (kinc_int16x8_t){t, t, t, t, t, t, t, t};
}
static inline void kinc_int16x8_store(int16_t *destination, kinc_int16x8_t value) {
vst1q_s16(destination, value);
}
static inline void kinc_int16x8_store_unaligned(int16_t *destination, kinc_int16x8_t value) {
kinc_int16x8_store(destination, value);
}
static inline int16_t kinc_int16x8_get(kinc_int16x8_t t, int index) {
return t[index];
}
static inline kinc_int16x8_t kinc_int16x8_add(kinc_int16x8_t a, kinc_int16x8_t b) {
return vaddq_s16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_sub(kinc_int16x8_t a, kinc_int16x8_t b) {
return vsubq_s16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_max(kinc_int16x8_t a, kinc_int16x8_t b) {
return vmaxq_s16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_min(kinc_int16x8_t a, kinc_int16x8_t b) {
return vminq_s16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpeq(kinc_int16x8_t a, kinc_int16x8_t b) {
return vceqq_s16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpge(kinc_int16x8_t a, kinc_int16x8_t b) {
return vcgeq_s16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpgt(kinc_int16x8_t a, kinc_int16x8_t b) {
return vcgtq_s16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmple(kinc_int16x8_t a, kinc_int16x8_t b) {
return vcleq_s16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmplt(kinc_int16x8_t a, kinc_int16x8_t b) {
return vcltq_s16(a, b);
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpneq(kinc_int16x8_t a, kinc_int16x8_t b) {
return vmvnq_u16(vceqq_s16(a, b));
}
static inline kinc_int16x8_t kinc_int16x8_sel(kinc_int16x8_t a, kinc_int16x8_t b, kinc_int16x8_mask_t mask) {
return vbslq_s16(mask, a, b);
}
static inline kinc_int16x8_t kinc_int16x8_or(kinc_int16x8_t a, kinc_int16x8_t b) {
return vorrq_s16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_and(kinc_int16x8_t a, kinc_int16x8_t b) {
return vandq_s16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_xor(kinc_int16x8_t a, kinc_int16x8_t b) {
return veorq_s16(a, b);
}
static inline kinc_int16x8_t kinc_int16x8_not(kinc_int16x8_t t) {
return vmvnq_s16(t);
}
#else
static inline kinc_int16x8_t kinc_int16x8_intrin_load(const int16_t *values) {
kinc_int16x8_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_intrin_load_unaligned(const int16_t *values) {
return kinc_int16x8_intrin_load(values);
}
static inline kinc_int16x8_t kinc_int16x8_load(const int16_t values[8]) {
kinc_int16x8_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_load_all(int16_t t) {
kinc_int16x8_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
value.values[4] = t;
value.values[5] = t;
value.values[6] = t;
value.values[7] = t;
return value;
}
static inline void kinc_int16x8_store(int16_t *destination, kinc_int16x8_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
destination[4] = value.values[4];
destination[5] = value.values[5];
destination[6] = value.values[6];
destination[7] = value.values[7];
}
static inline void kinc_int16x8_store_unaligned(int16_t *destination, kinc_int16x8_t value) {
	kinc_int16x8_store(destination, value);
}
static inline int16_t kinc_int16x8_get(kinc_int16x8_t t, int index) {
return t.values[index];
}
static inline kinc_int16x8_t kinc_int16x8_add(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
value.values[4] = a.values[4] + b.values[4];
value.values[5] = a.values[5] + b.values[5];
value.values[6] = a.values[6] + b.values[6];
value.values[7] = a.values[7] + b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_sub(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
value.values[4] = a.values[4] - b.values[4];
value.values[5] = a.values[5] - b.values[5];
value.values[6] = a.values[6] - b.values[6];
value.values[7] = a.values[7] - b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_max(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] > b.values[0] ? a.values[0] : b.values[0];
value.values[1] = a.values[1] > b.values[1] ? a.values[1] : b.values[1];
value.values[2] = a.values[2] > b.values[2] ? a.values[2] : b.values[2];
value.values[3] = a.values[3] > b.values[3] ? a.values[3] : b.values[3];
value.values[4] = a.values[4] > b.values[4] ? a.values[4] : b.values[4];
value.values[5] = a.values[5] > b.values[5] ? a.values[5] : b.values[5];
value.values[6] = a.values[6] > b.values[6] ? a.values[6] : b.values[6];
value.values[7] = a.values[7] > b.values[7] ? a.values[7] : b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_min(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] > b.values[0] ? b.values[0] : a.values[0];
value.values[1] = a.values[1] > b.values[1] ? b.values[1] : a.values[1];
value.values[2] = a.values[2] > b.values[2] ? b.values[2] : a.values[2];
value.values[3] = a.values[3] > b.values[3] ? b.values[3] : a.values[3];
value.values[4] = a.values[4] > b.values[4] ? b.values[4] : a.values[4];
value.values[5] = a.values[5] > b.values[5] ? b.values[5] : a.values[5];
value.values[6] = a.values[6] > b.values[6] ? b.values[6] : a.values[6];
value.values[7] = a.values[7] > b.values[7] ? b.values[7] : a.values[7];
return value;
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpeq(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_mask_t mask;
mask.values[0] = a.values[0] == b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] == b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] == b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] == b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] == b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] == b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] == b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] == b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpge(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_mask_t mask;
mask.values[0] = a.values[0] >= b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] >= b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] >= b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] >= b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] >= b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] >= b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] >= b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] >= b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpgt(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_mask_t mask;
mask.values[0] = a.values[0] > b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] > b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] > b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] > b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] > b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] > b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] > b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] > b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmple(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_mask_t mask;
mask.values[0] = a.values[0] <= b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] <= b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] <= b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] <= b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] <= b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] <= b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] <= b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] <= b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmplt(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_mask_t mask;
mask.values[0] = a.values[0] < b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] < b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] < b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] < b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] < b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] < b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] < b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] < b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_int16x8_mask_t kinc_int16x8_cmpneq(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_mask_t mask;
mask.values[0] = a.values[0] != b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] != b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] != b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] != b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] != b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] != b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] != b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] != b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_int16x8_t kinc_int16x8_sel(kinc_int16x8_t a, kinc_int16x8_t b, kinc_int16x8_mask_t mask) {
kinc_int16x8_t value;
value.values[0] = mask.values[0] != 0 ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0 ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0 ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0 ? a.values[3] : b.values[3];
value.values[4] = mask.values[4] != 0 ? a.values[4] : b.values[4];
value.values[5] = mask.values[5] != 0 ? a.values[5] : b.values[5];
value.values[6] = mask.values[6] != 0 ? a.values[6] : b.values[6];
value.values[7] = mask.values[7] != 0 ? a.values[7] : b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_or(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] | b.values[0];
value.values[1] = a.values[1] | b.values[1];
value.values[2] = a.values[2] | b.values[2];
value.values[3] = a.values[3] | b.values[3];
value.values[4] = a.values[4] | b.values[4];
value.values[5] = a.values[5] | b.values[5];
value.values[6] = a.values[6] | b.values[6];
value.values[7] = a.values[7] | b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_and(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] & b.values[0];
value.values[1] = a.values[1] & b.values[1];
value.values[2] = a.values[2] & b.values[2];
value.values[3] = a.values[3] & b.values[3];
value.values[4] = a.values[4] & b.values[4];
value.values[5] = a.values[5] & b.values[5];
value.values[6] = a.values[6] & b.values[6];
value.values[7] = a.values[7] & b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_xor(kinc_int16x8_t a, kinc_int16x8_t b) {
kinc_int16x8_t value;
value.values[0] = a.values[0] ^ b.values[0];
value.values[1] = a.values[1] ^ b.values[1];
value.values[2] = a.values[2] ^ b.values[2];
value.values[3] = a.values[3] ^ b.values[3];
value.values[4] = a.values[4] ^ b.values[4];
value.values[5] = a.values[5] ^ b.values[5];
value.values[6] = a.values[6] ^ b.values[6];
value.values[7] = a.values[7] ^ b.values[7];
return value;
}
static inline kinc_int16x8_t kinc_int16x8_not(kinc_int16x8_t t) {
kinc_int16x8_t value;
value.values[0] = ~t.values[0];
value.values[1] = ~t.values[1];
value.values[2] = ~t.values[2];
value.values[3] = ~t.values[3];
value.values[4] = ~t.values[4];
value.values[5] = ~t.values[5];
value.values[6] = ~t.values[6];
value.values[7] = ~t.values[7];
return value;
}
#endif
#ifdef __cplusplus
}
#endif
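
/* Illustrative usage sketch (not part of the original header; assumes this header is
   included): half-wave rectifying a 16-bit audio buffer eight lanes at a time, so
   negative samples become zero. rectify8 is a hypothetical helper; count is assumed
   to be a multiple of 8. */
static void rectify8(int16_t *samples, int count) {
	kinc_int16x8_t zero = kinc_int16x8_load_all(0);
	for (int i = 0; i < count; i += 8) {
		kinc_int16x8_t v = kinc_int16x8_intrin_load_unaligned(&samples[i]);
		kinc_int16x8_store_unaligned(&samples[i], kinc_int16x8_max(v, zero));
	}
}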


@@ -0,0 +1,387 @@
#pragma once
#include "types.h"
/*! \file int32x4.h
    \brief Provides 128-bit, four-element signed 32-bit integer SIMD operations, mapped to their SSE2 or Neon equivalents.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
static inline kinc_int32x4_t kinc_int32x4_intrin_load(const int32_t *values) {
return _mm_load_si128((const kinc_int32x4_t *)values);
}
static inline kinc_int32x4_t kinc_int32x4_intrin_load_unaligned(const int32_t *values) {
return _mm_loadu_si128((const kinc_int32x4_t *)values);
}
static inline kinc_int32x4_t kinc_int32x4_load(const int32_t values[4]) {
return _mm_set_epi32(values[3], values[2], values[1], values[0]);
}
static inline kinc_int32x4_t kinc_int32x4_load_all(int32_t t) {
return _mm_set1_epi32(t);
}
static inline void kinc_int32x4_store(int32_t *destination, kinc_int32x4_t value) {
_mm_store_si128((kinc_int32x4_t *)destination, value);
}
static inline void kinc_int32x4_store_unaligned(int32_t *destination, kinc_int32x4_t value) {
_mm_storeu_si128((kinc_int32x4_t *)destination, value);
}
static inline int32_t kinc_int32x4_get(kinc_int32x4_t t, int index) {
union {
__m128i value;
int32_t elements[4];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_int32x4_t kinc_int32x4_add(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_add_epi32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_sub(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_sub_epi32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_max(kinc_int32x4_t a, kinc_int32x4_t b) {
__m128i mask = _mm_cmpgt_epi32(a, b);
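	// SSE2 has no packed signed 32-bit max (_mm_max_epi32 arrived with SSE4.1), so select a or b through the compare mask.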
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int32x4_t kinc_int32x4_min(kinc_int32x4_t a, kinc_int32x4_t b) {
__m128i mask = _mm_cmplt_epi32(a, b);
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpeq(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_cmpeq_epi32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpge(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_or_si128(_mm_cmpgt_epi32(a, b), _mm_cmpeq_epi32(a, b));
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpgt(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_cmpgt_epi32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmple(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_or_si128(_mm_cmplt_epi32(a, b), _mm_cmpeq_epi32(a, b));
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmplt(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_cmplt_epi32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpneq(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_andnot_si128(_mm_cmpeq_epi32(a, b), _mm_set1_epi32(0xffffffff));
}
static inline kinc_int32x4_t kinc_int32x4_sel(kinc_int32x4_t a, kinc_int32x4_t b, kinc_int32x4_mask_t mask) {
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int32x4_t kinc_int32x4_or(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_or_si128(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_and(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_and_si128(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_xor(kinc_int32x4_t a, kinc_int32x4_t b) {
return _mm_xor_si128(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_not(kinc_int32x4_t t) {
return _mm_xor_si128(t, _mm_set1_epi32(0xffffffff));
}
#elif defined(KINC_NEON)
static inline kinc_int32x4_t kinc_int32x4_intrin_load(const int32_t *values) {
return vld1q_s32(values);
}
static inline kinc_int32x4_t kinc_int32x4_intrin_load_unaligned(const int32_t *values) {
return kinc_int32x4_intrin_load(values);
}
static inline kinc_int32x4_t kinc_int32x4_load(const int32_t values[4]) {
return (kinc_int32x4_t){values[0], values[1], values[2], values[3]};
}
static inline kinc_int32x4_t kinc_int32x4_load_all(int32_t t) {
return (kinc_int32x4_t){t, t, t, t};
}
static inline void kinc_int32x4_store(int32_t *destination, kinc_int32x4_t value) {
vst1q_s32(destination, value);
}
static inline void kinc_int32x4_store_unaligned(int32_t *destination, kinc_int32x4_t value) {
kinc_int32x4_store(destination, value);
}
static inline int32_t kinc_int32x4_get(kinc_int32x4_t t, int index) {
return t[index];
}
static inline kinc_int32x4_t kinc_int32x4_add(kinc_int32x4_t a, kinc_int32x4_t b) {
return vaddq_s32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_sub(kinc_int32x4_t a, kinc_int32x4_t b) {
return vsubq_s32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_max(kinc_int32x4_t a, kinc_int32x4_t b) {
return vmaxq_s32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_min(kinc_int32x4_t a, kinc_int32x4_t b) {
return vminq_s32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpeq(kinc_int32x4_t a, kinc_int32x4_t b) {
return vceqq_s32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpge(kinc_int32x4_t a, kinc_int32x4_t b) {
return vcgeq_s32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpgt(kinc_int32x4_t a, kinc_int32x4_t b) {
return vcgtq_s32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmple(kinc_int32x4_t a, kinc_int32x4_t b) {
return vcleq_s32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmplt(kinc_int32x4_t a, kinc_int32x4_t b) {
return vcltq_s32(a, b);
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpneq(kinc_int32x4_t a, kinc_int32x4_t b) {
return vmvnq_u32(vceqq_s32(a, b));
}
static inline kinc_int32x4_t kinc_int32x4_sel(kinc_int32x4_t a, kinc_int32x4_t b, kinc_int32x4_mask_t mask) {
return vbslq_s32(mask, a, b);
}
static inline kinc_int32x4_t kinc_int32x4_or(kinc_int32x4_t a, kinc_int32x4_t b) {
return vorrq_s32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_and(kinc_int32x4_t a, kinc_int32x4_t b) {
return vandq_s32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_xor(kinc_int32x4_t a, kinc_int32x4_t b) {
return veorq_s32(a, b);
}
static inline kinc_int32x4_t kinc_int32x4_not(kinc_int32x4_t t) {
return vmvnq_s32(t);
}
#else
static inline kinc_int32x4_t kinc_int32x4_intrin_load(const int32_t *values) {
kinc_int32x4_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_intrin_load_unaligned(const int32_t *values) {
return kinc_int32x4_intrin_load(values);
}
static inline kinc_int32x4_t kinc_int32x4_load(const int32_t values[4]) {
kinc_int32x4_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_load_all(int32_t t) {
kinc_int32x4_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
return value;
}
static inline void kinc_int32x4_store(int32_t *destination, kinc_int32x4_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
}
static inline void kinc_int32x4_store_unaligned(int32_t *destination, kinc_int32x4_t value) {
kinc_int32x4_store(destination, value);
}
static inline int32_t kinc_int32x4_get(kinc_int32x4_t t, int index) {
return t.values[index];
}
static inline kinc_int32x4_t kinc_int32x4_add(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_sub(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_max(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] > b.values[0] ? a.values[0] : b.values[0];
value.values[1] = a.values[1] > b.values[1] ? a.values[1] : b.values[1];
value.values[2] = a.values[2] > b.values[2] ? a.values[2] : b.values[2];
value.values[3] = a.values[3] > b.values[3] ? a.values[3] : b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_min(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] > b.values[0] ? b.values[0] : a.values[0];
value.values[1] = a.values[1] > b.values[1] ? b.values[1] : a.values[1];
value.values[2] = a.values[2] > b.values[2] ? b.values[2] : a.values[2];
value.values[3] = a.values[3] > b.values[3] ? b.values[3] : a.values[3];
return value;
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpeq(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_mask_t mask;
mask.values[0] = a.values[0] == b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] == b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] == b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] == b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpge(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_mask_t mask;
mask.values[0] = a.values[0] >= b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] >= b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] >= b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] >= b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpgt(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_mask_t mask;
mask.values[0] = a.values[0] > b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] > b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] > b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] > b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmple(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_mask_t mask;
mask.values[0] = a.values[0] <= b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] <= b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] <= b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] <= b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmplt(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_mask_t mask;
mask.values[0] = a.values[0] < b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] < b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] < b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] < b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_int32x4_mask_t kinc_int32x4_cmpneq(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_mask_t mask;
mask.values[0] = a.values[0] != b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] != b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] != b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] != b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_int32x4_t kinc_int32x4_sel(kinc_int32x4_t a, kinc_int32x4_t b, kinc_int32x4_mask_t mask) {
kinc_int32x4_t value;
value.values[0] = mask.values[0] != 0 ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0 ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0 ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0 ? a.values[3] : b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_or(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] | b.values[0];
value.values[1] = a.values[1] | b.values[1];
value.values[2] = a.values[2] | b.values[2];
value.values[3] = a.values[3] | b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_and(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] & b.values[0];
value.values[1] = a.values[1] & b.values[1];
value.values[2] = a.values[2] & b.values[2];
value.values[3] = a.values[3] & b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_xor(kinc_int32x4_t a, kinc_int32x4_t b) {
kinc_int32x4_t value;
value.values[0] = a.values[0] ^ b.values[0];
value.values[1] = a.values[1] ^ b.values[1];
value.values[2] = a.values[2] ^ b.values[2];
value.values[3] = a.values[3] ^ b.values[3];
return value;
}
static inline kinc_int32x4_t kinc_int32x4_not(kinc_int32x4_t t) {
kinc_int32x4_t value;
value.values[0] = ~t.values[0];
value.values[1] = ~t.values[1];
value.values[2] = ~t.values[2];
value.values[3] = ~t.values[3];
return value;
}
#endif
#ifdef __cplusplus
}
#endif
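
/* Illustrative usage sketch (not part of the original header; assumes this header is
   included): absolute value of four 32-bit integers via compare-and-select. abs4 is a
   hypothetical helper; INT32_MIN overflows on negation, as it does in scalar code. */
static kinc_int32x4_t abs4(kinc_int32x4_t v) {
	kinc_int32x4_t zero = kinc_int32x4_load_all(0);
	kinc_int32x4_mask_t negative = kinc_int32x4_cmplt(v, zero);
	return kinc_int32x4_sel(kinc_int32x4_sub(zero, v), v, negative);
}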


@@ -0,0 +1,620 @@
#pragma once
#include "types.h"
/*! \file int8x16.h
    \brief Provides 128-bit, sixteen-element signed 8-bit integer SIMD operations, mapped to their SSE2 or Neon equivalents.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
static inline kinc_int8x16_t kinc_int8x16_intrin_load(const int8_t *values) {
return _mm_load_si128((const kinc_int8x16_t *)values);
}
static inline kinc_int8x16_t kinc_int8x16_intrin_load_unaligned(const int8_t *values) {
return _mm_loadu_si128((const kinc_int8x16_t *)values);
}
static inline kinc_int8x16_t kinc_int8x16_load(const int8_t values[16]) {
return _mm_set_epi8(values[15], values[14], values[13], values[12], values[11], values[10], values[9], values[8], values[7], values[6], values[5],
values[4], values[3], values[2], values[1], values[0]);
}
static inline kinc_int8x16_t kinc_int8x16_load_all(int8_t t) {
return _mm_set1_epi8(t);
}
static inline void kinc_int8x16_store(int8_t *destination, kinc_int8x16_t value) {
_mm_store_si128((kinc_int8x16_t *)destination, value);
}
static inline void kinc_int8x16_store_unaligned(int8_t *destination, kinc_int8x16_t value) {
_mm_storeu_si128((kinc_int8x16_t *)destination, value);
}
static inline int8_t kinc_int8x16_get(kinc_int8x16_t t, int index) {
union {
__m128i value;
int8_t elements[16];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_int8x16_t kinc_int8x16_add(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_add_epi8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_sub(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_sub_epi8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_max(kinc_int8x16_t a, kinc_int8x16_t b) {
__m128i mask = _mm_cmpgt_epi8(a, b);
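	// SSE2 has no packed signed 8-bit max (_mm_max_epi8 arrived with SSE4.1), so select a or b through the compare mask.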
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int8x16_t kinc_int8x16_min(kinc_int8x16_t a, kinc_int8x16_t b) {
__m128i mask = _mm_cmplt_epi8(a, b);
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpeq(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_cmpeq_epi8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpge(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_or_si128(_mm_cmpgt_epi8(a, b), _mm_cmpeq_epi8(a, b));
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpgt(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_cmpgt_epi8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmple(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_or_si128(_mm_cmplt_epi8(a, b), _mm_cmpeq_epi8(a, b));
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmplt(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_cmplt_epi8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpneq(kinc_int8x16_t a, kinc_int8x16_t b) {
__m128i mask = _mm_cmpeq_epi8(a, b);
__m128i all = _mm_set1_epi32(0xffffffff);
return _mm_andnot_si128(mask, all);
}
static inline kinc_int8x16_t kinc_int8x16_sel(kinc_int8x16_t a, kinc_int8x16_t b, kinc_int8x16_mask_t mask) {
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_int8x16_t kinc_int8x16_or(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_or_si128(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_and(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_and_si128(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_xor(kinc_int8x16_t a, kinc_int8x16_t b) {
return _mm_xor_si128(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_not(kinc_int8x16_t t) {
__m128i mask = _mm_set1_epi32(0xffffffff);
return _mm_xor_si128(t, mask);
}
#elif defined(KINC_NEON)
static inline kinc_int8x16_t kinc_int8x16_intrin_load(const int8_t *values) {
return vld1q_s8(values);
}
static inline kinc_int8x16_t kinc_int8x16_intrin_load_unaligned(const int8_t *values) {
return kinc_int8x16_intrin_load(values);
}
static inline kinc_int8x16_t kinc_int8x16_load(const int8_t values[16]) {
return (kinc_int8x16_t){values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7],
values[8], values[9], values[10], values[11], values[12], values[13], values[14], values[15]};
}
static inline kinc_int8x16_t kinc_int8x16_load_all(int8_t t) {
return (kinc_int8x16_t){t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t};
}
static inline void kinc_int8x16_store(int8_t *destination, kinc_int8x16_t value) {
vst1q_s8(destination, value);
}
static inline void kinc_int8x16_store_unaligned(int8_t *destination, kinc_int8x16_t value) {
kinc_int8x16_store(destination, value);
}
static inline int8_t kinc_int8x16_get(kinc_int8x16_t t, int index) {
return t[index];
}
static inline kinc_int8x16_t kinc_int8x16_add(kinc_int8x16_t a, kinc_int8x16_t b) {
return vaddq_s8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_sub(kinc_int8x16_t a, kinc_int8x16_t b) {
return vsubq_s8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_max(kinc_int8x16_t a, kinc_int8x16_t b) {
return vmaxq_s8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_min(kinc_int8x16_t a, kinc_int8x16_t b) {
return vminq_s8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpeq(kinc_int8x16_t a, kinc_int8x16_t b) {
return vceqq_s8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpge(kinc_int8x16_t a, kinc_int8x16_t b) {
return vcgeq_s8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpgt(kinc_int8x16_t a, kinc_int8x16_t b) {
return vcgtq_s8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmple(kinc_int8x16_t a, kinc_int8x16_t b) {
return vcleq_s8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmplt(kinc_int8x16_t a, kinc_int8x16_t b) {
return vcltq_s8(a, b);
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpneq(kinc_int8x16_t a, kinc_int8x16_t b) {
return vmvnq_u8(vceqq_s8(a, b));
}
static inline kinc_int8x16_t kinc_int8x16_sel(kinc_int8x16_t a, kinc_int8x16_t b, kinc_int8x16_mask_t mask) {
return vbslq_s8(mask, a, b);
}
static inline kinc_int8x16_t kinc_int8x16_or(kinc_int8x16_t a, kinc_int8x16_t b) {
return vorrq_s8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_and(kinc_int8x16_t a, kinc_int8x16_t b) {
return vandq_s8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_xor(kinc_int8x16_t a, kinc_int8x16_t b) {
return veorq_s8(a, b);
}
static inline kinc_int8x16_t kinc_int8x16_not(kinc_int8x16_t t) {
return vmvnq_s8(t);
}
#else
static inline kinc_int8x16_t kinc_int8x16_intrin_load(const int8_t *values) {
kinc_int8x16_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
value.values[8] = values[8];
value.values[9] = values[9];
value.values[10] = values[10];
value.values[11] = values[11];
value.values[12] = values[12];
value.values[13] = values[13];
value.values[14] = values[14];
value.values[15] = values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_intrin_load_unaligned(const int8_t *values) {
return kinc_int8x16_intrin_load(values);
}
static inline kinc_int8x16_t kinc_int8x16_load(const int8_t values[16]) {
kinc_int8x16_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
value.values[8] = values[8];
value.values[9] = values[9];
value.values[10] = values[10];
value.values[11] = values[11];
value.values[12] = values[12];
value.values[13] = values[13];
value.values[14] = values[14];
value.values[15] = values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_load_all(int8_t t) {
kinc_int8x16_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
value.values[4] = t;
value.values[5] = t;
value.values[6] = t;
value.values[7] = t;
value.values[8] = t;
value.values[9] = t;
value.values[10] = t;
value.values[11] = t;
value.values[12] = t;
value.values[13] = t;
value.values[14] = t;
value.values[15] = t;
return value;
}
static inline void kinc_int8x16_store(int8_t *destination, kinc_int8x16_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
destination[4] = value.values[4];
destination[5] = value.values[5];
destination[6] = value.values[6];
destination[7] = value.values[7];
destination[8] = value.values[8];
destination[9] = value.values[9];
destination[10] = value.values[10];
destination[11] = value.values[11];
destination[12] = value.values[12];
destination[13] = value.values[13];
destination[14] = value.values[14];
destination[15] = value.values[15];
}
static inline void kinc_int8x16_store_unaligned(int8_t *destination, kinc_int8x16_t value) {
kinc_int8x16_store(destination, value);
}
static inline int8_t kinc_int8x16_get(kinc_int8x16_t t, int index) {
return t.values[index];
}
static inline kinc_int8x16_t kinc_int8x16_add(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
value.values[4] = a.values[4] + b.values[4];
value.values[5] = a.values[5] + b.values[5];
value.values[6] = a.values[6] + b.values[6];
value.values[7] = a.values[7] + b.values[7];
value.values[8] = a.values[8] + b.values[8];
value.values[9] = a.values[9] + b.values[9];
value.values[10] = a.values[10] + b.values[10];
value.values[11] = a.values[11] + b.values[11];
value.values[12] = a.values[12] + b.values[12];
value.values[13] = a.values[13] + b.values[13];
value.values[14] = a.values[14] + b.values[14];
value.values[15] = a.values[15] + b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_sub(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
value.values[4] = a.values[4] - b.values[4];
value.values[5] = a.values[5] - b.values[5];
value.values[6] = a.values[6] - b.values[6];
value.values[7] = a.values[7] - b.values[7];
value.values[8] = a.values[8] - b.values[8];
value.values[9] = a.values[9] - b.values[9];
value.values[10] = a.values[10] - b.values[10];
value.values[11] = a.values[11] - b.values[11];
value.values[12] = a.values[12] - b.values[12];
value.values[13] = a.values[13] - b.values[13];
value.values[14] = a.values[14] - b.values[14];
value.values[15] = a.values[15] - b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_max(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] > b.values[0] ? a.values[0] : b.values[0];
value.values[1] = a.values[1] > b.values[1] ? a.values[1] : b.values[1];
value.values[2] = a.values[2] > b.values[2] ? a.values[2] : b.values[2];
value.values[3] = a.values[3] > b.values[3] ? a.values[3] : b.values[3];
value.values[4] = a.values[4] > b.values[4] ? a.values[4] : b.values[4];
value.values[5] = a.values[5] > b.values[5] ? a.values[5] : b.values[5];
value.values[6] = a.values[6] > b.values[6] ? a.values[6] : b.values[6];
value.values[7] = a.values[7] > b.values[7] ? a.values[7] : b.values[7];
value.values[8] = a.values[8] > b.values[8] ? a.values[8] : b.values[8];
value.values[9] = a.values[9] > b.values[9] ? a.values[9] : b.values[9];
value.values[10] = a.values[10] > b.values[10] ? a.values[10] : b.values[10];
value.values[11] = a.values[11] > b.values[11] ? a.values[11] : b.values[11];
value.values[12] = a.values[12] > b.values[12] ? a.values[12] : b.values[12];
value.values[13] = a.values[13] > b.values[13] ? a.values[13] : b.values[13];
value.values[14] = a.values[14] > b.values[14] ? a.values[14] : b.values[14];
value.values[15] = a.values[15] > b.values[15] ? a.values[15] : b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_min(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] > b.values[0] ? b.values[0] : a.values[0];
value.values[1] = a.values[1] > b.values[1] ? b.values[1] : a.values[1];
value.values[2] = a.values[2] > b.values[2] ? b.values[2] : a.values[2];
value.values[3] = a.values[3] > b.values[3] ? b.values[3] : a.values[3];
value.values[4] = a.values[4] > b.values[4] ? b.values[4] : a.values[4];
value.values[5] = a.values[5] > b.values[5] ? b.values[5] : a.values[5];
value.values[6] = a.values[6] > b.values[6] ? b.values[6] : a.values[6];
value.values[7] = a.values[7] > b.values[7] ? b.values[7] : a.values[7];
value.values[8] = a.values[8] > b.values[8] ? b.values[8] : a.values[8];
value.values[9] = a.values[9] > b.values[9] ? b.values[9] : a.values[9];
value.values[10] = a.values[10] > b.values[10] ? b.values[10] : a.values[10];
value.values[11] = a.values[11] > b.values[11] ? b.values[11] : a.values[11];
value.values[12] = a.values[12] > b.values[12] ? b.values[12] : a.values[12];
value.values[13] = a.values[13] > b.values[13] ? b.values[13] : a.values[13];
value.values[14] = a.values[14] > b.values[14] ? b.values[14] : a.values[14];
value.values[15] = a.values[15] > b.values[15] ? b.values[15] : a.values[15];
return value;
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpeq(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_mask_t mask;
mask.values[0] = a.values[0] == b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] == b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] == b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] == b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] == b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] == b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] == b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] == b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] == b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] == b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] == b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] == b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] == b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] == b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] == b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] == b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpge(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_mask_t mask;
mask.values[0] = a.values[0] >= b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] >= b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] >= b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] >= b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] >= b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] >= b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] >= b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] >= b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] >= b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] >= b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] >= b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] >= b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] >= b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] >= b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] >= b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] >= b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpgt(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_mask_t mask;
mask.values[0] = a.values[0] > b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] > b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] > b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] > b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] > b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] > b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] > b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] > b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] > b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] > b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] > b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] > b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] > b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] > b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] > b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] > b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmple(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_mask_t mask;
mask.values[0] = a.values[0] <= b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] <= b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] <= b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] <= b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] <= b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] <= b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] <= b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] <= b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] <= b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] <= b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] <= b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] <= b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] <= b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] <= b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] <= b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] <= b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmplt(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_mask_t mask;
mask.values[0] = a.values[0] < b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] < b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] < b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] < b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] < b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] < b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] < b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] < b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] < b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] < b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] < b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] < b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] < b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] < b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] < b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] < b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_int8x16_mask_t kinc_int8x16_cmpneq(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_mask_t mask;
mask.values[0] = a.values[0] != b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] != b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] != b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] != b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] != b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] != b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] != b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] != b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] != b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] != b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] != b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] != b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] != b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] != b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] != b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] != b.values[15] ? 0xff : 0;
return mask;
}
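// Fallback select: any nonzero mask byte picks the lane from a, a zero byte picks it from b,
// matching the all-ones/all-zeros masks produced by the comparison functions above.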
static inline kinc_int8x16_t kinc_int8x16_sel(kinc_int8x16_t a, kinc_int8x16_t b, kinc_int8x16_mask_t mask) {
kinc_int8x16_t value;
value.values[0] = mask.values[0] != 0 ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0 ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0 ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0 ? a.values[3] : b.values[3];
value.values[4] = mask.values[4] != 0 ? a.values[4] : b.values[4];
value.values[5] = mask.values[5] != 0 ? a.values[5] : b.values[5];
value.values[6] = mask.values[6] != 0 ? a.values[6] : b.values[6];
value.values[7] = mask.values[7] != 0 ? a.values[7] : b.values[7];
value.values[8] = mask.values[8] != 0 ? a.values[8] : b.values[8];
value.values[9] = mask.values[9] != 0 ? a.values[9] : b.values[9];
value.values[10] = mask.values[10] != 0 ? a.values[10] : b.values[10];
value.values[11] = mask.values[11] != 0 ? a.values[11] : b.values[11];
value.values[12] = mask.values[12] != 0 ? a.values[12] : b.values[12];
value.values[13] = mask.values[13] != 0 ? a.values[13] : b.values[13];
value.values[14] = mask.values[14] != 0 ? a.values[14] : b.values[14];
value.values[15] = mask.values[15] != 0 ? a.values[15] : b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_or(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] | b.values[0];
value.values[1] = a.values[1] | b.values[1];
value.values[2] = a.values[2] | b.values[2];
value.values[3] = a.values[3] | b.values[3];
value.values[4] = a.values[4] | b.values[4];
value.values[5] = a.values[5] | b.values[5];
value.values[6] = a.values[6] | b.values[6];
value.values[7] = a.values[7] | b.values[7];
value.values[8] = a.values[8] | b.values[8];
value.values[9] = a.values[9] | b.values[9];
value.values[10] = a.values[10] | b.values[10];
value.values[11] = a.values[11] | b.values[11];
value.values[12] = a.values[12] | b.values[12];
value.values[13] = a.values[13] | b.values[13];
value.values[14] = a.values[14] | b.values[14];
value.values[15] = a.values[15] | b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_and(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] & b.values[0];
value.values[1] = a.values[1] & b.values[1];
value.values[2] = a.values[2] & b.values[2];
value.values[3] = a.values[3] & b.values[3];
value.values[4] = a.values[4] & b.values[4];
value.values[5] = a.values[5] & b.values[5];
value.values[6] = a.values[6] & b.values[6];
value.values[7] = a.values[7] & b.values[7];
value.values[8] = a.values[8] & b.values[8];
value.values[9] = a.values[9] & b.values[9];
value.values[10] = a.values[10] & b.values[10];
value.values[11] = a.values[11] & b.values[11];
value.values[12] = a.values[12] & b.values[12];
value.values[13] = a.values[13] & b.values[13];
value.values[14] = a.values[14] & b.values[14];
value.values[15] = a.values[15] & b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_xor(kinc_int8x16_t a, kinc_int8x16_t b) {
kinc_int8x16_t value;
value.values[0] = a.values[0] ^ b.values[0];
value.values[1] = a.values[1] ^ b.values[1];
value.values[2] = a.values[2] ^ b.values[2];
value.values[3] = a.values[3] ^ b.values[3];
value.values[4] = a.values[4] ^ b.values[4];
value.values[5] = a.values[5] ^ b.values[5];
value.values[6] = a.values[6] ^ b.values[6];
value.values[7] = a.values[7] ^ b.values[7];
value.values[8] = a.values[8] ^ b.values[8];
value.values[9] = a.values[9] ^ b.values[9];
value.values[10] = a.values[10] ^ b.values[10];
value.values[11] = a.values[11] ^ b.values[11];
value.values[12] = a.values[12] ^ b.values[12];
value.values[13] = a.values[13] ^ b.values[13];
value.values[14] = a.values[14] ^ b.values[14];
value.values[15] = a.values[15] ^ b.values[15];
return value;
}
static inline kinc_int8x16_t kinc_int8x16_not(kinc_int8x16_t t) {
kinc_int8x16_t value;
value.values[0] = ~t.values[0];
value.values[1] = ~t.values[1];
value.values[2] = ~t.values[2];
value.values[3] = ~t.values[3];
value.values[4] = ~t.values[4];
value.values[5] = ~t.values[5];
value.values[6] = ~t.values[6];
value.values[7] = ~t.values[7];
value.values[8] = ~t.values[8];
value.values[9] = ~t.values[9];
value.values[10] = ~t.values[10];
value.values[11] = ~t.values[11];
value.values[12] = ~t.values[12];
value.values[13] = ~t.values[13];
value.values[14] = ~t.values[14];
value.values[15] = ~t.values[15];
return value;
}
#endif
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,800 @@
#pragma once
#include "types.h"
#include <kinc/global.h>
#include <string.h>
/*! \file type_conversions.h
\brief Provides type casts and type conversions between all 128bit SIMD types
*/
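/* Minimal usage sketch (illustrative only, not part of the header):

   kinc_float32x4_t f = kinc_float32x4_load_all(1.0f);
   kinc_int32x4_t bits = kinc_float32x4_cast_to_int32x4(f);
   // Each lane of bits now holds the IEEE-754 bit pattern of 1.0f (0x3f800000);
   // these casts reinterpret bits, they do not convert values.
*/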
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
// Float32x4 ----> Other
static inline kinc_int32x4_t kinc_float32x4_cast_to_int32x4(kinc_float32x4_t t) {
return _mm_castps_si128(t);
}
static inline kinc_uint32x4_t kinc_float32x4_cast_to_uint32x4(kinc_float32x4_t t) {
return _mm_castps_si128(t);
}
static inline kinc_int16x8_t kinc_float32x4_cast_to_int16x8(kinc_float32x4_t t) {
return _mm_castps_si128(t);
}
static inline kinc_uint16x8_t kinc_float32x4_cast_to_uint16x8(kinc_float32x4_t t) {
return _mm_castps_si128(t);
}
static inline kinc_int8x16_t kinc_float32x4_cast_to_int8x16(kinc_float32x4_t t) {
return _mm_castps_si128(t);
}
static inline kinc_uint8x16_t kinc_float32x4_cast_to_uint8x16(kinc_float32x4_t t) {
return _mm_castps_si128(t);
}
// Int32x4 ----> Other
static inline kinc_float32x4_t kinc_int32x4_cast_to_float32x4(kinc_int32x4_t t) {
return _mm_castsi128_ps(t);
}
static inline kinc_uint32x4_t kinc_int32x4_cast_to_uint32x4(kinc_int32x4_t t) {
// Under SSE2, __m128i backs every integer vector type, so any incoming integer vector can be returned unchanged
return t;
}
static inline kinc_int16x8_t kinc_int32x4_cast_to_int16x8(kinc_int32x4_t t) {
return t;
}
static inline kinc_uint16x8_t kinc_int32x4_cast_to_uint16x8(kinc_int32x4_t t) {
return t;
}
static inline kinc_int8x16_t kinc_int32x4_cast_to_int8x16(kinc_int32x4_t t) {
return t;
}
static inline kinc_uint8x16_t kinc_int32x4_cast_to_uint8x16(kinc_int32x4_t t) {
return t;
}
// Unsigned Int32x4 ----> Other
static inline kinc_float32x4_t kinc_uint32x4_cast_to_float32x4(kinc_uint32x4_t t) {
return _mm_castsi128_ps(t);
}
static inline kinc_int32x4_t kinc_uint32x4_cast_to_int32x4(kinc_uint32x4_t t) {
return t;
}
static inline kinc_int16x8_t kinc_uint32x4_cast_to_int16x8(kinc_uint32x4_t t) {
return t;
}
static inline kinc_uint16x8_t kinc_uint32x4_cast_to_uint16x8(kinc_uint32x4_t t) {
return t;
}
static inline kinc_int8x16_t kinc_uint32x4_cast_to_int8x16(kinc_uint32x4_t t) {
return t;
}
static inline kinc_uint8x16_t kinc_uint32x4_cast_to_uint8x16(kinc_uint32x4_t t) {
return t;
}
// Int16x8 ----> Other
static inline kinc_float32x4_t kinc_int16x8_cast_to_float32x4(kinc_int16x8_t t) {
return _mm_castsi128_ps(t);
}
static inline kinc_int32x4_t kinc_int16x8_cast_to_int32x4(kinc_int16x8_t t) {
return t;
}
static inline kinc_uint32x4_t kinc_int16x8_cast_to_uint32x4(kinc_int16x8_t t) {
return t;
}
static inline kinc_uint16x8_t kinc_int16x8_cast_to_uint16x8(kinc_int16x8_t t) {
return t;
}
static inline kinc_int8x16_t kinc_int16x8_cast_to_int8x16(kinc_int16x8_t t) {
return t;
}
static inline kinc_uint8x16_t kinc_int16x8_cast_to_uint8x16(kinc_int16x8_t t) {
return t;
}
// Unsigned Int16x8 ----> Other
static inline kinc_float32x4_t kinc_uint16x8_cast_to_float32x4(kinc_uint16x8_t t) {
return _mm_castsi128_ps(t);
}
static inline kinc_int32x4_t kinc_uint16x8_cast_to_int32x4(kinc_uint16x8_t t) {
return t;
}
static inline kinc_uint32x4_t kinc_uint16x8_cast_to_uint32x4(kinc_uint16x8_t t) {
return t;
}
static inline kinc_int16x8_t kinc_uint16x8_cast_to_int16x8(kinc_uint16x8_t t) {
return t;
}
static inline kinc_int8x16_t kinc_uint16x8_cast_to_int8x16(kinc_uint16x8_t t) {
return t;
}
static inline kinc_uint8x16_t kinc_uint16x8_cast_to_uint8x16(kinc_uint16x8_t t) {
return t;
}
// Int8x16 ----> Other
static inline kinc_float32x4_t kinc_int8x16_cast_to_float32x4(kinc_int8x16_t t) {
return _mm_castsi128_ps(t);
}
static inline kinc_int32x4_t kinc_int8x16_cast_to_int32x4(kinc_int8x16_t t) {
return t;
}
static inline kinc_uint32x4_t kinc_int8x16_cast_to_uint32x4(kinc_int8x16_t t) {
return t;
}
static inline kinc_int16x8_t kinc_int8x16_cast_to_int16x8(kinc_int8x16_t t) {
return t;
}
static inline kinc_uint16x8_t kinc_int8x16_cast_to_uint16x8(kinc_int8x16_t t) {
return t;
}
static inline kinc_uint8x16_t kinc_int8x16_cast_to_uint8x16(kinc_int8x16_t t) {
return t;
}
// Unsigned Int8x16 ----> Other
static inline kinc_float32x4_t kinc_uint8x16_cast_to_float32x4(kinc_uint8x16_t t) {
return _mm_castsi128_ps(t);
}
static inline kinc_int32x4_t kinc_uint8x16_cast_to_int32x4(kinc_uint8x16_t t) {
return t;
}
static inline kinc_uint32x4_t kinc_uint8x16_cast_to_uint32x4(kinc_uint8x16_t t) {
return t;
}
static inline kinc_int16x8_t kinc_uint8x16_cast_to_int16x8(kinc_uint8x16_t t) {
return t;
}
static inline kinc_uint16x8_t kinc_uint8x16_cast_to_uint16x8(kinc_uint8x16_t t) {
return t;
}
static inline kinc_int8x16_t kinc_uint8x16_cast_to_int8x16(kinc_uint8x16_t t) {
return t;
}
#elif defined(KINC_SSE)
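// With only SSE available, the integer vectors are plain structs, so these casts
// round-trip the bits through memory via _mm_storeu_ps/memcpy; compilers typically elide the copies.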
// Float32x4 ----> Other
static inline kinc_int32x4_t kinc_float32x4_cast_to_int32x4(kinc_float32x4_t t) {
float extracted[4];
_mm_storeu_ps(&extracted[0], t);
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &extracted[0], sizeof(extracted));
return cvt;
}
static inline kinc_uint32x4_t kinc_float32x4_cast_to_uint32x4(kinc_float32x4_t t) {
float extracted[4];
_mm_storeu_ps(&extracted[0], t);
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &extracted[0], sizeof(extracted));
return cvt;
}
static inline kinc_int16x8_t kinc_float32x4_cast_to_int16x8(kinc_float32x4_t t) {
float extracted[4];
_mm_storeu_ps(&extracted[0], t);
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &extracted[0], sizeof(extracted));
return cvt;
}
static inline kinc_uint16x8_t kinc_float32x4_cast_to_uint16x8(kinc_float32x4_t t) {
float extracted[4];
_mm_storeu_ps(&extracted[0], t);
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &extracted[0], sizeof(extracted));
return cvt;
}
static inline kinc_int8x16_t kinc_float32x4_cast_to_int8x16(kinc_float32x4_t t) {
float extracted[4];
_mm_storeu_ps(&extracted[0], t);
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &extracted[0], sizeof(extracted));
return cvt;
}
static inline kinc_uint8x16_t kinc_float32x4_cast_to_uint8x16(kinc_float32x4_t t) {
float extracted[4];
_mm_storeu_ps(&extracted[0], t);
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &extracted[0], sizeof(extracted));
return cvt;
}
// Int32x4 ----> Other
static inline kinc_float32x4_t kinc_int32x4_cast_to_float32x4(kinc_int32x4_t t) {
float cvt[4];
memcpy(&cvt[0], &t.values[0], sizeof(t));
return _mm_loadu_ps(&cvt[0]);
}
// Unsigned Int32x4 ----> Other
static inline kinc_float32x4_t kinc_uint32x4_cast_to_float32x4(kinc_uint32x4_t t) {
float cvt[4];
memcpy(&cvt[0], &t.values[0], sizeof(t));
return _mm_loadu_ps(&cvt[0]);
}
// Int16x8 ----> Other
static inline kinc_float32x4_t kinc_int16x8_cast_to_float32x4(kinc_int16x8_t t) {
float cvt[4];
memcpy(&cvt[0], &t.values[0], sizeof(t));
return _mm_loadu_ps(&cvt[0]);
}
// Unsigned Int16x8 ----> Other
static inline kinc_float32x4_t kinc_uint16x8_cast_to_float32x4(kinc_uint16x8_t t) {
float cvt[4];
memcpy(&cvt[0], &t.values[0], sizeof(t));
return _mm_loadu_ps(&cvt[0]);
}
// Int8x16 ----> Other
static inline kinc_float32x4_t kinc_int8x16_cast_to_float32x4(kinc_int8x16_t t) {
float cvt[4];
memcpy(&cvt[0], &t.values[0], sizeof(t));
return _mm_loadu_ps(&cvt[0]);
}
// Unsigned Int8x16 ----> Other
static inline kinc_float32x4_t kinc_uint8x16_cast_to_float32x4(kinc_uint8x16_t t) {
float cvt[4];
memcpy(&cvt[0], &t.values[0], sizeof(t));
return _mm_loadu_ps(&cvt[0]);
}
#elif defined(KINC_NEON)
// Float32x4 ----> Other
static inline kinc_int32x4_t kinc_float32x4_cast_to_int32x4(kinc_float32x4_t t) {
return vreinterpretq_s32_f32(t);
}
static inline kinc_uint32x4_t kinc_float32x4_cast_to_uint32x4(kinc_float32x4_t t) {
return vreinterpretq_u32_f32(t);
}
static inline kinc_int16x8_t kinc_float32x4_cast_to_int16x8(kinc_float32x4_t t) {
return vreinterpretq_s16_f32(t);
}
static inline kinc_uint16x8_t kinc_float32x4_cast_to_uint16x8(kinc_float32x4_t t) {
return vreinterpretq_u16_f32(t);
}
static inline kinc_int8x16_t kinc_float32x4_cast_to_int8x16(kinc_float32x4_t t) {
return vreinterpretq_s8_f32(t);
}
static inline kinc_uint8x16_t kinc_float32x4_cast_to_uint8x16(kinc_float32x4_t t) {
return vreinterpretq_u8_f32(t);
}
// Int32x4 ----> Other
static inline kinc_float32x4_t kinc_int32x4_cast_to_float32x4(kinc_int32x4_t t) {
return vreinterpretq_f32_s32(t);
}
static inline kinc_uint32x4_t kinc_int32x4_cast_to_uint32x4(kinc_int32x4_t t) {
return vreinterpretq_u32_s32(t);
}
static inline kinc_int16x8_t kinc_int32x4_cast_to_int16x8(kinc_int32x4_t t) {
return vreinterpretq_s16_s32(t);
}
static inline kinc_uint16x8_t kinc_int32x4_cast_to_uint16x8(kinc_int32x4_t t) {
return vreinterpretq_u16_s32(t);
}
static inline kinc_int8x16_t kinc_int32x4_cast_to_int8x16(kinc_int32x4_t t) {
return vreinterpretq_s8_s32(t);
}
static inline kinc_uint8x16_t kinc_int32x4_cast_to_uint8x16(kinc_int32x4_t t) {
return vreinterpretq_u8_s32(t);
}
// Unsigned Int32x4 ----> Other
static inline kinc_float32x4_t kinc_uint32x4_cast_to_float32x4(kinc_uint32x4_t t) {
return vreinterpretq_f32_u32(t);
}
static inline kinc_int32x4_t kinc_uint32x4_cast_to_int32x4(kinc_uint32x4_t t) {
return vreinterpretq_s32_u32(t);
}
static inline kinc_int16x8_t kinc_uint32x4_cast_to_int16x8(kinc_uint32x4_t t) {
return vreinterpretq_s16_u32(t);
}
static inline kinc_uint16x8_t kinc_uint32x4_cast_to_uint16x8(kinc_uint32x4_t t) {
return vreinterpretq_u16_u32(t);
}
static inline kinc_int8x16_t kinc_uint32x4_cast_to_int8x16(kinc_uint32x4_t t) {
return vreinterpretq_s8_u32(t);
}
static inline kinc_uint8x16_t kinc_uint32x4_cast_to_uint8x16(kinc_uint32x4_t t) {
return vreinterpretq_u8_u32(t);
}
// Int16x8 ----> Other
static inline kinc_float32x4_t kinc_int16x8_cast_to_float32x4(kinc_int16x8_t t) {
return vreinterpretq_f32_s16(t);
}
static inline kinc_int32x4_t kinc_int16x8_cast_to_int32x4(kinc_int16x8_t t) {
return vreinterpretq_s32_s16(t);
}
static inline kinc_uint32x4_t kinc_int16x8_cast_to_uint32x4(kinc_int16x8_t t) {
return vreinterpretq_u32_s16(t);
}
static inline kinc_uint16x8_t kinc_int16x8_cast_to_uint16x8(kinc_int16x8_t t) {
return vreinterpretq_u16_s16(t);
}
static inline kinc_int8x16_t kinc_int16x8_cast_to_int8x16(kinc_int16x8_t t) {
return vreinterpretq_s8_s16(t);
}
static inline kinc_uint8x16_t kinc_int16x8_cast_to_uint8x16(kinc_int16x8_t t) {
return vreinterpretq_u8_s16(t);
}
// Unsigned Int16x8 ----> Other
static inline kinc_float32x4_t kinc_uint16x8_cast_to_float32x4(kinc_uint16x8_t t) {
return vreinterpretq_f32_u16(t);
}
static inline kinc_int32x4_t kinc_uint16x8_cast_to_int32x4(kinc_uint16x8_t t) {
return vreinterpretq_s32_u16(t);
}
static inline kinc_uint32x4_t kinc_uint16x8_cast_to_uint32x4(kinc_uint16x8_t t) {
return vreinterpretq_u32_u16(t);
}
static inline kinc_int16x8_t kinc_uint16x8_cast_to_int16x8(kinc_uint16x8_t t) {
return vreinterpretq_s16_u16(t);
}
static inline kinc_int8x16_t kinc_uint16x8_cast_to_int8x16(kinc_uint16x8_t t) {
return vreinterpretq_s8_u16(t);
}
static inline kinc_uint8x16_t kinc_uint16x8_cast_to_uint8x16(kinc_uint16x8_t t) {
return vreinterpretq_u8_u16(t);
}
// Int8x16 ----> Other
static inline kinc_float32x4_t kinc_int8x16_cast_to_float32x4(kinc_int8x16_t t) {
return vreinterpretq_f32_s8(t);
}
static inline kinc_int32x4_t kinc_int8x16_cast_to_int32x4(kinc_int8x16_t t) {
return vreinterpretq_s32_s8(t);
}
static inline kinc_uint32x4_t kinc_int8x16_cast_to_uint32x4(kinc_int8x16_t t) {
return vreinterpretq_u32_s8(t);
}
static inline kinc_int16x8_t kinc_int8x16_cast_to_int16x8(kinc_int8x16_t t) {
return vreinterpretq_s16_s8(t);
}
static inline kinc_uint16x8_t kinc_int8x16_cast_to_uint16x8(kinc_int8x16_t t) {
return vreinterpretq_u16_s8(t);
}
static inline kinc_uint8x16_t kinc_int8x16_cast_to_uint8x16(kinc_int8x16_t t) {
return vreinterpretq_u8_s8(t);
}
// Unsigned Int8x16 ----> Other
static inline kinc_float32x4_t kinc_uint8x16_cast_to_float32x4(kinc_uint8x16_t t) {
return vreinterpretq_f32_u8(t);
}
static inline kinc_int32x4_t kinc_uint8x16_cast_to_int32x4(kinc_uint8x16_t t) {
return vreinterpretq_s32_u8(t);
}
static inline kinc_uint32x4_t kinc_uint8x16_cast_to_uint32x4(kinc_uint8x16_t t) {
return vreinterpretq_u32_u8(t);
}
static inline kinc_int16x8_t kinc_uint8x16_cast_to_int16x8(kinc_uint8x16_t t) {
return vreinterpretq_s16_u8(t);
}
static inline kinc_uint16x8_t kinc_uint8x16_cast_to_uint16x8(kinc_uint8x16_t t) {
return vreinterpretq_u16_u8(t);
}
static inline kinc_int8x16_t kinc_uint8x16_cast_to_int8x16(kinc_uint8x16_t t) {
return vreinterpretq_s8_u8(t);
}
// KINC_NOSIMD fallback casts for floats
#else
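// memcpy is the strict-aliasing-safe way to reinterpret raw bits between the fallback structs.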
// Float32x4 ----> Other
static inline kinc_int32x4_t kinc_float32x4_cast_to_int32x4(kinc_float32x4_t t) {
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint32x4_t kinc_float32x4_cast_to_uint32x4(kinc_float32x4_t t) {
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int16x8_t kinc_float32x4_cast_to_int16x8(kinc_float32x4_t t) {
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint16x8_t kinc_float32x4_cast_to_uint16x8(kinc_float32x4_t t) {
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int8x16_t kinc_float32x4_cast_to_int8x16(kinc_float32x4_t t) {
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint8x16_t kinc_float32x4_cast_to_uint8x16(kinc_float32x4_t t) {
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Int32x4 ----> Float32x4
static inline kinc_float32x4_t kinc_int32x4_cast_to_float32x4(kinc_int32x4_t t) {
kinc_float32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Unsigned Int32x4 ----> Float32x4
static inline kinc_float32x4_t kinc_uint32x4_cast_to_float32x4(kinc_uint32x4_t t) {
kinc_float32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Int16x8 ----> Float32x4
static inline kinc_float32x4_t kinc_int16x8_cast_to_float32x4(kinc_int16x8_t t) {
kinc_float32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Unsigned Int16x8 ----> Float32x4
static inline kinc_float32x4_t kinc_uint16x8_cast_to_float32x4(kinc_uint16x8_t t) {
kinc_float32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Int8x16 ----> Float32x4
static inline kinc_float32x4_t kinc_int8x16_cast_to_float32x4(kinc_int8x16_t t) {
kinc_float32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Unsigned Int8x16 ----> Float32x4
static inline kinc_float32x4_t kinc_uint8x16_cast_to_float32x4(kinc_uint8x16_t t) {
kinc_float32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
#endif // float casts (KINC_SSE2 / KINC_SSE / KINC_NEON / KINC_NOSIMD)
// Shared signed and unsigned integer vector casts for SSE and the SIMD fallback
#if !defined(KINC_SSE2) && (defined(KINC_SSE) || defined(KINC_NOSIMD))
// Int32x4 ----> Other
static inline kinc_uint32x4_t kinc_int32x4_cast_to_uint32x4(kinc_int32x4_t t) {
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int16x8_t kinc_int32x4_cast_to_int16x8(kinc_int32x4_t t) {
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint16x8_t kinc_int32x4_cast_to_uint16x8(kinc_int32x4_t t) {
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int8x16_t kinc_int32x4_cast_to_int8x16(kinc_int32x4_t t) {
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint8x16_t kinc_int32x4_cast_to_uint8x16(kinc_int32x4_t t) {
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Unsigned Int32x4 ----> Other
static inline kinc_int32x4_t kinc_uint32x4_cast_to_int32x4(kinc_uint32x4_t t) {
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int16x8_t kinc_uint32x4_cast_to_int16x8(kinc_uint32x4_t t) {
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint16x8_t kinc_uint32x4_cast_to_uint16x8(kinc_uint32x4_t t) {
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int8x16_t kinc_uint32x4_cast_to_int8x16(kinc_uint32x4_t t) {
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint8x16_t kinc_uint32x4_cast_to_uint8x16(kinc_uint32x4_t t) {
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Int16x8 ----> Other
static inline kinc_int32x4_t kinc_int16x8_cast_to_int32x4(kinc_int16x8_t t) {
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint32x4_t kinc_int16x8_cast_to_uint32x4(kinc_int16x8_t t) {
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint16x8_t kinc_int16x8_cast_to_uint16x8(kinc_int16x8_t t) {
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int8x16_t kinc_int16x8_cast_to_int8x16(kinc_int16x8_t t) {
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint8x16_t kinc_int16x8_cast_to_uint8x16(kinc_int16x8_t t) {
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Unsigned Int16x8 ----> Other
static inline kinc_int32x4_t kinc_uint16x8_cast_to_int32x4(kinc_uint16x8_t t) {
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint32x4_t kinc_uint16x8_cast_to_uint32x4(kinc_uint16x8_t t) {
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int16x8_t kinc_uint16x8_cast_to_int16x8(kinc_uint16x8_t t) {
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int8x16_t kinc_uint16x8_cast_to_int8x16(kinc_uint16x8_t t) {
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint8x16_t kinc_uint16x8_cast_to_uint8x16(kinc_uint16x8_t t) {
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Int8x16 ----> Other
static inline kinc_int32x4_t kinc_int8x16_cast_to_int32x4(kinc_int8x16_t t) {
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint32x4_t kinc_int8x16_cast_to_uint32x4(kinc_int8x16_t t) {
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int16x8_t kinc_int8x16_cast_to_int16x8(kinc_int8x16_t t) {
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint16x8_t kinc_int8x16_cast_to_uint16x8(kinc_int8x16_t t) {
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint8x16_t kinc_int8x16_cast_to_uint8x16(kinc_int8x16_t t) {
kinc_uint8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
// Unsigned Int8x16 ----> Other
static inline kinc_int32x4_t kinc_uint8x16_cast_to_int32x4(kinc_uint8x16_t t) {
kinc_int32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint32x4_t kinc_uint8x16_cast_to_uint32x4(kinc_uint8x16_t t) {
kinc_uint32x4_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int16x8_t kinc_uint8x16_cast_to_int16x8(kinc_uint8x16_t t) {
kinc_int16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_uint16x8_t kinc_uint8x16_cast_to_uint16x8(kinc_uint8x16_t t) {
kinc_uint16x8_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
static inline kinc_int8x16_t kinc_uint8x16_cast_to_int8x16(kinc_uint8x16_t t) {
kinc_int8x16_t cvt;
memcpy(&cvt.values[0], &t.values[0], sizeof(t));
return cvt;
}
#endif // !KINC_SSE2 && (KINC_SSE || KINC_NOSIMD)
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,180 @@
#pragma once
#include <kinc/global.h>
/*! \file types.h
\brief Provides 128bit SIMD types which are mapped to equivalent SSE or Neon types.
*/
#ifdef __cplusplus
extern "C" {
#endif
// Any level of AVX capability (could be AVX, AVX2, AVX-512, etc.)
// (Currently) only used to infer the existence of earlier SSE instruction sets
#if defined(__AVX__)
// Unfortunate situation here:
// MSVC does not provide compile-time macros for the following instruction sets,
// but their existence is implied by AVX and higher
#define KINC_SSE4_2
#define KINC_SSE4_1
#define KINC_SSSE3
#define KINC_SSE3
#endif
// SSE2 Capability check
// Note for Windows:
// _M_IX86_FP indicates SSE and SSE2 support for 32bit Windows programs only, and is undefined for other targets.
// SSE2 and earlier are guaranteed to be available in any 64bit Windows program.
#if defined(__SSE2__) || (_M_IX86_FP == 2) || (defined(KINC_WINDOWS) && defined(KINC_64))
#define KINC_SSE2
#endif
// SSE Capability check
#if defined(__SSE__) || _M_IX86_FP == 2 || _M_IX86_FP == 1 || (defined(KINC_WINDOWS) && !defined(__aarch64__)) || \
(defined(KINC_WINDOWSAPP) && !defined(__aarch64__)) || (defined(KINC_MACOS) && __x86_64)
#define KINC_SSE
#endif
// NEON Capability check
#if defined(KINC_IOS) || defined(KINC_SWITCH) || defined(__aarch64__) || defined(KINC_NEON)
#define KINC_NEON
#endif
// No SIMD Capabilities
#if !defined(KINC_SSE4_2) && !defined(KINC_SSE4_1) && !defined(KINC_SSSE3) && !defined(KINC_SSE3) && !defined(KINC_SSE2) && !defined(KINC_SSE) && \
!defined(KINC_NEON)
#define KINC_NOSIMD
#endif
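// The SIMD headers branch on these capability macros (e.g. KINC_SSE2 or KINC_SSE first,
// then KINC_NEON, then the scalar fallback) to pick exactly one implementation per build.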
#define KINC_SHUFFLE_TABLE(LANE_A1, LANE_A2, LANE_B1, LANE_B2) \
((((LANE_B2)&0x3) << 6) | (((LANE_B1)&0x3) << 4) | (((LANE_A2)&0x3) << 2) | (((LANE_A1)&0x3) << 0))
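// Builds an _MM_SHUFFLE-style immediate; e.g. KINC_SHUFFLE_TABLE(0, 1, 2, 3) == 0xE4, the identity shuffle.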
#if defined(KINC_SSE2)
// SSE-family headers include earlier revisions, i.e.
// SSE2 contains all of SSE
#include <emmintrin.h>
typedef __m128 kinc_float32x4_t;
typedef __m128 kinc_float32x4_mask_t;
typedef __m128i kinc_int8x16_t;
typedef __m128i kinc_int8x16_mask_t;
typedef __m128i kinc_uint8x16_t;
typedef __m128i kinc_uint8x16_mask_t;
typedef __m128i kinc_int16x8_t;
typedef __m128i kinc_int16x8_mask_t;
typedef __m128i kinc_uint16x8_t;
typedef __m128i kinc_uint16x8_mask_t;
typedef __m128i kinc_int32x4_t;
typedef __m128i kinc_int32x4_mask_t;
typedef __m128i kinc_uint32x4_t;
typedef __m128i kinc_uint32x4_mask_t;
#elif defined(KINC_SSE)
#include <xmmintrin.h>
typedef __m128 kinc_float32x4_t;
typedef __m128 kinc_float32x4_mask_t;
typedef struct kinc_int8x16 {
int8_t values[16];
} kinc_int8x16_t;
typedef struct kinc_uint8x16 {
uint8_t values[16];
} kinc_uint8x16_t;
typedef struct kinc_int16x8 {
int16_t values[8];
} kinc_int16x8_t;
typedef struct kinc_uint16x8 {
uint16_t values[8];
} kinc_uint16x8_t;
typedef struct kinc_int32x4 {
int32_t values[4];
} kinc_int32x4_t;
typedef struct kinc_uint32x4 {
uint32_t values[4];
} kinc_uint32x4_t;
typedef kinc_int8x16_t kinc_int8x16_mask_t;
typedef kinc_uint8x16_t kinc_uint8x16_mask_t;
typedef kinc_int16x8_t kinc_int16x8_mask_t;
typedef kinc_uint16x8_t kinc_uint16x8_mask_t;
typedef kinc_int32x4_t kinc_int32x4_mask_t;
typedef kinc_uint32x4_t kinc_uint32x4_mask_t;
#elif defined(KINC_NEON)
#include <arm_neon.h>
typedef float32x4_t kinc_float32x4_t;
typedef uint32x4_t kinc_float32x4_mask_t;
typedef int8x16_t kinc_int8x16_t;
typedef uint8x16_t kinc_int8x16_mask_t;
typedef uint8x16_t kinc_uint8x16_t;
typedef uint8x16_t kinc_uint8x16_mask_t;
typedef int16x8_t kinc_int16x8_t;
typedef uint16x8_t kinc_int16x8_mask_t;
typedef uint16x8_t kinc_uint16x8_t;
typedef uint16x8_t kinc_uint16x8_mask_t;
typedef int32x4_t kinc_int32x4_t;
typedef uint32x4_t kinc_int32x4_mask_t;
typedef uint32x4_t kinc_uint32x4_t;
typedef uint32x4_t kinc_uint32x4_mask_t;
#elif defined(KINC_NOSIMD)
#include <kinc/math/core.h>
typedef struct kinc_float32x4 {
float values[4];
} kinc_float32x4_t;
typedef kinc_float32x4_t kinc_float32x4_mask_t;
typedef struct kinc_int8x16 {
int8_t values[16];
} kinc_int8x16_t;
typedef struct kinc_uint8x16 {
uint8_t values[16];
} kinc_uint8x16_t;
typedef struct kinc_int16x8 {
int16_t values[8];
} kinc_int16x8_t;
typedef struct kinc_uint16x8 {
uint16_t values[8];
} kinc_uint16x8_t;
typedef struct kinc_int32x4 {
int32_t values[4];
} kinc_int32x4_t;
typedef struct kinc_uint32x4 {
uint32_t values[4];
} kinc_uint32x4_t;
typedef kinc_int8x16_t kinc_int8x16_mask_t;
typedef kinc_uint8x16_t kinc_uint8x16_mask_t;
typedef kinc_int16x8_t kinc_int16x8_mask_t;
typedef kinc_uint16x8_t kinc_uint16x8_mask_t;
typedef kinc_int32x4_t kinc_int32x4_mask_t;
typedef kinc_uint32x4_t kinc_uint32x4_mask_t;
#endif
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,501 @@
#pragma once
#include "types.h"
/*! \file uint16x8.h
\brief Provides 128bit eight-element unsigned 16-bit integer SIMD operations which are mapped to equivalent SSE2 or Neon operations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
static inline kinc_uint16x8_t kinc_uint16x8_intrin_load(const uint16_t *values) {
return _mm_load_si128((const kinc_uint16x8_t *)values);
}
static inline kinc_uint16x8_t kinc_uint16x8_intrin_load_unaligned(const uint16_t *values) {
return _mm_loadu_si128((const kinc_uint16x8_t *)values);
}
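// _mm_set_epi16 takes its arguments from the highest lane down, hence the reversed order below.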
static inline kinc_uint16x8_t kinc_uint16x8_load(const uint16_t values[8]) {
return _mm_set_epi16(values[7], values[6], values[5], values[4], values[3], values[2], values[1], values[0]);
}
static inline kinc_uint16x8_t kinc_uint16x8_load_all(uint16_t t) {
return _mm_set1_epi16(t);
}
static inline void kinc_uint16x8_store(uint16_t *destination, kinc_uint16x8_t value) {
_mm_store_si128((kinc_uint16x8_t *)destination, value);
}
static inline void kinc_uint16x8_store_unaligned(uint16_t *destination, kinc_uint16x8_t value) {
_mm_storeu_si128((kinc_uint16x8_t *)destination, value);
}
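// Reading a different union member than the one last written is well-defined in C,
// making the union a safe way to extract a single lane.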
static inline uint16_t kinc_uint16x8_get(kinc_uint16x8_t t, int index) {
union {
__m128i value;
uint16_t elements[8];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_uint16x8_t kinc_uint16x8_add(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_add_epi16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_sub(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_sub_epi16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpeq(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_cmpeq_epi16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpneq(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_andnot_si128(_mm_cmpeq_epi16(a, b), _mm_set1_epi32(0xffffffff));
}
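// SSE2 has no unsigned 16-bit comparisons, so the functions below bias both operands
// by 0x8000 (flipping the sign bit) to map unsigned order onto signed order;
// e.g. 0xFFFF - 0x8000 = 0x7FFF (signed 32767) still compares greater than 0x0001 - 0x8000 = 0x8001 (signed -32767).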
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpge(kinc_uint16x8_t a, kinc_uint16x8_t b) {
__m128i bias_by = _mm_set1_epi16((uint16_t)0x8000);
return _mm_or_si128(_mm_cmpgt_epi16(_mm_sub_epi16(a, bias_by), _mm_sub_epi16(b, bias_by)), _mm_cmpeq_epi16(a, b));
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpgt(kinc_uint16x8_t a, kinc_uint16x8_t b) {
__m128i bias_by = _mm_set1_epi16((uint16_t)0x8000);
return _mm_cmpgt_epi16(_mm_sub_epi16(a, bias_by), _mm_sub_epi16(b, bias_by));
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmple(kinc_uint16x8_t a, kinc_uint16x8_t b) {
__m128i bias_by = _mm_set1_epi16((uint16_t)0x8000);
return _mm_or_si128(_mm_cmplt_epi16(_mm_sub_epi16(a, bias_by), _mm_sub_epi16(b, bias_by)), _mm_cmpeq_epi16(a, b));
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmplt(kinc_uint16x8_t a, kinc_uint16x8_t b) {
__m128i bias_by = _mm_set1_epi16((uint16_t)0x8000);
return _mm_cmplt_epi16(_mm_sub_epi16(a, bias_by), _mm_sub_epi16(b, bias_by));
}
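// Branchless blend: b ^ (mask & (a ^ b)) yields a where mask bits are set and b elsewhere.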
static inline kinc_uint16x8_t kinc_uint16x8_sel(kinc_uint16x8_t a, kinc_uint16x8_t b, kinc_uint16x8_mask_t mask) {
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_uint16x8_t kinc_uint16x8_max(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return kinc_uint16x8_sel(a, b, kinc_uint16x8_cmpgt(a, b));
}
static inline kinc_uint16x8_t kinc_uint16x8_min(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return kinc_uint16x8_sel(a, b, kinc_uint16x8_cmplt(a, b));
}
static inline kinc_uint16x8_t kinc_uint16x8_or(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_or_si128(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_and(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_and_si128(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_xor(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return _mm_xor_si128(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_not(kinc_uint16x8_t t) {
return _mm_xor_si128(t, _mm_set1_epi32(0xffffffff));
}
#define kinc_uint16x8_shift_left(t, shift) _mm_slli_epi16((t), (shift))
#define kinc_uint16x8_shift_right(t, shift) _mm_srli_epi16((t), (shift))
#elif defined(KINC_NEON)
static inline kinc_uint16x8_t kinc_uint16x8_intrin_load(const uint16_t *values) {
return vld1q_u16(values);
}
static inline kinc_uint16x8_t kinc_uint16x8_intrin_load_unaligned(const uint16_t *values) {
return kinc_uint16x8_intrin_load(values);
}
static inline kinc_uint16x8_t kinc_uint16x8_load(const uint16_t values[8]) {
return (kinc_uint16x8_t){values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7]};
}
static inline kinc_uint16x8_t kinc_uint16x8_load_all(uint16_t t) {
return (kinc_uint16x8_t){t, t, t, t, t, t, t, t};
}
static inline void kinc_uint16x8_store(uint16_t *destination, kinc_uint16x8_t value) {
vst1q_u16(destination, value);
}
static inline void kinc_uint16x8_store_unaligned(uint16_t *destination, kinc_uint16x8_t value) {
kinc_uint16x8_store(destination, value);
}
static inline uint16_t kinc_uint16x8_get(kinc_uint16x8_t t, int index) {
return t[index];
}
static inline kinc_uint16x8_t kinc_uint16x8_add(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vaddq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_sub(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vsubq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_max(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vmaxq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_min(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vminq_u16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpeq(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vceqq_u16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpneq(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vmvnq_u16(vceqq_u16(a, b));
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpge(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vcgeq_u16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpgt(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vcgtq_u16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmple(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vcleq_u16(a, b);
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmplt(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vcltq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_sel(kinc_uint16x8_t a, kinc_uint16x8_t b, kinc_uint16x8_mask_t mask) {
return vbslq_u16(mask, a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_or(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vorrq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_and(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return vandq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_xor(kinc_uint16x8_t a, kinc_uint16x8_t b) {
return veorq_u16(a, b);
}
static inline kinc_uint16x8_t kinc_uint16x8_not(kinc_uint16x8_t t) {
return vmvnq_u16(t);
}
#define kinc_uint16x8_shift_left(t, shift) vshlq_n_u16((t), (shift))
#define kinc_uint16x8_shift_right(t, shift) vshrq_n_u16((t), (shift))
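// Note: vshlq_n_u16/vshrq_n_u16 require the shift amount to be a compile-time constant.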
#else
static inline kinc_uint16x8_t kinc_uint16x8_intrin_load(const uint16_t *values) {
kinc_uint16x8_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_intrin_load_unaligned(const uint16_t *values) {
return kinc_uint16x8_intrin_load(values);
}
static inline kinc_uint16x8_t kinc_uint16x8_load(const uint16_t values[8]) {
kinc_uint16x8_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_load_all(uint16_t t) {
kinc_uint16x8_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
value.values[4] = t;
value.values[5] = t;
value.values[6] = t;
value.values[7] = t;
return value;
}
static inline void kinc_uint16x8_store(uint16_t *destination, kinc_uint16x8_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
destination[4] = value.values[4];
destination[5] = value.values[5];
destination[6] = value.values[6];
destination[7] = value.values[7];
}
static inline void kinc_uint16x8_store_unaligned(uint16_t *destination, kinc_uint16x8_t value) {
kinc_uint16x8_store(destination, value);
}
static inline uint16_t kinc_uint16x8_get(kinc_uint16x8_t t, int index) {
return t.values[index];
}
static inline kinc_uint16x8_t kinc_uint16x8_add(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
value.values[4] = a.values[4] + b.values[4];
value.values[5] = a.values[5] + b.values[5];
value.values[6] = a.values[6] + b.values[6];
value.values[7] = a.values[7] + b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_sub(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
value.values[4] = a.values[4] - b.values[4];
value.values[5] = a.values[5] - b.values[5];
value.values[6] = a.values[6] - b.values[6];
value.values[7] = a.values[7] - b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_max(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] > b.values[0] ? a.values[0] : b.values[0];
value.values[1] = a.values[1] > b.values[1] ? a.values[1] : b.values[1];
value.values[2] = a.values[2] > b.values[2] ? a.values[2] : b.values[2];
value.values[3] = a.values[3] > b.values[3] ? a.values[3] : b.values[3];
value.values[4] = a.values[4] > b.values[4] ? a.values[4] : b.values[4];
value.values[5] = a.values[5] > b.values[5] ? a.values[5] : b.values[5];
value.values[6] = a.values[6] > b.values[6] ? a.values[6] : b.values[6];
value.values[7] = a.values[7] > b.values[7] ? a.values[7] : b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_min(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] > b.values[0] ? b.values[0] : a.values[0];
value.values[1] = a.values[1] > b.values[1] ? b.values[1] : a.values[1];
value.values[2] = a.values[2] > b.values[2] ? b.values[2] : a.values[2];
value.values[3] = a.values[3] > b.values[3] ? b.values[3] : a.values[3];
value.values[4] = a.values[4] > b.values[4] ? b.values[4] : a.values[4];
value.values[5] = a.values[5] > b.values[5] ? b.values[5] : a.values[5];
value.values[6] = a.values[6] > b.values[6] ? b.values[6] : a.values[6];
value.values[7] = a.values[7] > b.values[7] ? b.values[7] : a.values[7];
return value;
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpeq(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_mask_t mask;
mask.values[0] = a.values[0] == b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] == b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] == b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] == b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] == b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] == b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] == b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] == b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpneq(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_mask_t mask;
mask.values[0] = a.values[0] != b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] != b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] != b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] != b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] != b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] != b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] != b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] != b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpge(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_mask_t mask;
mask.values[0] = a.values[0] >= b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] >= b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] >= b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] >= b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] >= b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] >= b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] >= b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] >= b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmpgt(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_mask_t mask;
mask.values[0] = a.values[0] > b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] > b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] > b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] > b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] > b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] > b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] > b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] > b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmple(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_mask_t mask;
mask.values[0] = a.values[0] <= b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] <= b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] <= b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] <= b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] <= b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] <= b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] <= b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] <= b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_uint16x8_mask_t kinc_uint16x8_cmplt(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_mask_t mask;
mask.values[0] = a.values[0] < b.values[0] ? 0xffff : 0;
mask.values[1] = a.values[1] < b.values[1] ? 0xffff : 0;
mask.values[2] = a.values[2] < b.values[2] ? 0xffff : 0;
mask.values[3] = a.values[3] < b.values[3] ? 0xffff : 0;
mask.values[4] = a.values[4] < b.values[4] ? 0xffff : 0;
mask.values[5] = a.values[5] < b.values[5] ? 0xffff : 0;
mask.values[6] = a.values[6] < b.values[6] ? 0xffff : 0;
mask.values[7] = a.values[7] < b.values[7] ? 0xffff : 0;
return mask;
}
static inline kinc_uint16x8_t kinc_uint16x8_sel(kinc_uint16x8_t a, kinc_uint16x8_t b, kinc_uint16x8_mask_t mask) {
kinc_uint16x8_t value;
value.values[0] = mask.values[0] != 0 ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0 ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0 ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0 ? a.values[3] : b.values[3];
value.values[4] = mask.values[4] != 0 ? a.values[4] : b.values[4];
value.values[5] = mask.values[5] != 0 ? a.values[5] : b.values[5];
value.values[6] = mask.values[6] != 0 ? a.values[6] : b.values[6];
value.values[7] = mask.values[7] != 0 ? a.values[7] : b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_or(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] | b.values[0];
value.values[1] = a.values[1] | b.values[1];
value.values[2] = a.values[2] | b.values[2];
value.values[3] = a.values[3] | b.values[3];
value.values[4] = a.values[4] | b.values[4];
value.values[5] = a.values[5] | b.values[5];
value.values[6] = a.values[6] | b.values[6];
value.values[7] = a.values[7] | b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_and(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] & b.values[0];
value.values[1] = a.values[1] & b.values[1];
value.values[2] = a.values[2] & b.values[2];
value.values[3] = a.values[3] & b.values[3];
value.values[4] = a.values[4] & b.values[4];
value.values[5] = a.values[5] & b.values[5];
value.values[6] = a.values[6] & b.values[6];
value.values[7] = a.values[7] & b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_xor(kinc_uint16x8_t a, kinc_uint16x8_t b) {
kinc_uint16x8_t value;
value.values[0] = a.values[0] ^ b.values[0];
value.values[1] = a.values[1] ^ b.values[1];
value.values[2] = a.values[2] ^ b.values[2];
value.values[3] = a.values[3] ^ b.values[3];
value.values[4] = a.values[4] ^ b.values[4];
value.values[5] = a.values[5] ^ b.values[5];
value.values[6] = a.values[6] ^ b.values[6];
value.values[7] = a.values[7] ^ b.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_not(kinc_uint16x8_t t) {
kinc_uint16x8_t value;
value.values[0] = ~t.values[0];
value.values[1] = ~t.values[1];
value.values[2] = ~t.values[2];
value.values[3] = ~t.values[3];
value.values[4] = ~t.values[4];
value.values[5] = ~t.values[5];
value.values[6] = ~t.values[6];
value.values[7] = ~t.values[7];
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_shift_left(kinc_uint16x8_t t, const int shift) {
kinc_uint16x8_t value;
value.values[0] = t.values[0] << shift;
value.values[1] = t.values[1] << shift;
value.values[2] = t.values[2] << shift;
value.values[3] = t.values[3] << shift;
value.values[4] = t.values[4] << shift;
value.values[5] = t.values[5] << shift;
value.values[6] = t.values[6] << shift;
value.values[7] = t.values[7] << shift;
return value;
}
static inline kinc_uint16x8_t kinc_uint16x8_shift_right(kinc_uint16x8_t t, const int shift) {
kinc_uint16x8_t value;
value.values[0] = t.values[0] >> shift;
value.values[1] = t.values[1] >> shift;
value.values[2] = t.values[2] >> shift;
value.values[3] = t.values[3] >> shift;
value.values[4] = t.values[4] >> shift;
value.values[5] = t.values[5] >> shift;
value.values[6] = t.values[6] >> shift;
value.values[7] = t.values[7] >> shift;
return value;
}
#endif
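/* Editor's example (a sketch, not part of the original API): a per-lane
   maximum expressed through the compare/select primitives above, mirroring how
   the SSE2 uint32x4 backend derives max from sel and cmpgt. */
static inline kinc_uint16x8_t kinc_uint16x8_example_max(kinc_uint16x8_t a, kinc_uint16x8_t b) {
 return kinc_uint16x8_sel(a, b, kinc_uint16x8_cmpgt(a, b));
}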
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,417 @@
#pragma once
#include "types.h"
/*! \file uint32x4.h
\brief Provides 128bit four-element unsigned 32-bit integer SIMD operations which are mapped to equivalent SSE2 or Neon operations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
static inline kinc_uint32x4_t kinc_uint32x4_intrin_load(const uint32_t *values) {
return _mm_load_si128((const kinc_uint32x4_t *)values);
}
static inline kinc_uint32x4_t kinc_uint32x4_intrin_load_unaligned(const uint32_t *values) {
return _mm_loadu_si128((const kinc_uint32x4_t *)values);
}
static inline kinc_uint32x4_t kinc_uint32x4_load(const uint32_t values[4]) {
return _mm_set_epi32(values[3], values[2], values[1], values[0]);
}
static inline kinc_uint32x4_t kinc_uint32x4_load_all(uint32_t t) {
return _mm_set1_epi32(t);
}
static inline void kinc_uint32x4_store(uint32_t *destination, kinc_uint32x4_t value) {
_mm_store_si128((kinc_uint32x4_t *)destination, value);
}
static inline void kinc_uint32x4_store_unaligned(uint32_t *destination, kinc_uint32x4_t value) {
_mm_storeu_si128((kinc_uint32x4_t *)destination, value);
}
static inline uint32_t kinc_uint32x4_get(kinc_uint32x4_t t, int index) {
union {
__m128i value;
uint32_t elements[4];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_uint32x4_t kinc_uint32x4_add(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_add_epi32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_sub(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_sub_epi32(a, b);
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpeq(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_cmpeq_epi32(a, b);
}
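/* SSE2 has no "not equal" compare, so cmpneq is the bitwise NOT of cmpeq,
   formed with andnot against an all-ones register. */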
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpneq(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_andnot_si128(_mm_cmpeq_epi32(a, b), _mm_set1_epi32(0xffffffff));
}
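/* SSE2 only provides signed 32-bit compares. The ordered compares below bias
   both operands by 0x80000000 (flipping the sign bit), which maps unsigned
   order onto signed order, and then use the signed compare intrinsics. */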
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpge(kinc_uint32x4_t a, kinc_uint32x4_t b) {
__m128i bias_by = _mm_set1_epi32((uint32_t)0x80000000);
return _mm_or_si128(_mm_cmpgt_epi32(_mm_sub_epi32(a, bias_by), _mm_sub_epi32(b, bias_by)), _mm_cmpeq_epi32(a, b));
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpgt(kinc_uint32x4_t a, kinc_uint32x4_t b) {
__m128i bias_by = _mm_set1_epi32((uint32_t)0x80000000);
return _mm_cmpgt_epi32(_mm_sub_epi32(a, bias_by), _mm_sub_epi32(b, bias_by));
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmple(kinc_uint32x4_t a, kinc_uint32x4_t b) {
__m128i bias_by = _mm_set1_epi32((uint32_t)0x80000000);
return _mm_or_si128(_mm_cmplt_epi32(_mm_sub_epi32(a, bias_by), _mm_sub_epi32(b, bias_by)), _mm_cmpeq_epi32(a, b));
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmplt(kinc_uint32x4_t a, kinc_uint32x4_t b) {
__m128i bias_by = _mm_set1_epi32((uint32_t)0x80000000);
return _mm_cmplt_epi32(_mm_sub_epi32(a, bias_by), _mm_sub_epi32(b, bias_by));
}
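/* Branchless bitwise select: b ^ (mask & (a ^ b)) yields a where mask bits are
   set and b where they are clear. SSE2 lacks unsigned 32-bit min/max, so max
   and min below are in turn derived from sel and the unsigned compares. */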
static inline kinc_uint32x4_t kinc_uint32x4_sel(kinc_uint32x4_t a, kinc_uint32x4_t b, kinc_uint32x4_mask_t mask) {
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_uint32x4_t kinc_uint32x4_max(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return kinc_uint32x4_sel(a, b, kinc_uint32x4_cmpgt(a, b));
}
static inline kinc_uint32x4_t kinc_uint32x4_min(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return kinc_uint32x4_sel(a, b, kinc_uint32x4_cmplt(a, b));
}
static inline kinc_uint32x4_t kinc_uint32x4_or(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_or_si128(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_and(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_and_si128(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_xor(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return _mm_xor_si128(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_not(kinc_uint32x4_t t) {
return _mm_xor_si128(t, _mm_set1_epi32(0xffffffff));
}
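/* Macros rather than functions, presumably to keep one calling convention with
   the Neon backend below, where the shift count must be a compile-time
   constant. */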
#define kinc_uint32x4_shift_left(t, shift) _mm_slli_epi32((t), (shift))
#define kinc_uint32x4_shift_right(t, shift) _mm_srli_epi32((t), (shift))
#elif defined(KINC_NEON)
static inline kinc_uint32x4_t kinc_uint32x4_intrin_load(const uint32_t *values) {
return vld1q_u32(values);
}
static inline kinc_uint32x4_t kinc_uint32x4_intrin_load_unaligned(const uint32_t *values) {
return kinc_uint32x4_intrin_load(values);
}
static inline kinc_uint32x4_t kinc_uint32x4_load(const uint32_t values[4]) {
return (kinc_uint32x4_t){values[0], values[1], values[2], values[3]};
}
static inline kinc_uint32x4_t kinc_uint32x4_load_all(uint32_t t) {
return (kinc_uint32x4_t){t, t, t, t};
}
static inline void kinc_uint32x4_store(uint32_t *destination, kinc_uint32x4_t value) {
vst1q_u32(destination, value);
}
static inline void kinc_uint32x4_store_unaligned(uint32_t *destination, kinc_uint32x4_t value) {
kinc_uint32x4_store(destination, value);
}
static inline uint32_t kinc_uint32x4_get(kinc_uint32x4_t t, int index) {
return t[index];
}
static inline kinc_uint32x4_t kinc_uint32x4_add(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vaddq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_sub(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vsubq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_max(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vmaxq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_min(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vminq_u32(a, b);
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpeq(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vceqq_u32(a, b);
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpneq(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vmvnq_u32(vceqq_u32(a, b));
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpge(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vcgeq_u32(a, b);
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpgt(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vcgtq_u32(a, b);
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmple(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vcleq_u32(a, b);
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmplt(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vcltq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_sel(kinc_uint32x4_t a, kinc_uint32x4_t b, kinc_uint32x4_mask_t mask) {
return vbslq_u32(mask, a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_or(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vorrq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_and(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return vandq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_xor(kinc_uint32x4_t a, kinc_uint32x4_t b) {
return veorq_u32(a, b);
}
static inline kinc_uint32x4_t kinc_uint32x4_not(kinc_uint32x4_t t) {
return vmvnq_u32(t);
}
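/* vshlq_n_u32/vshrq_n_u32 require the shift count to be a compile-time
   constant, hence macros instead of functions. */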
#define kinc_uint32x4_shift_left(t, shift) vshlq_n_u32((t), (shift))
#define kinc_uint32x4_shift_right(t, shift) vshrq_n_u32((t), (shift))
#else
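/* Portable scalar fallback: every operation is performed lane by lane, and
   compare results are all-ones (0xffffffff) per matching lane to mirror the
   SIMD mask semantics. */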
static inline kinc_uint32x4_t kinc_uint32x4_intrin_load(const uint32_t *values) {
kinc_uint32x4_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_intrin_load_unaligned(const uint32_t *values) {
return kinc_uint32x4_intrin_load(values);
}
static inline kinc_uint32x4_t kinc_uint32x4_load(const uint32_t values[4]) {
kinc_uint32x4_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_load_all(uint32_t t) {
kinc_uint32x4_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
return value;
}
static inline void kinc_uint32x4_store(uint32_t *destination, kinc_uint32x4_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
}
static inline void kinc_uint32x4_store_unaligned(uint32_t *destination, kinc_uint32x4_t value) {
kinc_uint32x4_store(destination, value);
}
static inline uint32_t kinc_uint32x4_get(kinc_uint32x4_t t, int index) {
return t.values[index];
}
static inline kinc_uint32x4_t kinc_uint32x4_add(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_sub(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_max(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] > b.values[0] ? a.values[0] : b.values[0];
value.values[1] = a.values[1] > b.values[1] ? a.values[1] : b.values[1];
value.values[2] = a.values[2] > b.values[2] ? a.values[2] : b.values[2];
value.values[3] = a.values[3] > b.values[3] ? a.values[3] : b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_min(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] > b.values[0] ? b.values[0] : a.values[0];
value.values[1] = a.values[1] > b.values[1] ? b.values[1] : a.values[1];
value.values[2] = a.values[2] > b.values[2] ? b.values[2] : a.values[2];
value.values[3] = a.values[3] > b.values[3] ? b.values[3] : a.values[3];
return value;
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpeq(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_mask_t mask;
mask.values[0] = a.values[0] == b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] == b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] == b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] == b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpneq(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_mask_t mask;
mask.values[0] = a.values[0] != b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] != b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] != b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] != b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpge(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_mask_t mask;
mask.values[0] = a.values[0] >= b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] >= b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] >= b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] >= b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmpgt(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_mask_t mask;
mask.values[0] = a.values[0] > b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] > b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] > b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] > b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmple(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_mask_t mask;
mask.values[0] = a.values[0] <= b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] <= b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] <= b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] <= b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_uint32x4_mask_t kinc_uint32x4_cmplt(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_mask_t mask;
mask.values[0] = a.values[0] < b.values[0] ? 0xffffffff : 0;
mask.values[1] = a.values[1] < b.values[1] ? 0xffffffff : 0;
mask.values[2] = a.values[2] < b.values[2] ? 0xffffffff : 0;
mask.values[3] = a.values[3] < b.values[3] ? 0xffffffff : 0;
return mask;
}
static inline kinc_uint32x4_t kinc_uint32x4_sel(kinc_uint32x4_t a, kinc_uint32x4_t b, kinc_uint32x4_mask_t mask) {
kinc_uint32x4_t value;
value.values[0] = mask.values[0] != 0 ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0 ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0 ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0 ? a.values[3] : b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_or(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] | b.values[0];
value.values[1] = a.values[1] | b.values[1];
value.values[2] = a.values[2] | b.values[2];
value.values[3] = a.values[3] | b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_and(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] & b.values[0];
value.values[1] = a.values[1] & b.values[1];
value.values[2] = a.values[2] & b.values[2];
value.values[3] = a.values[3] & b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_xor(kinc_uint32x4_t a, kinc_uint32x4_t b) {
kinc_uint32x4_t value;
value.values[0] = a.values[0] ^ b.values[0];
value.values[1] = a.values[1] ^ b.values[1];
value.values[2] = a.values[2] ^ b.values[2];
value.values[3] = a.values[3] ^ b.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_not(kinc_uint32x4_t t) {
kinc_uint32x4_t value;
value.values[0] = ~t.values[0];
value.values[1] = ~t.values[1];
value.values[2] = ~t.values[2];
value.values[3] = ~t.values[3];
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_shift_left(kinc_uint32x4_t t, const int shift) {
kinc_uint32x4_t value;
value.values[0] = t.values[0] << shift;
value.values[1] = t.values[1] << shift;
value.values[2] = t.values[2] << shift;
value.values[3] = t.values[3] << shift;
return value;
}
static inline kinc_uint32x4_t kinc_uint32x4_shift_right(kinc_uint32x4_t t, const int shift) {
kinc_uint32x4_t value;
value.values[0] = t.values[0] >> shift;
value.values[1] = t.values[1] >> shift;
value.values[2] = t.values[2] >> shift;
value.values[3] = t.values[3] >> shift;
return value;
}
#endif
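/* Editor's example (a sketch, not part of the original API): a branchless
   per-lane clamp built from the primitives above; min and max are defined for
   every backend at this point, so this compiles on SSE2, Neon and the scalar
   fallback alike. */
static inline kinc_uint32x4_t kinc_uint32x4_example_clamp(kinc_uint32x4_t t, kinc_uint32x4_t lo, kinc_uint32x4_t hi) {
 return kinc_uint32x4_min(kinc_uint32x4_max(t, lo), hi);
}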
#ifdef __cplusplus
}
#endif

View File

@ -0,0 +1,615 @@
#pragma once
#include "types.h"
/*! \file uint8x16.h
\brief Provides 128bit sixteen-element unsigned 8-bit integer SIMD operations which are mapped to equivalent SSE2 or Neon operations.
*/
#ifdef __cplusplus
extern "C" {
#endif
#if defined(KINC_SSE2)
static inline kinc_uint8x16_t kinc_uint8x16_intrin_load(const uint8_t *values) {
return _mm_load_si128((const kinc_uint8x16_t *)values);
}
static inline kinc_uint8x16_t kinc_uint8x16_intrin_load_unaligned(const uint8_t *values) {
return _mm_loadu_si128((const kinc_uint8x16_t *)values);
}
static inline kinc_uint8x16_t kinc_uint8x16_load(const uint8_t values[16]) {
return _mm_set_epi8(values[15], values[14], values[13], values[12], values[11], values[10], values[9], values[8], values[7], values[6], values[5],
values[4], values[3], values[2], values[1], values[0]);
}
static inline kinc_uint8x16_t kinc_uint8x16_load_all(uint8_t t) {
return _mm_set1_epi8(t);
}
static inline void kinc_uint8x16_store(uint8_t *destination, kinc_uint8x16_t value) {
_mm_store_si128((kinc_uint8x16_t *)destination, value);
}
static inline void kinc_uint8x16_store_unaligned(uint8_t *destination, kinc_uint8x16_t value) {
_mm_storeu_si128((kinc_uint8x16_t *)destination, value);
}
static inline uint8_t kinc_uint8x16_get(kinc_uint8x16_t t, int index) {
union {
__m128i value;
uint8_t elements[16];
} converter;
converter.value = t;
return converter.elements[index];
}
static inline kinc_uint8x16_t kinc_uint8x16_add(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_add_epi8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_sub(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_sub_epi8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_max(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_max_epu8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_min(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_min_epu8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpeq(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_cmpeq_epi8(a, b);
}
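/* SSE2 only has a signed byte compare (cmpgt_epi8). The unsigned ordered
   compares below are derived instead from max_epu8: max(a, b) == a exactly
   when a >= b, a > b is NOT(b >= a), and cmpneq is NOT(cmpeq) via andnot. */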
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpge(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_cmpeq_epi8(_mm_max_epu8(a, b), a);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpgt(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_xor_si128(_mm_cmpeq_epi8(_mm_max_epu8(b, a), b), _mm_set1_epi8(-1));
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmple(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_cmpeq_epi8(_mm_max_epu8(b, a), b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmplt(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return kinc_uint8x16_cmpgt(b, a);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpneq(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_andnot_si128(_mm_cmpeq_epi8(a, b), _mm_set1_epi32(0xffffffff));
}
static inline kinc_uint8x16_t kinc_uint8x16_sel(kinc_uint8x16_t a, kinc_uint8x16_t b, kinc_uint8x16_mask_t mask) {
return _mm_xor_si128(b, _mm_and_si128(mask, _mm_xor_si128(a, b)));
}
static inline kinc_uint8x16_t kinc_uint8x16_or(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_or_si128(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_and(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_and_si128(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_xor(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return _mm_xor_si128(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_not(kinc_uint8x16_t t) {
return _mm_xor_si128(t, _mm_set1_epi32(0xffffffff));
}
#elif defined(KINC_NEON)
static inline kinc_uint8x16_t kinc_uint8x16_intrin_load(const uint8_t *values) {
return vld1q_u8(values);
}
static inline kinc_uint8x16_t kinc_uint8x16_intrin_load_unaligned(const uint8_t *values) {
return kinc_uint8x16_intrin_load(values);
}
static inline kinc_uint8x16_t kinc_uint8x16_load(const uint8_t values[16]) {
return (kinc_uint8x16_t){values[0], values[1], values[2], values[3], values[4], values[5], values[6], values[7],
values[8], values[9], values[10], values[11], values[12], values[13], values[14], values[15]};
}
static inline kinc_uint8x16_t kinc_uint8x16_load_all(uint8_t t) {
return (kinc_uint8x16_t){t, t, t, t, t, t, t, t, t, t, t, t, t, t, t, t};
}
static inline void kinc_uint8x16_store(uint8_t *destination, kinc_uint8x16_t value) {
vst1q_u8(destination, value);
}
static inline void kinc_uint8x16_store_unaligned(uint8_t *destination, kinc_uint8x16_t value) {
kinc_uint8x16_store(destination, value);
}
static inline uint8_t kinc_uint8x16_get(kinc_uint8x16_t t, int index) {
return t[index];
}
static inline kinc_uint8x16_t kinc_uint8x16_add(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vaddq_u8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_sub(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vsubq_u8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_max(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vmaxq_u8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_min(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vminq_u8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpeq(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vceqq_u8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpge(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vcgeq_u8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpgt(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vcgtq_u8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmple(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vcleq_u8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmplt(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vcltq_u8(a, b);
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpneq(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vmvnq_u8(vceqq_u8(a, b));
}
static inline kinc_uint8x16_t kinc_uint8x16_sel(kinc_uint8x16_t a, kinc_uint8x16_t b, kinc_uint8x16_mask_t mask) {
return vbslq_u8(mask, a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_or(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vorrq_u8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_and(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return vandq_u8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_xor(kinc_uint8x16_t a, kinc_uint8x16_t b) {
return veorq_u8(a, b);
}
static inline kinc_uint8x16_t kinc_uint8x16_not(kinc_uint8x16_t t) {
return vmvnq_u8(t);
}
#else
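/* Portable scalar fallback: every operation is performed lane by lane, and
   compare results are all-ones (0xff) per matching lane to mirror the SIMD
   mask semantics. */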
static inline kinc_uint8x16_t kinc_uint8x16_intrin_load(const uint8_t *values) {
kinc_uint8x16_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
value.values[8] = values[8];
value.values[9] = values[9];
value.values[10] = values[10];
value.values[11] = values[11];
value.values[12] = values[12];
value.values[13] = values[13];
value.values[14] = values[14];
value.values[15] = values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_intrin_load_unaligned(const uint8_t *values) {
return kinc_uint8x16_intrin_load(values);
}
static inline kinc_uint8x16_t kinc_uint8x16_load(const uint8_t values[16]) {
kinc_uint8x16_t value;
value.values[0] = values[0];
value.values[1] = values[1];
value.values[2] = values[2];
value.values[3] = values[3];
value.values[4] = values[4];
value.values[5] = values[5];
value.values[6] = values[6];
value.values[7] = values[7];
value.values[8] = values[8];
value.values[9] = values[9];
value.values[10] = values[10];
value.values[11] = values[11];
value.values[12] = values[12];
value.values[13] = values[13];
value.values[14] = values[14];
value.values[15] = values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_load_all(uint8_t t) {
kinc_uint8x16_t value;
value.values[0] = t;
value.values[1] = t;
value.values[2] = t;
value.values[3] = t;
value.values[4] = t;
value.values[5] = t;
value.values[6] = t;
value.values[7] = t;
value.values[8] = t;
value.values[9] = t;
value.values[10] = t;
value.values[11] = t;
value.values[12] = t;
value.values[13] = t;
value.values[14] = t;
value.values[15] = t;
return value;
}
static inline void kinc_uint8x16_store(uint8_t *destination, kinc_uint8x16_t value) {
destination[0] = value.values[0];
destination[1] = value.values[1];
destination[2] = value.values[2];
destination[3] = value.values[3];
destination[4] = value.values[4];
destination[5] = value.values[5];
destination[6] = value.values[6];
destination[7] = value.values[7];
destination[8] = value.values[8];
destination[9] = value.values[9];
destination[10] = value.values[10];
destination[11] = value.values[11];
destination[12] = value.values[12];
destination[13] = value.values[13];
destination[14] = value.values[14];
destination[15] = value.values[15];
}
static inline void kinc_uint8x16_store_unaligned(uint8_t *destination, kinc_uint8x16_t value) {
 kinc_uint8x16_store(destination, value);
}
static inline uint8_t kinc_uint8x16_get(kinc_uint8x16_t t, int index) {
return t.values[index];
}
static inline kinc_uint8x16_t kinc_uint8x16_add(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] + b.values[0];
value.values[1] = a.values[1] + b.values[1];
value.values[2] = a.values[2] + b.values[2];
value.values[3] = a.values[3] + b.values[3];
value.values[4] = a.values[4] + b.values[4];
value.values[5] = a.values[5] + b.values[5];
value.values[6] = a.values[6] + b.values[6];
value.values[7] = a.values[7] + b.values[7];
value.values[8] = a.values[8] + b.values[8];
value.values[9] = a.values[9] + b.values[9];
value.values[10] = a.values[10] + b.values[10];
value.values[11] = a.values[11] + b.values[11];
value.values[12] = a.values[12] + b.values[12];
value.values[13] = a.values[13] + b.values[13];
value.values[14] = a.values[14] + b.values[14];
value.values[15] = a.values[15] + b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_sub(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] - b.values[0];
value.values[1] = a.values[1] - b.values[1];
value.values[2] = a.values[2] - b.values[2];
value.values[3] = a.values[3] - b.values[3];
value.values[4] = a.values[4] - b.values[4];
value.values[5] = a.values[5] - b.values[5];
value.values[6] = a.values[6] - b.values[6];
value.values[7] = a.values[7] - b.values[7];
value.values[8] = a.values[8] - b.values[8];
value.values[9] = a.values[9] - b.values[9];
value.values[10] = a.values[10] - b.values[10];
value.values[11] = a.values[11] - b.values[11];
value.values[12] = a.values[12] - b.values[12];
value.values[13] = a.values[13] - b.values[13];
value.values[14] = a.values[14] - b.values[14];
value.values[15] = a.values[15] - b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_max(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] > b.values[0] ? a.values[0] : b.values[0];
value.values[1] = a.values[1] > b.values[1] ? a.values[1] : b.values[1];
value.values[2] = a.values[2] > b.values[2] ? a.values[2] : b.values[2];
value.values[3] = a.values[3] > b.values[3] ? a.values[3] : b.values[3];
value.values[4] = a.values[4] > b.values[4] ? a.values[4] : b.values[4];
value.values[5] = a.values[5] > b.values[5] ? a.values[5] : b.values[5];
value.values[6] = a.values[6] > b.values[6] ? a.values[6] : b.values[6];
value.values[7] = a.values[7] > b.values[7] ? a.values[7] : b.values[7];
value.values[8] = a.values[8] > b.values[8] ? a.values[8] : b.values[8];
value.values[9] = a.values[9] > b.values[9] ? a.values[9] : b.values[9];
value.values[10] = a.values[10] > b.values[10] ? a.values[10] : b.values[10];
value.values[11] = a.values[11] > b.values[11] ? a.values[11] : b.values[11];
value.values[12] = a.values[12] > b.values[12] ? a.values[12] : b.values[12];
value.values[13] = a.values[13] > b.values[13] ? a.values[13] : b.values[13];
value.values[14] = a.values[14] > b.values[14] ? a.values[14] : b.values[14];
value.values[15] = a.values[15] > b.values[15] ? a.values[15] : b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_min(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] > b.values[0] ? b.values[0] : a.values[0];
value.values[1] = a.values[1] > b.values[1] ? b.values[1] : a.values[1];
value.values[2] = a.values[2] > b.values[2] ? b.values[2] : a.values[2];
value.values[3] = a.values[3] > b.values[3] ? b.values[3] : a.values[3];
value.values[4] = a.values[4] > b.values[4] ? b.values[4] : a.values[4];
value.values[5] = a.values[5] > b.values[5] ? b.values[5] : a.values[5];
value.values[6] = a.values[6] > b.values[6] ? b.values[6] : a.values[6];
value.values[7] = a.values[7] > b.values[7] ? b.values[7] : a.values[7];
value.values[8] = a.values[8] > b.values[8] ? b.values[8] : a.values[8];
value.values[9] = a.values[9] > b.values[9] ? b.values[9] : a.values[9];
value.values[10] = a.values[10] > b.values[10] ? b.values[10] : a.values[10];
value.values[11] = a.values[11] > b.values[11] ? b.values[11] : a.values[11];
value.values[12] = a.values[12] > b.values[12] ? b.values[12] : a.values[12];
value.values[13] = a.values[13] > b.values[13] ? b.values[13] : a.values[13];
value.values[14] = a.values[14] > b.values[14] ? b.values[14] : a.values[14];
value.values[15] = a.values[15] > b.values[15] ? b.values[15] : a.values[15];
return value;
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpeq(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_mask_t mask;
mask.values[0] = a.values[0] == b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] == b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] == b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] == b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] == b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] == b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] == b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] == b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] == b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] == b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] == b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] == b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] == b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] == b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] == b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] == b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpge(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_mask_t mask;
mask.values[0] = a.values[0] >= b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] >= b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] >= b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] >= b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] >= b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] >= b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] >= b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] >= b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] >= b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] >= b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] >= b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] >= b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] >= b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] >= b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] >= b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] >= b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpgt(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_mask_t mask;
mask.values[0] = a.values[0] > b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] > b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] > b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] > b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] > b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] > b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] > b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] > b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] > b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] > b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] > b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] > b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] > b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] > b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] > b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] > b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmple(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_mask_t mask;
mask.values[0] = a.values[0] <= b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] <= b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] <= b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] <= b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] <= b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] <= b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] <= b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] <= b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] <= b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] <= b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] <= b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] <= b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] <= b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] <= b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] <= b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] <= b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmplt(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_mask_t mask;
mask.values[0] = a.values[0] < b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] < b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] < b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] < b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] < b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] < b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] < b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] < b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] < b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] < b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] < b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] < b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] < b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] < b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] < b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] < b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_uint8x16_mask_t kinc_uint8x16_cmpneq(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_mask_t mask;
mask.values[0] = a.values[0] != b.values[0] ? 0xff : 0;
mask.values[1] = a.values[1] != b.values[1] ? 0xff : 0;
mask.values[2] = a.values[2] != b.values[2] ? 0xff : 0;
mask.values[3] = a.values[3] != b.values[3] ? 0xff : 0;
mask.values[4] = a.values[4] != b.values[4] ? 0xff : 0;
mask.values[5] = a.values[5] != b.values[5] ? 0xff : 0;
mask.values[6] = a.values[6] != b.values[6] ? 0xff : 0;
mask.values[7] = a.values[7] != b.values[7] ? 0xff : 0;
mask.values[8] = a.values[8] != b.values[8] ? 0xff : 0;
mask.values[9] = a.values[9] != b.values[9] ? 0xff : 0;
mask.values[10] = a.values[10] != b.values[10] ? 0xff : 0;
mask.values[11] = a.values[11] != b.values[11] ? 0xff : 0;
mask.values[12] = a.values[12] != b.values[12] ? 0xff : 0;
mask.values[13] = a.values[13] != b.values[13] ? 0xff : 0;
mask.values[14] = a.values[14] != b.values[14] ? 0xff : 0;
mask.values[15] = a.values[15] != b.values[15] ? 0xff : 0;
return mask;
}
static inline kinc_uint8x16_t kinc_uint8x16_sel(kinc_uint8x16_t a, kinc_uint8x16_t b, kinc_uint8x16_mask_t mask) {
kinc_uint8x16_t value;
value.values[0] = mask.values[0] != 0 ? a.values[0] : b.values[0];
value.values[1] = mask.values[1] != 0 ? a.values[1] : b.values[1];
value.values[2] = mask.values[2] != 0 ? a.values[2] : b.values[2];
value.values[3] = mask.values[3] != 0 ? a.values[3] : b.values[3];
value.values[4] = mask.values[4] != 0 ? a.values[4] : b.values[4];
value.values[5] = mask.values[5] != 0 ? a.values[5] : b.values[5];
value.values[6] = mask.values[6] != 0 ? a.values[6] : b.values[6];
value.values[7] = mask.values[7] != 0 ? a.values[7] : b.values[7];
value.values[8] = mask.values[8] != 0 ? a.values[8] : b.values[8];
value.values[9] = mask.values[9] != 0 ? a.values[9] : b.values[9];
value.values[10] = mask.values[10] != 0 ? a.values[10] : b.values[10];
value.values[11] = mask.values[11] != 0 ? a.values[11] : b.values[11];
value.values[12] = mask.values[12] != 0 ? a.values[12] : b.values[12];
value.values[13] = mask.values[13] != 0 ? a.values[13] : b.values[13];
value.values[14] = mask.values[14] != 0 ? a.values[14] : b.values[14];
value.values[15] = mask.values[15] != 0 ? a.values[15] : b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_or(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] | b.values[0];
value.values[1] = a.values[1] | b.values[1];
value.values[2] = a.values[2] | b.values[2];
value.values[3] = a.values[3] | b.values[3];
value.values[4] = a.values[4] | b.values[4];
value.values[5] = a.values[5] | b.values[5];
value.values[6] = a.values[6] | b.values[6];
value.values[7] = a.values[7] | b.values[7];
value.values[8] = a.values[8] | b.values[8];
value.values[9] = a.values[9] | b.values[9];
value.values[10] = a.values[10] | b.values[10];
value.values[11] = a.values[11] | b.values[11];
value.values[12] = a.values[12] | b.values[12];
value.values[13] = a.values[13] | b.values[13];
value.values[14] = a.values[14] | b.values[14];
value.values[15] = a.values[15] | b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_and(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] & b.values[0];
value.values[1] = a.values[1] & b.values[1];
value.values[2] = a.values[2] & b.values[2];
value.values[3] = a.values[3] & b.values[3];
value.values[4] = a.values[4] & b.values[4];
value.values[5] = a.values[5] & b.values[5];
value.values[6] = a.values[6] & b.values[6];
value.values[7] = a.values[7] & b.values[7];
value.values[8] = a.values[8] & b.values[8];
value.values[9] = a.values[9] & b.values[9];
value.values[10] = a.values[10] & b.values[10];
value.values[11] = a.values[11] & b.values[11];
value.values[12] = a.values[12] & b.values[12];
value.values[13] = a.values[13] & b.values[13];
value.values[14] = a.values[14] & b.values[14];
value.values[15] = a.values[15] & b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_xor(kinc_uint8x16_t a, kinc_uint8x16_t b) {
kinc_uint8x16_t value;
value.values[0] = a.values[0] ^ b.values[0];
value.values[1] = a.values[1] ^ b.values[1];
value.values[2] = a.values[2] ^ b.values[2];
value.values[3] = a.values[3] ^ b.values[3];
value.values[4] = a.values[4] ^ b.values[4];
value.values[5] = a.values[5] ^ b.values[5];
value.values[6] = a.values[6] ^ b.values[6];
value.values[7] = a.values[7] ^ b.values[7];
value.values[8] = a.values[8] ^ b.values[8];
value.values[9] = a.values[9] ^ b.values[9];
value.values[10] = a.values[10] ^ b.values[10];
value.values[11] = a.values[11] ^ b.values[11];
value.values[12] = a.values[12] ^ b.values[12];
value.values[13] = a.values[13] ^ b.values[13];
value.values[14] = a.values[14] ^ b.values[14];
value.values[15] = a.values[15] ^ b.values[15];
return value;
}
static inline kinc_uint8x16_t kinc_uint8x16_not(kinc_uint8x16_t t) {
kinc_uint8x16_t value;
value.values[0] = ~t.values[0];
value.values[1] = ~t.values[1];
value.values[2] = ~t.values[2];
value.values[3] = ~t.values[3];
value.values[4] = ~t.values[4];
value.values[5] = ~t.values[5];
value.values[6] = ~t.values[6];
value.values[7] = ~t.values[7];
value.values[8] = ~t.values[8];
value.values[9] = ~t.values[9];
value.values[10] = ~t.values[10];
value.values[11] = ~t.values[11];
value.values[12] = ~t.values[12];
value.values[13] = ~t.values[13];
value.values[14] = ~t.values[14];
value.values[15] = ~t.values[15];
return value;
}
#endif
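/* Editor's example (a sketch, not part of the original API): per-byte absolute
   difference computed without branches as max(a, b) - min(a, b), which is
   valid for unsigned lanes on every backend. */
static inline kinc_uint8x16_t kinc_uint8x16_example_absdiff(kinc_uint8x16_t a, kinc_uint8x16_t b) {
 return kinc_uint8x16_sub(kinc_uint8x16_max(a, b), kinc_uint8x16_min(a, b));
}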
#ifdef __cplusplus
}
#endif