From 37a410558ee661d6fe0362b01f825bc4e68474b4 Mon Sep 17 00:00:00 2001
From: Gamer64 <76565986+Gamer64ytb@users.noreply.github.com>
Date: Fri, 1 Aug 2025 20:13:16 +0200
Subject: [PATCH] Minor updates

---
 src/common/vector_math.h | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)

diff --git a/src/common/vector_math.h b/src/common/vector_math.h
index e16e39cbc6..829a79ce5a 100644
--- a/src/common/vector_math.h
+++ b/src/common/vector_math.h
@@ -648,24 +648,22 @@ template <typename T>
     return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
 }
 
-#ifdef __ARM_NEON__
-// NEON-accelerated overload for float Vec4 dot product
-inline float Dot(const Vec4<float>& a, const Vec4<float>& b) {
-    // Load 4 floats into NEON registers
+template <>
+[[nodiscard]] inline float Dot(const Vec4<float>& a, const Vec4<float>& b) {
+#ifdef __ARM_NEON
     float32x4_t va = vld1q_f32(&a.x);
     float32x4_t vb = vld1q_f32(&b.x);
-    // Element-wise multiply
-    float32x4_t prod = vmulq_f32(va, vb);
-
-    // Horizontal add across the vector
-    #if defined(__aarch64__)
-    return vaddvq_f32(prod);
-    #else
-    float32x2_t sum2 = vadd_f32(vget_high_f32(prod), vget_low_f32(prod));
-    return vget_lane_f32(vpadd_f32(sum2, sum2), 0);
-    #endif
-}
+    float32x4_t result = vmulq_f32(va, vb);
+#if defined(__aarch64__) // Use vaddvq_f32 in ARMv8 architectures
+    return vaddvq_f32(result);
+#else // Use manual addition for older architectures
+    float32x2_t sum2 = vadd_f32(vget_high_f32(result), vget_low_f32(result));
+    return vget_lane_f32(vpadd_f32(sum2, sum2), 0);
 #endif
+#else
+    return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
+#endif
+}
 
 template <typename T>
 [[nodiscard]] constexpr Vec3<decltype(T{} * T{} - T{} * T{})> Cross(const Vec3<T>& a,