core/conv: strip unused memalign() call
The aligned memory allocation is only required for SSE, which
is currently unsupported. Moreover, it's better to use the dedicated
_mm_malloc() and _mm_free() from xmmintrin.h instead, which were
introduced by Intel specifically for SIMD computations.
Change-Id: Ide764d1c643527323334ef14335be7f8915f7622
diff --git a/src/viterbi.c b/src/viterbi.c
index ea4fb21..21c6a57 100644
--- a/src/viterbi.c
+++ b/src/viterbi.c
@@ -29,7 +29,6 @@
#define BIT2NRZ(REG,N) (((REG >> N) & 0x01) * 2 - 1) * -1
#define NUM_STATES(K) (K == 7 ? 64 : 16)
-#define SSE_ALIGN 16
/* Forward Metric Units */
void osmo_conv_gen_metrics_k5_n2(const int8_t *seq, const int16_t *out,
@@ -91,18 +90,10 @@
int16_t *, int16_t *, int);
};
-/* Aligned Memory Allocator
- * SSE requires 16-byte memory alignment. We store relevant trellis values
- * (accumulated sums, outputs, and path decisions) as 16 bit signed integers
- * so the allocated memory is casted as such.
- */
+/* Non-aligned Memory Allocator */
static int16_t *vdec_malloc(size_t n)
{
-#ifdef HAVE_SSE3
- return (int16_t *) memalign(SSE_ALIGN, sizeof(int16_t) * n);
-#else
return (int16_t *) malloc(sizeof(int16_t) * n);
-#endif
}
/* Accessor calls */