blob: 1d2f20878e544b1b9a4d4e643d90f7b33447acab [file] [log] [blame]
Thomas Tsou9471d762013-08-20 21:24:24 -04001/*
2 * SSE type conversions
3 * Copyright (C) 2013 Thomas Tsou <tom@tsou.cc>
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20#include <malloc.h>
21#include <string.h>
Thomas Tsou17bbb9b2013-10-30 21:24:40 -040022#include "convert.h"
Thomas Tsou9471d762013-08-20 21:24:24 -040023
24#ifdef HAVE_CONFIG_H
25#include "config.h"
26#endif
27
28#ifdef HAVE_SSE3
29#include <xmmintrin.h>
30#include <emmintrin.h>
31
32#ifdef HAVE_SSE4_1
33#include <smmintrin.h>
34
/* 16*N 16-bit signed integer converted to single precision floats.
 *
 * out - destination buffer (at least len floats)
 * in  - source buffer of 16-bit signed samples
 * len - total sample count; only the first (len / 16) * 16 samples are
 *       converted here, any remainder is the caller's responsibility
 */
static void _sse_convert_si16_ps_16n(float *restrict out,
				     short *restrict in,
				     int len)
{
	__m128i m0, m1, m2, m3, m4, m5;
	__m128 m6, m7, m8, m9;

	for (int i = 0; i < len / 16; i++) {
		/* Load (unaligned) packed 16-bit integers, 8 per vector */
		m0 = _mm_loadu_si128((__m128i *) &in[16 * i + 0]);
		m1 = _mm_loadu_si128((__m128i *) &in[16 * i + 8]);

		/* Unpack: sign-extend the low 4 shorts of each vector to
		 * 32-bit, then swap the 64-bit halves and extend the
		 * remaining 4 shorts (SSE4.1 _mm_cvtepi16_epi32 only reads
		 * the low 64 bits of its source) */
		m2 = _mm_cvtepi16_epi32(m0);
		m4 = _mm_cvtepi16_epi32(m1);
		m0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1, 0, 3, 2));
		m1 = _mm_shuffle_epi32(m1, _MM_SHUFFLE(1, 0, 3, 2));
		m3 = _mm_cvtepi16_epi32(m0);
		m5 = _mm_cvtepi16_epi32(m1);

		/* Convert 32-bit integers to single precision floats */
		m6 = _mm_cvtepi32_ps(m2);
		m7 = _mm_cvtepi32_ps(m3);
		m8 = _mm_cvtepi32_ps(m4);
		m9 = _mm_cvtepi32_ps(m5);

		/* Store (unaligned) */
		_mm_storeu_ps(&out[16 * i + 0], m6);
		_mm_storeu_ps(&out[16 * i + 4], m7);
		_mm_storeu_ps(&out[16 * i + 8], m8);
		_mm_storeu_ps(&out[16 * i + 12], m9);
	}
}
69
/* 16-bit signed integer conversion for arbitrary lengths: vectorized
 * main body plus a scalar loop for the up-to-15 leftover samples. */
static void _sse_convert_si16_ps(float *restrict out,
				 short *restrict in,
				 int len)
{
	int done = len / 16 * 16;
	int rem = len % 16;

	/* Bulk of the work in 16-sample vector chunks */
	_sse_convert_si16_ps_16n(out, in, len);

	/* Tail: short to float widening is exact, a plain cast suffices */
	for (int i = 0; i < rem; i++)
		out[done + i] = in[done + i];
}
82#endif /* HAVE_SSE4_1 */
83
/* 8*N single precision floats scaled and converted to 16-bit signed integer.
 *
 * out   - destination buffer (at least len shorts)
 * in    - source buffer of single precision floats
 * scale - multiplier applied to every sample before conversion
 * len   - total sample count; only (len / 8) * 8 samples are converted
 *
 * Values are rounded to nearest and saturated to [-32768, 32767] by the
 * convert/pack intrinsics.
 */
static void _sse_convert_scale_ps_si16_8n(short *restrict out,
					  float *restrict in,
					  float scale, int len)
{
	__m128 m0, m1, m2;
	__m128i m4, m5;

	/* Broadcast the loop-invariant scale factor once, outside the loop */
	m2 = _mm_load1_ps(&scale);

	for (int i = 0; i < len / 8; i++) {
		/* Load (unaligned) packed floats */
		m0 = _mm_loadu_ps(&in[8 * i + 0]);
		m1 = _mm_loadu_ps(&in[8 * i + 4]);

		/* Scale */
		m0 = _mm_mul_ps(m0, m2);
		m1 = _mm_mul_ps(m1, m2);

		/* Convert to 32-bit integers (round to nearest) */
		m4 = _mm_cvtps_epi32(m0);
		m5 = _mm_cvtps_epi32(m1);

		/* Pack with signed saturation and store */
		m5 = _mm_packs_epi32(m4, m5);
		_mm_storeu_si128((__m128i *) &out[8 * i], m5);
	}
}
111
/* Scaled float to 16-bit conversion for arbitrary lengths: vectorized
 * main body plus a scalar loop for the up-to-7 leftover samples.
 *
 * The tail uses _mm_cvtss_si32 plus clamping so it matches the vector
 * path exactly (round-to-nearest, saturate to [-32768, 32767]); a plain
 * (short) cast would truncate instead, and float-to-integer conversion
 * of an out-of-range value is undefined behavior in C. */
static void _sse_convert_scale_ps_si16(short *restrict out,
				       float *restrict in,
				       float scale, int len)
{
	int start = len / 8 * 8;

	_sse_convert_scale_ps_si16_8n(out, in, scale, len);

	for (int i = 0; i < len % 8; i++) {
		int v = _mm_cvtss_si32(_mm_set_ss(in[start + i] * scale));

		if (v > 32767)
			v = 32767;
		else if (v < -32768)
			v = -32768;

		out[start + i] = (short) v;
	}
}
124
/* 16*N single precision floats scaled and converted to 16-bit signed integer.
 *
 * out   - destination buffer (at least len shorts)
 * in    - source buffer of single precision floats
 * scale - multiplier applied to every sample before conversion
 * len   - total sample count; only (len / 16) * 16 samples are converted
 *
 * Values are rounded to nearest and saturated to [-32768, 32767] by the
 * convert/pack intrinsics.
 */
static void _sse_convert_scale_ps_si16_16n(short *restrict out,
					   float *restrict in,
					   float scale, int len)
{
	__m128 m0, m1, m2, m3, m4;
	__m128i m5, m6, m7, m8;

	/* Broadcast the loop-invariant scale factor once, outside the loop */
	m4 = _mm_load1_ps(&scale);

	for (int i = 0; i < len / 16; i++) {
		/* Load (unaligned) packed floats */
		m0 = _mm_loadu_ps(&in[16 * i + 0]);
		m1 = _mm_loadu_ps(&in[16 * i + 4]);
		m2 = _mm_loadu_ps(&in[16 * i + 8]);
		m3 = _mm_loadu_ps(&in[16 * i + 12]);

		/* Scale */
		m0 = _mm_mul_ps(m0, m4);
		m1 = _mm_mul_ps(m1, m4);
		m2 = _mm_mul_ps(m2, m4);
		m3 = _mm_mul_ps(m3, m4);

		/* Convert to 32-bit integers (round to nearest) */
		m5 = _mm_cvtps_epi32(m0);
		m6 = _mm_cvtps_epi32(m1);
		m7 = _mm_cvtps_epi32(m2);
		m8 = _mm_cvtps_epi32(m3);

		/* Pack with signed saturation and store */
		m5 = _mm_packs_epi32(m5, m6);
		m7 = _mm_packs_epi32(m7, m8);
		_mm_storeu_si128((__m128i *) &out[16 * i + 0], m5);
		_mm_storeu_si128((__m128i *) &out[16 * i + 8], m7);
	}
}
160#else /* HAVE_SSE3 */
/* Scalar fallback: scale floats and convert to 16-bit signed integers.
 * Saturates to [-32768, 32767] because converting an out-of-range float
 * to a short is undefined behavior in C; in-range values keep the
 * original truncating-cast semantics. */
static void convert_scale_ps_si16(short *out, float *in, float scale, int len)
{
	for (int i = 0; i < len; i++) {
		float v = in[i] * scale;

		if (v >= 32767.0f)
			out[i] = 32767;
		else if (v <= -32768.0f)
			out[i] = -32768;
		else
			out[i] = v;
	}
}
166#endif
167
#ifndef HAVE_SSE4_1
/* Scalar fallback 16-bit integer to float conversion.
 *
 * Guarded on HAVE_SSE4_1 (not HAVE_SSE3): convert_short_float() calls
 * this function whenever SSE4.1 is unavailable, so guarding it with
 * "#ifndef HAVE_SSE3" left it undefined - and the build unlinkable -
 * on hosts with SSE3 but without SSE4.1. */
static void convert_si16_ps(float *out, short *in, int len)
{
	for (int i = 0; i < len; i++)
		out[i] = in[i];
}
#endif
175
/* Public entry point: scale 'len' floats by 'scale' and convert them to
 * 16-bit signed integers, picking the widest SSE kernel the length
 * allows, or the scalar fallback when SSE3 is unavailable. */
void convert_float_short(short *out, float *in, float scale, int len)
{
#ifdef HAVE_SSE3
	if (len % 16 == 0)
		_sse_convert_scale_ps_si16_16n(out, in, scale, len);
	else if (len % 8 == 0)
		_sse_convert_scale_ps_si16_8n(out, in, scale, len);
	else
		_sse_convert_scale_ps_si16(out, in, scale, len);
#else
	convert_scale_ps_si16(out, in, scale, len);
#endif
}
189
/* Public entry point: convert 'len' 16-bit signed integers to floats,
 * using the SSE4.1 kernels when built with them, otherwise the scalar
 * fallback. */
void convert_short_float(float *out, short *in, int len)
{
#ifdef HAVE_SSE4_1
	if (len % 16 == 0)
		_sse_convert_si16_ps_16n(out, in, len);
	else
		_sse_convert_si16_ps(out, in, len);
#else
	convert_si16_ps(out, in, len);
#endif
}