/* common/phase_shifter.h -- from the OpenAL Soft 1.23.1 sources. */
#ifndef PHASE_SHIFTER_H
#define PHASE_SHIFTER_H

#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#elif defined(HAVE_NEON)
#include <arm_neon.h>
#endif

#include <algorithm>
#include <array>
#include <complex>
#include <memory>
#include <stddef.h>

#include "alcomplex.h"
#include "alspan.h"


/* Implements a wide-band +90 degree phase-shift. Note that this should be
 * given one sample less of a delay (FilterSize/2 - 1) than the direct
 * signal's delay (FilterSize/2) to properly align. A usage sketch follows
 * the process() definition at the end of this header.
 */
template<size_t FilterSize>
struct PhaseShifterT {
    static_assert(FilterSize >= 16, "FilterSize needs to be at least 16");
    static_assert((FilterSize&(FilterSize-1)) == 0, "FilterSize needs to be a power of two");

    alignas(16) std::array<float,FilterSize/2> mCoeffs{};

    /* Some notes on this filter construction.
     *
     * A wide-band phase-shift filter needs a delay to maintain linearity. A
     * Dirac impulse in the center of a time-domain buffer represents a filter
     * passing all frequencies through as-is with a pure delay. Converting that
     * to the frequency domain, adjusting the phase of each frequency bin by
     * +90 degrees, then converting back to the time domain, results in a FIR
     * filter that applies a +90 degree wide-band phase-shift.
     *
     * A particularly notable aspect of the time-domain filter response is that
     * every other coefficient is 0. This allows doubling the effective size of
     * the filter by storing only the non-zero coefficients and double-stepping
     * over the input to apply it.
     *
     * Additionally, the resulting filter is independent of the sample rate.
     * The same filter can be applied regardless of the device's sample rate
     * and achieve the same effect.
     */
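    /* For reference, the per-bin rotation in the constructor works because
     * multiplying a frequency bin by i = e^(i*pi/2) advances its phase by
     * 90 degrees:
     *
     *   i*(a + b*i) = -b + a*i
     *
     * which is the {-imag, real} swap applied to the lower half of the
     * spectrum. The upper half is then set to the conjugate of its mirrored
     * lower-half bins, so the time-domain result comes out essentially real;
     * only the real part is kept.
     */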
    PhaseShifterT()
    {
        using complex_d = std::complex<double>;
        constexpr size_t fft_size{FilterSize};
        constexpr size_t half_size{fft_size / 2};

        /* A Dirac impulse in the middle of the buffer: a pure delay of
         * half_size samples.
         */
        auto fftBuffer = std::make_unique<complex_d[]>(fft_size);
        std::fill_n(fftBuffer.get(), fft_size, complex_d{});
        fftBuffer[half_size] = 1.0;

        /* Rotate the phase of each bin in the lower half by +90 degrees, and
         * fill the upper half with the conjugate of the mirrored lower half.
         */
        forward_fft(al::as_span(fftBuffer.get(), fft_size));
        for(size_t i{0};i < half_size+1;++i)
            fftBuffer[i] = complex_d{-fftBuffer[i].imag(), fftBuffer[i].real()};
        for(size_t i{half_size+1};i < fft_size;++i)
            fftBuffer[i] = std::conj(fftBuffer[fft_size - i]);
        inverse_fft(al::as_span(fftBuffer.get(), fft_size));

        /* Store only every other coefficient (the rest are effectively 0),
         * in reverse order, scaling by 1/fft_size to normalize the inverse
         * FFT.
         */
        auto fftiter = fftBuffer.get() + half_size + (FilterSize/2 - 1);
        for(float &coeff : mCoeffs)
        {
            coeff = static_cast<float>(fftiter->real() / double{fft_size});
            fftiter -= 2;
        }
    }

    void process(al::span<float> dst, const float *RESTRICT src) const;

private:
#if defined(HAVE_NEON)
    /* There don't seem to be NEON intrinsics to do this kind of stipple
     * shuffling, so there are a few custom helper methods for it.
     */
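    /* For reference, the lane patterns these helpers produce (matching the
     * shuffles and unpacks used by the SSE path):
     *
     *   shuffle_2020(a,b) -> { a[0], a[2], b[0], b[2] }
     *   shuffle_3131(a,b) -> { a[1], a[3], b[1], b[3] }
     *   unpacklo(a,b)     -> { a[0], b[0], a[1], b[1] }
     *   unpackhi(a,b)     -> { a[2], b[2], a[3], b[3] }
     *   load4(a,b,c,d)    -> { a, b, c, d }
     */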
    static auto shuffle_2020(float32x4_t a, float32x4_t b)
    {
        float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 0))};
        ret = vsetq_lane_f32(vgetq_lane_f32(a, 2), ret, 1);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 0), ret, 2);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 2), ret, 3);
        return ret;
    }
    static auto shuffle_3131(float32x4_t a, float32x4_t b)
    {
        float32x4_t ret{vmovq_n_f32(vgetq_lane_f32(a, 1))};
        ret = vsetq_lane_f32(vgetq_lane_f32(a, 3), ret, 1);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 1), ret, 2);
        ret = vsetq_lane_f32(vgetq_lane_f32(b, 3), ret, 3);
        return ret;
    }
    static auto unpacklo(float32x4_t a, float32x4_t b)
    {
        float32x2x2_t result{vzip_f32(vget_low_f32(a), vget_low_f32(b))};
        return vcombine_f32(result.val[0], result.val[1]);
    }
    static auto unpackhi(float32x4_t a, float32x4_t b)
    {
        float32x2x2_t result{vzip_f32(vget_high_f32(a), vget_high_f32(b))};
        return vcombine_f32(result.val[0], result.val[1]);
    }
    static auto load4(float32_t a, float32_t b, float32_t c, float32_t d)
    {
        float32x4_t ret{vmovq_n_f32(a)};
        ret = vsetq_lane_f32(b, ret, 1);
        ret = vsetq_lane_f32(c, ret, 2);
        ret = vsetq_lane_f32(d, ret, 3);
        return ret;
    }
#endif
};

template<size_t S>
inline void PhaseShifterT<S>::process(al::span<float> dst, const float *RESTRICT src) const
{
#ifdef HAVE_SSE_INTRINSICS
    if(size_t todo{dst.size()>>1})
    {
        /* Process two output samples per iteration, splitting the input into
         * its even and odd phases so both outputs reuse the same coefficient
         * loads.
         */
        auto *out = reinterpret_cast<__m64*>(dst.data());
        do {
            __m128 r04{_mm_setzero_ps()};
            __m128 r14{_mm_setzero_ps()};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
                const __m128 s0{_mm_loadu_ps(&src[j*2])};
                const __m128 s1{_mm_loadu_ps(&src[j*2 + 4])};

                __m128 s{_mm_shuffle_ps(s0, s1, _MM_SHUFFLE(2, 0, 2, 0))};
                r04 = _mm_add_ps(r04, _mm_mul_ps(s, coeffs));

                s = _mm_shuffle_ps(s0, s1, _MM_SHUFFLE(3, 1, 3, 1));
                r14 = _mm_add_ps(r14, _mm_mul_ps(s, coeffs));
            }
            src += 2;

            /* Horizontal sum: lane 0 ends up with the r04 total and lane 1
             * with the r14 total, which are stored as the two outputs.
             */
            __m128 r4{_mm_add_ps(_mm_unpackhi_ps(r04, r14), _mm_unpacklo_ps(r04, r14))};
            r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

            _mm_storel_pi(out, r4);
            ++out;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        /* Handle the last output sample for odd-length buffers. */
        __m128 r4{_mm_setzero_ps()};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const __m128 coeffs{_mm_load_ps(&mCoeffs[j])};
            const __m128 s{_mm_setr_ps(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
        }
        r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
        r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));

        dst.back() = _mm_cvtss_f32(r4);
    }

#elif defined(HAVE_NEON)

    /* The NEON path mirrors the SSE path above, using the custom helpers in
     * place of the _mm_shuffle_ps/_mm_unpack*_ps intrinsics.
     */
    size_t pos{0};
    if(size_t todo{dst.size()>>1})
    {
        do {
            float32x4_t r04{vdupq_n_f32(0.0f)};
            float32x4_t r14{vdupq_n_f32(0.0f)};
            for(size_t j{0};j < mCoeffs.size();j+=4)
            {
                const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
                const float32x4_t s0{vld1q_f32(&src[j*2])};
                const float32x4_t s1{vld1q_f32(&src[j*2 + 4])};

                r04 = vmlaq_f32(r04, shuffle_2020(s0, s1), coeffs);
                r14 = vmlaq_f32(r14, shuffle_3131(s0, s1), coeffs);
            }
            src += 2;

            float32x4_t r4{vaddq_f32(unpackhi(r04, r14), unpacklo(r04, r14))};
            float32x2_t r2{vadd_f32(vget_low_f32(r4), vget_high_f32(r4))};

            vst1_f32(&dst[pos], r2);
            pos += 2;
        } while(--todo);
    }
    if((dst.size()&1))
    {
        float32x4_t r4{vdupq_n_f32(0.0f)};
        for(size_t j{0};j < mCoeffs.size();j+=4)
        {
            const float32x4_t coeffs{vld1q_f32(&mCoeffs[j])};
            const float32x4_t s{load4(src[j*2], src[j*2 + 2], src[j*2 + 4], src[j*2 + 6])};
            r4 = vmlaq_f32(r4, s, coeffs);
        }
        r4 = vaddq_f32(r4, vrev64q_f32(r4));
        dst[pos] = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
    }

#else

    /* Reference scalar implementation: each output sample is a dot product
     * of the coefficients with every other input sample, with the input
     * advancing by one sample per output.
     */
    for(float &output : dst)
    {
        float ret{0.0f};
        for(size_t j{0};j < mCoeffs.size();++j)
            ret += src[j*2] * mCoeffs[j];

        output = ret;
        ++src;
    }
#endif
}

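/* A minimal streaming usage sketch. This is illustrative only and not part of
 * OpenAL Soft: the HilbertState name, the 256-tap filter size and the
 * 1024-sample block size are arbitrary choices for the example. process()
 * reads dst.size() + FilterSize - 2 floats from src, so a streaming caller
 * keeps the last FilterSize - 2 input samples around between blocks, and the
 * direct signal must be delayed as described at the top of this header to
 * stay aligned with the shifted output.
 */
struct HilbertState {
    static constexpr size_t sFilterSize{256};
    static constexpr size_t sBlockSize{1024};

    PhaseShifterT<sFilterSize> mShifter;
    /* FilterSize-2 retained samples followed by the newest block; zero-
     * initialized so the first block sees silence as its history.
     */
    std::array<float,sFilterSize-2 + sBlockSize> mHistory{};

    /* Assumes in.size() == out.size() == sBlockSize. */
    void processBlock(const al::span<float> out, const al::span<const float> in)
    {
        /* Append the new block after the retained history. */
        std::copy(in.begin(), in.end(), mHistory.begin() + (sFilterSize-2));

        /* Produce one block of the +90 degree shifted signal. */
        mShifter.process(out, mHistory.data());

        /* Retain the newest FilterSize-2 samples for the next call. */
        std::copy(mHistory.end() - (sFilterSize-2), mHistory.end(), mHistory.begin());
    }
};
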
#endif /* PHASE_SHIFTER_H */