2 * This file is part of the OpenAL Soft cross platform audio library
4 * Copyright (C) 2019 by Anis A. Hireche
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * * Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
16 * * Neither the name of Spherical-Harmonic-Transform nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
41 #include "alc/effects/base.h"
43 #include "alnumbers.h"
44 #include "alnumeric.h"
46 #include "core/ambidefs.h"
47 #include "core/bufferline.h"
48 #include "core/context.h"
49 #include "core/devformat.h"
50 #include "core/device.h"
51 #include "core/effectslot.h"
52 #include "core/mixer.h"
53 #include "intrusive_ptr.h"
/* Shorthand integer alias used throughout this effect. */
58 using uint = unsigned int;

/* Maximum number of samples handled per internal processing pass; sized to
 * match the fixed scratch buffers below. */
60 #define MAX_UPDATE_SAMPLES 256
/* Each vowel is modeled by four band-pass formant filters. */
61 #define NUM_FORMANTS 4

/* Indices selecting which of the two morph-target vowels a formant-filter
 * array belongs to. */
65 #define VOWEL_A_INDEX 0
66 #define VOWEL_B_INDEX 1

/* The LFO phase is tracked in unsigned fixed-point with 24 fractional bits:
 * FRACONE is one full period, FRACMASK wraps a phase into [0,FRACONE). */
68 #define WAVEFORM_FRACBITS 24
69 #define WAVEFORM_FRACONE (1<<WAVEFORM_FRACBITS)
70 #define WAVEFORM_FRACMASK (WAVEFORM_FRACONE-1)
72 inline float Sin(uint index)
74 constexpr float scale{al::numbers::pi_v<float>*2.0f / WAVEFORM_FRACONE};
75 return std::sin(static_cast<float>(index) * scale)*0.5f + 0.5f;
78 inline float Saw(uint index)
79 { return static_cast<float>(index) / float{WAVEFORM_FRACONE}; }
81 inline float Triangle(uint index)
82 { return std::fabs(static_cast<float>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f); }
84 inline float Half(uint) { return 0.5f; }
/* Fills 'dst' with 'todo' LFO samples generated by the waveform functor
 * 'func', starting from fixed-point phase 'index' and stepping by 'step'
 * per sample.
 * NOTE(review): the loop body that writes dst[i] and advances the phase is
 * not visible in this excerpt — confirm against the full source. */
86 template<float (&func)(uint)>
87 void Oscillate(float *RESTRICT dst, uint index, const uint step, size_t todo)
89 for(size_t i{0u};i < todo;i++)
/* Wrap the accumulated phase back into one 24-bit fixed-point period. */
92 index &= WAVEFORM_FRACMASK;
/* One band of a formant filter. The surrounding struct/member declarations
 * are not visible in this excerpt. */
104 FormantFilter() = default;
/* f0norm: formant center frequency normalized by the sample rate;
 * gain: linear peak amplitude for this band. std::tan(pi*f0norm) is the
 * frequency-prewarped coefficient for the discretized filter below. */
105 FormantFilter(float f0norm, float gain)
106 : mCoeff{std::tan(al::numbers::pi_v<float> * f0norm)}, mGain{gain}
/* Filters 'numInput' samples from samplesIn, ACCUMULATING (not overwriting)
 * the band-pass output into samplesOut. */
109 inline void process(const float *samplesIn, float *samplesOut, const size_t numInput)
111 /* A state variable filter from a topology-preserving transform.
112 * Based on a talk given by Ivan Cohen: https://www.youtube.com/watch?v=esjHXGPyrhg
*/
114 const float g{mCoeff};
115 const float gain{mGain};
/* Q_FACTOR is defined elsewhere in this file; 'h' is the shared feedback
 * normalization term of the SVF. */
116 const float h{1.0f / (1.0f + (g/Q_FACTOR) + (g*g))};
/* NOTE(review): the loading of the state variables s1/s2 from members, and
 * their per-sample update/store, are not visible in this excerpt. */
120 for(size_t i{0u};i < numInput;i++)
/* H/B/L are the high-, band- and low-pass outputs of the state variable
 * filter for this sample. */
122 const float H{(samplesIn[i] - (1.0f/Q_FACTOR + g)*s1 - s2)*h};
123 const float B{g*H + s1};
124 const float L{g*B + s2};
129 // Apply peak and accumulate samples.
130 samplesOut[i] += B * gain;
/* Per-slot state for the EFX vocal morpher effect. */
144 struct VmorpherState final : public EffectState {
/* Output channel this input channel mixes to, or InvalidChannelIndex when
 * the channel is unused. NOTE(review): the enclosing per-channel struct
 * header is not visible in this excerpt. */
146 uint mTargetChannel{InvalidChannelIndex};
148 /* Effect parameters */
149 FormantFilter mFormants[NUM_FILTERS][NUM_FORMANTS];
151 /* Effect gains for each channel */
152 float mCurrentGain{};
/* One entry per possible ambisonic input channel. NOTE(review): additional
 * members (e.g. a target gain and the LFO index/step counters) appear to be
 * omitted from this excerpt — confirm against the full source. */
154 } mChans[MaxAmbiChannels];
/* Selected waveform generator; set to one of the Oscillate<> instantiations
 * in update(). */
156 void (*mGetSamples)(float*RESTRICT, uint, const uint, size_t){};
161 /* Effects buffers */
/* Scratch accumulators for vowel A/B filter output and the per-sample LFO
 * blend factor, each one processing chunk long. */
162 alignas(16) float mSampleBufferA[MAX_UPDATE_SAMPLES]{};
163 alignas(16) float mSampleBufferB[MAX_UPDATE_SAMPLES]{};
164 alignas(16) float mLfo[MAX_UPDATE_SAMPLES]{};
166 void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
167 void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
168 const EffectTarget target) override;
169 void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
170 const al::span<FloatBufferLine> samplesOut) override;
/* Builds the four formant filters for one phoneme at the given device
 * sample rate and pitch multiplier. */
172 static std::array<FormantFilter,4> getFiltersByPhoneme(VMorpherPhenome phoneme,
173 float frequency, float pitch);
175 DEF_NEWDEL(VmorpherState)
/* Returns the four formant band-pass filters for 'phoneme'. Each entry pairs
 * a center frequency (Hz, scaled by the pitch multiplier and normalized by
 * the device sample rate) with a linear gain precomputed from the dB values
 * noted alongside. NOTE(review): the switch scaffolding, return statements
 * and any remaining phoneme cases are not visible in this excerpt. */
178 std::array<FormantFilter,4> VmorpherState::getFiltersByPhoneme(VMorpherPhenome phoneme,
179 float frequency, float pitch)
181 /* Using soprano formant set of values to
182 * better match mid-range frequency space.
184 * See: https://www.classes.cs.uchicago.edu/archive/1999/spring/CS295/Computing_Resources/Csound/CsManual3.48b1.HTML/Appendices/table3.html
*/
188 case VMorpherPhenome::A:
190 {( 800 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
191 {(1150 * pitch) / frequency, 0.501187f}, /* std::pow(10.0f, -6 / 20.0f); */
192 {(2900 * pitch) / frequency, 0.025118f}, /* std::pow(10.0f, -32 / 20.0f); */
193 {(3900 * pitch) / frequency, 0.100000f} /* std::pow(10.0f, -20 / 20.0f); */
195 case VMorpherPhenome::E:
197 {( 350 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
198 {(2000 * pitch) / frequency, 0.100000f}, /* std::pow(10.0f, -20 / 20.0f); */
199 {(2800 * pitch) / frequency, 0.177827f}, /* std::pow(10.0f, -15 / 20.0f); */
200 {(3600 * pitch) / frequency, 0.009999f} /* std::pow(10.0f, -40 / 20.0f); */
202 case VMorpherPhenome::I:
204 {( 270 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
205 {(2140 * pitch) / frequency, 0.251188f}, /* std::pow(10.0f, -12 / 20.0f); */
206 {(2950 * pitch) / frequency, 0.050118f}, /* std::pow(10.0f, -26 / 20.0f); */
207 {(3900 * pitch) / frequency, 0.050118f} /* std::pow(10.0f, -26 / 20.0f); */
209 case VMorpherPhenome::O:
211 {( 450 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
212 {( 800 * pitch) / frequency, 0.281838f}, /* std::pow(10.0f, -11 / 20.0f); */
213 {(2830 * pitch) / frequency, 0.079432f}, /* std::pow(10.0f, -22 / 20.0f); */
214 {(3800 * pitch) / frequency, 0.079432f} /* std::pow(10.0f, -22 / 20.0f); */
216 case VMorpherPhenome::U:
218 {( 325 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
219 {( 700 * pitch) / frequency, 0.158489f}, /* std::pow(10.0f, -16 / 20.0f); */
220 {(2700 * pitch) / frequency, 0.017782f}, /* std::pow(10.0f, -35 / 20.0f); */
221 {(3800 * pitch) / frequency, 0.009999f} /* std::pow(10.0f, -40 / 20.0f); */
230 void VmorpherState::deviceUpdate(const DeviceBase*, const BufferStorage*)
232 for(auto &e : mChans)
234 e.mTargetChannel = InvalidChannelIndex;
235 std::for_each(std::begin(e.mFormants[VOWEL_A_INDEX]), std::end(e.mFormants[VOWEL_A_INDEX]),
236 std::mem_fn(&FormantFilter::clear));
237 std::for_each(std::begin(e.mFormants[VOWEL_B_INDEX]), std::end(e.mFormants[VOWEL_B_INDEX]),
238 std::mem_fn(&FormantFilter::clear));
239 e.mCurrentGain = 0.0f;
/* Applies new effect properties: derives the fixed-point LFO step from the
 * morph rate, selects the waveform generator, rebuilds both vowels' formant
 * filters at their coarse-tuned pitches, and refreshes output routing. */
243 void VmorpherState::update(const ContextBase *context, const EffectSlot *slot,
244 const EffectProps *props, const EffectTarget target)
246 const DeviceBase *device{context->mDevice};
247 const float frequency{static_cast<float>(device->Frequency)};
/* LFO cycles per output sample, converted to 24-bit fixed-point and clamped
 * to just under one full period per sample. */
248 const float step{props->Vmorpher.Rate / frequency};
249 mStep = fastf2u(clampf(step*WAVEFORM_FRACONE, 0.0f, float{WAVEFORM_FRACONE-1}));
/* NOTE(review): the condition guarding this first branch is not visible in
 * this excerpt — presumably it selects the constant Half generator when the
 * step is 0 (no LFO movement); confirm against the full source. */
252 mGetSamples = Oscillate<Half>;
253 else if(props->Vmorpher.Waveform == VMorpherWaveform::Sinusoid)
254 mGetSamples = Oscillate<Sin>;
255 else if(props->Vmorpher.Waveform == VMorpherWaveform::Triangle)
256 mGetSamples = Oscillate<Triangle>;
257 else /*if(props->Vmorpher.Waveform == VMorpherWaveform::Sawtooth)*/
258 mGetSamples = Oscillate<Saw>;
/* Coarse tuning is in semitones; convert to a frequency multiplier. */
260 const float pitchA{std::pow(2.0f,
261 static_cast<float>(props->Vmorpher.PhonemeACoarseTuning) / 12.0f)};
262 const float pitchB{std::pow(2.0f,
263 static_cast<float>(props->Vmorpher.PhonemeBCoarseTuning) / 12.0f)};
265 auto vowelA = getFiltersByPhoneme(props->Vmorpher.PhonemeA, frequency, pitchA);
266 auto vowelB = getFiltersByPhoneme(props->Vmorpher.PhonemeB, frequency, pitchB);
268 /* Copy the filter coefficients to the input channels. */
269 for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
271 std::copy(vowelA.begin(), vowelA.end(), std::begin(mChans[i].mFormants[VOWEL_A_INDEX]));
272 std::copy(vowelB.begin(), vowelB.end(), std::begin(mChans[i].mFormants[VOWEL_B_INDEX]));
/* Route the wet buffer into the main output, recording each channel's
 * target output channel and gain via the callback below. */
275 mOutTarget = target.Main->Buffer;
276 auto set_channel = [this](size_t idx, uint outchan, float outgain)
278 mChans[idx].mTargetChannel = outchan;
279 mChans[idx].mTargetGain = outgain;
281 target.Main->setAmbiMixParams(slot->Wet, slot->Gain, set_channel);
/* Renders the effect: in chunks of at most MAX_UPDATE_SAMPLES, generates the
 * LFO blend curve, runs each input channel through both vowels' formant
 * filter banks, crossfades the two results per sample, and mixes into the
 * routed output with a gain fade from mCurrentGain to mTargetGain. */
284 void VmorpherState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
286 /* Following the EFX specification for a conformant implementation which describes
287 * the effect as a pair of 4-band formant filters blended together using an LFO.
*/
289 for(size_t base{0u};base < samplesToDo;)
291 const size_t td{minz(MAX_UPDATE_SAMPLES, samplesToDo-base)};
/* Fill mLfo with this chunk's blend factors, then advance and wrap the
 * shared fixed-point phase. */
293 mGetSamples(mLfo, mIndex, mStep, td);
294 mIndex += static_cast<uint>(mStep * td);
295 mIndex &= WAVEFORM_FRACMASK;
297 auto chandata = std::begin(mChans);
298 for(const auto &input : samplesIn)
300 const size_t outidx{chandata->mTargetChannel};
/* NOTE(review): the body that skips unrouted channels (and the advancement
 * of chandata and base) is not visible in this excerpt — confirm against
 * the full source. */
301 if(outidx == InvalidChannelIndex)
307 auto& vowelA = chandata->mFormants[VOWEL_A_INDEX];
308 auto& vowelB = chandata->mFormants[VOWEL_B_INDEX];
/* Each FormantFilter::process call accumulates its band into the zeroed
 * scratch buffer, summing the four formant bands. */
310 /* Process first vowel. */
311 std::fill_n(std::begin(mSampleBufferA), td, 0.0f);
312 vowelA[0].process(&input[base], mSampleBufferA, td);
313 vowelA[1].process(&input[base], mSampleBufferA, td);
314 vowelA[2].process(&input[base], mSampleBufferA, td);
315 vowelA[3].process(&input[base], mSampleBufferA, td);
317 /* Process second vowel. */
318 std::fill_n(std::begin(mSampleBufferB), td, 0.0f);
319 vowelB[0].process(&input[base], mSampleBufferB, td);
320 vowelB[1].process(&input[base], mSampleBufferB, td);
321 vowelB[2].process(&input[base], mSampleBufferB, td);
322 vowelB[3].process(&input[base], mSampleBufferB, td);
/* Crossfade vowel A toward vowel B using the per-sample LFO value. */
324 alignas(16) float blended[MAX_UPDATE_SAMPLES];
325 for(size_t i{0u};i < td;i++)
326 blended[i] = lerpf(mSampleBufferA[i], mSampleBufferB[i], mLfo[i]);
328 /* Now, mix the processed sound data to the output. */
329 MixSamples({blended, td}, samplesOut[outidx].data()+base, chandata->mCurrentGain,
330 chandata->mTargetGain, samplesToDo-base);
339 struct VmorpherStateFactory final : public EffectStateFactory {
340 al::intrusive_ptr<EffectState> create() override
341 { return al::intrusive_ptr<EffectState>{new VmorpherState{}}; }
346 EffectStateFactory *VmorpherStateFactory_getFactory()
348 static VmorpherStateFactory VmorpherFactory{};
349 return &VmorpherFactory;