2 * OpenAL cross platform audio library
3 * Copyright (C) 1999-2007 by authors.
4 * This library is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Library General Public
6 * License as published by the Free Software Foundation; either
7 * version 2 of the License, or (at your option) any later version.
9 * This library is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Library General Public License for more details.
14 * You should have received a copy of the GNU Library General Public
15 * License along with this library; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 * Or go to http://www.gnu.org/copyleft/lgpl.html
43 #include "alnumbers.h"
44 #include "alnumeric.h"
48 #include "core/ambidefs.h"
49 #include "core/async_event.h"
50 #include "core/bformatdec.h"
51 #include "core/bs2b.h"
52 #include "core/bsinc_defs.h"
53 #include "core/bsinc_tables.h"
54 #include "core/bufferline.h"
55 #include "core/buffer_storage.h"
56 #include "core/context.h"
57 #include "core/cpu_caps.h"
58 #include "core/cubic_tables.h"
59 #include "core/devformat.h"
60 #include "core/device.h"
61 #include "core/effects/base.h"
62 #include "core/effectslot.h"
63 #include "core/filters/biquad.h"
64 #include "core/filters/nfc.h"
65 #include "core/fpu_ctrl.h"
66 #include "core/hrtf.h"
67 #include "core/mastering.h"
68 #include "core/mixer.h"
69 #include "core/mixer/defs.h"
70 #include "core/mixer/hrtfdefs.h"
71 #include "core/resampler_limits.h"
72 #include "core/uhjfilter.h"
73 #include "core/voice.h"
74 #include "core/voice_change.h"
75 #include "intrusive_ptr.h"
76 #include "opthelpers.h"
77 #include "ringbuffer.h"
/* The padding must be even, presumably so it can be split into equal halves
 * around the resample position -- TODO confirm against resampler_limits.h.
 */
103 static_assert(!(MaxResamplerPadding&1), "MaxResamplerPadding is not a multiple of two");
/* Shorthand used throughout this file. */
108 using uint = unsigned int;
109 using namespace std::chrono;
111 using namespace std::placeholders;
/* Determines the scale applied to source cone angles, from the
 * __ALSOFT_HALF_ANGLE_CONES compat env var (accepts "true" or numeric 1).
 * The selected return values are outside this view -- presumably 0.5 when
 * the flag is set and 1.0 otherwise; verify upstream.
 */
113 float InitConeScale()
116 if(auto optval = al::getenv("__ALSOFT_HALF_ANGLE_CONES"))
118 if(al::strcasecmp(optval->c_str(), "true") == 0
119 || strtol(optval->c_str(), nullptr, 0) == 1)
/* Evaluated once at startup; the cone scale is constant afterward. */
125 const float ConeScale{InitConeScale()};
127 /* Localized scalars for mono sources (initialized in aluInit, after
128 * configuration is loaded).
134 /* Source distance scale for NFC filters. */
/* Defaults to 1; aluInit() clamps the configured value to [0.0001, 10000]. */
135 float NfcScale{1.0f};
/* Signature of the direct HRTF mixing routine, so a SIMD-specific
 * implementation can be chosen at runtime.
 */
144 using HrtfDirectMixerFunc = void(*)(const FloatBufferSpan LeftOut, const FloatBufferSpan RightOut,
145 const al::span<const FloatBufferLine> InSamples, float2 *AccumSamples, float *TempBuf,
146 HrtfChannelState *ChanState, const size_t IrSize, const size_t BufferSize);
/* Defaults to the plain C implementation; replaced in aluInit() with the
 * result of SelectHrtfMixer().
 */
148 HrtfDirectMixerFunc MixDirectHrtf{MixDirectHrtf_<CTag>};
/* Returns the most capable MixDirectHrtf implementation for the current CPU:
 * NEON if available, then SSE, falling back to plain C.
 */
150 inline HrtfDirectMixerFunc SelectHrtfMixer(void)
153 if((CPUCapFlags&CPU_CAP_NEON))
154 return MixDirectHrtf_<NEONTag>;
157 if((CPUCapFlags&CPU_CAP_SSE))
158 return MixDirectHrtf_<SSETag>;
161 return MixDirectHrtf_<CTag>;
/* Fills the given BsincState from a band-limited sinc filter table for the
 * given fixed-point resampling increment (MixerFracOne == 1.0).
 */
165 inline void BsincPrepare(const uint increment, BsincState *state, const BSincTable *table)
/* Start at the sharpest scale; down-sampling selects a smaller index below. */
167 size_t si{BSincScaleCount - 1};
170 if(increment > MixerFracOne)
/* Map the playback rate ratio into the table's scale range. */
172 sf = MixerFracOne/static_cast<float>(increment) - table->scaleBase;
173 sf = maxf(0.0f, BSincScaleCount*sf*table->scaleRange - 1.0f);
175 /* The interpolation factor is fit to this diagonally-symmetric curve
176 * to reduce the transition ripple caused by interpolating different
177 * scales of the sinc function.
179 sf = 1.0f - std::cos(std::asin(sf - static_cast<float>(si)));
/* m = number of filter points, l = left sample offset, filter = coefficient
 * slice for the chosen scale index.
 */
183 state->m = table->m[si];
184 state->l = (state->m/2) - 1;
185 state->filter = table->Tab + table->filterOffset[si];
/* Returns the resampling function for the given resampler type and
 * fixed-point increment, dispatched on the available CPU SIMD extensions.
 */
188 inline ResamplerFunc SelectResampler(Resampler resampler, uint increment)
192 case Resampler::Point:
193 return Resample_<PointTag,CTag>;
194 case Resampler::Linear:
196 if((CPUCapFlags&CPU_CAP_NEON))
197 return Resample_<LerpTag,NEONTag>;
200 if((CPUCapFlags&CPU_CAP_SSE4_1))
201 return Resample_<LerpTag,SSE4Tag>;
204 if((CPUCapFlags&CPU_CAP_SSE2))
205 return Resample_<LerpTag,SSE2Tag>;
207 return Resample_<LerpTag,CTag>;
208 case Resampler::Cubic:
210 if((CPUCapFlags&CPU_CAP_NEON))
211 return Resample_<CubicTag,NEONTag>;
214 if((CPUCapFlags&CPU_CAP_SSE))
215 return Resample_<CubicTag,SSETag>;
217 return Resample_<CubicTag,CTag>;
/* BSinc12/24 use the full bsinc mixer only when down-sampling (increment
 * above 1.0); otherwise control apparently reaches the Fast variants below
 * -- the intervening lines are outside this view, confirm the fallthrough.
 */
218 case Resampler::BSinc12:
219 case Resampler::BSinc24:
220 if(increment > MixerFracOne)
223 if((CPUCapFlags&CPU_CAP_NEON))
224 return Resample_<BSincTag,NEONTag>;
227 if((CPUCapFlags&CPU_CAP_SSE))
228 return Resample_<BSincTag,SSETag>;
230 return Resample_<BSincTag,CTag>;
233 case Resampler::FastBSinc12:
234 case Resampler::FastBSinc24:
236 if((CPUCapFlags&CPU_CAP_NEON))
237 return Resample_<FastBSincTag,NEONTag>;
240 if((CPUCapFlags&CPU_CAP_SSE))
241 return Resample_<FastBSincTag,SSETag>;
243 return Resample_<FastBSincTag,CTag>;
/* Unreachable default; point sampling is the safe fallback. */
246 return Resample_<PointTag,CTag>;
/* One-time mixer initialization: selects the HRTF mixer for this CPU,
 * applies the axis-inversion compat flags to the localized X/Y/Z scalars,
 * and clamps the NFC distance scale to a sane range.
 */
251 void aluInit(CompatFlagBitset flags, const float nfcscale)
253 MixDirectHrtf = SelectHrtfMixer();
/* A reversed axis simply negates that component of source positions. */
254 XScale = flags.test(CompatFlags::ReverseX) ? -1.0f : 1.0f;
255 YScale = flags.test(CompatFlags::ReverseY) ? -1.0f : 1.0f;
256 ZScale = flags.test(CompatFlags::ReverseZ) ? -1.0f : 1.0f;
258 NfcScale = clampf(nfcscale, 0.0001f, 10000.0f);
/* Prepares the interpolation state for the given resampler type and
 * increment (cubic spline table pointer, or bsinc filter setup), then
 * returns the matching resample function from SelectResampler().
 */
262 ResamplerFunc PrepareResampler(Resampler resampler, uint increment, InterpState *state)
/* Point and linear need no state preparation. */
266 case Resampler::Point:
267 case Resampler::Linear:
269 case Resampler::Cubic:
270 state->cubic.filter = gCubicSpline.Tab.data();
272 case Resampler::FastBSinc12:
273 case Resampler::BSinc12:
274 BsincPrepare(increment, &state->bsinc, &gBSinc12);
276 case Resampler::FastBSinc24:
277 case Resampler::BSinc24:
278 BsincPrepare(increment, &state->bsinc, &gBSinc24);
281 return SelectResampler(resampler, increment);
/* Mixes the ambisonic dry buffer directly to binaural stereo output using
 * the device's HRTF state.
 */
285 void DeviceBase::ProcessHrtf(const size_t SamplesToDo)
287 /* HRTF is stereo output only. */
288 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
289 const uint ridx{RealOut.ChannelIndex[FrontRight]};
291 MixDirectHrtf(RealOut.Buffer[lidx], RealOut.Buffer[ridx], Dry.Buffer, HrtfAccumData,
292 mHrtfState->mTemp.data(), mHrtfState->mChannels.data(), mHrtfState->mIrSize, SamplesToDo);
/* Decodes the ambisonic dry buffer to the real speaker outputs. */
295 void DeviceBase::ProcessAmbiDec(const size_t SamplesToDo)
297 AmbiDecoder->process(RealOut.Buffer, Dry.Buffer.data(), SamplesToDo);
/* Decodes the ambisonic dry buffer to speakers with extra processing on the
 * front-left/right/center channels to stabilize the front image.
 */
300 void DeviceBase::ProcessAmbiDecStablized(const size_t SamplesToDo)
302 /* Decode with front image stabilization. */
303 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
304 const uint ridx{RealOut.ChannelIndex[FrontRight]};
305 const uint cidx{RealOut.ChannelIndex[FrontCenter]};
307 AmbiDecoder->processStablize(RealOut.Buffer, Dry.Buffer.data(), lidx, ridx, cidx,
/* Encodes the first three ambisonic dry channels (W/X/Y) to 2-channel UHJ on
 * the stereo outputs.
 */
311 void DeviceBase::ProcessUhj(const size_t SamplesToDo)
313 /* UHJ is stereo output only. */
314 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
315 const uint ridx{RealOut.ChannelIndex[FrontRight]};
317 /* Encode to stereo-compatible 2-channel UHJ output. */
318 mUhjEncoder->encode(RealOut.Buffer[lidx].data(), RealOut.Buffer[ridx].data(),
319 {{Dry.Buffer[0].data(), Dry.Buffer[1].data(), Dry.Buffer[2].data()}}, SamplesToDo);
/* Decodes the ambisonic dry buffer to speakers, then applies the BS2B
 * crossfeed filter to the stereo pair for headphone listening.
 */
322 void DeviceBase::ProcessBs2b(const size_t SamplesToDo)
324 /* First, decode the ambisonic mix to the "real" output. */
325 AmbiDecoder->process(RealOut.Buffer, Dry.Buffer.data(), SamplesToDo);
327 /* BS2B is stereo output only. */
328 const uint lidx{RealOut.ChannelIndex[FrontLeft]};
329 const uint ridx{RealOut.ChannelIndex[FrontRight]};
331 /* Now apply the BS2B binaural/crossfeed filter. */
332 bs2b_cross_feed(Bs2b.get(), RealOut.Buffer[lidx].data(), RealOut.Buffer[ridx].data(),
339 /* This RNG method was created based on the math found in opusdec. It's quick,
340 * and starting with a seed value of 22222, is suitable for generating
/* Linear congruential step; advances *seed in place. The return expression
 * is outside this view -- presumably derived from the new seed value.
 */
343 inline uint dither_rng(uint *seed) noexcept
345 *seed = (*seed * 96314165) + 907633515;
350 /* Ambisonic upsampler function. It's effectively a matrix multiply. It takes
351 * an 'upsampler' and 'rotator' as the input matrices, and creates a matrix
352 * that behaves as if the B-Format input was first decoded to a speaker array
353 * at its input order, encoded back into the higher order mix, then finally
356 void UpsampleBFormatTransform(
357 const al::span<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> output,
358 const al::span<const std::array<float,MaxAmbiChannels>> upsampler,
359 const al::span<std::array<float,MaxAmbiChannels>,MaxAmbiChannels> rotator, size_t coeffs_order)
/* Only the first AmbiChannelsFromOrder(coeffs_order) rotator rows are
 * summed; higher rows contribute nothing at this order.
 */
361 const size_t num_chans{AmbiChannelsFromOrder(coeffs_order)};
/* Zero the destination rows before accumulating. */
362 for(size_t i{0};i < upsampler.size();++i)
363 output[i].fill(0.0f);
/* output[i] = sum over k of upsampler[i][k] * rotator[k] (row-by-row). */
364 for(size_t i{0};i < upsampler.size();++i)
366 for(size_t k{0};k < num_chans;++k)
368 float *RESTRICT out{output[i].data()};
369 /* Write the full number of channels. The compiler will have an
370 * easier time optimizing if it has a fixed length.
372 for(size_t j{0};j < MaxAmbiChannels;++j)
373 out[j] += upsampler[i][k] * rotator[k][j];
/* Returns the per-channel scaling table (AmbiScale::From*) matching the
 * given ambisonic scaling convention; N3D is the identity/default case.
 */
379 inline auto& GetAmbiScales(AmbiScaling scaletype) noexcept
383 case AmbiScaling::FuMa: return AmbiScale::FromFuMa();
384 case AmbiScaling::SN3D: return AmbiScale::FromSN3D();
385 case AmbiScaling::UHJ: return AmbiScale::FromUHJ();
386 case AmbiScaling::N3D: break;
388 return AmbiScale::FromN3D();
/* Returns the 3D channel index map for the given layout (FuMa or ACN). */
391 inline auto& GetAmbiLayout(AmbiLayout layouttype) noexcept
393 if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa();
394 return AmbiIndex::FromACN();
/* Returns the 2D (horizontal-only) channel index map for the given layout. */
397 inline auto& GetAmbi2DLayout(AmbiLayout layouttype) noexcept
399 if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa2D();
400 return AmbiIndex::FromACN2D();
/* Applies a pending listener/context property update, if any. Returns true
 * if an update was consumed.
 */
404 bool CalcContextParams(ContextBase *ctx)
/* Atomically take ownership of the pending update (nullptr if none). */
406 ContextProps *props{ctx->mParams.ContextUpdate.exchange(nullptr, std::memory_order_acq_rel)};
407 if(!props) return false;
409 const alu::Vector pos{props->Position[0], props->Position[1], props->Position[2], 1.0f};
410 ctx->mParams.Position = pos;
/* Listener orientation: at (forward) and up vectors. */
413 alu::Vector N{props->OrientAt[0], props->OrientAt[1], props->OrientAt[2], 0.0f};
415 alu::Vector V{props->OrientUp[0], props->OrientUp[1], props->OrientUp[2], 0.0f};
417 /* Build and normalize right-vector */
418 alu::Vector U{N.cross_product(V)};
/* Rotation built from the right/up/at basis; the at-vector is negated,
 * presumably for -Z-forward right-handed coordinates -- confirm.
 */
421 const alu::Matrix rot{
422 U[0], V[0], -N[0], 0.0,
423 U[1], V[1], -N[1], 0.0,
424 U[2], V[2], -N[2], 0.0,
426 const alu::Vector vel{props->Velocity[0], props->Velocity[1], props->Velocity[2], 0.0};
428 ctx->mParams.Matrix = rot;
/* Velocity is stored rotated into listener space. */
429 ctx->mParams.Velocity = rot * vel;
431 ctx->mParams.Gain = props->Gain * ctx->mGainBoost;
432 ctx->mParams.MetersPerUnit = props->MetersPerUnit;
433 ctx->mParams.AirAbsorptionGainHF = props->AirAbsorptionGainHF;
435 ctx->mParams.DopplerFactor = props->DopplerFactor;
436 ctx->mParams.SpeedOfSound = props->SpeedOfSound * props->DopplerVelocity;
438 ctx->mParams.SourceDistanceModel = props->SourceDistanceModel;
439 ctx->mParams.mDistanceModel = props->mDistanceModel;
/* Return the consumed property object to the free list for reuse. */
441 AtomicReplaceHead(ctx->mFreeContextProps, props);
/* Applies a pending effect-slot property update, if any, swaps in the new
 * effect state, and refreshes its output routing. Returns true if an update
 * was consumed.
 */
445 bool CalcEffectSlotParams(EffectSlot *slot, EffectSlot **sorted_slots, ContextBase *context)
/* Atomically take ownership of the pending update (nullptr if none). */
447 EffectSlotProps *props{slot->Update.exchange(nullptr, std::memory_order_acq_rel)};
448 if(!props) return false;
450 /* If the effect slot target changed, clear the first sorted entry to force
453 if(slot->Target != props->Target)
454 *sorted_slots = nullptr;
455 slot->Gain = props->Gain;
456 slot->AuxSendAuto = props->AuxSendAuto;
457 slot->Target = props->Target;
458 slot->EffectType = props->Type;
459 slot->mEffectProps = props->Props;
/* Reverb-specific parameters are cached on the slot for source send
 * auto-adjustment; non-reverb types get neutral values instead.
 */
460 if(props->Type == EffectSlotType::Reverb || props->Type == EffectSlotType::EAXReverb)
462 slot->RoomRolloff = props->Props.Reverb.RoomRolloffFactor;
463 slot->DecayTime = props->Props.Reverb.DecayTime;
464 slot->DecayLFRatio = props->Props.Reverb.DecayLFRatio;
465 slot->DecayHFRatio = props->Props.Reverb.DecayHFRatio;
466 slot->DecayHFLimit = props->Props.Reverb.DecayHFLimit;
467 slot->AirAbsorptionGainHF = props->Props.Reverb.AirAbsorptionGainHF;
471 slot->RoomRolloff = 0.0f;
472 slot->DecayTime = 0.0f;
473 slot->DecayLFRatio = 0.0f;
474 slot->DecayHFRatio = 0.0f;
475 slot->DecayHFLimit = false;
476 slot->AirAbsorptionGainHF = 1.0f;
/* Swap the new effect state into the slot, keeping the old one alive for
 * safe disposal below.
 */
479 EffectState *state{props->State.release()};
480 EffectState *oldstate{slot->mEffectState.release()};
481 slot->mEffectState.reset(state);
483 /* Only release the old state if it won't get deleted, since we can't be
484 * deleting/freeing anything in the mixer.
486 if(!oldstate->releaseIfNoDelete())
488 /* Otherwise, if it would be deleted send it off with a release event. */
489 RingBuffer *ring{context->mAsyncEvents.get()};
490 auto evt_vec = ring->getWriteVector();
491 if(evt_vec.first.len > 0) LIKELY
493 AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
494 AsyncEvent::ReleaseEffectState)};
495 evt->u.mEffectState = oldstate;
496 ring->writeAdvance(1);
500 /* If writing the event failed, the queue was probably full. Store
501 * the old state in the property object where it can eventually be
502 * cleaned up sometime later (not ideal, but better than blocking
505 props->State.reset(oldstate);
/* Return the consumed property object to the free list for reuse. */
509 AtomicReplaceHead(context->mFreeEffectslotProps, props);
/* Route output: chained to the target slot's wet buffer, or the device's
 * main dry mix (plus real output) when untargeted.
 */
512 if(EffectSlot *target{slot->Target})
513 output = EffectTarget{&target->Wet, nullptr};
516 DeviceBase *device{context->mDevice};
517 output = EffectTarget{&device->Dry, &device->RealOut};
519 state->update(context, slot, &slot->mEffectProps, output);
524 /* Scales the given azimuth toward the side (+/- pi/2 radians) for positions in
/* Only azimuths within the front half-plane (|azi| < pi/2) are scaled; the
 * result is clamped to +/-pi/2 and keeps the input's sign. The non-front
 * return path is outside this view -- presumably the azimuth unchanged.
 */
527 inline float ScaleAzimuthFront(float azimuth, float scale)
529 const float abs_azi{std::fabs(azimuth)};
530 if(!(abs_azi >= al::numbers::pi_v<float>*0.5f))
531 return std::copysign(minf(abs_azi*scale, al::numbers::pi_v<float>*0.5f), azimuth);
535 /* Wraps the given value in radians to stay between [-pi,+pi] */
536 inline float WrapRadians(float r)
538 static constexpr float Pi{al::numbers::pi_v<float>};
539 static constexpr float Pi2{Pi*2.0f};
/* fmod confines the magnitude to one turn before shifting back into range;
 * values already within [-pi,+pi] fall through (return outside this view).
 */
540 if(r > Pi) return std::fmod(Pi+r, Pi2) - Pi;
541 if(r < -Pi) return Pi - std::fmod(Pi-r, Pi2);
545 /* Begin ambisonic rotation helpers.
547 * Rotating first-order B-Format just needs a straight-forward X/Y/Z rotation
548 * matrix. Higher orders, however, are more complicated. The method implemented
549 * here is a recursive algorithm (the rotation for first-order is used to help
550 * generate the second-order rotation, which helps generate the third-order
554 * <https://github.com/polarch/Spherical-Harmonic-Transform/blob/master/getSHrotMtx.m>,
555 * provided under the BSD 3-Clause license.
557 * Copyright (c) 2015, Archontis Politis
558 * Copyright (c) 2019, Christopher Robinson
560 * The u, v, and w coefficients used for generating higher-order rotations are
561 * precomputed since they're constant. The second-order coefficients are
562 * followed by the third-order coefficients, etc.
/* Number of precomputed coefficient entries for orders 2..L; each order l
 * contributes (2l+1)^2 entries, recursing down to the order-2 base case.
 */
565 constexpr size_t CalcRotatorSize()
566 { return (L*2 + 1)*(L*2 + 1) + CalcRotatorSize<L-1>(); }
/* Orders 0 and 1 need no precomputed coefficients; deleting these makes the
 * recursion terminate at the order-2 specialization.
 */
568 template<> constexpr size_t CalcRotatorSize<0>() = delete;
569 template<> constexpr size_t CalcRotatorSize<1>() = delete;
570 template<> constexpr size_t CalcRotatorSize<2>() { return 5*5; }
/* Precomputed u/v/w coefficients (Ivanic & Ruedenberg Eq. 8.1 / Table I) for
 * every (l, n, m) combination of orders 2..MaxAmbiOrder, filled once at
 * construction and consumed in order by AmbiRotator().
 */
572 struct RotatorCoeffs {
576 std::array<CoeffValues,CalcRotatorSize<MaxAmbiOrder>()> mCoeffs{};
580 auto coeffs = mCoeffs.begin();
582 for(int l=2;l <= MaxAmbiOrder;++l)
584 for(int n{-l};n <= l;++n)
586 for(int m{-l};m <= l;++m)
588 // compute u,v,w terms of Eq.8.1 (Table I)
589 const bool d{m == 0}; // the delta function d_m0
590 const float denom{static_cast<float>((std::abs(n) == l) ?
591 (2*l) * (2*l - 1) : (l*l - n*n))};
593 const int abs_m{std::abs(m)};
594 coeffs->u = std::sqrt(static_cast<float>(l*l - m*m)/denom);
595 coeffs->v = std::sqrt(static_cast<float>(l+abs_m-1) *
596 static_cast<float>(l+abs_m) / denom) * (1.0f+d) * (1.0f - 2.0f*d) * 0.5f;
597 coeffs->w = std::sqrt(static_cast<float>(l-abs_m-1) *
598 static_cast<float>(l-abs_m) / denom) * (1.0f-d) * -0.5f;
/* Single shared instance; the coefficients are constant for the process. */
605 const RotatorCoeffs RotatorCoeffArray{};
608 * Given the matrix, pre-filled with the (zeroth- and) first-order rotation
609 * coefficients, this fills in the coefficients for the higher orders up to and
610 * including the given order. The matrix is in ACN layout.
612 void AmbiRotator(AmbiRotateMatrix &matrix, const int order)
614 /* Don't do anything for < 2nd order. */
615 if(order < 2) return;
/* P, U, V, W implement the recurrence functions from Ivanic & Ruedenberg
 * (as in Politis' getSHrotMtx.m): each band-l entry is built from the
 * first-order rotation rows (indices 1..3) and the previous band, which
 * starts at row 'last_band' of R.
 */
617 auto P = [](const int i, const int l, const int a, const int n, const size_t last_band,
618 const AmbiRotateMatrix &R)
620 const float ri1{ R[ 1+2][static_cast<size_t>(i+2)]};
621 const float rim1{R[-1+2][static_cast<size_t>(i+2)]};
622 const float ri0{ R[ 0+2][static_cast<size_t>(i+2)]};
624 const size_t y{last_band + static_cast<size_t>(a+l-1)};
626 return ri1*R[last_band][y] + rim1*R[last_band + static_cast<size_t>(l-1)*2][y];
628 return ri1*R[last_band + static_cast<size_t>(l-1)*2][y] - rim1*R[last_band][y];
629 return ri0*R[last_band + static_cast<size_t>(n+l-1)][y];
632 auto U = [P](const int l, const int m, const int n, const size_t last_band,
633 const AmbiRotateMatrix &R)
635 return P(0, l, m, n, last_band, R);
637 auto V = [P](const int l, const int m, const int n, const size_t last_band,
638 const AmbiRotateMatrix &R)
640 using namespace al::numbers;
/* The m == +/-1 cases get a sqrt(2) factor per the delta-function terms. */
643 const bool d{m == 1};
644 const float p0{P( 1, l, m-1, n, last_band, R)};
645 const float p1{P(-1, l, -m+1, n, last_band, R)};
646 return d ? p0*sqrt2_v<float> : (p0 - p1);
648 const bool d{m == -1};
649 const float p0{P( 1, l, m+1, n, last_band, R)};
650 const float p1{P(-1, l, -m-1, n, last_band, R)};
651 return d ? p1*sqrt2_v<float> : (p0 + p1);
653 auto W = [P](const int l, const int m, const int n, const size_t last_band,
654 const AmbiRotateMatrix &R)
659 const float p0{P( 1, l, m+1, n, last_band, R)};
660 const float p1{P(-1, l, -m-1, n, last_band, R)};
663 const float p0{P( 1, l, m-1, n, last_band, R)};
664 const float p1{P(-1, l, -m+1, n, last_band, R)};
668 // compute rotation matrix of each subsequent band recursively
/* Coefficients were precomputed in the same (l, n, m) iteration order, so a
 * single forward iterator stays in sync with these loops.
 */
669 auto coeffs = RotatorCoeffArray.mCoeffs.cbegin();
/* band_idx = ACN index where band l starts; last_band = start of band l-1. */
670 size_t band_idx{4}, last_band{1};
671 for(int l{2};l <= order;++l)
674 for(int n{-l};n <= l;++n,++y)
677 for(int m{-l};m <= l;++m,++x)
/* Accumulate only the nonzero u/v/w contributions. */
682 const float u{coeffs->u};
683 if(u != 0.0f) r += u * U(l, m, n, last_band, matrix);
684 const float v{coeffs->v};
685 if(v != 0.0f) r += v * V(l, m, n, last_band, matrix);
686 const float w{coeffs->w};
687 if(w != 0.0f) r += w * W(l, m, n, last_band, matrix);
/* Advance to the next band: band l spans 2l+1 channels. */
693 last_band = band_idx;
694 band_idx += static_cast<uint>(l)*size_t{2} + 1;
697 /* End ambisonic rotation helpers. */
/* Degrees-to-radians conversion, computed in double then narrowed to float. */
700 constexpr float Deg2Rad(float x) noexcept
701 { return static_cast<float>(al::numbers::pi / 180.0 * x); }
/* Linear gain plus high- and low-frequency gains for a filtered mix path. */
703 struct GainTriplet { float Base, HF, LF; };
705 void CalcPanningAndFilters(Voice *voice, const float xpos, const float ypos, const float zpos,
706 const float Distance, const float Spread, const GainTriplet &DryGain,
707 const al::span<const GainTriplet,MAX_SENDS> WetGain, EffectSlot *(&SendSlots)[MAX_SENDS],
708 const VoiceProps *props, const ContextParams &Context, DeviceBase *Device)
710 static constexpr ChanMap MonoMap[1]{
711 { FrontCenter, 0.0f, 0.0f }
713 { BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
714 { BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) }
716 { FrontLeft, Deg2Rad( -45.0f), Deg2Rad(0.0f) },
717 { FrontRight, Deg2Rad( 45.0f), Deg2Rad(0.0f) },
718 { BackLeft, Deg2Rad(-135.0f), Deg2Rad(0.0f) },
719 { BackRight, Deg2Rad( 135.0f), Deg2Rad(0.0f) }
721 { FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
722 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
723 { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
725 { SideLeft, Deg2Rad(-110.0f), Deg2Rad(0.0f) },
726 { SideRight, Deg2Rad( 110.0f), Deg2Rad(0.0f) }
728 { FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
729 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
730 { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
732 { BackCenter, Deg2Rad(180.0f), Deg2Rad(0.0f) },
733 { SideLeft, Deg2Rad(-90.0f), Deg2Rad(0.0f) },
734 { SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
736 { FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
737 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
738 { FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
740 { BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
741 { BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) },
742 { SideLeft, Deg2Rad( -90.0f), Deg2Rad(0.0f) },
743 { SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
746 ChanMap StereoMap[2]{
747 { FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
748 { FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) }
751 const auto Frequency = static_cast<float>(Device->Frequency);
752 const uint NumSends{Device->NumAuxSends};
754 const size_t num_channels{voice->mChans.size()};
755 ASSUME(num_channels > 0);
757 for(auto &chandata : voice->mChans)
759 chandata.mDryParams.Hrtf.Target = HrtfFilter{};
760 chandata.mDryParams.Gains.Target.fill(0.0f);
761 std::for_each(chandata.mWetParams.begin(), chandata.mWetParams.begin()+NumSends,
762 [](SendParams ¶ms) -> void { params.Gains.Target.fill(0.0f); });
765 DirectMode DirectChannels{props->DirectChannels};
766 const ChanMap *chans{nullptr};
767 switch(voice->mFmtChannels)
771 /* Mono buffers are never played direct. */
772 DirectChannels = DirectMode::Off;
776 if(DirectChannels == DirectMode::Off)
778 /* Convert counter-clockwise to clock-wise, and wrap between
781 StereoMap[0].angle = WrapRadians(-props->StereoPan[0]);
782 StereoMap[1].angle = WrapRadians(-props->StereoPan[1]);
787 case FmtRear: chans = RearMap; break;
788 case FmtQuad: chans = QuadMap; break;
789 case FmtX51: chans = X51Map; break;
790 case FmtX61: chans = X61Map; break;
791 case FmtX71: chans = X71Map; break;
799 DirectChannels = DirectMode::Off;
803 voice->mFlags.reset(VoiceHasHrtf).reset(VoiceHasNfc);
804 if(auto *decoder{voice->mDecoder.get()})
805 decoder->mWidthControl = minf(props->EnhWidth, 0.7f);
807 if(IsAmbisonic(voice->mFmtChannels))
809 /* Special handling for B-Format and UHJ sources. */
811 if(Device->AvgSpeakerDist > 0.0f && voice->mFmtChannels != FmtUHJ2
812 && voice->mFmtChannels != FmtSuperStereo)
814 if(!(Distance > std::numeric_limits<float>::epsilon()))
816 /* NOTE: The NFCtrlFilters were created with a w0 of 0, which
817 * is what we want for FOA input. The first channel may have
818 * been previously re-adjusted if panned, so reset it.
820 voice->mChans[0].mDryParams.NFCtrlFilter.adjust(0.0f);
824 /* Clamp the distance for really close sources, to prevent
827 const float mdist{maxf(Distance*NfcScale, Device->AvgSpeakerDist/4.0f)};
828 const float w0{SpeedOfSoundMetersPerSec / (mdist * Frequency)};
830 /* Only need to adjust the first channel of a B-Format source. */
831 voice->mChans[0].mDryParams.NFCtrlFilter.adjust(w0);
834 voice->mFlags.set(VoiceHasNfc);
837 /* Panning a B-Format sound toward some direction is easy. Just pan the
838 * first (W) channel as a normal mono sound. The angular spread is used
839 * as a directional scalar to blend between full coverage and full
842 const float coverage{!(Distance > std::numeric_limits<float>::epsilon()) ? 1.0f :
843 (al::numbers::inv_pi_v<float>/2.0f * Spread)};
845 auto calc_coeffs = [xpos,ypos,zpos](RenderMode mode)
847 if(mode != RenderMode::Pairwise)
848 return CalcDirectionCoeffs({xpos, ypos, zpos});
850 /* Clamp Y, in case rounding errors caused it to end up outside
853 const float ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
854 /* Negate Z for right-handed coords with -Z in front. */
855 const float az{std::atan2(xpos, -zpos)};
857 /* A scalar of 1.5 for plain stereo results in +/-60 degrees
858 * being moved to +/-90 degrees for direct right and left
861 return CalcAngleCoeffs(ScaleAzimuthFront(az, 1.5f), ev, 0.0f);
863 auto&& scales = GetAmbiScales(voice->mAmbiScaling);
864 auto coeffs = calc_coeffs(Device->mRenderMode);
866 if(!(coverage > 0.0f))
868 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base*scales[0],
869 voice->mChans[0].mDryParams.Gains.Target);
870 for(uint i{0};i < NumSends;i++)
872 if(const EffectSlot *Slot{SendSlots[i]})
873 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base*scales[0],
874 voice->mChans[0].mWetParams[i].Gains.Target);
879 /* Local B-Format sources have their XYZ channels rotated according
880 * to the orientation.
883 alu::Vector N{props->OrientAt[0], props->OrientAt[1], props->OrientAt[2], 0.0f};
885 alu::Vector V{props->OrientUp[0], props->OrientUp[1], props->OrientUp[2], 0.0f};
887 if(!props->HeadRelative)
889 N = Context.Matrix * N;
890 V = Context.Matrix * V;
892 /* Build and normalize right-vector */
893 alu::Vector U{N.cross_product(V)};
896 /* Build a rotation matrix. Manually fill the zeroth- and first-
897 * order elements, then construct the rotation for the higher
900 AmbiRotateMatrix &shrot = Device->mAmbiRotateMatrix;
901 shrot.fill(AmbiRotateMatrix::value_type{});
904 shrot[1][1] = U[0]; shrot[1][2] = -U[1]; shrot[1][3] = U[2];
905 shrot[2][1] = -V[0]; shrot[2][2] = V[1]; shrot[2][3] = -V[2];
906 shrot[3][1] = -N[0]; shrot[3][2] = N[1]; shrot[3][3] = -N[2];
907 AmbiRotator(shrot, static_cast<int>(Device->mAmbiOrder));
909 /* If the device is higher order than the voice, "upsample" the
912 * NOTE: Starting with second-order, a 2D upsample needs to be
913 * applied with a 2D source and 3D output, even when they're the
914 * same order. This is because higher orders have a height offset
915 * on various channels (i.e. when elevation=0, those height-related
916 * channels should be non-0).
918 AmbiRotateMatrix &mixmatrix = Device->mAmbiRotateMatrix2;
919 if(Device->mAmbiOrder > voice->mAmbiOrder
920 || (Device->mAmbiOrder >= 2 && !Device->m2DMixing
921 && Is2DAmbisonic(voice->mFmtChannels)))
923 if(voice->mAmbiOrder == 1)
925 auto&& upsampler = Is2DAmbisonic(voice->mFmtChannels) ?
926 AmbiScale::FirstOrder2DUp : AmbiScale::FirstOrderUp;
927 UpsampleBFormatTransform(mixmatrix, upsampler, shrot, Device->mAmbiOrder);
929 else if(voice->mAmbiOrder == 2)
931 auto&& upsampler = Is2DAmbisonic(voice->mFmtChannels) ?
932 AmbiScale::SecondOrder2DUp : AmbiScale::SecondOrderUp;
933 UpsampleBFormatTransform(mixmatrix, upsampler, shrot, Device->mAmbiOrder);
935 else if(voice->mAmbiOrder == 3)
937 auto&& upsampler = Is2DAmbisonic(voice->mFmtChannels) ?
938 AmbiScale::ThirdOrder2DUp : AmbiScale::ThirdOrderUp;
939 UpsampleBFormatTransform(mixmatrix, upsampler, shrot, Device->mAmbiOrder);
941 else if(voice->mAmbiOrder == 4)
943 auto&& upsampler = AmbiScale::FourthOrder2DUp;
944 UpsampleBFormatTransform(mixmatrix, upsampler, shrot, Device->mAmbiOrder);
952 /* Convert the rotation matrix for input ordering and scaling, and
953 * whether input is 2D or 3D.
955 const uint8_t *index_map{Is2DAmbisonic(voice->mFmtChannels) ?
956 GetAmbi2DLayout(voice->mAmbiLayout).data() :
957 GetAmbiLayout(voice->mAmbiLayout).data()};
959 /* Scale the panned W signal inversely to coverage (full coverage
960 * means no panned signal), and according to the channel scaling.
962 std::for_each(coeffs.begin(), coeffs.end(),
963 [scale=(1.0f-coverage)*scales[0]](float &coeff) noexcept { coeff *= scale; });
965 for(size_t c{0};c < num_channels;c++)
967 const size_t acn{index_map[c]};
968 const float scale{scales[acn] * coverage};
970 /* For channel 0, combine the B-Format signal (scaled according
971 * to the coverage amount) with the directional pan. For all
972 * other channels, use just the (scaled) B-Format signal.
974 for(size_t x{0};x < MaxAmbiChannels;++x)
975 coeffs[x] += mixmatrix[acn][x] * scale;
977 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
978 voice->mChans[c].mDryParams.Gains.Target);
980 for(uint i{0};i < NumSends;i++)
982 if(const EffectSlot *Slot{SendSlots[i]})
983 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
984 voice->mChans[c].mWetParams[i].Gains.Target);
987 coeffs = std::array<float,MaxAmbiChannels>{};
991 else if(DirectChannels != DirectMode::Off && !Device->RealOut.RemixMap.empty())
993 /* Direct source channels always play local. Skip the virtual channels
994 * and write inputs to the matching real outputs.
996 voice->mDirect.Buffer = Device->RealOut.Buffer;
998 for(size_t c{0};c < num_channels;c++)
1000 uint idx{Device->channelIdxByName(chans[c].channel)};
1001 if(idx != InvalidChannelIndex)
1002 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base;
1003 else if(DirectChannels == DirectMode::RemixMismatch)
1005 auto match_channel = [chans,c](const InputRemixMap &map) noexcept -> bool
1006 { return chans[c].channel == map.channel; };
1007 auto remap = std::find_if(Device->RealOut.RemixMap.cbegin(),
1008 Device->RealOut.RemixMap.cend(), match_channel);
1009 if(remap != Device->RealOut.RemixMap.cend())
1011 for(const auto &target : remap->targets)
1013 idx = Device->channelIdxByName(target.channel);
1014 if(idx != InvalidChannelIndex)
1015 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base *
1022 /* Auxiliary sends still use normal channel panning since they mix to
1023 * B-Format, which can't channel-match.
1025 for(size_t c{0};c < num_channels;c++)
1028 if(chans[c].channel == LFE)
1031 const auto coeffs = CalcAngleCoeffs(chans[c].angle, chans[c].elevation, 0.0f);
1033 for(uint i{0};i < NumSends;i++)
1035 if(const EffectSlot *Slot{SendSlots[i]})
1036 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1037 voice->mChans[c].mWetParams[i].Gains.Target);
1041 else if(Device->mRenderMode == RenderMode::Hrtf)
1043 /* Full HRTF rendering. Skip the virtual channels and render to the
1046 voice->mDirect.Buffer = Device->RealOut.Buffer;
1048 if(Distance > std::numeric_limits<float>::epsilon())
1050 const float src_ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
1051 const float src_az{std::atan2(xpos, -zpos)};
1053 if(voice->mFmtChannels == FmtMono)
1055 Device->mHrtf->getCoeffs(src_ev, src_az, Distance*NfcScale, Spread,
1056 voice->mChans[0].mDryParams.Hrtf.Target.Coeffs,
1057 voice->mChans[0].mDryParams.Hrtf.Target.Delay);
1058 voice->mChans[0].mDryParams.Hrtf.Target.Gain = DryGain.Base;
1060 const auto coeffs = CalcAngleCoeffs(src_az, src_ev, Spread);
1061 for(uint i{0};i < NumSends;i++)
1063 if(const EffectSlot *Slot{SendSlots[i]})
1064 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1065 voice->mChans[0].mWetParams[i].Gains.Target);
1068 else for(size_t c{0};c < num_channels;c++)
1070 using namespace al::numbers;
1073 if(chans[c].channel == LFE) continue;
1075 /* Warp the channel position toward the source position as the
1076 * source spread decreases. With no spread, all channels are at
1077 * the source position, at full spread (pi*2), each channel is
1080 const float ev{lerpf(src_ev, chans[c].elevation, inv_pi_v<float>/2.0f * Spread)};
1082 float az{chans[c].angle - src_az};
1083 if(az < -pi_v<float>) az += pi_v<float>*2.0f;
1084 else if(az > pi_v<float>) az -= pi_v<float>*2.0f;
1086 az *= inv_pi_v<float>/2.0f * Spread;
1089 if(az < -pi_v<float>) az += pi_v<float>*2.0f;
1090 else if(az > pi_v<float>) az -= pi_v<float>*2.0f;
1092 Device->mHrtf->getCoeffs(ev, az, Distance*NfcScale, 0.0f,
1093 voice->mChans[c].mDryParams.Hrtf.Target.Coeffs,
1094 voice->mChans[c].mDryParams.Hrtf.Target.Delay);
1095 voice->mChans[c].mDryParams.Hrtf.Target.Gain = DryGain.Base;
1097 const auto coeffs = CalcAngleCoeffs(az, ev, 0.0f);
1098 for(uint i{0};i < NumSends;i++)
1100 if(const EffectSlot *Slot{SendSlots[i]})
1101 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1102 voice->mChans[c].mWetParams[i].Gains.Target);
1108 /* With no distance, spread is only meaningful for mono sources
1109 * where it can be 0 or full (non-mono sources are always full
1112 const float spread{Spread * (voice->mFmtChannels == FmtMono)};
1114 /* Local sources on HRTF play with each channel panned to its
1115 * relative location around the listener, providing "virtual
1116 * speaker" responses.
1118 for(size_t c{0};c < num_channels;c++)
1121 if(chans[c].channel == LFE)
1124 /* Get the HRIR coefficients and delays for this channel
1127 Device->mHrtf->getCoeffs(chans[c].elevation, chans[c].angle,
1128 std::numeric_limits<float>::infinity(), spread,
1129 voice->mChans[c].mDryParams.Hrtf.Target.Coeffs,
1130 voice->mChans[c].mDryParams.Hrtf.Target.Delay);
1131 voice->mChans[c].mDryParams.Hrtf.Target.Gain = DryGain.Base;
1133 /* Normal panning for auxiliary sends. */
1134 const auto coeffs = CalcAngleCoeffs(chans[c].angle, chans[c].elevation, spread);
1136 for(uint i{0};i < NumSends;i++)
1138 if(const EffectSlot *Slot{SendSlots[i]})
1139 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1140 voice->mChans[c].mWetParams[i].Gains.Target);
1145 voice->mFlags.set(VoiceHasHrtf);
1149 /* Non-HRTF rendering. Use normal panning to the output. */
1151 if(Distance > std::numeric_limits<float>::epsilon())
1153 /* Calculate NFC filter coefficient if needed. */
1154 if(Device->AvgSpeakerDist > 0.0f)
1156 /* Clamp the distance for really close sources, to prevent
1159 const float mdist{maxf(Distance*NfcScale, Device->AvgSpeakerDist/4.0f)};
1160 const float w0{SpeedOfSoundMetersPerSec / (mdist * Frequency)};
1162 /* Adjust NFC filters. */
1163 for(size_t c{0};c < num_channels;c++)
1164 voice->mChans[c].mDryParams.NFCtrlFilter.adjust(w0);
1166 voice->mFlags.set(VoiceHasNfc);
1169 if(voice->mFmtChannels == FmtMono)
1171 auto calc_coeffs = [xpos,ypos,zpos,Spread](RenderMode mode)
1173 if(mode != RenderMode::Pairwise)
1174 return CalcDirectionCoeffs({xpos, ypos, zpos}, Spread);
1175 const float ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
1176 const float az{std::atan2(xpos, -zpos)};
1177 return CalcAngleCoeffs(ScaleAzimuthFront(az, 1.5f), ev, Spread);
1179 const auto coeffs = calc_coeffs(Device->mRenderMode);
1181 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
1182 voice->mChans[0].mDryParams.Gains.Target);
1183 for(uint i{0};i < NumSends;i++)
1185 if(const EffectSlot *Slot{SendSlots[i]})
1186 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1187 voice->mChans[0].mWetParams[i].Gains.Target);
1192 using namespace al::numbers;
1194 const float src_ev{std::asin(clampf(ypos, -1.0f, 1.0f))};
1195 const float src_az{std::atan2(xpos, -zpos)};
1197 for(size_t c{0};c < num_channels;c++)
1199 /* Special-case LFE */
1200 if(chans[c].channel == LFE)
1202 if(Device->Dry.Buffer.data() == Device->RealOut.Buffer.data())
1204 const uint idx{Device->channelIdxByName(chans[c].channel)};
1205 if(idx != InvalidChannelIndex)
1206 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base;
1211 /* Warp the channel position toward the source position as
1212 * the spread decreases. With no spread, all channels are
1213 * at the source position, at full spread (pi*2), each
1214 * channel position is left unchanged.
1216 const float ev{lerpf(src_ev, chans[c].elevation,
1217 inv_pi_v<float>/2.0f * Spread)};
1219 float az{chans[c].angle - src_az};
1220 if(az < -pi_v<float>) az += pi_v<float>*2.0f;
1221 else if(az > pi_v<float>) az -= pi_v<float>*2.0f;
1223 az *= inv_pi_v<float>/2.0f * Spread;
1226 if(az < -pi_v<float>) az += pi_v<float>*2.0f;
1227 else if(az > pi_v<float>) az -= pi_v<float>*2.0f;
1229 if(Device->mRenderMode == RenderMode::Pairwise)
1230 az = ScaleAzimuthFront(az, 3.0f);
1231 const auto coeffs = CalcAngleCoeffs(az, ev, 0.0f);
1233 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
1234 voice->mChans[c].mDryParams.Gains.Target);
1235 for(uint i{0};i < NumSends;i++)
1237 if(const EffectSlot *Slot{SendSlots[i]})
1238 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1239 voice->mChans[c].mWetParams[i].Gains.Target);
1246 if(Device->AvgSpeakerDist > 0.0f)
1248 /* If the source distance is 0, simulate a plane-wave by using
1249 * infinite distance, which results in a w0 of 0.
1251 static constexpr float w0{0.0f};
1252 for(size_t c{0};c < num_channels;c++)
1253 voice->mChans[c].mDryParams.NFCtrlFilter.adjust(w0);
1255 voice->mFlags.set(VoiceHasNfc);
1258 /* With no distance, spread is only meaningful for mono sources
1259 * where it can be 0 or full (non-mono sources are always full
1262 const float spread{Spread * (voice->mFmtChannels == FmtMono)};
1263 for(size_t c{0};c < num_channels;c++)
1265 /* Special-case LFE */
1266 if(chans[c].channel == LFE)
1268 if(Device->Dry.Buffer.data() == Device->RealOut.Buffer.data())
1270 const uint idx{Device->channelIdxByName(chans[c].channel)};
1271 if(idx != InvalidChannelIndex)
1272 voice->mChans[c].mDryParams.Gains.Target[idx] = DryGain.Base;
1277 const auto coeffs = CalcAngleCoeffs((Device->mRenderMode == RenderMode::Pairwise)
1278 ? ScaleAzimuthFront(chans[c].angle, 3.0f) : chans[c].angle,
1279 chans[c].elevation, spread);
1281 ComputePanGains(&Device->Dry, coeffs.data(), DryGain.Base,
1282 voice->mChans[c].mDryParams.Gains.Target);
1283 for(uint i{0};i < NumSends;i++)
1285 if(const EffectSlot *Slot{SendSlots[i]})
1286 ComputePanGains(&Slot->Wet, coeffs.data(), WetGain[i].Base,
1287 voice->mChans[c].mWetParams[i].Gains.Target);
1294 const float hfNorm{props->Direct.HFReference / Frequency};
1295 const float lfNorm{props->Direct.LFReference / Frequency};
1297 voice->mDirect.FilterType = AF_None;
1298 if(DryGain.HF != 1.0f) voice->mDirect.FilterType |= AF_LowPass;
1299 if(DryGain.LF != 1.0f) voice->mDirect.FilterType |= AF_HighPass;
1301 auto &lowpass = voice->mChans[0].mDryParams.LowPass;
1302 auto &highpass = voice->mChans[0].mDryParams.HighPass;
1303 lowpass.setParamsFromSlope(BiquadType::HighShelf, hfNorm, DryGain.HF, 1.0f);
1304 highpass.setParamsFromSlope(BiquadType::LowShelf, lfNorm, DryGain.LF, 1.0f);
1305 for(size_t c{1};c < num_channels;c++)
1307 voice->mChans[c].mDryParams.LowPass.copyParamsFrom(lowpass);
1308 voice->mChans[c].mDryParams.HighPass.copyParamsFrom(highpass);
1311 for(uint i{0};i < NumSends;i++)
1313 const float hfNorm{props->Send[i].HFReference / Frequency};
1314 const float lfNorm{props->Send[i].LFReference / Frequency};
1316 voice->mSend[i].FilterType = AF_None;
1317 if(WetGain[i].HF != 1.0f) voice->mSend[i].FilterType |= AF_LowPass;
1318 if(WetGain[i].LF != 1.0f) voice->mSend[i].FilterType |= AF_HighPass;
1320 auto &lowpass = voice->mChans[0].mWetParams[i].LowPass;
1321 auto &highpass = voice->mChans[0].mWetParams[i].HighPass;
1322 lowpass.setParamsFromSlope(BiquadType::HighShelf, hfNorm, WetGain[i].HF, 1.0f);
1323 highpass.setParamsFromSlope(BiquadType::LowShelf, lfNorm, WetGain[i].LF, 1.0f);
1324 for(size_t c{1};c < num_channels;c++)
1326 voice->mChans[c].mWetParams[i].LowPass.copyParamsFrom(lowpass);
1327 voice->mChans[c].mWetParams[i].HighPass.copyParamsFrom(highpass);
/* Computes mixing parameters for a voice that gets no distance/positional
 * attenuation (direct-channel, non-mono, or unspatialized playback): fixed
 * front-facing panning with gains taken straight from the source properties.
 */
void CalcNonAttnSourceParams(Voice *voice, const VoiceProps *props, const ContextBase *context)
    DeviceBase *Device{context->mDevice};
    EffectSlot *SendSlots[MAX_SENDS];

    /* The direct path always mixes into the device's main (dry) buffer. */
    voice->mDirect.Buffer = Device->Dry.Buffer;
    for(uint i{0};i < Device->NumAuxSends;i++)
        SendSlots[i] = props->Send[i].Slot;
        if(!SendSlots[i] || SendSlots[i]->EffectType == EffectSlotType::None)
            /* No usable effect slot on this send; mute its output. */
            SendSlots[i] = nullptr;
            voice->mSend[i].Buffer = {};
        voice->mSend[i].Buffer = SendSlots[i]->Wet.Buffer;

    /* Calculate the stepping value */
    const auto Pitch = static_cast<float>(voice->mFrequency) /
        static_cast<float>(Device->Frequency) * props->Pitch;
    if(Pitch > float{MaxPitch})
        /* Clamp the fixed-point resampler step to the maximum pitch. */
        voice->mStep = MaxPitch<<MixerFracBits;
        /* Step is at least 1 so playback always advances. */
        voice->mStep = maxu(fastf2u(Pitch * MixerFracOne), 1);
    voice->mResampler = PrepareResampler(props->mResampler, voice->mStep, &voice->mResampleState);

    /* Calculate gains */
    GainTriplet DryGain;
    DryGain.Base = minf(clampf(props->Gain, props->MinGain, props->MaxGain) * props->Direct.Gain *
        context->mParams.Gain, GainMixMax);
    DryGain.HF = props->Direct.GainHF;
    DryGain.LF = props->Direct.GainLF;
    GainTriplet WetGain[MAX_SENDS];
    for(uint i{0};i < Device->NumAuxSends;i++)
        WetGain[i].Base = minf(clampf(props->Gain, props->MinGain, props->MaxGain) *
            props->Send[i].Gain * context->mParams.Gain, GainMixMax);
        WetGain[i].HF = props->Send[i].GainHF;
        WetGain[i].LF = props->Send[i].GainLF;

    /* Pan straight ahead (-Z), with zero distance and zero spread. */
    CalcPanningAndFilters(voice, 0.0f, 0.0f, -1.0f, 0.0f, 0.0f, DryGain, WetGain, SendSlots, props,
        context->mParams, Device);
/* Computes mixing parameters for a fully spatialized voice: listener-space
 * transform, distance-model attenuation, cone gains, air absorption, reverb
 * decay adjustment, doppler pitch shift, and final panning.
 */
void CalcAttnSourceParams(Voice *voice, const VoiceProps *props, const ContextBase *context)
    DeviceBase *Device{context->mDevice};
    const uint NumSends{Device->NumAuxSends};

    /* Set mixing buffers and get send parameters. */
    voice->mDirect.Buffer = Device->Dry.Buffer;
    EffectSlot *SendSlots[MAX_SENDS];
    uint UseDryAttnForRoom{0};
    for(uint i{0};i < NumSends;i++)
        SendSlots[i] = props->Send[i].Slot;
        if(!SendSlots[i] || SendSlots[i]->EffectType == EffectSlotType::None)
            SendSlots[i] = nullptr;
        else if(!SendSlots[i]->AuxSendAuto)
            /* If the slot's auxiliary send auto is off, the data sent to the
             * effect slot is the same as the dry path, sans filter effects.
             */
            UseDryAttnForRoom |= 1u<<i;
            voice->mSend[i].Buffer = {};
            voice->mSend[i].Buffer = SendSlots[i]->Wet.Buffer;

    /* Transform source to listener space (convert to head relative) */
    alu::Vector Position{props->Position[0], props->Position[1], props->Position[2], 1.0f};
    alu::Vector Velocity{props->Velocity[0], props->Velocity[1], props->Velocity[2], 0.0f};
    alu::Vector Direction{props->Direction[0], props->Direction[1], props->Direction[2], 0.0f};
    if(!props->HeadRelative)
        /* Transform source vectors */
        Position = context->mParams.Matrix * (Position - context->mParams.Position);
        Velocity = context->mParams.Matrix * Velocity;
        Direction = context->mParams.Matrix * Direction;
        /* Offset the source velocity to be relative of the listener velocity */
        Velocity += context->mParams.Velocity;

    /* A zero-length direction vector means a non-directional source. */
    const bool directional{Direction.normalize() > 0.0f};
    alu::Vector ToSource{Position[0], Position[1], Position[2], 0.0f};
    const float Distance{ToSource.normalize()};

    /* Calculate distance attenuation */
    float ClampedDist{Distance};
    float DryGainBase{props->Gain};
    float WetGainBase{props->Gain};

    /* The per-source distance model is used when source distance models are
     * enabled on the context; otherwise the context's model applies.
     */
    switch(context->mParams.SourceDistanceModel ? props->mDistanceModel
        : context->mParams.mDistanceModel)
    case DistanceModel::InverseClamped:
        if(props->MaxDistance < props->RefDistance) break;
        ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
        /* fall-through to the unclamped inverse model */
    case DistanceModel::Inverse:
        if(props->RefDistance > 0.0f)
            float dist{lerpf(props->RefDistance, ClampedDist, props->RolloffFactor)};
            if(dist > 0.0f) DryGainBase *= props->RefDistance / dist;
            /* The room (wet) path uses its own rolloff factor. */
            dist = lerpf(props->RefDistance, ClampedDist, props->RoomRolloffFactor);
            if(dist > 0.0f) WetGainBase *= props->RefDistance / dist;

    case DistanceModel::LinearClamped:
        if(props->MaxDistance < props->RefDistance) break;
        ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
        /* fall-through to the unclamped linear model */
    case DistanceModel::Linear:
        if(props->MaxDistance != props->RefDistance)
            float attn{(ClampedDist-props->RefDistance) /
                (props->MaxDistance-props->RefDistance) * props->RolloffFactor};
            DryGainBase *= maxf(1.0f - attn, 0.0f);
            attn = (ClampedDist-props->RefDistance) /
                (props->MaxDistance-props->RefDistance) * props->RoomRolloffFactor;
            WetGainBase *= maxf(1.0f - attn, 0.0f);

    case DistanceModel::ExponentClamped:
        if(props->MaxDistance < props->RefDistance) break;
        ClampedDist = clampf(ClampedDist, props->RefDistance, props->MaxDistance);
        /* fall-through to the unclamped exponent model */
    case DistanceModel::Exponent:
        if(ClampedDist > 0.0f && props->RefDistance > 0.0f)
            const float dist_ratio{ClampedDist/props->RefDistance};
            DryGainBase *= std::pow(dist_ratio, -props->RolloffFactor);
            WetGainBase *= std::pow(dist_ratio, -props->RoomRolloffFactor);

    case DistanceModel::Disable:

    /* Calculate directional soundcones */
    float ConeHF{1.0f}, WetConeHF{1.0f};
    if(directional && props->InnerAngle < 360.0f)
        static constexpr float Rad2Deg{static_cast<float>(180.0 / al::numbers::pi)};
        /* Angle between the source's forward direction and the listener. */
        const float Angle{Rad2Deg*2.0f * std::acos(-Direction.dot_product(ToSource)) * ConeScale};

        float ConeGain{1.0f};
        if(Angle >= props->OuterAngle)
            /* Fully outside the outer cone. */
            ConeGain = props->OuterGain;
            ConeHF = lerpf(1.0f, props->OuterGainHF, props->DryGainHFAuto);
        else if(Angle >= props->InnerAngle)
            /* Between the inner and outer cones: interpolate. */
            const float scale{(Angle-props->InnerAngle) / (props->OuterAngle-props->InnerAngle)};
            ConeGain = lerpf(1.0f, props->OuterGain, scale);
            ConeHF = lerpf(1.0f, props->OuterGainHF, scale * props->DryGainHFAuto);

        DryGainBase *= ConeGain;
        WetGainBase *= lerpf(1.0f, ConeGain, props->WetGainAuto);

        WetConeHF = lerpf(1.0f, ConeHF, props->WetGainHFAuto);

    /* Apply gain and frequency filters */
    DryGainBase = clampf(DryGainBase, props->MinGain, props->MaxGain) * context->mParams.Gain;
    WetGainBase = clampf(WetGainBase, props->MinGain, props->MaxGain) * context->mParams.Gain;

    GainTriplet DryGain{};
    DryGain.Base = minf(DryGainBase * props->Direct.Gain, GainMixMax);
    DryGain.HF = ConeHF * props->Direct.GainHF;
    DryGain.LF = props->Direct.GainLF;
    GainTriplet WetGain[MAX_SENDS]{};
    for(uint i{0};i < NumSends;i++)
        /* If this effect slot's Auxiliary Send Auto is off, then use the dry
         * path distance and cone attenuation, otherwise use the wet (room)
         * path distance and cone attenuation. The send filter is used instead
         * of the direct filter, regardless.
         */
        const bool use_room{!(UseDryAttnForRoom&(1u<<i))};
        const float gain{use_room ? WetGainBase : DryGainBase};
        WetGain[i].Base = minf(gain * props->Send[i].Gain, GainMixMax);
        WetGain[i].HF = (use_room ? WetConeHF : ConeHF) * props->Send[i].GainHF;
        WetGain[i].LF = props->Send[i].GainLF;

    /* Distance-based air absorption and initial send decay. */
    if(Distance > props->RefDistance) LIKELY
        const float distance_base{(Distance-props->RefDistance) * props->RolloffFactor};
        const float distance_meters{distance_base * context->mParams.MetersPerUnit};
        const float dryabsorb{distance_meters * props->AirAbsorptionFactor};
        if(dryabsorb > std::numeric_limits<float>::epsilon())
            DryGain.HF *= std::pow(context->mParams.AirAbsorptionGainHF, dryabsorb);

        /* If the source's Auxiliary Send Filter Gain Auto is off, no extra
         * adjustment is applied to the send gains.
         */
        for(uint i{props->WetGainAuto ? 0u : NumSends};i < NumSends;++i)
            if(!SendSlots[i] || !(SendSlots[i]->DecayTime > 0.0f))

            /* Inverse-distance attenuation for a given rolloff factor. */
            auto calc_attenuation = [](float distance, float refdist, float rolloff) noexcept
                const float dist{lerpf(refdist, distance, rolloff)};
                if(dist > refdist) return refdist / dist;

            /* The reverb effect's room rolloff factor always applies to an
             * inverse distance rolloff model.
             */
            WetGain[i].Base *= calc_attenuation(Distance, props->RefDistance,
                SendSlots[i]->RoomRolloff);

            if(distance_meters > std::numeric_limits<float>::epsilon())
                WetGain[i].HF *= std::pow(SendSlots[i]->AirAbsorptionGainHF, distance_meters);

            /* If this effect slot's Auxiliary Send Auto is off, don't apply
             * the automatic initial reverb decay (should the reverb's room
             * rolloff still apply?).
             */
            if(!SendSlots[i]->AuxSendAuto)

            GainTriplet DecayDistance;
            /* Calculate the distances to where this effect's decay reaches
             * the target level (-60dB).
             */
            DecayDistance.Base = SendSlots[i]->DecayTime * SpeedOfSoundMetersPerSec;
            DecayDistance.LF = DecayDistance.Base * SendSlots[i]->DecayLFRatio;
            DecayDistance.HF = DecayDistance.Base * SendSlots[i]->DecayHFRatio;
            if(SendSlots[i]->DecayHFLimit)
                const float airAbsorption{SendSlots[i]->AirAbsorptionGainHF};
                if(airAbsorption < 1.0f)
                    /* Calculate the distance to where this effect's air
                     * absorption reaches -60dB, and limit the effect's HF
                     * decay distance (so it doesn't take any longer to decay
                     * than the air would allow).
                     */
                    static constexpr float log10_decaygain{-3.0f/*std::log10(ReverbDecayGain)*/};
                    const float absorb_dist{log10_decaygain / std::log10(airAbsorption)};
                    DecayDistance.HF = minf(absorb_dist, DecayDistance.HF);

            const float baseAttn = calc_attenuation(Distance, props->RefDistance,
                props->RolloffFactor);

            /* Apply a decay-time transformation to the wet path, based on the
             * source distance. The initial decay of the reverb effect is
             * calculated and applied to the wet path.
             */
            const float fact{distance_base / DecayDistance.Base};
            const float gain{std::pow(ReverbDecayGain, fact)*(1.0f-baseAttn) + baseAttn};
            WetGain[i].Base *= gain;

            /* HF/LF decays are applied relative to the base decay gain. */
            const float hffact{distance_base / DecayDistance.HF};
            const float gainhf{std::pow(ReverbDecayGain, hffact)*(1.0f-baseAttn) + baseAttn};
            WetGain[i].HF *= minf(gainhf/gain, 1.0f);
            const float lffact{distance_base / DecayDistance.LF};
            const float gainlf{std::pow(ReverbDecayGain, lffact)*(1.0f-baseAttn) + baseAttn};
            WetGain[i].LF *= minf(gainlf/gain, 1.0f);

    /* Initial source pitch */
    float Pitch{props->Pitch};

    /* Calculate velocity-based doppler effect */
    float DopplerFactor{props->DopplerFactor * context->mParams.DopplerFactor};
    if(DopplerFactor > 0.0f)
        const alu::Vector &lvelocity = context->mParams.Velocity;
        float vss{Velocity.dot_product(ToSource) * -DopplerFactor};
        float vls{lvelocity.dot_product(ToSource) * -DopplerFactor};

        const float SpeedOfSound{context->mParams.SpeedOfSound};
        if(!(vls < SpeedOfSound))
            /* Listener moving away from the source at the speed of sound.
             * Sound waves can't catch it.
             */
        else if(!(vss < SpeedOfSound))
            /* Source moving toward the listener at the speed of sound. Sound
             * waves bunch up to extreme frequencies.
             */
            Pitch = std::numeric_limits<float>::infinity();
            /* Source and listener movement is nominal. Calculate the proper
             * doppler shift.
             */
            Pitch *= (SpeedOfSound-vls) / (SpeedOfSound-vss);

    /* Adjust pitch based on the buffer and output frequencies, and calculate
     * fixed-point stepping value.
     */
    Pitch *= static_cast<float>(voice->mFrequency) / static_cast<float>(Device->Frequency);
    if(Pitch > float{MaxPitch})
        voice->mStep = MaxPitch<<MixerFracBits;
        voice->mStep = maxu(fastf2u(Pitch * MixerFracOne), 1);
    voice->mResampler = PrepareResampler(props->mResampler, voice->mStep, &voice->mResampleState);

    /* A source radius inside the listener distance widens the spread toward
     * full coverage; outside it, the spread narrows with distance.
     */
    if(props->Radius > Distance)
        spread = al::numbers::pi_v<float>*2.0f - Distance/props->Radius*al::numbers::pi_v<float>;
    else if(Distance > 0.0f)
        spread = std::asin(props->Radius/Distance) * 2.0f;

    CalcPanningAndFilters(voice, ToSource[0]*XScale, ToSource[1]*YScale, ToSource[2]*ZScale,
        Distance, spread, DryGain, WetGain, SendSlots, props, context->mParams, Device);
/* Consumes any pending property update for the voice and recomputes its
 * mixing parameters. With `force`, parameters are recalculated even without
 * a pending update (e.g. after listener/context changes).
 */
void CalcSourceParams(Voice *voice, ContextBase *context, bool force)
    VoicePropsItem *props{voice->mUpdate.exchange(nullptr, std::memory_order_acq_rel)};
    if(!props && !force) return;

        voice->mProps = *props;
        /* Return the consumed update container to the context's free list. */
        AtomicReplaceHead(context->mFreeVoiceProps, props);

    /* Direct-channel non-mono/non-ambisonic sources, and unspatialized
     * sources, skip positional attenuation entirely.
     */
    if((voice->mProps.DirectChannels != DirectMode::Off && voice->mFmtChannels != FmtMono
        && !IsAmbisonic(voice->mFmtChannels))
        || voice->mProps.mSpatializeMode == SpatializeMode::Off
        || (voice->mProps.mSpatializeMode==SpatializeMode::Auto && voice->mFmtChannels != FmtMono))
        CalcNonAttnSourceParams(voice, &voice->mProps, context);
        CalcAttnSourceParams(voice, &voice->mProps, context);
/* Posts a source state-change event (reset/stop/play/pause) for the given
 * source ID onto the context's async event ring buffer, for delivery by the
 * event thread. Silently dropped if the ring buffer is full.
 */
void SendSourceStateEvent(ContextBase *context, uint id, VChangeState state)
    RingBuffer *ring{context->mAsyncEvents.get()};
    auto evt_vec = ring->getWriteVector();
    if(evt_vec.first.len < 1) return;

    /* Construct the event in-place in the ring buffer's storage. */
    AsyncEvent *evt{al::construct_at(reinterpret_cast<AsyncEvent*>(evt_vec.first.buf),
        AsyncEvent::SourceStateChange)};
    evt->u.srcstate.id = id;
    case VChangeState::Reset:
        evt->u.srcstate.state = AsyncEvent::SrcState::Reset;
    case VChangeState::Stop:
        evt->u.srcstate.state = AsyncEvent::SrcState::Stop;
    case VChangeState::Play:
        evt->u.srcstate.state = AsyncEvent::SrcState::Play;
    case VChangeState::Pause:
        evt->u.srcstate.state = AsyncEvent::SrcState::Pause;
    /* Shouldn't happen. */
    case VChangeState::Restart:

    ring->writeAdvance(1);
/* Drains the context's voice-change queue (play/pause/stop/reset/restart
 * requests posted by the source API), updating voice play states on the
 * mixer thread and emitting source state-change events where appropriate.
 */
void ProcessVoiceChanges(ContextBase *ctx)
    VoiceChange *cur{ctx->mCurrentVoiceChange.load(std::memory_order_acquire)};
    VoiceChange *next{cur->mNext.load(std::memory_order_acquire)};

    const auto enabledevt = ctx->mEnabledEvts.load(std::memory_order_acquire);

        bool sendevt{false};
        if(cur->mState == VChangeState::Reset || cur->mState == VChangeState::Stop)
            if(Voice *voice{cur->mVoice})
                /* Detach the buffers so mixing stops consuming them. */
                voice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
                voice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
                /* A source ID indicates the voice was playing or paused, which
                 * gets a reset/stop event.
                 */
                sendevt = voice->mSourceID.exchange(0u, std::memory_order_relaxed) != 0u;
                Voice::State oldvstate{Voice::Playing};
                voice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
                    std::memory_order_relaxed, std::memory_order_acquire);
                voice->mPendingChange.store(false, std::memory_order_release);
            /* Reset state change events are always sent, even if the voice is
             * already stopped or even if there is no voice.
             */
            sendevt |= (cur->mState == VChangeState::Reset);
        else if(cur->mState == VChangeState::Pause)
            Voice *voice{cur->mVoice};
            Voice::State oldvstate{Voice::Playing};
            /* Pause event is only sent if the voice was actually playing. */
            sendevt = voice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
                std::memory_order_release, std::memory_order_acquire);
        else if(cur->mState == VChangeState::Play)
            /* NOTE: When playing a voice, sending a source state change event
             * depends if there's an old voice to stop and if that stop is
             * successful. If there is no old voice, a playing event is always
             * sent. If there is an old voice, an event is sent only if the
             * voice is already stopped.
             */
            if(Voice *oldvoice{cur->mOldVoice})
                oldvoice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
                oldvoice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
                oldvoice->mSourceID.store(0u, std::memory_order_relaxed);
                Voice::State oldvstate{Voice::Playing};
                sendevt = !oldvoice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
                    std::memory_order_relaxed, std::memory_order_acquire);
                oldvoice->mPendingChange.store(false, std::memory_order_release);

            Voice *voice{cur->mVoice};
            voice->mPlayState.store(Voice::Playing, std::memory_order_release);
        else if(cur->mState == VChangeState::Restart)
            /* Restarting a voice never sends a source change event. */
            Voice *oldvoice{cur->mOldVoice};
            oldvoice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
            oldvoice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
            /* If there's no sourceID, the old voice finished so don't start
             * the new one at its new offset.
             */
            if(oldvoice->mSourceID.exchange(0u, std::memory_order_relaxed) != 0u)
                /* Otherwise, set the voice to stopping if it's not already (it
                 * might already be, if paused), and play the new voice as
                 * appropriate.
                 */
                Voice::State oldvstate{Voice::Playing};
                oldvoice->mPlayState.compare_exchange_strong(oldvstate, Voice::Stopping,
                    std::memory_order_relaxed, std::memory_order_acquire);

                Voice *voice{cur->mVoice};
                voice->mPlayState.store((oldvstate == Voice::Playing) ? Voice::Playing
                    : Voice::Stopped, std::memory_order_release);

            oldvoice->mPendingChange.store(false, std::memory_order_release);
        if(sendevt && enabledevt.test(AsyncEvent::SourceStateChange))
            SendSourceStateEvent(ctx, cur->mSourceID, cur->mState);

        next = cur->mNext.load(std::memory_order_acquire);
    /* Publish the last processed entry as the new queue position. */
    ctx->mCurrentVoiceChange.store(cur, std::memory_order_release);
/* Applies pending property updates for the context, its effect slots, and
 * its voices. mUpdateCount is incremented before and after the work so
 * readers can detect an in-progress update.
 */
void ProcessParamUpdates(ContextBase *ctx, const EffectSlotArray &slots,
    const al::span<Voice*> voices)
    ProcessVoiceChanges(ctx);

    IncrementRef(ctx->mUpdateCount);
    if(!ctx->mHoldUpdates.load(std::memory_order_acquire)) LIKELY
        /* Context changes force all dependent objects to update too. */
        bool force{CalcContextParams(ctx)};
        /* Scratch space for sorted slots lives past the end of the array. */
        auto sorted_slots = const_cast<EffectSlot**>(slots.data() + slots.size());
        for(EffectSlot *slot : slots)
            force |= CalcEffectSlotParams(slot, sorted_slots, ctx);

        for(Voice *voice : voices)
            /* Only update voices that have a source. */
            if(voice->mSourceID.load(std::memory_order_relaxed) != 0)
                CalcSourceParams(voice, ctx, force);
    IncrementRef(ctx->mUpdateCount);
/* Mixes one update's worth of samples for every context on the device:
 * applies pending updates, mixes active voices, then processes auxiliary
 * effect slots in dependency order (sources before their targets).
 */
void ProcessContexts(DeviceBase *device, const uint SamplesToDo)
    ASSUME(SamplesToDo > 0);

    /* Current device clock time, used for voice timing/latency. */
    const nanoseconds curtime{device->ClockBase +
        nanoseconds{seconds{device->SamplesDone}}/device->Frequency};

    for(ContextBase *ctx : *device->mContexts.load(std::memory_order_acquire))
        const EffectSlotArray &auxslots = *ctx->mActiveAuxSlots.load(std::memory_order_acquire);
        const al::span<Voice*> voices{ctx->getVoicesSpanAcquired()};

        /* Process pending property updates for objects on the context. */
        ProcessParamUpdates(ctx, auxslots, voices);

        /* Clear auxiliary effect slot mixing buffers. */
        for(EffectSlot *slot : auxslots)
            for(auto &buffer : slot->Wet.Buffer)

        /* Process voices that have a playing source. */
        for(Voice *voice : voices)
            const Voice::State vstate{voice->mPlayState.load(std::memory_order_acquire)};
            if(vstate != Voice::Stopped && vstate != Voice::Pending)
                voice->mix(vstate, ctx, curtime, SamplesToDo);

        /* Process effects. */
        if(const size_t num_slots{auxslots.size()})
            auto slots = auxslots.data();
            auto slots_end = slots + num_slots;

            /* Sort the slots into extra storage, so that effect slots come
             * before their effect slot target (or their targets' target).
             */
            const al::span<EffectSlot*> sorted_slots{const_cast<EffectSlot**>(slots_end),
            /* Skip sorting if it has already been done. */
            if(!sorted_slots[0])
                /* First, copy the slots to the sorted list, then partition the
                 * sorted list so that all slots without a target slot go to
                 * the end.
                 */
                std::copy(slots, slots_end, sorted_slots.begin());
                auto split_point = std::partition(sorted_slots.begin(), sorted_slots.end(),
                    [](const EffectSlot *slot) noexcept -> bool
                    { return slot->Target != nullptr; });
                /* There must be at least one slot without a slot target. */
                assert(split_point != sorted_slots.end());

                /* Simple case: no more than 1 slot has a target slot. Either
                 * all slots go right to the output, or the remaining one must
                 * target an already-partitioned slot.
                 */
                if(split_point - sorted_slots.begin() > 1)
                    /* At least two slots target other slots. Starting from the
                     * back of the sorted list, continue partitioning the front
                     * of the list given each target until all targets are
                     * accounted for. This ensures all slots without a target
                     * go last, all slots directly targeting those last slots
                     * go second-to-last, all slots directly targeting those
                     * second-last slots go third-to-last, etc.
                     */
                    auto next_target = sorted_slots.end();
                        /* This shouldn't happen, but if there's unsorted slots
                         * left that don't target any sorted slots, they can't
                         * contribute to the output, so leave them.
                         */
                        if(next_target == split_point) UNLIKELY

                        split_point = std::partition(sorted_slots.begin(), split_point,
                            [next_target](const EffectSlot *slot) noexcept -> bool
                            { return slot->Target != *next_target; });
                    } while(split_point - sorted_slots.begin() > 1);

            /* Run each effect over its slot's accumulated wet mix. */
            for(const EffectSlot *slot : sorted_slots)
                EffectState *state{slot->mEffectState.get()};
                state->process(SamplesToDo, slot->Wet.Buffer, state->mOutTarget);

        /* Signal the event handler if there are any events to read. */
        RingBuffer *ring{ctx->mAsyncEvents.get()};
        if(ring->readSpace() > 0)
            ctx->mEventSem.post();
/* Applies per-channel distance compensation: each channel's output is
 * delayed by its configured length (using distcomp->Buffer as the delay
 * history) and scaled by its gain.
 */
void ApplyDistanceComp(const al::span<FloatBufferLine> Samples, const size_t SamplesToDo,
    const DistanceComp::ChanData *distcomp)
    ASSUME(SamplesToDo > 0);

    for(auto &chanbuffer : Samples)
        const float gain{distcomp->Gain};
        const size_t base{distcomp->Length};
        float *distbuf{al::assume_aligned<16>(distcomp->Buffer)};

        float *inout{al::assume_aligned<16>(chanbuffer.data())};
        auto inout_end = inout + SamplesToDo;
        if(SamplesToDo >= base) LIKELY
            /* Rotate the newest `base` samples to the front, then swap them
             * with the stored history so the delayed samples lead the output
             * and the newest samples are saved for the next update.
             */
            auto delay_end = std::rotate(inout, inout_end - base, inout_end);
            std::swap_ranges(inout, delay_end, distbuf);
            /* Delay is longer than the update: swap the whole update through
             * the history buffer and rotate the history back into order.
             */
            auto delay_start = std::swap_ranges(inout, inout_end, distbuf);
            std::rotate(distbuf, delay_start, distbuf + base);
        std::transform(inout, inout_end, inout, [gain](float s) { return s * gain; });
/* Applies dither to the mix buffers: the difference of two uniform random
 * values (a triangular distribution) is added at the quantization scale
 * before rounding. The PRNG seed is carried across calls via dither_seed.
 */
void ApplyDither(const al::span<FloatBufferLine> Samples, uint *dither_seed,
    const float quant_scale, const size_t SamplesToDo)
    ASSUME(SamplesToDo > 0);

    /* Dithering. Generate whitenoise (uniform distribution of random values
     * between -1 and +1) and add it to the sample values, after scaling up to
     * the desired quantization depth and before rounding.
     */
    const float invscale{1.0f / quant_scale};
    uint seed{*dither_seed};
    auto dither_sample = [&seed,invscale,quant_scale](const float sample) noexcept -> float
        float val{sample * quant_scale};
        uint rng0{dither_rng(&seed)};
        uint rng1{dither_rng(&seed)};
        /* rng0-rng1 scaled to (-1,+1); the difference of two uniforms. */
        val += static_cast<float>(rng0*(1.0/UINT_MAX) - rng1*(1.0/UINT_MAX));
        return fast_roundf(val) * invscale;
    for(FloatBufferLine &inout : Samples)
        std::transform(inout.begin(), inout.begin()+SamplesToDo, inout.begin(), dither_sample);
    *dither_seed = seed;
/* Base template left undefined. Should be marked =delete, but Clang 3.8.1
 * chokes on that given the inline specializations.
 */
template<typename T>
inline T SampleConv(float) noexcept;

/* Float output: pass the sample through unchanged. */
template<> inline float SampleConv(float val) noexcept
template<> inline int32_t SampleConv(float val) noexcept
    /* Floats have a 23-bit mantissa, plus an implied 1 bit and a sign bit.
     * This means a normalized float has at most 25 bits of signed precision.
     * When scaling and clamping for a signed 32-bit integer, these following
     * values are the best a float can give.
     */
    return fastf2i(clampf(val*2147483648.0f, -2147483648.0f, 2147483520.0f));
2020 template<> inline int16_t SampleConv(float val) noexcept
2021 { return static_cast<int16_t>(fastf2i(clampf(val*32768.0f, -32768.0f, 32767.0f))); }
2022 template<> inline int8_t SampleConv(float val) noexcept
2023 { return static_cast<int8_t>(fastf2i(clampf(val*128.0f, -128.0f, 127.0f))); }
2025 /* Define unsigned output variations. */
2026 template<> inline uint32_t SampleConv(float val) noexcept
2027 { return static_cast<uint32_t>(SampleConv<int32_t>(val)) + 2147483648u; }
2028 template<> inline uint16_t SampleConv(float val) noexcept
2029 { return static_cast<uint16_t>(SampleConv<int16_t>(val) + 32768); }
2030 template<> inline uint8_t SampleConv(float val) noexcept
2031 { return static_cast<uint8_t>(SampleConv<int8_t>(val) + 128); }
/* Converts the float mix buffers to output format T, interleaving one
 * sample per channel into FrameStep-strided frames starting at frame
 * Offset. Output channels beyond the input count are filled with silence.
 */
template<DevFmtType T>
void Write(const al::span<const FloatBufferLine> InBuffer, void *OutBuffer, const size_t Offset,
    const size_t SamplesToDo, const size_t FrameStep)
    ASSUME(FrameStep > 0);
    ASSUME(SamplesToDo > 0);

    DevFmtType_t<T> *outbase{static_cast<DevFmtType_t<T>*>(OutBuffer) + Offset*FrameStep};
    for(const FloatBufferLine &inbuf : InBuffer)
        DevFmtType_t<T> *out{outbase++};
        /* Write one converted sample per output frame for this channel. */
        auto conv_sample = [FrameStep,&out](const float s) noexcept -> void
            *out = SampleConv<DevFmtType_t<T>>(s);
        std::for_each(inbuf.begin(), inbuf.begin()+SamplesToDo, conv_sample);
    /* `c` presumably counts the channels written above (declared earlier);
     * any remaining channels in each frame get silence — TODO confirm.
     */
    if(const size_t extra{FrameStep - c})
        const auto silence = SampleConv<DevFmtType_t<T>>(0.0f);
        for(size_t i{0};i < SamplesToDo;++i)
            std::fill_n(outbase, extra, silence);
            outbase += FrameStep;
/* Renders up to BufferLineSize samples (at most numSamples) of the device
 * mix into RealOut, applying post-processing, limiting, distance
 * compensation, and dithering. Returns the number of samples rendered.
 */
uint DeviceBase::renderSamples(const uint numSamples)
    const uint samplesToDo{minu(numSamples, BufferLineSize)};

    /* Clear main mixing buffers. */
    for(FloatBufferLine &buffer : MixBuffer)

    /* Increment the mix count at the start (lsb should now be 1). */
    IncrementRef(MixCount);

    /* Process and mix each context's sources and effects. */
    ProcessContexts(this, samplesToDo);

    /* Increment the clock time. Every second's worth of samples is converted
     * and added to clock base so that large sample counts don't overflow
     * during conversion. This also guarantees a stable conversion.
     */
    SamplesDone += samplesToDo;
    ClockBase += std::chrono::seconds{SamplesDone / Frequency};
    SamplesDone %= Frequency;

    /* Increment the mix count at the end (lsb should now be 0). */
    IncrementRef(MixCount);

    /* Apply any needed post-process for finalizing the Dry mix to the RealOut
     * (Ambisonic decode, UHJ encode, etc).
     */
    postProcess(samplesToDo);

    /* Apply compression, limiting sample amplitude if needed or desired. */
    if(Limiter) Limiter->process(samplesToDo, RealOut.Buffer.data());

    /* Apply delays and attenuation for mismatched speaker distances. */
    ApplyDistanceComp(RealOut.Buffer, samplesToDo, ChannelDelays->mChannels.data());

    /* Apply dithering. The compressor should have left enough headroom for the
     * dither noise to not saturate.
     */
    if(DitherDepth > 0.0f)
        ApplyDither(RealOut.Buffer, &DitherSeed, DitherDepth, samplesToDo);
2112 void DeviceBase::renderSamples(const al::span<float*> outBuffers, const uint numSamples)
2114 FPUCtl mixer_mode{};
2116 while(const uint todo{numSamples - total})
2118 const uint samplesToDo{renderSamples(todo)};
2120 auto *srcbuf = RealOut.Buffer.data();
2121 for(auto *dstbuf : outBuffers)
2123 std::copy_n(srcbuf->data(), samplesToDo, dstbuf + total);
2127 total += samplesToDo;
2131 void DeviceBase::renderSamples(void *outBuffer, const uint numSamples, const size_t frameStep)
2133 FPUCtl mixer_mode{};
2135 while(const uint todo{numSamples - total})
2137 const uint samplesToDo{renderSamples(todo)};
2139 if(outBuffer) LIKELY
2141 /* Finally, interleave and convert samples, writing to the device's
2146 #define HANDLE_WRITE(T) case T: \
2147 Write<T>(RealOut.Buffer, outBuffer, total, samplesToDo, frameStep); break;
2148 HANDLE_WRITE(DevFmtByte)
2149 HANDLE_WRITE(DevFmtUByte)
2150 HANDLE_WRITE(DevFmtShort)
2151 HANDLE_WRITE(DevFmtUShort)
2152 HANDLE_WRITE(DevFmtInt)
2153 HANDLE_WRITE(DevFmtUInt)
2154 HANDLE_WRITE(DevFmtFloat)
2159 total += samplesToDo;
2163 void DeviceBase::handleDisconnect(const char *msg, ...)
2165 IncrementRef(MixCount);
2166 if(Connected.exchange(false, std::memory_order_acq_rel))
2168 AsyncEvent evt{AsyncEvent::Disconnected};
2171 va_start(args, msg);
2172 int msglen{vsnprintf(evt.u.disconnect.msg, sizeof(evt.u.disconnect.msg), msg, args)};
2175 if(msglen < 0 || static_cast<size_t>(msglen) >= sizeof(evt.u.disconnect.msg))
2176 evt.u.disconnect.msg[sizeof(evt.u.disconnect.msg)-1] = 0;
2178 for(ContextBase *ctx : *mContexts.load())
2180 if(ctx->mEnabledEvts.load(std::memory_order_acquire).test(AsyncEvent::Disconnected))
2182 RingBuffer *ring{ctx->mAsyncEvents.get()};
2183 auto evt_data = ring->getWriteVector().first;
2184 if(evt_data.len > 0)
2186 al::construct_at(reinterpret_cast<AsyncEvent*>(evt_data.buf), evt);
2187 ring->writeAdvance(1);
2188 ctx->mEventSem.post();
2192 if(!ctx->mStopVoicesOnDisconnect)
2194 ProcessVoiceChanges(ctx);
2198 auto voicelist = ctx->getVoicesSpanAcquired();
2199 auto stop_voice = [](Voice *voice) -> void
2201 voice->mCurrentBuffer.store(nullptr, std::memory_order_relaxed);
2202 voice->mLoopBuffer.store(nullptr, std::memory_order_relaxed);
2203 voice->mSourceID.store(0u, std::memory_order_relaxed);
2204 voice->mPlayState.store(Voice::Stopped, std::memory_order_release);
2206 std::for_each(voicelist.begin(), voicelist.end(), stop_voice);
2209 IncrementRef(MixCount);