Mirror of https://git.naxdy.org/Mirror/Ryujinx.git (synced 2024-11-15 09:35:27 +00:00)
Amadeus: DSP code generation improvements (#2460)
This improves RyuJIT code generation drastically on the DSP side. It may reduce CPU usage of the DSP thread quite a lot.
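The diff below applies a handful of recurring patterns across the DSP command classes. A minimal, self-contained sketch of the most common one, resolving Memory<T>-backed state once per Process call and passing it down by ref instead of re-dereferencing State.Span[0] inside hot loops, is shown here (FilterState and ExampleCommand are hypothetical stand-ins, not actual Ryujinx types):

using System;

// Sketch only: illustrates the ref-state hoisting pattern used throughout this commit.
struct FilterState
{
    public float Z1;
}

class ExampleCommand
{
    // State lives in a Memory<T>, as in the real command classes.
    public Memory<FilterState> State { get; } = new FilterState[1];

    public void Process(Span<float> buffer)
    {
        // Resolve the backing element once, outside the hot loop.
        ref FilterState state = ref State.Span[0];

        ProcessCore(ref state, buffer);
    }

    private static void ProcessCore(ref FilterState state, Span<float> buffer)
    {
        for (int i = 0; i < buffer.Length; i++)
        {
            float input = buffer[i];
            buffer[i] = input + state.Z1; // placeholder one-pole filter math
            state.Z1 = input;
        }
    }
}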
This commit is contained in:
parent
97a2133207
commit
b8ad676fb8
@@ -218,6 +218,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
if (_sessionCommandList[i] != null)
{
_sessionCommandList[i].CommandList.Process(OutputDevices[i]);
_sessionCommandList[i].CommandList.Dispose();
_sessionCommandList[i] = null;
}
}
@@ -74,21 +74,19 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
Span<float> outputBuffer = context.GetBuffer(OutputBufferIndex);

DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation()
DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation
{
State = State,
SourceSampleRate = SampleRate,
SampleFormat = SampleFormat.Adpcm,
Pitch = Pitch,
DecodingBehaviour = DecodingBehaviour,
WaveBuffers = WaveBuffers,
ExtraParameter = AdpcmParameter,
ExtraParameterSize = AdpcmParameterSize,
ChannelIndex = 0,
ChannelCount = 1,
};

DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, info, context.SampleRate, (int)context.SampleCount);
DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, ref info, WaveBuffers, ref State.Span[0], context.SampleRate, (int)context.SampleCount);
}
}
}
@@ -65,6 +65,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
IsEffectEnabled = isEnabled;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private uint Read(IVirtualMemoryManager memoryManager, ulong bufferAddress, uint countMax, Span<int> outBuffer, uint count, uint readOffset, uint updateCount)
{
if (countMax == 0 || bufferAddress == 0)

@@ -104,6 +105,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
return count;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private uint Write(IVirtualMemoryManager memoryManager, ulong outBufferAddress, uint countMax, ReadOnlySpan<int> buffer, uint count, uint writeOffset, uint updateCount)
{
if (countMax == 0 || outBufferAddress == 0)

@@ -175,8 +177,8 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
else
{
ZeroFill(context.MemoryManager, BufferInfo.SendBufferInfo, Unsafe.SizeOf<AuxiliaryBufferInfo>());
ZeroFill(context.MemoryManager, BufferInfo.ReturnBufferInfo, Unsafe.SizeOf<AuxiliaryBufferInfo>());
context.MemoryManager.Fill(BufferInfo.SendBufferInfo, (ulong)Unsafe.SizeOf<AuxiliaryBufferInfo>(), 0);
context.MemoryManager.Fill(BufferInfo.ReturnBufferInfo, (ulong)Unsafe.SizeOf<AuxiliaryBufferInfo>(), 0);

if (InputBufferIndex != OutputBufferIndex)
{

@@ -184,22 +186,5 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}
}

private static void ZeroFill(IVirtualMemoryManager memoryManager, ulong address, int size)
{
ulong endAddress = address + (ulong)size;

while (address + 7UL < endAddress)
{
memoryManager.Write(address, 0UL);
address += 8;
}

while (address < endAddress)
{
memoryManager.Write(address, (byte)0);
address++;
}
}
}
}
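The hand-rolled ZeroFill helper removed above (word-wise writes through the memory manager) is replaced by a single bulk Fill call. As a rough, standalone illustration of the same idea over a plain byte span (not the IVirtualMemoryManager API):

using System;

static class ZeroFillSketch
{
    // Manual word-at-a-time zeroing, in the spirit of the removed ZeroFill helper.
    public static void ZeroFillManual(Span<byte> buffer)
    {
        int offset = 0;

        // Clear 8 bytes per iteration while possible...
        while (offset + 8 <= buffer.Length)
        {
            BitConverter.TryWriteBytes(buffer.Slice(offset, 8), 0UL);
            offset += 8;
        }

        // ...then finish byte by byte.
        while (offset < buffer.Length)
        {
            buffer[offset++] = 0;
        }
    }

    // Single bulk call, analogous to the new MemoryManager.Fill usage.
    public static void ZeroFillBulk(Span<byte> buffer)
    {
        buffer.Clear();
    }
}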
@@ -18,6 +18,7 @@
using Ryujinx.Audio.Renderer.Dsp.State;
using Ryujinx.Audio.Renderer.Parameter;
using System;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -49,7 +50,8 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
NodeId = nodeId;
}

private void ProcessBiquadFilter(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessBiquadFilter(ref BiquadFilterState state, Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer, uint sampleCount)
{
const int fixedPointPrecisionForParameter = 14;

@@ -60,8 +62,6 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
float b1 = FixedPointHelper.ToFloat(Parameter.Denominator[0], fixedPointPrecisionForParameter);
float b2 = FixedPointHelper.ToFloat(Parameter.Denominator[1], fixedPointPrecisionForParameter);

ref BiquadFilterState state = ref BiquadFilterState.Span[0];

for (int i = 0; i < sampleCount; i++)
{
float input = inputBuffer[i];

@@ -76,14 +76,17 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

public void Process(CommandList context)
{
Span<float> outputBuffer = context.GetBuffer(InputBufferIndex);
ref BiquadFilterState state = ref BiquadFilterState.Span[0];

ReadOnlySpan<float> inputBuffer = context.GetBuffer(InputBufferIndex);
Span<float> outputBuffer = context.GetBuffer(OutputBufferIndex);

if (NeedInitialization)
{
BiquadFilterState.Span[0] = new BiquadFilterState();
state = new BiquadFilterState();
}

ProcessBiquadFilter(outputBuffer, outputBuffer, context.SampleCount);
ProcessBiquadFilter(ref state, outputBuffer, inputBuffer, context.SampleCount);
}
}
}
@@ -69,7 +69,9 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
for (int i = 0; i < InputCount; i++)
{
ReadOnlySpan<float> inputBuffer = context.GetBuffer(Input[i]);
unsafe
{
float* inputBuffer = (float*)context.GetBufferPointer(Input[i]);

ulong targetOffset = CircularBuffer + currentOffset;

@@ -89,3 +91,4 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}
}
}
@@ -35,7 +35,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

public void Process(CommandList context)
{
context.Buffers.Span.Fill(0);
context.ClearBuffers();
}
}
}
@@ -21,11 +21,14 @@ using Ryujinx.Common;
using Ryujinx.Common.Logging;
using Ryujinx.Memory;
using System;
using System.Buffers;
using System.Collections.Generic;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{
public class CommandList
public class CommandList : IDisposable
{
public ulong StartTime { get; private set; }
public ulong EndTime { get; private set; }

@@ -41,6 +44,10 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

public IHardwareDevice OutputDevice { get; private set; }

private readonly int _sampleCount;
private readonly int _buffersEntryCount;
private readonly MemoryHandle _buffersMemoryHandle;

public CommandList(AudioRenderSystem renderSystem) : this(renderSystem.MemoryManager,
renderSystem.GetMixBuffer(),
renderSystem.GetSampleCount(),

@@ -53,11 +60,15 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
public CommandList(IVirtualMemoryManager memoryManager, Memory<float> mixBuffer, uint sampleCount, uint sampleRate, uint mixBufferCount, uint voiceChannelCountMax)
{
SampleCount = sampleCount;
_sampleCount = (int)SampleCount;
SampleRate = sampleRate;
BufferCount = mixBufferCount + voiceChannelCountMax;
Buffers = mixBuffer;
Commands = new List<ICommand>();
MemoryManager = memoryManager;

_buffersEntryCount = Buffers.Length;
_buffersMemoryHandle = Buffers.Pin();
}

public void AddCommand(ICommand command)

@@ -70,14 +81,47 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
throw new NotImplementedException();
}

public Memory<float> GetBufferMemory(int index)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe IntPtr GetBufferPointer(int index)
{
return Buffers.Slice(index * (int)SampleCount, (int)SampleCount);
if (index >= 0 && index < _buffersEntryCount)
{
return (IntPtr)((float*)_buffersMemoryHandle.Pointer + index * _sampleCount);
}

throw new ArgumentOutOfRangeException();
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe void ClearBuffer(int index)
{
Unsafe.InitBlock((void*)GetBufferPointer(index), 0, SampleCount);
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe void ClearBuffers()
{
Unsafe.InitBlock(_buffersMemoryHandle.Pointer, 0, (uint)_buffersEntryCount * sizeof(float));
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public unsafe void CopyBuffer(int outputBufferIndex, int inputBufferIndex)
{
Unsafe.CopyBlock((void*)GetBufferPointer(outputBufferIndex), (void*)GetBufferPointer(inputBufferIndex), SampleCount);
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
public Span<float> GetBuffer(int index)
{
return Buffers.Span.Slice(index * (int)SampleCount, (int)SampleCount);
if (index < 0 || index >= _buffersEntryCount)
{
return Span<float>.Empty;
}

unsafe
{
return new Span<float>((float*)_buffersMemoryHandle.Pointer + index * _sampleCount, _sampleCount);
}
}

public ulong GetTimeElapsedSinceDspStartedProcessing()

@@ -120,5 +164,10 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

EndTime = (ulong)PerformanceCounter.ElapsedNanoseconds;
}

public void Dispose()
{
_buffersMemoryHandle.Dispose();
}
}
}
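A rough, self-contained sketch of the pinning pattern that the CommandList changes above introduce (simplified names, no error handling; PinnedBufferList is a hypothetical stand-in, not the real class): pin the mix buffer once, hand out raw pointers or spans built from the pinned pointer, and release the pin on dispose.

using System;
using System.Buffers;
using System.Runtime.CompilerServices;

// Simplified sketch of the pinned-buffer approach used by CommandList.
public sealed unsafe class PinnedBufferList : IDisposable
{
    private readonly int _sampleCount;
    private readonly int _entryCount;
    private readonly Memory<float> _buffers;
    private readonly MemoryHandle _handle;

    public PinnedBufferList(Memory<float> buffers, int sampleCount)
    {
        _buffers = buffers;
        _sampleCount = sampleCount;
        _entryCount = buffers.Length;
        _handle = _buffers.Pin(); // pin once so raw pointers stay valid
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public IntPtr GetBufferPointer(int index)
    {
        return (IntPtr)((float*)_handle.Pointer + index * _sampleCount);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public Span<float> GetBuffer(int index)
    {
        // Building a Span from the pinned pointer avoids re-resolving Memory<T>.Span.
        return new Span<float>((float*)_handle.Pointer + index * _sampleCount, _sampleCount);
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public void ClearBuffer(int index)
    {
        // InitBlock takes a byte count, so scale by sizeof(float) in this sketch.
        Unsafe.InitBlock((void*)GetBufferPointer(index), 0, (uint)(_sampleCount * sizeof(float)));
    }

    public void Dispose()
    {
        _handle.Dispose(); // unpin the buffer
    }
}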
@@ -43,10 +43,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

public void Process(CommandList context)
{
ReadOnlySpan<float> inputBuffer = context.GetBuffer(InputBufferIndex);
Span<float> outputBuffer = context.GetBuffer(OutputBufferIndex);

inputBuffer.CopyTo(outputBuffer);
context.CopyBuffer(OutputBufferIndex, InputBufferIndex);
}
}
}
@@ -106,14 +106,12 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
Span<float> outputBuffer = context.GetBuffer(OutputBufferIndex);

DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation()
DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation
{
State = State,
SourceSampleRate = SampleRate,
SampleFormat = SampleFormat,
Pitch = Pitch,
DecodingBehaviour = DecodingBehaviour,
WaveBuffers = WaveBuffers,
ExtraParameter = ExtraParameter,
ExtraParameterSize = ExtraParameterSize,
ChannelIndex = (int)ChannelIndex,

@@ -121,7 +119,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
SrcQuality = SrcQuality
};

DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, info, context.SampleRate, (int)context.SampleCount);
DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, ref info, WaveBuffers, ref State.Span[0], context.SampleRate, (int)context.SampleCount);
}
}
}
@@ -20,6 +20,7 @@ using Ryujinx.Audio.Renderer.Parameter.Effect;
using Ryujinx.Audio.Renderer.Server.Effect;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -64,10 +65,9 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

private void ProcessDelayMono(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe void ProcessDelayMono(ref DelayState state, float* outputBuffer, float* inputBuffer, uint sampleCount)
{
ref DelayState state = ref State.Span[0];

float feedbackGain = FixedPointHelper.ToFloat(Parameter.FeedbackGain, FixedPointPrecision);
float inGain = FixedPointHelper.ToFloat(Parameter.InGain, FixedPointPrecision);
float dryGain = FixedPointHelper.ToFloat(Parameter.DryGain, FixedPointPrecision);

@@ -88,13 +88,14 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

private void ProcessDelayStereo(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe void ProcessDelayStereo(ref DelayState state, Span<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ref DelayState state = ref State.Span[0];
const ushort channelCount = 2;

float[] channelInput = new float[Parameter.ChannelCount];
float[] delayLineValues = new float[Parameter.ChannelCount];
float[] temp = new float[Parameter.ChannelCount];
Span<float> channelInput = stackalloc float[channelCount];
Span<float> delayLineValues = stackalloc float[channelCount];
Span<float> temp = stackalloc float[channelCount];

float delayFeedbackBaseGain = state.DelayFeedbackBaseGain;
float delayFeedbackCrossGain = state.DelayFeedbackCrossGain;

@@ -104,34 +105,35 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

for (int i = 0; i < sampleCount; i++)
{
for (int j = 0; j < Parameter.ChannelCount; j++)
for (int j = 0; j < channelCount; j++)
{
channelInput[j] = inputBuffers[j].Span[i] * 64;
channelInput[j] = *((float*)inputBuffers[j] + i) * 64;
delayLineValues[j] = state.DelayLines[j].Read();
}

temp[0] = channelInput[0] * inGain + delayLineValues[1] * delayFeedbackCrossGain + delayLineValues[0] * delayFeedbackBaseGain;
temp[1] = channelInput[1] * inGain + delayLineValues[0] * delayFeedbackCrossGain + delayLineValues[1] * delayFeedbackBaseGain;

for (int j = 0; j < Parameter.ChannelCount; j++)
for (int j = 0; j < channelCount; j++)
{
float lowPassResult = state.LowPassFeedbackGain * state.LowPassZ[j] + temp[j] * state.LowPassBaseGain;

state.LowPassZ[j] = lowPassResult;
state.DelayLines[j].Update(lowPassResult);

outputBuffers[j].Span[i] = (channelInput[j] * dryGain + delayLineValues[j] * outGain) / 64;
*((float*)outputBuffers[j] + i) = (channelInput[j] * dryGain + delayLineValues[j] * outGain) / 64;
}
}
}

private void ProcessDelayQuadraphonic(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe void ProcessDelayQuadraphonic(ref DelayState state, Span<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ref DelayState state = ref State.Span[0];
const ushort channelCount = 4;

float[] channelInput = new float[Parameter.ChannelCount];
float[] delayLineValues = new float[Parameter.ChannelCount];
float[] temp = new float[Parameter.ChannelCount];
Span<float> channelInput = stackalloc float[channelCount];
Span<float> delayLineValues = stackalloc float[channelCount];
Span<float> temp = stackalloc float[channelCount];

float delayFeedbackBaseGain = state.DelayFeedbackBaseGain;
float delayFeedbackCrossGain = state.DelayFeedbackCrossGain;

@@ -141,9 +143,9 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

for (int i = 0; i < sampleCount; i++)
{
for (int j = 0; j < Parameter.ChannelCount; j++)
for (int j = 0; j < channelCount; j++)
{
channelInput[j] = inputBuffers[j].Span[i] * 64;
channelInput[j] = *((float*)inputBuffers[j] + i) * 64;
delayLineValues[j] = state.DelayLines[j].Read();
}

@@ -152,25 +154,26 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
temp[2] = channelInput[2] * inGain + (delayLineValues[3] + delayLineValues[0]) * delayFeedbackCrossGain + delayLineValues[2] * delayFeedbackBaseGain;
temp[3] = channelInput[3] * inGain + (delayLineValues[1] + delayLineValues[2]) * delayFeedbackCrossGain + delayLineValues[3] * delayFeedbackBaseGain;

for (int j = 0; j < Parameter.ChannelCount; j++)
for (int j = 0; j < channelCount; j++)
{
float lowPassResult = state.LowPassFeedbackGain * state.LowPassZ[j] + temp[j] * state.LowPassBaseGain;

state.LowPassZ[j] = lowPassResult;
state.DelayLines[j].Update(lowPassResult);

outputBuffers[j].Span[i] = (channelInput[j] * dryGain + delayLineValues[j] * outGain) / 64;
*((float*)outputBuffers[j] + i) = (channelInput[j] * dryGain + delayLineValues[j] * outGain) / 64;
}
}
}

private void ProcessDelaySurround(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe void ProcessDelaySurround(ref DelayState state, Span<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ref DelayState state = ref State.Span[0];
const ushort channelCount = 6;

float[] channelInput = new float[Parameter.ChannelCount];
float[] delayLineValues = new float[Parameter.ChannelCount];
float[] temp = new float[Parameter.ChannelCount];
Span<float> channelInput = stackalloc float[channelCount];
Span<float> delayLineValues = stackalloc float[channelCount];
Span<float> temp = stackalloc float[channelCount];

float delayFeedbackBaseGain = state.DelayFeedbackBaseGain;
float delayFeedbackCrossGain = state.DelayFeedbackCrossGain;

@@ -180,9 +183,9 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

for (int i = 0; i < sampleCount; i++)
{
for (int j = 0; j < Parameter.ChannelCount; j++)
for (int j = 0; j < channelCount; j++)
{
channelInput[j] = inputBuffers[j].Span[i] * 64;
channelInput[j] = *((float*)inputBuffers[j] + i) * 64;
delayLineValues[j] = state.DelayLines[j].Read();
}

@@ -193,49 +196,49 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
temp[4] = channelInput[4] * inGain + (delayLineValues[0] + delayLineValues[1]) * delayFeedbackCrossGain + delayLineValues[4] * delayFeedbackBaseGain;
temp[5] = channelInput[5] * inGain + delayLineValues[5] * delayFeedbackBaseGain;

for (int j = 0; j < Parameter.ChannelCount; j++)
for (int j = 0; j < channelCount; j++)
{
float lowPassResult = state.LowPassFeedbackGain * state.LowPassZ[j] + temp[j] * state.LowPassBaseGain;

state.LowPassZ[j] = lowPassResult;
state.DelayLines[j].Update(lowPassResult);

outputBuffers[j].Span[i] = (channelInput[j] * dryGain + delayLineValues[j] * outGain) / 64;
*((float*)outputBuffers[j] + i) = (channelInput[j] * dryGain + delayLineValues[j] * outGain) / 64;
}
}
}

private void ProcessDelay(CommandList context)
private unsafe void ProcessDelay(CommandList context, ref DelayState state)
{
Debug.Assert(Parameter.IsChannelCountValid());

if (IsEffectEnabled && Parameter.IsChannelCountValid())
{
ReadOnlyMemory<float>[] inputBuffers = new ReadOnlyMemory<float>[Parameter.ChannelCount];
Memory<float>[] outputBuffers = new Memory<float>[Parameter.ChannelCount];
Span<IntPtr> inputBuffers = stackalloc IntPtr[Parameter.ChannelCount];
Span<IntPtr> outputBuffers = stackalloc IntPtr[Parameter.ChannelCount];

for (int i = 0; i < Parameter.ChannelCount; i++)
{
inputBuffers[i] = context.GetBufferMemory(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferMemory(OutputBufferIndices[i]);
inputBuffers[i] = context.GetBufferPointer(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferPointer(OutputBufferIndices[i]);
}

switch (Parameter.ChannelCount)
{
case 1:
ProcessDelayMono(outputBuffers[0].Span, inputBuffers[0].Span, context.SampleCount);
ProcessDelayMono(ref state, (float*)outputBuffers[0], (float*)inputBuffers[0], context.SampleCount);
break;
case 2:
ProcessDelayStereo(outputBuffers, inputBuffers, context.SampleCount);
ProcessDelayStereo(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 4:
ProcessDelayQuadraphonic(outputBuffers, inputBuffers, context.SampleCount);
ProcessDelayQuadraphonic(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 6:
ProcessDelaySurround(outputBuffers, inputBuffers, context.SampleCount);
ProcessDelaySurround(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
default:
throw new NotImplementedException($"{Parameter.ChannelCount}");
throw new NotImplementedException(Parameter.ChannelCount.ToString());
}
}
else

@@ -244,7 +247,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
if (InputBufferIndices[i] != OutputBufferIndices[i])
{
context.GetBufferMemory(InputBufferIndices[i]).CopyTo(context.GetBufferMemory(OutputBufferIndices[i]));
context.CopyBuffer(OutputBufferIndices[i], InputBufferIndices[i]);
}
}
}

@@ -266,7 +269,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

ProcessDelay(context);
ProcessDelay(context, ref state);
}
}
}
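The delay path above (and, further down, the reverb and limiter paths) replaces per-call heap arrays with stackalloc spans, since the channel count is a small compile-time constant. A small standalone sketch of that change, with hypothetical helper names and placeholder gain math:

using System;

static class StackallocSketch
{
    // Before: three short-lived float[] allocations per call added GC pressure
    // on the audio thread. After: the temporaries live on the stack.
    public static void MixStereoFrame(ReadOnlySpan<float> input, Span<float> output)
    {
        const int channelCount = 2;

        Span<float> channelInput = stackalloc float[channelCount];
        Span<float> temp = stackalloc float[channelCount];

        for (int j = 0; j < channelCount; j++)
        {
            channelInput[j] = input[j] * 64f;
            temp[j] = channelInput[j] * 0.5f; // placeholder gain math
            output[j] = temp[j] / 64f;
        }
    }
}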
@@ -16,6 +16,8 @@
//

using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -37,8 +39,6 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

public Memory<float> DepopBuffer { get; }

private const int FixedPointPrecisionForDecay = 15;

public DepopForMixBuffersCommand(Memory<float> depopBuffer, uint bufferOffset, uint mixBufferCount, int nodeId, uint sampleRate)
{
Enabled = true;

@@ -57,10 +57,13 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

private float ProcessDepopMix(Span<float> buffer, float depopValue, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe float ProcessDepopMix(float* buffer, float depopValue, uint sampleCount)
{
if (depopValue <= 0)
if (depopValue < 0)
{
depopValue = -depopValue;

for (int i = 0; i < sampleCount; i++)
{
depopValue = FloatingPointHelper.MultiplyRoundDown(Decay, depopValue);

@@ -81,21 +84,25 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

return depopValue;
}

}

public void Process(CommandList context)
{
Span<float> depopBuffer = DepopBuffer.Span;

uint bufferCount = Math.Min(MixBufferOffset + MixBufferCount, context.BufferCount);

for (int i = (int)MixBufferOffset; i < bufferCount; i++)
{
float depopValue = DepopBuffer.Span[i];
float depopValue = depopBuffer[i];
if (depopValue != 0)
{
Span<float> buffer = context.GetBuffer(i);
unsafe
{
float* buffer = (float*)context.GetBufferPointer(i);

DepopBuffer.Span[i] = ProcessDepopMix(buffer, depopValue, context.SampleCount);
depopBuffer[i] = ProcessDepopMix(buffer, depopValue, context.SampleCount);
}
}
}
}
@@ -58,11 +58,13 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
ref VoiceUpdateState state = ref State.Span[0];

Span<float> depopBuffer = DepopBuffer.Span;

for (int i = 0; i < MixBufferCount; i++)
{
if (state.LastSamples[i] != 0)
{
DepopBuffer.Span[OutputBufferIndices[i]] += state.LastSamples[i];
depopBuffer[OutputBufferIndices[i]] += state.LastSamples[i];

state.LastSamples[i] = 0;
}
@@ -35,7 +35,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

public float[] Coefficients { get; }

public DownMixSurroundToStereoCommand(uint bufferOffset, Span<byte> inputBufferOffset, Span<byte> outputBufferOffset, ReadOnlySpan<float> downMixParameter, int nodeId)
public DownMixSurroundToStereoCommand(uint bufferOffset, Span<byte> inputBufferOffset, Span<byte> outputBufferOffset, float[] downMixParameter, int nodeId)
{
Enabled = true;
NodeId = nodeId;

@@ -49,7 +49,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
OutputBufferIndices[i] = (ushort)(bufferOffset + outputBufferOffset[i]);
}

Coefficients = downMixParameter.ToArray();
Coefficients = downMixParameter;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]

@@ -69,10 +69,6 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

Span<float> stereoLeft = context.GetBuffer(OutputBufferIndices[0]);
Span<float> stereoRight = context.GetBuffer(OutputBufferIndices[1]);
Span<float> unused2 = context.GetBuffer(OutputBufferIndices[2]);
Span<float> unused3 = context.GetBuffer(OutputBufferIndices[3]);
Span<float> unused4 = context.GetBuffer(OutputBufferIndices[4]);
Span<float> unused5 = context.GetBuffer(OutputBufferIndices[5]);

for (int i = 0; i < context.SampleCount; i++)
{

@@ -80,10 +76,10 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
stereoRight[i] = DownMixSurroundToStereo(Coefficients, backRight[i], lowFrequency[i], frontCenter[i], frontRight[i]);
}

unused2.Fill(0);
unused3.Fill(0);
unused4.Fill(0);
unused5.Fill(0);
context.ClearBuffer(OutputBufferIndices[2]);
context.ClearBuffer(OutputBufferIndices[3]);
context.ClearBuffer(OutputBufferIndices[4]);
context.ClearBuffer(OutputBufferIndices[5]);
}
}
}
@@ -19,6 +19,7 @@ using Ryujinx.Audio.Renderer.Dsp.State;
using Ryujinx.Audio.Renderer.Parameter.Effect;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -77,31 +78,29 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

ProcessLimiter(context);
ProcessLimiter(context, ref state);
}

private void ProcessLimiter(CommandList context)
private unsafe void ProcessLimiter(CommandList context, ref LimiterState state)
{
Debug.Assert(Parameter.IsChannelCountValid());

if (IsEffectEnabled && Parameter.IsChannelCountValid())
{
ref LimiterState state = ref State.Span[0];

ReadOnlyMemory<float>[] inputBuffers = new ReadOnlyMemory<float>[Parameter.ChannelCount];
Memory<float>[] outputBuffers = new Memory<float>[Parameter.ChannelCount];
Span<IntPtr> inputBuffers = stackalloc IntPtr[Parameter.ChannelCount];
Span<IntPtr> outputBuffers = stackalloc IntPtr[Parameter.ChannelCount];

for (int i = 0; i < Parameter.ChannelCount; i++)
{
inputBuffers[i] = context.GetBufferMemory(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferMemory(OutputBufferIndices[i]);
inputBuffers[i] = context.GetBufferPointer(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferPointer(OutputBufferIndices[i]);
}

for (int channelIndex = 0; channelIndex < Parameter.ChannelCount; channelIndex++)
{
for (int sampleIndex = 0; sampleIndex < context.SampleCount; sampleIndex++)
{
float inputSample = inputBuffers[channelIndex].Span[sampleIndex];
float inputSample = *((float*)inputBuffers[channelIndex] + sampleIndex);

float sampleInputMax = Math.Abs(inputSample * Parameter.InputGain);

@@ -132,7 +131,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

ref float delayedSample = ref state.DelayedSampleBuffer[channelIndex * Parameter.DelayBufferSampleCountMax + state.DelayedSampleBufferPosition[channelIndex]];

outputBuffers[channelIndex].Span[sampleIndex] = delayedSample * state.CompressionGain[channelIndex] * Parameter.OutputGain;
*((float*)outputBuffers[channelIndex] + sampleIndex) = delayedSample * state.CompressionGain[channelIndex] * Parameter.OutputGain;

delayedSample = inputSample;

@@ -151,7 +150,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
if (InputBufferIndices[i] != OutputBufferIndices[i])
{
context.GetBufferMemory(InputBufferIndices[i]).CopyTo(context.GetBufferMemory(OutputBufferIndices[i]));
context.CopyBuffer(OutputBufferIndices[i], InputBufferIndices[i]);
}
}
}
@@ -20,6 +20,7 @@ using Ryujinx.Audio.Renderer.Parameter;
using Ryujinx.Audio.Renderer.Parameter.Effect;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command

@@ -81,17 +82,15 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

ProcessLimiter(context);
ProcessLimiter(context, ref state);
}

private void ProcessLimiter(CommandList context)
private unsafe void ProcessLimiter(CommandList context, ref LimiterState state)
{
Debug.Assert(Parameter.IsChannelCountValid());

if (IsEffectEnabled && Parameter.IsChannelCountValid())
{
ref LimiterState state = ref State.Span[0];

if (!ResultState.IsEmpty && Parameter.StatisticsReset)
{
ref LimiterStatistics statistics = ref MemoryMarshal.Cast<byte, LimiterStatistics>(ResultState.Span[0].SpecificData)[0];

@@ -99,20 +98,20 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
statistics.Reset();
}

ReadOnlyMemory<float>[] inputBuffers = new ReadOnlyMemory<float>[Parameter.ChannelCount];
Memory<float>[] outputBuffers = new Memory<float>[Parameter.ChannelCount];
Span<IntPtr> inputBuffers = stackalloc IntPtr[Parameter.ChannelCount];
Span<IntPtr> outputBuffers = stackalloc IntPtr[Parameter.ChannelCount];

for (int i = 0; i < Parameter.ChannelCount; i++)
{
inputBuffers[i] = context.GetBufferMemory(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferMemory(OutputBufferIndices[i]);
inputBuffers[i] = context.GetBufferPointer(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferPointer(OutputBufferIndices[i]);
}

for (int channelIndex = 0; channelIndex < Parameter.ChannelCount; channelIndex++)
{
for (int sampleIndex = 0; sampleIndex < context.SampleCount; sampleIndex++)
{
float inputSample = inputBuffers[channelIndex].Span[sampleIndex];
float inputSample = *((float*)inputBuffers[channelIndex] + sampleIndex);

float sampleInputMax = Math.Abs(inputSample * Parameter.InputGain);

@@ -143,7 +142,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

ref float delayedSample = ref state.DelayedSampleBuffer[channelIndex * Parameter.DelayBufferSampleCountMax + state.DelayedSampleBufferPosition[channelIndex]];

outputBuffers[channelIndex].Span[sampleIndex] = delayedSample * state.CompressionGain[channelIndex] * Parameter.OutputGain;
*((float*)outputBuffers[channelIndex] + sampleIndex) = delayedSample * state.CompressionGain[channelIndex] * Parameter.OutputGain;

delayedSample = inputSample;

@@ -170,7 +169,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
if (InputBufferIndices[i] != OutputBufferIndices[i])
{
context.GetBufferMemory(InputBufferIndices[i]).CopyTo(context.GetBufferMemory(OutputBufferIndices[i]));
context.CopyBuffer(OutputBufferIndices[i], InputBufferIndices[i]);
}
}
}
@@ -50,6 +50,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
Volume = volume;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessMixAvx(Span<float> outputMix, ReadOnlySpan<float> inputMix)
{
Vector256<float> volumeVec = Vector256.Create(Volume);

@@ -70,6 +71,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessMixSse41(Span<float> outputMix, ReadOnlySpan<float> inputMix)
{
Vector128<float> volumeVec = Vector128.Create(Volume);

@@ -120,6 +122,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessMix(Span<float> outputMix, ReadOnlySpan<float> inputMix)
{
if (Avx.IsSupported)
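The mix command only gains [MethodImpl(MethodImplOptions.AggressiveInlining)] on its SIMD helpers here. For context, a hedged, self-contained sketch of a volume-scaled mix loop in the spirit of ProcessMixAvx (the actual implementation may differ in load strategy and remainder handling, and is guarded by Avx.IsSupported as shown above):

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

static class MixSketch
{
    // Caller is expected to check Avx.IsSupported first, as in ProcessMix.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public static void MixAvx(Span<float> outputMix, ReadOnlySpan<float> inputMix, float volume)
    {
        Vector256<float> volumeVec = Vector256.Create(volume);

        Span<Vector256<float>> outputVec = MemoryMarshal.Cast<float, Vector256<float>>(outputMix);
        ReadOnlySpan<Vector256<float>> inputVec = MemoryMarshal.Cast<float, Vector256<float>>(inputMix);

        for (int i = 0; i < inputVec.Length; i++)
        {
            // output += input * volume, eight floats at a time.
            outputVec[i] = Avx.Add(outputVec[i], Avx.Multiply(inputVec[i], volumeVec));
        }

        // Scalar tail for lengths that are not a multiple of 8.
        for (int i = inputVec.Length * 8; i < inputMix.Length; i++)
        {
            outputMix[i] += inputMix[i] * volume;
        }
    }
}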
@@ -17,6 +17,7 @@

using Ryujinx.Audio.Renderer.Common;
using System;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -55,6 +56,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
LastSampleIndex = lastSampleIndex;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private float ProcessMixRamp(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer, int sampleCount)
{
float ramp = (Volume1 - Volume0) / sampleCount;
@@ -17,6 +17,7 @@

using Ryujinx.Audio.Renderer.Common;
using System;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -63,6 +64,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
State = state;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private float ProcessMixRampGrouped(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer, float volume0, float volume1, int sampleCount)
{
float ramp = (volume1 - volume0) / sampleCount;
@@ -73,21 +73,19 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
Span<float> outputBuffer = context.GetBuffer(OutputBufferIndex);

DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation()
DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation
{
State = State,
SourceSampleRate = SampleRate,
SampleFormat = SampleFormat.PcmInt16,
Pitch = Pitch,
DecodingBehaviour = DecodingBehaviour,
WaveBuffers = WaveBuffers,
ExtraParameter = 0,
ExtraParameterSize = 0,
ChannelIndex = (int)ChannelIndex,
ChannelCount = (int)ChannelCount,
};

DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, info, context.SampleRate, (int)context.SampleCount);
DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, ref info, WaveBuffers, ref State.Span[0], context.SampleRate, (int)context.SampleCount);
}
}
}
@@ -73,21 +73,19 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
Span<float> outputBuffer = context.GetBuffer(OutputBufferIndex);

DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation()
DataSourceHelper.WaveBufferInformation info = new DataSourceHelper.WaveBufferInformation
{
State = State,
SourceSampleRate = SampleRate,
SampleFormat = SampleFormat.PcmInt16,
Pitch = Pitch,
DecodingBehaviour = DecodingBehaviour,
WaveBuffers = WaveBuffers,
ExtraParameter = 0,
ExtraParameterSize = 0,
ChannelIndex = (int)ChannelIndex,
ChannelCount = (int)ChannelCount,
};

DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, info, context.SampleRate, (int)context.SampleCount);
DataSourceHelper.ProcessWaveBuffers(context.MemoryManager, outputBuffer, ref info, WaveBuffers, ref State.Span[0], context.SampleRate, (int)context.SampleCount);
}
}
}
@@ -20,6 +20,7 @@ using Ryujinx.Audio.Renderer.Parameter.Effect;
using Ryujinx.Audio.Renderer.Server.Effect;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -81,44 +82,46 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

private void ProcessReverb3dMono(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverb3dMono(ref Reverb3dState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverb3dGeneric(outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableMono, TargetEarlyDelayLineIndicesTableMono, TargetOutputFeedbackIndicesTableMono);
ProcessReverb3dGeneric(ref state, outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableMono, TargetEarlyDelayLineIndicesTableMono, TargetOutputFeedbackIndicesTableMono);
}

private void ProcessReverb3dStereo(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverb3dStereo(ref Reverb3dState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverb3dGeneric(outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableStereo, TargetEarlyDelayLineIndicesTableStereo, TargetOutputFeedbackIndicesTableStereo);
ProcessReverb3dGeneric(ref state, outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableStereo, TargetEarlyDelayLineIndicesTableStereo, TargetOutputFeedbackIndicesTableStereo);
}

private void ProcessReverb3dQuadraphonic(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverb3dQuadraphonic(ref Reverb3dState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverb3dGeneric(outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableQuadraphonic, TargetEarlyDelayLineIndicesTableQuadraphonic, TargetOutputFeedbackIndicesTableQuadraphonic);
ProcessReverb3dGeneric(ref state, outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableQuadraphonic, TargetEarlyDelayLineIndicesTableQuadraphonic, TargetOutputFeedbackIndicesTableQuadraphonic);
}

private void ProcessReverb3dSurround(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverb3dSurround(ref Reverb3dState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverb3dGeneric(outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableSurround, TargetEarlyDelayLineIndicesTableSurround, TargetOutputFeedbackIndicesTableSurround);
ProcessReverb3dGeneric(ref state, outputBuffers, inputBuffers, sampleCount, OutputEarlyIndicesTableSurround, TargetEarlyDelayLineIndicesTableSurround, TargetOutputFeedbackIndicesTableSurround);
}

private void ProcessReverb3dGeneric(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount, ReadOnlySpan<int> outputEarlyIndicesTable, ReadOnlySpan<int> targetEarlyDelayLineIndicesTable, ReadOnlySpan<int> targetOutputFeedbackIndicesTable)
private unsafe void ProcessReverb3dGeneric(ref Reverb3dState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount, ReadOnlySpan<int> outputEarlyIndicesTable, ReadOnlySpan<int> targetEarlyDelayLineIndicesTable, ReadOnlySpan<int> targetOutputFeedbackIndicesTable)
{
const int delayLineSampleIndexOffset = 1;

ref Reverb3dState state = ref State.Span[0];

bool isMono = Parameter.ChannelCount == 1;
bool isSurround = Parameter.ChannelCount == 6;

float[] outputValues = new float[Constants.ChannelCountMax];
float[] channelInput = new float[Parameter.ChannelCount];
float[] feedbackValues = new float[4];
float[] feedbackOutputValues = new float[4];
float[] values = new float[4];
Span<float> outputValues = stackalloc float[Constants.ChannelCountMax];
Span<float> channelInput = stackalloc float[Parameter.ChannelCount];
Span<float> feedbackValues = stackalloc float[4];
Span<float> feedbackOutputValues = stackalloc float[4];
Span<float> values = stackalloc float[4];

for (int sampleIndex = 0; sampleIndex < sampleCount; sampleIndex++)
{
outputValues.AsSpan().Fill(0);
outputValues.Fill(0);

float tapOut = state.PreDelayLine.TapUnsafe(state.ReflectionDelayTime, delayLineSampleIndexOffset);

@@ -136,7 +139,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

for (int channelIndex = 0; channelIndex < Parameter.ChannelCount; channelIndex++)
{
channelInput[channelIndex] = inputBuffers[channelIndex].Span[sampleIndex];
channelInput[channelIndex] = *((float*)inputBuffers[channelIndex] + sampleIndex);
targetPreDelayValue += channelInput[channelIndex];
}

@@ -180,53 +183,53 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

if (targetOutputFeedbackIndex >= 0)
{
outputBuffers[channelIndex].Span[sampleIndex] = (outputValues[channelIndex] + values[targetOutputFeedbackIndex] + channelInput[channelIndex] * state.DryGain);
*((float*)outputBuffers[channelIndex] + sampleIndex) = (outputValues[channelIndex] + values[targetOutputFeedbackIndex] + channelInput[channelIndex] * state.DryGain);
}
}

if (isMono)
{
outputBuffers[0].Span[sampleIndex] += values[1];
*((float*)outputBuffers[0] + sampleIndex) += values[1];
}

if (isSurround)
{
outputBuffers[4].Span[sampleIndex] += (outputValues[4] + state.BackLeftDelayLine.Update((values[2] - values[3]) * 0.5f) + channelInput[4] * state.DryGain);
*((float*)outputBuffers[4] + sampleIndex) += (outputValues[4] + state.BackLeftDelayLine.Update((values[2] - values[3]) * 0.5f) + channelInput[4] * state.DryGain);
}
}
}

public void ProcessReverb3d(CommandList context)
public void ProcessReverb3d(CommandList context, ref Reverb3dState state)
{
Debug.Assert(Parameter.IsChannelCountValid());

if (IsEffectEnabled && Parameter.IsChannelCountValid())
{
ReadOnlyMemory<float>[] inputBuffers = new ReadOnlyMemory<float>[Parameter.ChannelCount];
Memory<float>[] outputBuffers = new Memory<float>[Parameter.ChannelCount];
Span<IntPtr> inputBuffers = stackalloc IntPtr[Parameter.ChannelCount];
Span<IntPtr> outputBuffers = stackalloc IntPtr[Parameter.ChannelCount];

for (int i = 0; i < Parameter.ChannelCount; i++)
{
inputBuffers[i] = context.GetBufferMemory(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferMemory(OutputBufferIndices[i]);
inputBuffers[i] = context.GetBufferPointer(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferPointer(OutputBufferIndices[i]);
}

switch (Parameter.ChannelCount)
{
case 1:
ProcessReverb3dMono(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverb3dMono(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 2:
ProcessReverb3dStereo(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverb3dStereo(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 4:
ProcessReverb3dQuadraphonic(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverb3dQuadraphonic(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 6:
ProcessReverb3dSurround(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverb3dSurround(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
default:
throw new NotImplementedException($"{Parameter.ChannelCount}");
throw new NotImplementedException(Parameter.ChannelCount.ToString());
}
}
else

@@ -235,7 +238,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
if (InputBufferIndices[i] != OutputBufferIndices[i])
{
context.GetBufferMemory(InputBufferIndices[i]).CopyTo(context.GetBufferMemory(OutputBufferIndices[i]));
context.CopyBuffer(OutputBufferIndices[i], InputBufferIndices[i]);
}
}
}

@@ -257,7 +260,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

ProcessReverb3d(context);
ProcessReverb3d(context, ref state);
}
}
}
@@ -19,6 +19,7 @@ using Ryujinx.Audio.Renderer.Dsp.State;
using Ryujinx.Audio.Renderer.Parameter.Effect;
using System;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@@ -86,9 +87,11 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
IsLongSizePreDelaySupported = isLongSizePreDelaySupported;
}

private void ProcessReverbMono(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverbMono(ref ReverbState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverbGeneric(outputBuffers,
ProcessReverbGeneric(ref state,
outputBuffers,
inputBuffers,
sampleCount,
OutputEarlyIndicesTableMono,

@@ -97,9 +100,11 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
OutputIndicesTableMono);
}

private void ProcessReverbStereo(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverbStereo(ref ReverbState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverbGeneric(outputBuffers,
ProcessReverbGeneric(ref state,
outputBuffers,
inputBuffers,
sampleCount,
OutputEarlyIndicesTableStereo,

@@ -108,9 +113,11 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
OutputIndicesTableStereo);
}

private void ProcessReverbQuadraphonic(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverbQuadraphonic(ref ReverbState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverbGeneric(outputBuffers,
ProcessReverbGeneric(ref state,
outputBuffers,
inputBuffers,
sampleCount,
OutputEarlyIndicesTableQuadraphonic,

@@ -119,9 +126,11 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
OutputIndicesTableQuadraphonic);
}

private void ProcessReverbSurround(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount)
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessReverbSurround(ref ReverbState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount)
{
ProcessReverbGeneric(outputBuffers,
ProcessReverbGeneric(ref state,
outputBuffers,
inputBuffers,
sampleCount,
OutputEarlyIndicesTableSurround,

@@ -130,10 +139,8 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
OutputIndicesTableSurround);
}

private void ProcessReverbGeneric(Memory<float>[] outputBuffers, ReadOnlyMemory<float>[] inputBuffers, uint sampleCount, ReadOnlySpan<int> outputEarlyIndicesTable, ReadOnlySpan<int> targetEarlyDelayLineIndicesTable, ReadOnlySpan<int> targetOutputFeedbackIndicesTable, ReadOnlySpan<int> outputIndicesTable)
private unsafe void ProcessReverbGeneric(ref ReverbState state, ReadOnlySpan<IntPtr> outputBuffers, ReadOnlySpan<IntPtr> inputBuffers, uint sampleCount, ReadOnlySpan<int> outputEarlyIndicesTable, ReadOnlySpan<int> targetEarlyDelayLineIndicesTable, ReadOnlySpan<int> targetOutputFeedbackIndicesTable, ReadOnlySpan<int> outputIndicesTable)
{
ref ReverbState state = ref State.Span[0];

bool isSurround = Parameter.ChannelCount == 6;

float reverbGain = FixedPointHelper.ToFloat(Parameter.ReverbGain, FixedPointPrecision);

@@ -141,14 +148,14 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
float outGain = FixedPointHelper.ToFloat(Parameter.OutGain, FixedPointPrecision);
float dryGain = FixedPointHelper.ToFloat(Parameter.DryGain, FixedPointPrecision);

float[] outputValues = new float[Constants.ChannelCountMax];
float[] feedbackValues = new float[4];
float[] feedbackOutputValues = new float[4];
float[] channelInput = new float[Parameter.ChannelCount];
Span<float> outputValues = stackalloc float[Constants.ChannelCountMax];
Span<float> feedbackValues = stackalloc float[4];
Span<float> feedbackOutputValues = stackalloc float[4];
Span<float> channelInput = stackalloc float[Parameter.ChannelCount];

for (int sampleIndex = 0; sampleIndex < sampleCount; sampleIndex++)
{
outputValues.AsSpan().Fill(0);
outputValues.Fill(0);

for (int i = 0; i < targetEarlyDelayLineIndicesTable.Length; i++)
{

@@ -169,7 +176,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

for (int channelIndex = 0; channelIndex < Parameter.ChannelCount; channelIndex++)
{
channelInput[channelIndex] = inputBuffers[channelIndex].Span[sampleIndex] * 64;
channelInput[channelIndex] = *((float*)inputBuffers[channelIndex] + sampleIndex) * 64;
targetPreDelayValue += channelInput[channelIndex] * reverbGain;
}

@@ -212,42 +219,42 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command

for (int channelIndex = 0; channelIndex < Parameter.ChannelCount; channelIndex++)
{
outputBuffers[channelIndex].Span[sampleIndex] = (outputValues[channelIndex] * outGain + channelInput[channelIndex] * dryGain) / 64;
*((float*)outputBuffers[channelIndex] + sampleIndex) = (outputValues[channelIndex] * outGain + channelInput[channelIndex] * dryGain) / 64;
}
}
}

private void ProcessReverb(CommandList context)
private void ProcessReverb(CommandList context, ref ReverbState state)
{
Debug.Assert(Parameter.IsChannelCountValid());

if (IsEffectEnabled && Parameter.IsChannelCountValid())
{
ReadOnlyMemory<float>[] inputBuffers = new ReadOnlyMemory<float>[Parameter.ChannelCount];
Memory<float>[] outputBuffers = new Memory<float>[Parameter.ChannelCount];
Span<IntPtr> inputBuffers = stackalloc IntPtr[Parameter.ChannelCount];
Span<IntPtr> outputBuffers = stackalloc IntPtr[Parameter.ChannelCount];

for (int i = 0; i < Parameter.ChannelCount; i++)
{
inputBuffers[i] = context.GetBufferMemory(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferMemory(OutputBufferIndices[i]);
inputBuffers[i] = context.GetBufferPointer(InputBufferIndices[i]);
outputBuffers[i] = context.GetBufferPointer(OutputBufferIndices[i]);
}

switch (Parameter.ChannelCount)
{
case 1:
ProcessReverbMono(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverbMono(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 2:
ProcessReverbStereo(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverbStereo(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 4:
ProcessReverbQuadraphonic(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverbQuadraphonic(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
case 6:
ProcessReverbSurround(outputBuffers, inputBuffers, context.SampleCount);
ProcessReverbSurround(ref state, outputBuffers, inputBuffers, context.SampleCount);
break;
default:
throw new NotImplementedException($"{Parameter.ChannelCount}");
throw new NotImplementedException(Parameter.ChannelCount.ToString());
}
}
else

@@ -256,7 +263,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
{
if (InputBufferIndices[i] != OutputBufferIndices[i])
{
context.GetBufferMemory(InputBufferIndices[i]).CopyTo(context.GetBufferMemory(OutputBufferIndices[i]));
context.CopyBuffer(OutputBufferIndices[i], InputBufferIndices[i]);
}
}
}

@@ -278,7 +285,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

ProcessReverb(context);
ProcessReverb(context, ref state);
}
}
}
@@ -61,7 +61,6 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
UpsamplerInfo = info;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private Span<float> GetBuffer(int index, int sampleCount)
{
return UpsamplerInfo.OutputBuffer.Span.Slice(index * sampleCount, sampleCount);
@@ -50,6 +50,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
Volume = volume;
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessVolumeAvx(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer)
{
Vector256<float> volumeVec = Vector256.Create(Volume);

@@ -70,6 +71,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessVolumeSse41(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer)
{
Vector128<float> volumeVec = Vector128.Create(Volume);

@@ -90,6 +92,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessVolumeAdvSimd(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer)
{
Vector128<float> volumeVec = Vector128.Create(Volume);

@@ -110,6 +113,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
}
}

[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void ProcessVolume(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer)
{
if (Avx.IsSupported)

@ -16,6 +16,7 @@
//

using System;
using System.Runtime.CompilerServices;

namespace Ryujinx.Audio.Renderer.Dsp.Command
{

@ -47,6 +48,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Command
            Volume1 = volume1;
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private void ProcessVolumeRamp(Span<float> outputBuffer, ReadOnlySpan<float> inputBuffer, int sampleCount)
        {
            float ramp = (Volume1 - Volume0) / sampleCount;
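ProcessVolumeRamp interpolates the gain linearly from Volume0 to Volume1 across the frame; the ramp value above is the per-sample increment. A minimal sketch of that loop (assumed shape for illustration, not the literal Ryujinx code):

    using System;

    static class VolumeRampSketch
    {
        public static void ApplyVolumeRamp(Span<float> output, ReadOnlySpan<float> input, float volume0, float volume1, int sampleCount)
        {
            float ramp = (volume1 - volume0) / sampleCount;
            float volume = volume0;

            for (int i = 0; i < sampleCount; i++)
            {
                output[i] = input[i] * volume; // apply the current gain
                volume += ramp;                // step toward volume1
            }
        }
    }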

@ -21,7 +21,6 @@ using Ryujinx.Audio.Renderer.Dsp.State;
using Ryujinx.Common.Logging;
using Ryujinx.Memory;
using System;
using System.Buffers;
using System.Diagnostics;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

@ -36,50 +35,43 @@ namespace Ryujinx.Audio.Renderer.Dsp
    {
        private const int FixedPointPrecision = 15;

        public class WaveBufferInformation
        public struct WaveBufferInformation
        {
            public Memory<VoiceUpdateState> State;
            public uint SourceSampleRate;
            public SampleFormat SampleFormat;
            public float Pitch;
            public DecodingBehaviour DecodingBehaviour;
            public WaveBuffer[] WaveBuffers;
            public ulong ExtraParameter;
            public ulong ExtraParameterSize;
            public int ChannelIndex;
            public int ChannelCount;
            public DecodingBehaviour DecodingBehaviour;
            public SampleRateConversionQuality SrcQuality;
            public SampleFormat SampleFormat;
        }
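WaveBufferInformation changes from a class to a struct here, and the per-voice state and wave buffer list move out of it into explicit parameters of ProcessWaveBuffers below. A small illustration of the practical effect, using hypothetical types rather than the real ones:

    // Old shape: a heap allocation per use, reached through a reference the GC must track.
    class BoxedInfo
    {
        public float Pitch;
    }

    // New shape: a plain value type that lives inline in the caller's frame and is
    // handed to callees by ref, so there is no allocation and no struct copy per call.
    struct InlineInfo
    {
        public float Pitch;
    }

    static class StructSketch
    {
        public static void Consume(ref InlineInfo info)
        {
            info.Pitch *= 2.0f; // mutates the caller's value directly
        }
    }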

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static int GetPitchLimitBySrcQuality(SampleRateConversionQuality quality)
        {
            switch (quality)
            return quality switch
            {
                case SampleRateConversionQuality.Default:
                case SampleRateConversionQuality.Low:
                    return 4;
                case SampleRateConversionQuality.High:
                    return 8;
                default:
                    throw new ArgumentException($"{quality}");
            }
                SampleRateConversionQuality.Default or SampleRateConversionQuality.Low => 4,
                SampleRateConversionQuality.High => 8,
                _ => throw new ArgumentException(quality.ToString()),
            };
        }

        public static void ProcessWaveBuffers(IVirtualMemoryManager memoryManager, Span<float> outputBuffer, WaveBufferInformation info, uint targetSampleRate, int sampleCount)
        public static void ProcessWaveBuffers(IVirtualMemoryManager memoryManager, Span<float> outputBuffer, ref WaveBufferInformation info, Span<WaveBuffer> wavebuffers, ref VoiceUpdateState voiceState, uint targetSampleRate, int sampleCount)
        {
            const int tempBufferSize = 0x3F00;

            ref VoiceUpdateState state = ref info.State.Span[0];
            Span<short> tempBuffer = stackalloc short[tempBufferSize];

            short[] tempBuffer = ArrayPool<short>.Shared.Rent(tempBufferSize);
            float sampleRateRatio = (float)info.SourceSampleRate / targetSampleRate * info.Pitch;

            float sampleRateRatio = ((float)info.SourceSampleRate / targetSampleRate * info.Pitch);

            float fraction = state.Fraction;
            int waveBufferIndex = (int)state.WaveBufferIndex;
            ulong playedSampleCount = state.PlayedSampleCount;
            int offset = state.Offset;
            uint waveBufferConsumed = state.WaveBufferConsumed;
            float fraction = voiceState.Fraction;
            int waveBufferIndex = (int)voiceState.WaveBufferIndex;
            ulong playedSampleCount = voiceState.PlayedSampleCount;
            int offset = voiceState.Offset;
            uint waveBufferConsumed = voiceState.WaveBufferConsumed;

            int pitchMaxLength = GetPitchLimitBySrcQuality(info.SrcQuality);
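The two temp-buffer strategies visible above differ in where the scratch memory comes from. The size shown (0x3F00 shorts, roughly 31.5 KiB) fits on the DSP thread's stack, so the stackalloc form skips the pool entirely. A side-by-side sketch for illustration only, assuming System and System.Buffers are imported:

    const int tempBufferSize = 0x3F00; // 16128 samples of 16-bit PCM scratch space

    // Pooled array: no steady-state GC allocation, but each call pays the pool's
    // bookkeeping and must remember to return the array.
    short[] rented = ArrayPool<short>.Shared.Rent(tempBufferSize);
    try
    {
        // ... use rented ...
    }
    finally
    {
        ArrayPool<short>.Shared.Return(rented);
    }

    // stackalloc into Span<short>: the buffer lives in the current stack frame,
    // is freed automatically on return, and never touches the GC or the pool.
    Span<short> scratch = stackalloc short[tempBufferSize];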

@ -101,7 +93,7 @@ namespace Ryujinx.Audio.Renderer.Dsp

            if (!info.DecodingBehaviour.HasFlag(DecodingBehaviour.SkipPitchAndSampleRateConversion))
            {
                state.Pitch.ToSpan().Slice(0, pitchMaxLength).CopyTo(tempBuffer.AsSpan());
                voiceState.Pitch.ToSpan().Slice(0, pitchMaxLength).CopyTo(tempBuffer);
                tempBufferIndex += pitchMaxLength;
            }

@ -115,33 +107,31 @@ namespace Ryujinx.Audio.Renderer.Dsp
                {
                    if (waveBufferIndex >= Constants.VoiceWaveBufferCount)
                    {
                        Logger.Error?.Print(LogClass.AudioRenderer, $"Invalid WaveBuffer index {waveBufferIndex}");

                        waveBufferIndex = 0;
                        playedSampleCount = 0;
                    }

                    if (!state.IsWaveBufferValid[waveBufferIndex])
                    if (!voiceState.IsWaveBufferValid[waveBufferIndex])
                    {
                        isStarving = true;
                        break;
                    }

                    ref WaveBuffer waveBuffer = ref info.WaveBuffers[waveBufferIndex];
                    ref WaveBuffer waveBuffer = ref wavebuffers[waveBufferIndex];

                    if (offset == 0 && info.SampleFormat == SampleFormat.Adpcm && waveBuffer.Context != 0)
                    {
                        state.LoopContext = memoryManager.Read<AdpcmLoopContext>(waveBuffer.Context);
                        voiceState.LoopContext = memoryManager.Read<AdpcmLoopContext>(waveBuffer.Context);
                    }

                    Span<short> tempSpan = tempBuffer.AsSpan().Slice(tempBufferIndex + y);
                    Span<short> tempSpan = tempBuffer.Slice(tempBufferIndex + y);

                    int decodedSampleCount = -1;

                    int targetSampleStartOffset;
                    int targetSampleEndOffset;

                    if (state.LoopCount > 0 && waveBuffer.LoopStartSampleOffset != 0 && waveBuffer.LoopEndSampleOffset != 0 && waveBuffer.LoopStartSampleOffset <= waveBuffer.LoopEndSampleOffset)
                    if (voiceState.LoopCount > 0 && waveBuffer.LoopStartSampleOffset != 0 && waveBuffer.LoopEndSampleOffset != 0 && waveBuffer.LoopStartSampleOffset <= waveBuffer.LoopEndSampleOffset)
                    {
                        targetSampleStartOffset = (int)waveBuffer.LoopStartSampleOffset;
                        targetSampleEndOffset = (int)waveBuffer.LoopEndSampleOffset;

@ -166,7 +156,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
                            }

                            ReadOnlySpan<short> coefficients = MemoryMarshal.Cast<byte, short>(memoryManager.GetSpan(info.ExtraParameter, (int)info.ExtraParameterSize));
                            decodedSampleCount = AdpcmHelper.Decode(tempSpan, waveBufferAdpcm, targetSampleStartOffset, targetSampleEndOffset, offset, sampleCountToDecode - y, coefficients, ref state.LoopContext);
                            decodedSampleCount = AdpcmHelper.Decode(tempSpan, waveBufferAdpcm, targetSampleStartOffset, targetSampleEndOffset, offset, sampleCountToDecode - y, coefficients, ref voiceState.LoopContext);
                            break;
                        case SampleFormat.PcmInt16:
                            ReadOnlySpan<short> waveBufferPcm16 = ReadOnlySpan<short>.Empty;

@ -195,7 +185,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
                            decodedSampleCount = PcmHelper.Decode(tempSpan, waveBufferPcmFloat, targetSampleStartOffset, targetSampleEndOffset, info.ChannelIndex, info.ChannelCount);
                            break;
                        default:
                            Logger.Warning?.Print(LogClass.AudioRenderer, $"Unsupported sample format {info.SampleFormat}");
                            Logger.Error?.Print(LogClass.AudioRenderer, $"Unsupported sample format " + info.SampleFormat);
                            break;
                    }

@ -203,9 +193,9 @@ namespace Ryujinx.Audio.Renderer.Dsp

                    if (decodedSampleCount < 0)
                    {
                        Logger.Warning?.Print(LogClass.AudioRenderer, $"Decoding failed, skipping WaveBuffer");
                        Logger.Warning?.Print(LogClass.AudioRenderer, "Decoding failed, skipping WaveBuffer");

                        state.MarkEndOfBufferWaveBufferProcessing(ref waveBuffer, ref waveBufferIndex, ref waveBufferConsumed, ref playedSampleCount);
                        voiceState.MarkEndOfBufferWaveBufferProcessing(ref waveBuffer, ref waveBufferIndex, ref waveBufferConsumed, ref playedSampleCount);
                        decodedSampleCount = 0;
                    }

@ -219,13 +209,13 @@ namespace Ryujinx.Audio.Renderer.Dsp

                        if (waveBuffer.Looping)
                        {
                            state.LoopCount++;
                            voiceState.LoopCount++;

                            if (waveBuffer.LoopCount >= 0)
                            {
                                if (decodedSampleCount == 0 || state.LoopCount > waveBuffer.LoopCount)
                                if (decodedSampleCount == 0 || voiceState.LoopCount > waveBuffer.LoopCount)
                                {
                                    state.MarkEndOfBufferWaveBufferProcessing(ref waveBuffer, ref waveBufferIndex, ref waveBufferConsumed, ref playedSampleCount);
                                    voiceState.MarkEndOfBufferWaveBufferProcessing(ref waveBuffer, ref waveBufferIndex, ref waveBufferConsumed, ref playedSampleCount);
                                }
                            }

@ -242,13 +232,12 @@ namespace Ryujinx.Audio.Renderer.Dsp
                        }
                        else
                        {
                            state.MarkEndOfBufferWaveBufferProcessing(ref waveBuffer, ref waveBufferIndex, ref waveBufferConsumed, ref playedSampleCount);
                            voiceState.MarkEndOfBufferWaveBufferProcessing(ref waveBuffer, ref waveBufferIndex, ref waveBufferConsumed, ref playedSampleCount);
                        }
                    }
                }

                Span<float> outputSpan = outputBuffer.Slice(i);
                Span<int> outputSpanInt = MemoryMarshal.Cast<float, int>(outputSpan);
                Span<int> outputSpanInt = MemoryMarshal.Cast<float, int>(outputBuffer.Slice(i));

                if (info.DecodingBehaviour.HasFlag(DecodingBehaviour.SkipPitchAndSampleRateConversion))
                {

@ -259,7 +248,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
                }
                else
                {
                    Span<short> tempSpan = tempBuffer.AsSpan().Slice(tempBufferIndex + y);
                    Span<short> tempSpan = tempBuffer.Slice(tempBufferIndex + y);

                    tempSpan.Slice(0, sampleCountToDecode - y).Fill(0);

@ -267,7 +256,7 @@ namespace Ryujinx.Audio.Renderer.Dsp

                    ResamplerHelper.Resample(outputBuffer, tempBuffer, sampleRateRatio, ref fraction, sampleCountToProcess, info.SrcQuality, y != sourceSampleCountToProcess || info.Pitch != 1.0f);

                    tempBuffer.AsSpan().Slice(sampleCountToDecode, pitchMaxLength).CopyTo(state.Pitch.ToSpan());
                    tempBuffer.Slice(sampleCountToDecode, pitchMaxLength).CopyTo(voiceState.Pitch.ToSpan());
                }

                i += sampleCountToProcess;

@ -275,16 +264,15 @@ namespace Ryujinx.Audio.Renderer.Dsp

            Debug.Assert(sourceSampleCountToProcess == i || !isStarving);

            state.WaveBufferConsumed = waveBufferConsumed;
            state.Offset = offset;
            state.PlayedSampleCount = playedSampleCount;
            state.WaveBufferIndex = (uint)waveBufferIndex;
            state.Fraction = fraction;
        }

            ArrayPool<short>.Shared.Return(tempBuffer);
            voiceState.WaveBufferConsumed = waveBufferConsumed;
            voiceState.Offset = offset;
            voiceState.PlayedSampleCount = playedSampleCount;
            voiceState.WaveBufferIndex = (uint)waveBufferIndex;
            voiceState.Fraction = fraction;
        }
    }
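Throughout the loop the voice state carries a fractional read position (Fraction) that survives across frames; ResamplerHelper consumes it together with the source-to-target ratio computed earlier. A simplified linear-interpolation sketch of that bookkeeping, for illustration only (the real resampler uses higher-quality filters selected by SrcQuality, and the names here are placeholders):

    using System;

    static class ResampleSketch
    {
        // Assumes `source` contains at least one lookahead sample past the last read position.
        public static void ResampleLinear(Span<float> destination, ReadOnlySpan<short> source, float ratio, ref float fraction)
        {
            int sourceIndex = 0;

            for (int i = 0; i < destination.Length; i++)
            {
                // Interpolate between the two nearest source samples.
                float a = source[sourceIndex];
                float b = source[sourceIndex + 1];

                destination[i] = a + (b - a) * fraction;

                // Advance the fractional read position by the rate ratio.
                fraction += ratio;

                int advance = (int)fraction;
                sourceIndex += advance;
                fraction -= advance;
            }
        }
    }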
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ToFloatAvx(Span<float> output, ReadOnlySpan<int> input, int sampleCount)
        {
            ReadOnlySpan<Vector256<int>> inputVec = MemoryMarshal.Cast<int, Vector256<int>>(input);

@ -303,6 +291,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ToFloatSse2(Span<float> output, ReadOnlySpan<int> input, int sampleCount)
        {
            ReadOnlySpan<Vector128<int>> inputVec = MemoryMarshal.Cast<int, Vector128<int>>(input);

@ -321,6 +310,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        private static void ToFloatAdvSimd(Span<float> output, ReadOnlySpan<int> input, int sampleCount)
        {
            ReadOnlySpan<Vector128<int>> inputVec = MemoryMarshal.Cast<int, Vector128<int>>(input);

@ -348,6 +338,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void ToFloat(Span<float> output, ReadOnlySpan<int> input, int sampleCount)
        {
            if (Avx.IsSupported)

@ -368,6 +359,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void ToIntAvx(Span<int> output, ReadOnlySpan<float> input, int sampleCount)
        {
            ReadOnlySpan<Vector256<float>> inputVec = MemoryMarshal.Cast<float, Vector256<float>>(input);

@ -386,6 +378,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void ToIntSse2(Span<int> output, ReadOnlySpan<float> input, int sampleCount)
        {
            ReadOnlySpan<Vector128<float>> inputVec = MemoryMarshal.Cast<float, Vector128<float>>(input);

@ -404,6 +397,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void ToIntAdvSimd(Span<int> output, ReadOnlySpan<float> input, int sampleCount)
        {
            ReadOnlySpan<Vector128<float>> inputVec = MemoryMarshal.Cast<float, Vector128<float>>(input);

@ -431,6 +425,7 @@ namespace Ryujinx.Audio.Renderer.Dsp
            }
        }

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static void ToInt(Span<int> output, ReadOnlySpan<float> input, int sampleCount)
        {
            if (Avx.IsSupported)
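The ToFloat*/ToInt* helpers above convert between the 32-bit integer sample domain and floats using whichever SIMD ISA is available at runtime. A hedged sketch of the AVX int-to-float path, eight samples per iteration, with the scalar tail omitted and placeholder names:

    using System;
    using System.Runtime.InteropServices;
    using System.Runtime.Intrinsics;
    using System.Runtime.Intrinsics.X86;

    static class SampleConvertSketch
    {
        public static void ToFloatAvx(Span<float> output, ReadOnlySpan<int> input, int sampleCount)
        {
            ReadOnlySpan<Vector256<int>> inputVec = MemoryMarshal.Cast<int, Vector256<int>>(input);
            Span<Vector256<float>> outputVec = MemoryMarshal.Cast<float, Vector256<float>>(output);

            for (int i = 0; i < sampleCount / 8; i++)
            {
                // VCVTDQ2PS: convert eight packed 32-bit integers to eight single-precision floats.
                outputVec[i] = Avx.ConvertToVector256Single(inputVec[i]);
            }
        }
    }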

@ -32,6 +32,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.Effect
        float TapUnsafe(uint sampleIndex, int offset);
        float Tap(uint sampleIndex);

        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static float Tap(Span<float> workBuffer, int baseIndex, int sampleIndex, int delaySampleCount)
        {
            int targetIndex = baseIndex - sampleIndex;
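The static Tap above computes a read position relative to the delay line's current base index. A minimal sketch of the usual circular-buffer arithmetic behind such a tap; the wrap-around handling shown here is an assumption about the typical implementation, not a copy of the Ryujinx code:

    using System;

    static class DelayTapSketch
    {
        public static float Tap(Span<float> workBuffer, int baseIndex, int sampleIndex, int delaySampleCount)
        {
            int targetIndex = baseIndex - sampleIndex;

            // Wrap around the circular work buffer when the tap lands before its start.
            if (targetIndex < 0)
            {
                targetIndex += delaySampleCount;
            }

            return workBuffer[targetIndex];
        }
    }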

@ -94,10 +94,10 @@ namespace Ryujinx.Audio.Renderer.Dsp.State
            0.0f
        };

        public IDelayLine[] FdnDelayLines { get; }
        public DelayLine[] FdnDelayLines { get; }
        public DecayDelay[] DecayDelays { get; }
        public IDelayLine PreDelayLine { get; }
        public IDelayLine BackLeftDelayLine { get; }
        public DelayLine PreDelayLine { get; }
        public DelayLine BackLeftDelayLine { get; }
        public uint[] EarlyDelayTime { get; }
        public float[] EarlyGain { get; }
        public uint PreDelayLineDelayTime { get; private set; }

@ -122,7 +122,7 @@ namespace Ryujinx.Audio.Renderer.Dsp.State

        public ReverbState(ref ReverbParameter parameter, ulong workBuffer, bool isLongSizePreDelaySupported)
        {
            FdnDelayLines = new IDelayLine[4];
            FdnDelayLines = new DelayLine[4];
            DecayDelays = new DecayDelay[4];
            EarlyDelayTime = new uint[EarlyModeCount];
            EarlyGain = new float[EarlyModeCount];
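Narrowing these fields from the IDelayLine interface to the concrete DelayLine type lets RyuJIT emit direct, potentially inlined calls instead of interface dispatch in the reverb inner loops. A small illustration with hypothetical types (the real DelayLine may differ):

    interface IDelayLineSketch
    {
        float Tap(uint sampleIndex);
    }

    sealed class DelayLineSketch : IDelayLineSketch
    {
        private readonly float[] _buffer = new float[64];

        public float Tap(uint sampleIndex) => _buffer[sampleIndex % 64];
    }

    static class DispatchSketch
    {
        // Concrete type: direct call, eligible for inlining.
        public static float SumTaps(DelayLineSketch line) => line.Tap(0) + line.Tap(1);

        // Interface type: virtual dispatch on every call.
        public static float SumTapsVirtual(IDelayLineSketch line) => line.Tap(0) + line.Tap(1);
    }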

@ -478,7 +478,7 @@ namespace Ryujinx.Audio.Renderer.Server
        /// <param name="outputBufferOffset">The output buffer offset.</param>
        /// <param name="downMixParameter">The downmixer parameters to use.</param>
        /// <param name="nodeId">The node id associated to this command.</param>
        public void GenerateDownMixSurroundToStereo(uint bufferOffset, Span<byte> inputBufferOffset, Span<byte> outputBufferOffset, ReadOnlySpan<float> downMixParameter, int nodeId)
        public void GenerateDownMixSurroundToStereo(uint bufferOffset, Span<byte> inputBufferOffset, Span<byte> outputBufferOffset, float[] downMixParameter, int nodeId)
        {
            DownMixSurroundToStereoCommand command = new DownMixSurroundToStereoCommand(bufferOffset, inputBufferOffset, outputBufferOffset, downMixParameter, nodeId);
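GenerateDownMixSurroundToStereo now takes the downmix coefficients as float[] rather than ReadOnlySpan<float>. A span is a ref struct and cannot be stored in a field of the command object, so accepting the array presumably lets the command keep a reference to the coefficients without an extra copy. A sketch with a hypothetical command type:

    class DownMixCommandSketch
    {
        private readonly float[] _coefficients;

        public DownMixCommandSketch(float[] coefficients)
        {
            // Stores the caller's array directly; a ReadOnlySpan<float> parameter
            // would have forced a defensive ToArray() copy here.
            _coefficients = coefficients;
        }
    }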