Revert "Adjust naming conventions and general refactoring in HLE Project (#490)" (#526)

This reverts commit 85dbb9559a.
Author: gdkchan, 2018-12-04 22:52:39 -02:00 (committed by GitHub)
parent 85dbb9559a
commit 3615a70cae
299 changed files with 12276 additions and 12268 deletions
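The diff below restores the pre-#490 naming convention throughout the HLE project: private fields go back from _camelCase to PascalCase (or an m_ prefix, as in m_Commands), and parameters and locals go back from camelCase to PascalCase. A minimal sketch of the pattern, using a hypothetical Example class rather than one of the 299 changed files (ServiceCtx is a type taken from the diff):

    // Convention after this revert; the "was:" comments show the reverted (#490) spelling.
    class Example
    {
        private int Track;                                     // was: private int _track;

        public long GetState(ServiceCtx Context)               // was: GetState(ServiceCtx context)
        {
            int SampleRate = Context.RequestData.ReadInt32();  // was: int sampleRate = context.RequestData.ReadInt32();
            return 0;
        }
    }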


@@ -9,19 +9,19 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioOut
{
class IAudioOut : IpcService, IDisposable
{
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
private IAalOutput _audioOut;
private IAalOutput AudioOut;
private KEvent _releaseEvent;
private KEvent ReleaseEvent;
private int _track;
private int Track;
public IAudioOut(IAalOutput audioOut, KEvent releaseEvent, int track)
public IAudioOut(IAalOutput AudioOut, KEvent ReleaseEvent, int Track)
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, GetAudioOutState },
{ 1, StartAudioOut },
@@ -34,116 +34,116 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioOut
{ 8, GetReleasedAudioOutBufferAuto }
};
_audioOut = audioOut;
_releaseEvent = releaseEvent;
_track = track;
this.AudioOut = AudioOut;
this.ReleaseEvent = ReleaseEvent;
this.Track = Track;
}
public long GetAudioOutState(ServiceCtx context)
public long GetAudioOutState(ServiceCtx Context)
{
context.ResponseData.Write((int)_audioOut.GetState(_track));
Context.ResponseData.Write((int)AudioOut.GetState(Track));
return 0;
}
public long StartAudioOut(ServiceCtx context)
public long StartAudioOut(ServiceCtx Context)
{
_audioOut.Start(_track);
AudioOut.Start(Track);
return 0;
}
public long StopAudioOut(ServiceCtx context)
public long StopAudioOut(ServiceCtx Context)
{
_audioOut.Stop(_track);
AudioOut.Stop(Track);
return 0;
}
public long AppendAudioOutBuffer(ServiceCtx context)
public long AppendAudioOutBuffer(ServiceCtx Context)
{
return AppendAudioOutBufferImpl(context, context.Request.SendBuff[0].Position);
return AppendAudioOutBufferImpl(Context, Context.Request.SendBuff[0].Position);
}
public long RegisterBufferEvent(ServiceCtx context)
public long RegisterBufferEvent(ServiceCtx Context)
{
if (context.Process.HandleTable.GenerateHandle(_releaseEvent.ReadableEvent, out int handle) != KernelResult.Success)
if (Context.Process.HandleTable.GenerateHandle(ReleaseEvent.ReadableEvent, out int Handle) != KernelResult.Success)
{
throw new InvalidOperationException("Out of handles!");
}
context.Response.HandleDesc = IpcHandleDesc.MakeCopy(handle);
Context.Response.HandleDesc = IpcHandleDesc.MakeCopy(Handle);
return 0;
}
public long GetReleasedAudioOutBuffer(ServiceCtx context)
public long GetReleasedAudioOutBuffer(ServiceCtx Context)
{
long position = context.Request.ReceiveBuff[0].Position;
long size = context.Request.ReceiveBuff[0].Size;
long Position = Context.Request.ReceiveBuff[0].Position;
long Size = Context.Request.ReceiveBuff[0].Size;
return GetReleasedAudioOutBufferImpl(context, position, size);
return GetReleasedAudioOutBufferImpl(Context, Position, Size);
}
public long ContainsAudioOutBuffer(ServiceCtx context)
public long ContainsAudioOutBuffer(ServiceCtx Context)
{
long tag = context.RequestData.ReadInt64();
long Tag = Context.RequestData.ReadInt64();
context.ResponseData.Write(_audioOut.ContainsBuffer(_track, tag) ? 1 : 0);
Context.ResponseData.Write(AudioOut.ContainsBuffer(Track, Tag) ? 1 : 0);
return 0;
}
public long AppendAudioOutBufferAuto(ServiceCtx context)
public long AppendAudioOutBufferAuto(ServiceCtx Context)
{
(long position, long size) = context.Request.GetBufferType0x21();
(long Position, long Size) = Context.Request.GetBufferType0x21();
return AppendAudioOutBufferImpl(context, position);
return AppendAudioOutBufferImpl(Context, Position);
}
public long AppendAudioOutBufferImpl(ServiceCtx context, long position)
public long AppendAudioOutBufferImpl(ServiceCtx Context, long Position)
{
long tag = context.RequestData.ReadInt64();
long Tag = Context.RequestData.ReadInt64();
AudioOutData data = MemoryHelper.Read<AudioOutData>(
context.Memory,
position);
AudioOutData Data = MemoryHelper.Read<AudioOutData>(
Context.Memory,
Position);
byte[] buffer = context.Memory.ReadBytes(
data.SampleBufferPtr,
data.SampleBufferSize);
byte[] Buffer = Context.Memory.ReadBytes(
Data.SampleBufferPtr,
Data.SampleBufferSize);
_audioOut.AppendBuffer(_track, tag, buffer);
AudioOut.AppendBuffer(Track, Tag, Buffer);
return 0;
}
public long GetReleasedAudioOutBufferAuto(ServiceCtx context)
public long GetReleasedAudioOutBufferAuto(ServiceCtx Context)
{
(long position, long size) = context.Request.GetBufferType0x22();
(long Position, long Size) = Context.Request.GetBufferType0x22();
return GetReleasedAudioOutBufferImpl(context, position, size);
return GetReleasedAudioOutBufferImpl(Context, Position, Size);
}
public long GetReleasedAudioOutBufferImpl(ServiceCtx context, long position, long size)
public long GetReleasedAudioOutBufferImpl(ServiceCtx Context, long Position, long Size)
{
uint count = (uint)((ulong)size >> 3);
uint Count = (uint)((ulong)Size >> 3);
long[] releasedBuffers = _audioOut.GetReleasedBuffers(_track, (int)count);
long[] ReleasedBuffers = AudioOut.GetReleasedBuffers(Track, (int)Count);
for (uint index = 0; index < count; index++)
for (uint Index = 0; Index < Count; Index++)
{
long tag = 0;
long Tag = 0;
if (index < releasedBuffers.Length)
if (Index < ReleasedBuffers.Length)
{
tag = releasedBuffers[index];
Tag = ReleasedBuffers[Index];
}
context.Memory.WriteInt64(position + index * 8, tag);
Context.Memory.WriteInt64(Position + Index * 8, Tag);
}
context.ResponseData.Write(releasedBuffers.Length);
Context.ResponseData.Write(ReleasedBuffers.Length);
return 0;
}
@@ -153,11 +153,11 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioOut
Dispose(true);
}
protected virtual void Dispose(bool disposing)
protected virtual void Dispose(bool Disposing)
{
if (disposing)
if (Disposing)
{
_audioOut.CloseTrack(_track);
AudioOut.CloseTrack(Track);
}
}
}
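In GetReleasedAudioOutBufferImpl above, each released buffer is identified by a 64-bit tag, which is why Count is derived as Size >> 3. A short worked note (editorial, values chosen for illustration):

    // Size = 0x20 bytes  ->  Count = 0x20 >> 3 = 4 tag slots.
    // Slots with Index >= ReleasedBuffers.Length are written as 0; the response
    // then reports ReleasedBuffers.Length, the number of tags actually released.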


@@ -22,33 +22,33 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
//high latency).
private const int MixBufferSamplesCount = 960;
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
private KEvent _updateEvent;
private KEvent UpdateEvent;
private MemoryManager _memory;
private MemoryManager Memory;
private IAalOutput _audioOut;
private IAalOutput AudioOut;
private AudioRendererParameter _params;
private AudioRendererParameter Params;
private MemoryPoolContext[] _memoryPools;
private MemoryPoolContext[] MemoryPools;
private VoiceContext[] _voices;
private VoiceContext[] Voices;
private int _track;
private int Track;
private PlayState _playState;
private PlayState PlayState;
public IAudioRenderer(
Horizon system,
MemoryManager memory,
IAalOutput audioOut,
Horizon System,
MemoryManager Memory,
IAalOutput AudioOut,
AudioRendererParameter Params)
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, GetSampleRate },
{ 1, GetSampleCount },
@@ -60,75 +60,75 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
{ 7, QuerySystemEvent }
};
_updateEvent = new KEvent(system);
UpdateEvent = new KEvent(System);
_memory = memory;
_audioOut = audioOut;
_params = Params;
this.Memory = Memory;
this.AudioOut = AudioOut;
this.Params = Params;
_track = audioOut.OpenTrack(
Track = AudioOut.OpenTrack(
AudioConsts.HostSampleRate,
AudioConsts.HostChannelsCount,
AudioCallback);
_memoryPools = CreateArray<MemoryPoolContext>(Params.EffectCount + Params.VoiceCount * 4);
MemoryPools = CreateArray<MemoryPoolContext>(Params.EffectCount + Params.VoiceCount * 4);
_voices = CreateArray<VoiceContext>(Params.VoiceCount);
Voices = CreateArray<VoiceContext>(Params.VoiceCount);
InitializeAudioOut();
_playState = PlayState.Stopped;
PlayState = PlayState.Stopped;
}
// GetSampleRate() -> u32
public long GetSampleRate(ServiceCtx context)
public long GetSampleRate(ServiceCtx Context)
{
context.ResponseData.Write(_params.SampleRate);
Context.ResponseData.Write(Params.SampleRate);
return 0;
}
// GetSampleCount() -> u32
public long GetSampleCount(ServiceCtx context)
public long GetSampleCount(ServiceCtx Context)
{
context.ResponseData.Write(_params.SampleCount);
Context.ResponseData.Write(Params.SampleCount);
return 0;
}
// GetMixBufferCount() -> u32
public long GetMixBufferCount(ServiceCtx context)
public long GetMixBufferCount(ServiceCtx Context)
{
context.ResponseData.Write(_params.MixCount);
Context.ResponseData.Write(Params.MixCount);
return 0;
}
// GetState() -> u32
private long GetState(ServiceCtx context)
private long GetState(ServiceCtx Context)
{
context.ResponseData.Write((int)_playState);
Context.ResponseData.Write((int)PlayState);
Logger.PrintStub(LogClass.ServiceAudio, $"Stubbed. Renderer State: {Enum.GetName(typeof(PlayState), _playState)}");
Logger.PrintStub(LogClass.ServiceAudio, $"Stubbed. Renderer State: {Enum.GetName(typeof(PlayState), PlayState)}");
return 0;
}
private void AudioCallback()
{
_updateEvent.ReadableEvent.Signal();
UpdateEvent.ReadableEvent.Signal();
}
private static T[] CreateArray<T>(int size) where T : new()
private static T[] CreateArray<T>(int Size) where T : new()
{
T[] output = new T[size];
T[] Output = new T[Size];
for (int index = 0; index < size; index++)
for (int Index = 0; Index < Size; Index++)
{
output[index] = new T();
Output[Index] = new T();
}
return output;
return Output;
}
private void InitializeAudioOut()
@@ -137,258 +137,258 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
AppendMixedBuffer(1);
AppendMixedBuffer(2);
_audioOut.Start(_track);
AudioOut.Start(Track);
}
public long RequestUpdateAudioRenderer(ServiceCtx context)
public long RequestUpdateAudioRenderer(ServiceCtx Context)
{
long outputPosition = context.Request.ReceiveBuff[0].Position;
long outputSize = context.Request.ReceiveBuff[0].Size;
long OutputPosition = Context.Request.ReceiveBuff[0].Position;
long OutputSize = Context.Request.ReceiveBuff[0].Size;
MemoryHelper.FillWithZeros(context.Memory, outputPosition, (int)outputSize);
MemoryHelper.FillWithZeros(Context.Memory, OutputPosition, (int)OutputSize);
long inputPosition = context.Request.SendBuff[0].Position;
long InputPosition = Context.Request.SendBuff[0].Position;
StructReader reader = new StructReader(context.Memory, inputPosition);
StructWriter writer = new StructWriter(context.Memory, outputPosition);
StructReader Reader = new StructReader(Context.Memory, InputPosition);
StructWriter Writer = new StructWriter(Context.Memory, OutputPosition);
UpdateDataHeader inputHeader = reader.Read<UpdateDataHeader>();
UpdateDataHeader InputHeader = Reader.Read<UpdateDataHeader>();
reader.Read<BehaviorIn>(inputHeader.BehaviorSize);
Reader.Read<BehaviorIn>(InputHeader.BehaviorSize);
MemoryPoolIn[] memoryPoolsIn = reader.Read<MemoryPoolIn>(inputHeader.MemoryPoolSize);
MemoryPoolIn[] MemoryPoolsIn = Reader.Read<MemoryPoolIn>(InputHeader.MemoryPoolSize);
for (int index = 0; index < memoryPoolsIn.Length; index++)
for (int Index = 0; Index < MemoryPoolsIn.Length; Index++)
{
MemoryPoolIn memoryPool = memoryPoolsIn[index];
MemoryPoolIn MemoryPool = MemoryPoolsIn[Index];
if (memoryPool.State == MemoryPoolState.RequestAttach)
if (MemoryPool.State == MemoryPoolState.RequestAttach)
{
_memoryPools[index].OutStatus.State = MemoryPoolState.Attached;
MemoryPools[Index].OutStatus.State = MemoryPoolState.Attached;
}
else if (memoryPool.State == MemoryPoolState.RequestDetach)
else if (MemoryPool.State == MemoryPoolState.RequestDetach)
{
_memoryPools[index].OutStatus.State = MemoryPoolState.Detached;
MemoryPools[Index].OutStatus.State = MemoryPoolState.Detached;
}
}
reader.Read<VoiceChannelResourceIn>(inputHeader.VoiceResourceSize);
Reader.Read<VoiceChannelResourceIn>(InputHeader.VoiceResourceSize);
VoiceIn[] voicesIn = reader.Read<VoiceIn>(inputHeader.VoiceSize);
VoiceIn[] VoicesIn = Reader.Read<VoiceIn>(InputHeader.VoiceSize);
for (int index = 0; index < voicesIn.Length; index++)
for (int Index = 0; Index < VoicesIn.Length; Index++)
{
VoiceIn voice = voicesIn[index];
VoiceIn Voice = VoicesIn[Index];
VoiceContext voiceCtx = _voices[index];
VoiceContext VoiceCtx = Voices[Index];
voiceCtx.SetAcquireState(voice.Acquired != 0);
VoiceCtx.SetAcquireState(Voice.Acquired != 0);
if (voice.Acquired == 0)
if (Voice.Acquired == 0)
{
continue;
}
if (voice.FirstUpdate != 0)
if (Voice.FirstUpdate != 0)
{
voiceCtx.AdpcmCtx = GetAdpcmDecoderContext(
voice.AdpcmCoeffsPosition,
voice.AdpcmCoeffsSize);
VoiceCtx.AdpcmCtx = GetAdpcmDecoderContext(
Voice.AdpcmCoeffsPosition,
Voice.AdpcmCoeffsSize);
voiceCtx.SampleFormat = voice.SampleFormat;
voiceCtx.SampleRate = voice.SampleRate;
voiceCtx.ChannelsCount = voice.ChannelsCount;
VoiceCtx.SampleFormat = Voice.SampleFormat;
VoiceCtx.SampleRate = Voice.SampleRate;
VoiceCtx.ChannelsCount = Voice.ChannelsCount;
voiceCtx.SetBufferIndex(voice.BaseWaveBufferIndex);
VoiceCtx.SetBufferIndex(Voice.BaseWaveBufferIndex);
}
voiceCtx.WaveBuffers[0] = voice.WaveBuffer0;
voiceCtx.WaveBuffers[1] = voice.WaveBuffer1;
voiceCtx.WaveBuffers[2] = voice.WaveBuffer2;
voiceCtx.WaveBuffers[3] = voice.WaveBuffer3;
voiceCtx.Volume = voice.Volume;
voiceCtx.PlayState = voice.PlayState;
VoiceCtx.WaveBuffers[0] = Voice.WaveBuffer0;
VoiceCtx.WaveBuffers[1] = Voice.WaveBuffer1;
VoiceCtx.WaveBuffers[2] = Voice.WaveBuffer2;
VoiceCtx.WaveBuffers[3] = Voice.WaveBuffer3;
VoiceCtx.Volume = Voice.Volume;
VoiceCtx.PlayState = Voice.PlayState;
}
UpdateAudio();
UpdateDataHeader outputHeader = new UpdateDataHeader();
UpdateDataHeader OutputHeader = new UpdateDataHeader();
int updateHeaderSize = Marshal.SizeOf<UpdateDataHeader>();
int UpdateHeaderSize = Marshal.SizeOf<UpdateDataHeader>();
outputHeader.Revision = IAudioRendererManager.RevMagic;
outputHeader.BehaviorSize = 0xb0;
outputHeader.MemoryPoolSize = (_params.EffectCount + _params.VoiceCount * 4) * 0x10;
outputHeader.VoiceSize = _params.VoiceCount * 0x10;
outputHeader.EffectSize = _params.EffectCount * 0x10;
outputHeader.SinkSize = _params.SinkCount * 0x20;
outputHeader.PerformanceManagerSize = 0x10;
outputHeader.TotalSize = updateHeaderSize +
outputHeader.BehaviorSize +
outputHeader.MemoryPoolSize +
outputHeader.VoiceSize +
outputHeader.EffectSize +
outputHeader.SinkSize +
outputHeader.PerformanceManagerSize;
OutputHeader.Revision = IAudioRendererManager.RevMagic;
OutputHeader.BehaviorSize = 0xb0;
OutputHeader.MemoryPoolSize = (Params.EffectCount + Params.VoiceCount * 4) * 0x10;
OutputHeader.VoiceSize = Params.VoiceCount * 0x10;
OutputHeader.EffectSize = Params.EffectCount * 0x10;
OutputHeader.SinkSize = Params.SinkCount * 0x20;
OutputHeader.PerformanceManagerSize = 0x10;
OutputHeader.TotalSize = UpdateHeaderSize +
OutputHeader.BehaviorSize +
OutputHeader.MemoryPoolSize +
OutputHeader.VoiceSize +
OutputHeader.EffectSize +
OutputHeader.SinkSize +
OutputHeader.PerformanceManagerSize;
writer.Write(outputHeader);
Writer.Write(OutputHeader);
foreach (MemoryPoolContext memoryPool in _memoryPools)
foreach (MemoryPoolContext MemoryPool in MemoryPools)
{
writer.Write(memoryPool.OutStatus);
Writer.Write(MemoryPool.OutStatus);
}
foreach (VoiceContext voice in _voices)
foreach (VoiceContext Voice in Voices)
{
writer.Write(voice.OutStatus);
Writer.Write(Voice.OutStatus);
}
return 0;
}
public long StartAudioRenderer(ServiceCtx context)
public long StartAudioRenderer(ServiceCtx Context)
{
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
_playState = PlayState.Playing;
PlayState = PlayState.Playing;
return 0;
}
public long StopAudioRenderer(ServiceCtx context)
public long StopAudioRenderer(ServiceCtx Context)
{
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
_playState = PlayState.Stopped;
PlayState = PlayState.Stopped;
return 0;
}
public long QuerySystemEvent(ServiceCtx context)
public long QuerySystemEvent(ServiceCtx Context)
{
if (context.Process.HandleTable.GenerateHandle(_updateEvent.ReadableEvent, out int handle) != KernelResult.Success)
if (Context.Process.HandleTable.GenerateHandle(UpdateEvent.ReadableEvent, out int Handle) != KernelResult.Success)
{
throw new InvalidOperationException("Out of handles!");
}
context.Response.HandleDesc = IpcHandleDesc.MakeCopy(handle);
Context.Response.HandleDesc = IpcHandleDesc.MakeCopy(Handle);
return 0;
}
private AdpcmDecoderContext GetAdpcmDecoderContext(long position, long size)
private AdpcmDecoderContext GetAdpcmDecoderContext(long Position, long Size)
{
if (size == 0)
if (Size == 0)
{
return null;
}
AdpcmDecoderContext context = new AdpcmDecoderContext();
AdpcmDecoderContext Context = new AdpcmDecoderContext();
context.Coefficients = new short[size >> 1];
Context.Coefficients = new short[Size >> 1];
for (int offset = 0; offset < size; offset += 2)
for (int Offset = 0; Offset < Size; Offset += 2)
{
context.Coefficients[offset >> 1] = _memory.ReadInt16(position + offset);
Context.Coefficients[Offset >> 1] = Memory.ReadInt16(Position + Offset);
}
return context;
return Context;
}
private void UpdateAudio()
{
long[] released = _audioOut.GetReleasedBuffers(_track, 2);
long[] Released = AudioOut.GetReleasedBuffers(Track, 2);
for (int index = 0; index < released.Length; index++)
for (int Index = 0; Index < Released.Length; Index++)
{
AppendMixedBuffer(released[index]);
AppendMixedBuffer(Released[Index]);
}
}
private void AppendMixedBuffer(long tag)
private unsafe void AppendMixedBuffer(long Tag)
{
int[] mixBuffer = new int[MixBufferSamplesCount * AudioConsts.HostChannelsCount];
int[] MixBuffer = new int[MixBufferSamplesCount * AudioConsts.HostChannelsCount];
foreach (VoiceContext voice in _voices)
foreach (VoiceContext Voice in Voices)
{
if (!voice.Playing)
if (!Voice.Playing)
{
continue;
}
int outOffset = 0;
int pendingSamples = MixBufferSamplesCount;
float volume = voice.Volume;
int OutOffset = 0;
int PendingSamples = MixBufferSamplesCount;
float Volume = Voice.Volume;
while (pendingSamples > 0)
while (PendingSamples > 0)
{
int[] samples = voice.GetBufferData(_memory, pendingSamples, out int returnedSamples);
int[] Samples = Voice.GetBufferData(Memory, PendingSamples, out int ReturnedSamples);
if (returnedSamples == 0)
if (ReturnedSamples == 0)
{
break;
}
pendingSamples -= returnedSamples;
PendingSamples -= ReturnedSamples;
for (int offset = 0; offset < samples.Length; offset++)
for (int Offset = 0; Offset < Samples.Length; Offset++)
{
mixBuffer[outOffset++] += (int)(samples[offset] * voice.Volume);
MixBuffer[OutOffset++] += (int)(Samples[Offset] * Voice.Volume);
}
}
}
_audioOut.AppendBuffer(_track, tag, GetFinalBuffer(mixBuffer));
AudioOut.AppendBuffer(Track, Tag, GetFinalBuffer(MixBuffer));
}
private static unsafe short[] GetFinalBuffer(int[] buffer)
private unsafe static short[] GetFinalBuffer(int[] Buffer)
{
short[] output = new short[buffer.Length];
short[] Output = new short[Buffer.Length];
int offset = 0;
int Offset = 0;
// Perform Saturation using SSE2 if supported
if (Sse2.IsSupported)
{
fixed (int* inptr = buffer)
fixed (short* outptr = output)
fixed (int* inptr = Buffer)
fixed (short* outptr = Output)
{
for (; offset + 32 <= buffer.Length; offset += 32)
for (; Offset + 32 <= Buffer.Length; Offset += 32)
{
// Unroll the loop a little to ensure the CPU pipeline
// is always full.
Vector128<int> block1A = Sse2.LoadVector128(inptr + offset + 0);
Vector128<int> block1B = Sse2.LoadVector128(inptr + offset + 4);
Vector128<int> block1A = Sse2.LoadVector128(inptr + Offset + 0);
Vector128<int> block1B = Sse2.LoadVector128(inptr + Offset + 4);
Vector128<int> block2A = Sse2.LoadVector128(inptr + offset + 8);
Vector128<int> block2B = Sse2.LoadVector128(inptr + offset + 12);
Vector128<int> block2A = Sse2.LoadVector128(inptr + Offset + 8);
Vector128<int> block2B = Sse2.LoadVector128(inptr + Offset + 12);
Vector128<int> block3A = Sse2.LoadVector128(inptr + offset + 16);
Vector128<int> block3B = Sse2.LoadVector128(inptr + offset + 20);
Vector128<int> block3A = Sse2.LoadVector128(inptr + Offset + 16);
Vector128<int> block3B = Sse2.LoadVector128(inptr + Offset + 20);
Vector128<int> block4A = Sse2.LoadVector128(inptr + offset + 24);
Vector128<int> block4B = Sse2.LoadVector128(inptr + offset + 28);
Vector128<int> block4A = Sse2.LoadVector128(inptr + Offset + 24);
Vector128<int> block4B = Sse2.LoadVector128(inptr + Offset + 28);
Vector128<short> output1 = Sse2.PackSignedSaturate(block1A, block1B);
Vector128<short> output2 = Sse2.PackSignedSaturate(block2A, block2B);
Vector128<short> output3 = Sse2.PackSignedSaturate(block3A, block3B);
Vector128<short> output4 = Sse2.PackSignedSaturate(block4A, block4B);
Sse2.Store(outptr + offset + 0, output1);
Sse2.Store(outptr + offset + 8, output2);
Sse2.Store(outptr + offset + 16, output3);
Sse2.Store(outptr + offset + 24, output4);
Sse2.Store(outptr + Offset + 0, output1);
Sse2.Store(outptr + Offset + 8, output2);
Sse2.Store(outptr + Offset + 16, output3);
Sse2.Store(outptr + Offset + 24, output4);
}
}
}
// Process left overs
for (; offset < buffer.Length; offset++)
for (; Offset < Buffer.Length; Offset++)
{
output[offset] = DspUtils.Saturate(buffer[offset]);
Output[Offset] = DspUtils.Saturate(Buffer[Offset]);
}
return output;
return Output;
}
public void Dispose()
@@ -396,11 +396,11 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
Dispose(true);
}
protected virtual void Dispose(bool disposing)
protected virtual void Dispose(bool Disposing)
{
if (disposing)
if (Disposing)
{
_audioOut.CloseTrack(_track);
AudioOut.CloseTrack(Track);
}
}
}
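GetFinalBuffer above narrows the 32-bit mix accumulator to 16-bit output with saturation, via Sse2.PackSignedSaturate on the vector path and DspUtils.Saturate on the scalar tail. A scalar sketch of what happens to each element, assuming the saturate helper is a plain clamp (the name SaturateToInt16 is made up for this sketch):

    // Per-element behaviour of the saturating pack (sketch):
    static short SaturateToInt16(int value)
    {
        if (value > short.MaxValue) return short.MaxValue;   //  32767
        if (value < short.MinValue) return short.MinValue;   // -32768
        return (short)value;
    }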


@@ -1,6 +1,6 @@
namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
{
enum MemoryPoolState
enum MemoryPoolState : int
{
Invalid = 0,
Unknown = 1,


@@ -5,7 +5,7 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
static class Resampler
{
#region "LookUp Tables"
private static short[] _curveLut0 = new short[]
private static short[] CurveLut0 = new short[]
{
6600, 19426, 6722, 3, 6479, 19424, 6845, 9, 6359, 19419, 6968, 15, 6239, 19412, 7093, 22,
6121, 19403, 7219, 28, 6004, 19391, 7345, 34, 5888, 19377, 7472, 41, 5773, 19361, 7600, 48,
@@ -41,7 +41,7 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
22, 7093, 19412, 6239, 15, 6968, 19419, 6359, 9, 6845, 19424, 6479, 3, 6722, 19426, 6600
};
private static short[] _curveLut1 = new short[]
private static short[] CurveLut1 = new short[]
{
-68, 32639, 69, -5, -200, 32630, 212, -15, -328, 32613, 359, -26, -450, 32586, 512, -36,
-568, 32551, 669, -47, -680, 32507, 832, -58, -788, 32454, 1000, -69, -891, 32393, 1174, -80,
@@ -77,7 +77,7 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
-36, 512, 32586, -450, -26, 359, 32613, -328, -15, 212, 32630, -200, -5, 69, 32639, -68
};
private static short[] _curveLut2 = new short[]
private static short[] CurveLut2 = new short[]
{
3195, 26287, 3329, -32, 3064, 26281, 3467, -34, 2936, 26270, 3608, -38, 2811, 26253, 3751, -42,
2688, 26230, 3897, -46, 2568, 26202, 4046, -50, 2451, 26169, 4199, -54, 2338, 26130, 4354, -58,
@@ -115,77 +115,77 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
#endregion
public static int[] Resample2Ch(
int[] buffer,
int srcSampleRate,
int dstSampleRate,
int samplesCount,
ref int fracPart)
int[] Buffer,
int SrcSampleRate,
int DstSampleRate,
int SamplesCount,
ref int FracPart)
{
if (buffer == null)
if (Buffer == null)
{
throw new ArgumentNullException(nameof(buffer));
throw new ArgumentNullException(nameof(Buffer));
}
if (srcSampleRate <= 0)
if (SrcSampleRate <= 0)
{
throw new ArgumentOutOfRangeException(nameof(srcSampleRate));
throw new ArgumentOutOfRangeException(nameof(SrcSampleRate));
}
if (dstSampleRate <= 0)
if (DstSampleRate <= 0)
{
throw new ArgumentOutOfRangeException(nameof(dstSampleRate));
throw new ArgumentOutOfRangeException(nameof(DstSampleRate));
}
double ratio = (double)srcSampleRate / dstSampleRate;
double Ratio = (double)SrcSampleRate / DstSampleRate;
int newSamplesCount = (int)(samplesCount / ratio);
int NewSamplesCount = (int)(SamplesCount / Ratio);
int step = (int)(ratio * 0x8000);
int Step = (int)(Ratio * 0x8000);
int[] output = new int[newSamplesCount * 2];
int[] Output = new int[NewSamplesCount * 2];
short[] lut;
short[] Lut;
if (step > 0xaaaa)
if (Step > 0xaaaa)
{
lut = _curveLut0;
Lut = CurveLut0;
}
else if (step <= 0x8000)
else if (Step <= 0x8000)
{
lut = _curveLut1;
Lut = CurveLut1;
}
else
{
lut = _curveLut2;
Lut = CurveLut2;
}
int inOffs = 0;
int InOffs = 0;
for (int outOffs = 0; outOffs < output.Length; outOffs += 2)
for (int OutOffs = 0; OutOffs < Output.Length; OutOffs += 2)
{
int lutIndex = (fracPart >> 8) * 4;
int LutIndex = (FracPart >> 8) * 4;
int sample0 = buffer[(inOffs + 0) * 2 + 0] * lut[lutIndex + 0] +
buffer[(inOffs + 1) * 2 + 0] * lut[lutIndex + 1] +
buffer[(inOffs + 2) * 2 + 0] * lut[lutIndex + 2] +
buffer[(inOffs + 3) * 2 + 0] * lut[lutIndex + 3];
int Sample0 = Buffer[(InOffs + 0) * 2 + 0] * Lut[LutIndex + 0] +
Buffer[(InOffs + 1) * 2 + 0] * Lut[LutIndex + 1] +
Buffer[(InOffs + 2) * 2 + 0] * Lut[LutIndex + 2] +
Buffer[(InOffs + 3) * 2 + 0] * Lut[LutIndex + 3];
int sample1 = buffer[(inOffs + 0) * 2 + 1] * lut[lutIndex + 0] +
buffer[(inOffs + 1) * 2 + 1] * lut[lutIndex + 1] +
buffer[(inOffs + 2) * 2 + 1] * lut[lutIndex + 2] +
buffer[(inOffs + 3) * 2 + 1] * lut[lutIndex + 3];
int Sample1 = Buffer[(InOffs + 0) * 2 + 1] * Lut[LutIndex + 0] +
Buffer[(InOffs + 1) * 2 + 1] * Lut[LutIndex + 1] +
Buffer[(InOffs + 2) * 2 + 1] * Lut[LutIndex + 2] +
Buffer[(InOffs + 3) * 2 + 1] * Lut[LutIndex + 3];
int newOffset = fracPart + step;
int NewOffset = FracPart + Step;
inOffs += newOffset >> 15;
InOffs += NewOffset >> 15;
fracPart = newOffset & 0x7fff;
FracPart = NewOffset & 0x7fff;
output[outOffs + 0] = sample0 >> 15;
output[outOffs + 1] = sample1 >> 15;
Output[OutOffs + 0] = Sample0 >> 15;
Output[OutOffs + 1] = Sample1 >> 15;
}
return output;
return Output;
}
}
}
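Resample2Ch above is a 4-tap polyphase resampler driven by 15-bit fixed-point arithmetic: Step is the source/destination rate ratio scaled by 0x8000, the top 7 bits of FracPart select one of 128 filter phases (4 coefficients each) from the chosen table, and the carry out of bit 15 advances the source offset. A worked example under an assumed 32000 Hz to 48000 Hz conversion:

    // Ratio = 32000.0 / 48000 = 0.6667;  Step = (int)(Ratio * 0x8000) = 0x5555  (<= 0x8000, so CurveLut1)
    // out frame 0: FracPart = 0x0000 -> LutIndex = (0x0000 >> 8) * 4 = 0;   FracPart + Step = 0x5555 -> InOffs += 0
    // out frame 1: FracPart = 0x5555 -> LutIndex = (0x5555 >> 8) * 4 = 340; FracPart + Step = 0xAAAA -> InOffs += 1, FracPart = 0x2AAA
    // i.e. roughly two source frames are consumed for every three output frames.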


@@ -6,13 +6,13 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
{
class VoiceContext
{
private bool _acquired;
private bool _bufferReload;
private bool Acquired;
private bool BufferReload;
private int _resamplerFracPart;
private int ResamplerFracPart;
private int _bufferIndex;
private int _offset;
private int BufferIndex;
private int Offset;
public int SampleRate;
public int ChannelsCount;
@@ -29,138 +29,138 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
public VoiceOut OutStatus;
private int[] _samples;
private int[] Samples;
public bool Playing => _acquired && PlayState == PlayState.Playing;
public bool Playing => Acquired && PlayState == PlayState.Playing;
public VoiceContext()
{
WaveBuffers = new WaveBuffer[4];
}
public void SetAcquireState(bool newState)
public void SetAcquireState(bool NewState)
{
if (_acquired && !newState)
if (Acquired && !NewState)
{
//Release.
Reset();
}
_acquired = newState;
Acquired = NewState;
}
private void Reset()
{
_bufferReload = true;
BufferReload = true;
_bufferIndex = 0;
_offset = 0;
BufferIndex = 0;
Offset = 0;
OutStatus.PlayedSamplesCount = 0;
OutStatus.PlayedWaveBuffersCount = 0;
OutStatus.VoiceDropsCount = 0;
}
public int[] GetBufferData(MemoryManager memory, int maxSamples, out int samplesCount)
public int[] GetBufferData(MemoryManager Memory, int MaxSamples, out int SamplesCount)
{
if (!Playing)
{
samplesCount = 0;
SamplesCount = 0;
return null;
}
if (_bufferReload)
if (BufferReload)
{
_bufferReload = false;
BufferReload = false;
UpdateBuffer(memory);
UpdateBuffer(Memory);
}
WaveBuffer wb = WaveBuffers[_bufferIndex];
WaveBuffer Wb = WaveBuffers[BufferIndex];
int maxSize = _samples.Length - _offset;
int MaxSize = Samples.Length - Offset;
int size = maxSamples * AudioConsts.HostChannelsCount;
int Size = MaxSamples * AudioConsts.HostChannelsCount;
if (size > maxSize)
if (Size > MaxSize)
{
size = maxSize;
Size = MaxSize;
}
int[] output = new int[size];
int[] Output = new int[Size];
Array.Copy(_samples, _offset, output, 0, size);
Array.Copy(Samples, Offset, Output, 0, Size);
samplesCount = size / AudioConsts.HostChannelsCount;
SamplesCount = Size / AudioConsts.HostChannelsCount;
OutStatus.PlayedSamplesCount += samplesCount;
OutStatus.PlayedSamplesCount += SamplesCount;
_offset += size;
Offset += Size;
if (_offset == _samples.Length)
if (Offset == Samples.Length)
{
_offset = 0;
Offset = 0;
if (wb.Looping == 0)
if (Wb.Looping == 0)
{
SetBufferIndex((_bufferIndex + 1) & 3);
SetBufferIndex((BufferIndex + 1) & 3);
}
OutStatus.PlayedWaveBuffersCount++;
if (wb.LastBuffer != 0)
if (Wb.LastBuffer != 0)
{
PlayState = PlayState.Paused;
}
}
return output;
return Output;
}
private void UpdateBuffer(MemoryManager memory)
private void UpdateBuffer(MemoryManager Memory)
{
//TODO: Implement conversion for formats other
//than interleaved stereo (2 channels).
//As of now, it assumes that HostChannelsCount == 2.
WaveBuffer wb = WaveBuffers[_bufferIndex];
WaveBuffer Wb = WaveBuffers[BufferIndex];
if (wb.Position == 0)
if (Wb.Position == 0)
{
_samples = new int[0];
Samples = new int[0];
return;
}
if (SampleFormat == SampleFormat.PcmInt16)
{
int samplesCount = (int)(wb.Size / (sizeof(short) * ChannelsCount));
int SamplesCount = (int)(Wb.Size / (sizeof(short) * ChannelsCount));
_samples = new int[samplesCount * AudioConsts.HostChannelsCount];
Samples = new int[SamplesCount * AudioConsts.HostChannelsCount];
if (ChannelsCount == 1)
{
for (int index = 0; index < samplesCount; index++)
for (int Index = 0; Index < SamplesCount; Index++)
{
short sample = memory.ReadInt16(wb.Position + index * 2);
short Sample = Memory.ReadInt16(Wb.Position + Index * 2);
_samples[index * 2 + 0] = sample;
_samples[index * 2 + 1] = sample;
Samples[Index * 2 + 0] = Sample;
Samples[Index * 2 + 1] = Sample;
}
}
else
{
for (int index = 0; index < samplesCount * 2; index++)
for (int Index = 0; Index < SamplesCount * 2; Index++)
{
_samples[index] = memory.ReadInt16(wb.Position + index * 2);
Samples[Index] = Memory.ReadInt16(Wb.Position + Index * 2);
}
}
}
else if (SampleFormat == SampleFormat.Adpcm)
{
byte[] buffer = memory.ReadBytes(wb.Position, wb.Size);
byte[] Buffer = Memory.ReadBytes(Wb.Position, Wb.Size);
_samples = AdpcmDecoder.Decode(buffer, AdpcmCtx);
Samples = AdpcmDecoder.Decode(Buffer, AdpcmCtx);
}
else
{
@@ -172,24 +172,24 @@ namespace Ryujinx.HLE.HOS.Services.Aud.AudioRenderer
//TODO: We should keep the frames being discarded (see the 4 below)
//on a buffer and include it on the next samples buffer, to allow
//the resampler to do seamless interpolation between wave buffers.
int samplesCount = _samples.Length / AudioConsts.HostChannelsCount;
int SamplesCount = Samples.Length / AudioConsts.HostChannelsCount;
samplesCount = Math.Max(samplesCount - 4, 0);
SamplesCount = Math.Max(SamplesCount - 4, 0);
_samples = Resampler.Resample2Ch(
_samples,
Samples = Resampler.Resample2Ch(
Samples,
SampleRate,
AudioConsts.HostSampleRate,
samplesCount,
ref _resamplerFracPart);
SamplesCount,
ref ResamplerFracPart);
}
}
public void SetBufferIndex(int index)
public void SetBufferIndex(int Index)
{
_bufferIndex = index & 3;
BufferIndex = Index & 3;
_bufferReload = true;
BufferReload = true;
}
}
}


@@ -10,15 +10,15 @@ namespace Ryujinx.HLE.HOS.Services.Aud
{
class IAudioDevice : IpcService
{
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
private KEvent _systemEvent;
private KEvent SystemEvent;
public IAudioDevice(Horizon system)
public IAudioDevice(Horizon System)
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, ListAudioDeviceName },
{ 1, SetAudioDeviceOutputVolume },
@@ -33,197 +33,197 @@ namespace Ryujinx.HLE.HOS.Services.Aud
{ 12, QueryAudioDeviceOutputEvent }
};
_systemEvent = new KEvent(system);
SystemEvent = new KEvent(System);
//TODO: We shouldn't be signaling this here.
_systemEvent.ReadableEvent.Signal();
SystemEvent.ReadableEvent.Signal();
}
public long ListAudioDeviceName(ServiceCtx context)
public long ListAudioDeviceName(ServiceCtx Context)
{
string[] deviceNames = SystemStateMgr.AudioOutputs;
string[] DeviceNames = SystemStateMgr.AudioOutputs;
context.ResponseData.Write(deviceNames.Length);
Context.ResponseData.Write(DeviceNames.Length);
long position = context.Request.ReceiveBuff[0].Position;
long size = context.Request.ReceiveBuff[0].Size;
long Position = Context.Request.ReceiveBuff[0].Position;
long Size = Context.Request.ReceiveBuff[0].Size;
long basePosition = position;
long BasePosition = Position;
foreach (string name in deviceNames)
foreach (string Name in DeviceNames)
{
byte[] buffer = Encoding.ASCII.GetBytes(name + "\0");
byte[] Buffer = Encoding.ASCII.GetBytes(Name + "\0");
if ((position - basePosition) + buffer.Length > size)
if ((Position - BasePosition) + Buffer.Length > Size)
{
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {size} too small!");
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {Size} too small!");
break;
}
context.Memory.WriteBytes(position, buffer);
Context.Memory.WriteBytes(Position, Buffer);
position += buffer.Length;
Position += Buffer.Length;
}
return 0;
}
public long SetAudioDeviceOutputVolume(ServiceCtx context)
public long SetAudioDeviceOutputVolume(ServiceCtx Context)
{
float volume = context.RequestData.ReadSingle();
float Volume = Context.RequestData.ReadSingle();
long position = context.Request.SendBuff[0].Position;
long size = context.Request.SendBuff[0].Size;
long Position = Context.Request.SendBuff[0].Position;
long Size = Context.Request.SendBuff[0].Size;
byte[] deviceNameBuffer = context.Memory.ReadBytes(position, size);
byte[] DeviceNameBuffer = Context.Memory.ReadBytes(Position, Size);
string deviceName = Encoding.ASCII.GetString(deviceNameBuffer);
string DeviceName = Encoding.ASCII.GetString(DeviceNameBuffer);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
return 0;
}
public long GetActiveAudioDeviceName(ServiceCtx context)
public long GetActiveAudioDeviceName(ServiceCtx Context)
{
string name = context.Device.System.State.ActiveAudioOutput;
string Name = Context.Device.System.State.ActiveAudioOutput;
long position = context.Request.ReceiveBuff[0].Position;
long size = context.Request.ReceiveBuff[0].Size;
long Position = Context.Request.ReceiveBuff[0].Position;
long Size = Context.Request.ReceiveBuff[0].Size;
byte[] deviceNameBuffer = Encoding.ASCII.GetBytes(name + "\0");
byte[] DeviceNameBuffer = Encoding.ASCII.GetBytes(Name + "\0");
if ((ulong)deviceNameBuffer.Length <= (ulong)size)
if ((ulong)DeviceNameBuffer.Length <= (ulong)Size)
{
context.Memory.WriteBytes(position, deviceNameBuffer);
Context.Memory.WriteBytes(Position, DeviceNameBuffer);
}
else
{
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {size} too small!");
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {Size} too small!");
}
return 0;
}
public long QueryAudioDeviceSystemEvent(ServiceCtx context)
public long QueryAudioDeviceSystemEvent(ServiceCtx Context)
{
if (context.Process.HandleTable.GenerateHandle(_systemEvent.ReadableEvent, out int handle) != KernelResult.Success)
if (Context.Process.HandleTable.GenerateHandle(SystemEvent.ReadableEvent, out int Handle) != KernelResult.Success)
{
throw new InvalidOperationException("Out of handles!");
}
context.Response.HandleDesc = IpcHandleDesc.MakeCopy(handle);
Context.Response.HandleDesc = IpcHandleDesc.MakeCopy(Handle);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
return 0;
}
public long GetActiveChannelCount(ServiceCtx context)
public long GetActiveChannelCount(ServiceCtx Context)
{
context.ResponseData.Write(2);
Context.ResponseData.Write(2);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
return 0;
}
public long ListAudioDeviceNameAuto(ServiceCtx context)
public long ListAudioDeviceNameAuto(ServiceCtx Context)
{
string[] deviceNames = SystemStateMgr.AudioOutputs;
string[] DeviceNames = SystemStateMgr.AudioOutputs;
context.ResponseData.Write(deviceNames.Length);
Context.ResponseData.Write(DeviceNames.Length);
(long position, long size) = context.Request.GetBufferType0x22();
(long Position, long Size) = Context.Request.GetBufferType0x22();
long basePosition = position;
long BasePosition = Position;
foreach (string name in deviceNames)
foreach (string Name in DeviceNames)
{
byte[] buffer = Encoding.UTF8.GetBytes(name + '\0');
byte[] Buffer = Encoding.UTF8.GetBytes(Name + '\0');
if ((position - basePosition) + buffer.Length > size)
if ((Position - BasePosition) + Buffer.Length > Size)
{
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {size} too small!");
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {Size} too small!");
break;
}
context.Memory.WriteBytes(position, buffer);
Context.Memory.WriteBytes(Position, Buffer);
position += buffer.Length;
Position += Buffer.Length;
}
return 0;
}
public long SetAudioDeviceOutputVolumeAuto(ServiceCtx context)
public long SetAudioDeviceOutputVolumeAuto(ServiceCtx Context)
{
float volume = context.RequestData.ReadSingle();
float Volume = Context.RequestData.ReadSingle();
(long position, long size) = context.Request.GetBufferType0x21();
(long Position, long Size) = Context.Request.GetBufferType0x21();
byte[] deviceNameBuffer = context.Memory.ReadBytes(position, size);
byte[] DeviceNameBuffer = Context.Memory.ReadBytes(Position, Size);
string deviceName = Encoding.UTF8.GetString(deviceNameBuffer);
string DeviceName = Encoding.UTF8.GetString(DeviceNameBuffer);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
return 0;
}
public long GetAudioDeviceOutputVolumeAuto(ServiceCtx context)
public long GetAudioDeviceOutputVolumeAuto(ServiceCtx Context)
{
context.ResponseData.Write(1f);
Context.ResponseData.Write(1f);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
return 0;
}
public long GetActiveAudioDeviceNameAuto(ServiceCtx context)
public long GetActiveAudioDeviceNameAuto(ServiceCtx Context)
{
string name = context.Device.System.State.ActiveAudioOutput;
string Name = Context.Device.System.State.ActiveAudioOutput;
(long position, long size) = context.Request.GetBufferType0x22();
(long Position, long Size) = Context.Request.GetBufferType0x22();
byte[] deviceNameBuffer = Encoding.UTF8.GetBytes(name + '\0');
byte[] DeviceNameBuffer = Encoding.UTF8.GetBytes(Name + '\0');
if ((ulong)deviceNameBuffer.Length <= (ulong)size)
if ((ulong)DeviceNameBuffer.Length <= (ulong)Size)
{
context.Memory.WriteBytes(position, deviceNameBuffer);
Context.Memory.WriteBytes(Position, DeviceNameBuffer);
}
else
{
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {size} too small!");
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {Size} too small!");
}
return 0;
}
public long QueryAudioDeviceInputEvent(ServiceCtx context)
public long QueryAudioDeviceInputEvent(ServiceCtx Context)
{
if (context.Process.HandleTable.GenerateHandle(_systemEvent.ReadableEvent, out int handle) != KernelResult.Success)
if (Context.Process.HandleTable.GenerateHandle(SystemEvent.ReadableEvent, out int Handle) != KernelResult.Success)
{
throw new InvalidOperationException("Out of handles!");
}
context.Response.HandleDesc = IpcHandleDesc.MakeCopy(handle);
Context.Response.HandleDesc = IpcHandleDesc.MakeCopy(Handle);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");
return 0;
}
public long QueryAudioDeviceOutputEvent(ServiceCtx context)
public long QueryAudioDeviceOutputEvent(ServiceCtx Context)
{
if (context.Process.HandleTable.GenerateHandle(_systemEvent.ReadableEvent, out int handle) != KernelResult.Success)
if (Context.Process.HandleTable.GenerateHandle(SystemEvent.ReadableEvent, out int Handle) != KernelResult.Success)
{
throw new InvalidOperationException("Out of handles!");
}
context.Response.HandleDesc = IpcHandleDesc.MakeCopy(handle);
Context.Response.HandleDesc = IpcHandleDesc.MakeCopy(Handle);
Logger.PrintStub(LogClass.ServiceAudio, "Stubbed.");


@@ -19,13 +19,13 @@ namespace Ryujinx.HLE.HOS.Services.Aud
private const int DefaultChannelsCount = 2;
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
public IAudioOutManager()
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, ListAudioOuts },
{ 1, OpenAudioOut },
@@ -34,135 +34,135 @@ namespace Ryujinx.HLE.HOS.Services.Aud
};
}
public long ListAudioOuts(ServiceCtx context)
public long ListAudioOuts(ServiceCtx Context)
{
return ListAudioOutsImpl(
context,
context.Request.ReceiveBuff[0].Position,
context.Request.ReceiveBuff[0].Size);
Context,
Context.Request.ReceiveBuff[0].Position,
Context.Request.ReceiveBuff[0].Size);
}
public long OpenAudioOut(ServiceCtx context)
public long OpenAudioOut(ServiceCtx Context)
{
return OpenAudioOutImpl(
context,
context.Request.SendBuff[0].Position,
context.Request.SendBuff[0].Size,
context.Request.ReceiveBuff[0].Position,
context.Request.ReceiveBuff[0].Size);
Context,
Context.Request.SendBuff[0].Position,
Context.Request.SendBuff[0].Size,
Context.Request.ReceiveBuff[0].Position,
Context.Request.ReceiveBuff[0].Size);
}
public long ListAudioOutsAuto(ServiceCtx context)
public long ListAudioOutsAuto(ServiceCtx Context)
{
(long recvPosition, long recvSize) = context.Request.GetBufferType0x22();
(long RecvPosition, long RecvSize) = Context.Request.GetBufferType0x22();
return ListAudioOutsImpl(context, recvPosition, recvSize);
return ListAudioOutsImpl(Context, RecvPosition, RecvSize);
}
public long OpenAudioOutAuto(ServiceCtx context)
public long OpenAudioOutAuto(ServiceCtx Context)
{
(long sendPosition, long sendSize) = context.Request.GetBufferType0x21();
(long recvPosition, long recvSize) = context.Request.GetBufferType0x22();
(long SendPosition, long SendSize) = Context.Request.GetBufferType0x21();
(long RecvPosition, long RecvSize) = Context.Request.GetBufferType0x22();
return OpenAudioOutImpl(
context,
sendPosition,
sendSize,
recvPosition,
recvSize);
Context,
SendPosition,
SendSize,
RecvPosition,
RecvSize);
}
private long ListAudioOutsImpl(ServiceCtx context, long position, long size)
private long ListAudioOutsImpl(ServiceCtx Context, long Position, long Size)
{
int nameCount = 0;
int NameCount = 0;
byte[] deviceNameBuffer = Encoding.ASCII.GetBytes(DefaultAudioOutput + "\0");
byte[] DeviceNameBuffer = Encoding.ASCII.GetBytes(DefaultAudioOutput + "\0");
if ((ulong)deviceNameBuffer.Length <= (ulong)size)
if ((ulong)DeviceNameBuffer.Length <= (ulong)Size)
{
context.Memory.WriteBytes(position, deviceNameBuffer);
Context.Memory.WriteBytes(Position, DeviceNameBuffer);
nameCount++;
NameCount++;
}
else
{
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {size} too small!");
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {Size} too small!");
}
context.ResponseData.Write(nameCount);
Context.ResponseData.Write(NameCount);
return 0;
}
private long OpenAudioOutImpl(ServiceCtx context, long sendPosition, long sendSize, long receivePosition, long receiveSize)
private long OpenAudioOutImpl(ServiceCtx Context, long SendPosition, long SendSize, long ReceivePosition, long ReceiveSize)
{
string deviceName = MemoryHelper.ReadAsciiString(
context.Memory,
sendPosition,
sendSize);
string DeviceName = MemoryHelper.ReadAsciiString(
Context.Memory,
SendPosition,
SendSize);
if (deviceName == string.Empty)
if (DeviceName == string.Empty)
{
deviceName = DefaultAudioOutput;
DeviceName = DefaultAudioOutput;
}
if (deviceName != DefaultAudioOutput)
if (DeviceName != DefaultAudioOutput)
{
Logger.PrintWarning(LogClass.Audio, "Invalid device name!");
return MakeError(ErrorModule.Audio, AudErr.DeviceNotFound);
}
byte[] deviceNameBuffer = Encoding.ASCII.GetBytes(deviceName + "\0");
byte[] DeviceNameBuffer = Encoding.ASCII.GetBytes(DeviceName + "\0");
if ((ulong)deviceNameBuffer.Length <= (ulong)receiveSize)
if ((ulong)DeviceNameBuffer.Length <= (ulong)ReceiveSize)
{
context.Memory.WriteBytes(receivePosition, deviceNameBuffer);
Context.Memory.WriteBytes(ReceivePosition, DeviceNameBuffer);
}
else
{
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {receiveSize} too small!");
Logger.PrintError(LogClass.ServiceAudio, $"Output buffer size {ReceiveSize} too small!");
}
int sampleRate = context.RequestData.ReadInt32();
int channels = context.RequestData.ReadInt32();
int SampleRate = Context.RequestData.ReadInt32();
int Channels = Context.RequestData.ReadInt32();
if (sampleRate == 0)
if (SampleRate == 0)
{
sampleRate = DefaultSampleRate;
SampleRate = DefaultSampleRate;
}
if (sampleRate != DefaultSampleRate)
if (SampleRate != DefaultSampleRate)
{
Logger.PrintWarning(LogClass.Audio, "Invalid sample rate!");
return MakeError(ErrorModule.Audio, AudErr.UnsupportedSampleRate);
}
channels = (ushort)channels;
Channels = (ushort)Channels;
if (channels == 0)
if (Channels == 0)
{
channels = DefaultChannelsCount;
Channels = DefaultChannelsCount;
}
KEvent releaseEvent = new KEvent(context.Device.System);
KEvent ReleaseEvent = new KEvent(Context.Device.System);
ReleaseCallback callback = () =>
ReleaseCallback Callback = () =>
{
releaseEvent.ReadableEvent.Signal();
ReleaseEvent.ReadableEvent.Signal();
};
IAalOutput audioOut = context.Device.AudioOut;
IAalOutput AudioOut = Context.Device.AudioOut;
int track = audioOut.OpenTrack(sampleRate, channels, callback);
int Track = AudioOut.OpenTrack(SampleRate, Channels, Callback);
MakeObject(context, new IAudioOut(audioOut, releaseEvent, track));
MakeObject(Context, new IAudioOut(AudioOut, ReleaseEvent, Track));
context.ResponseData.Write(sampleRate);
context.ResponseData.Write(channels);
context.ResponseData.Write((int)SampleFormat.PcmInt16);
context.ResponseData.Write((int)PlaybackState.Stopped);
Context.ResponseData.Write(SampleRate);
Context.ResponseData.Write(Channels);
Context.ResponseData.Write((int)SampleFormat.PcmInt16);
Context.ResponseData.Write((int)PlaybackState.Stopped);
return 0;
}


@@ -20,13 +20,13 @@ namespace Ryujinx.HLE.HOS.Services.Aud
public const int RevMagic = Rev0Magic + (Rev << 24);
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
public IAudioRendererManager()
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, OpenAudioRenderer },
{ 1, GetAudioRendererWorkBufferSize },
@@ -35,81 +35,81 @@ namespace Ryujinx.HLE.HOS.Services.Aud
};
}
public long OpenAudioRenderer(ServiceCtx context)
public long OpenAudioRenderer(ServiceCtx Context)
{
IAalOutput audioOut = context.Device.AudioOut;
IAalOutput AudioOut = Context.Device.AudioOut;
AudioRendererParameter Params = GetAudioRendererParameter(context);
AudioRendererParameter Params = GetAudioRendererParameter(Context);
MakeObject(context, new IAudioRenderer(
context.Device.System,
context.Memory,
audioOut,
MakeObject(Context, new IAudioRenderer(
Context.Device.System,
Context.Memory,
AudioOut,
Params));
return 0;
}
public long GetAudioRendererWorkBufferSize(ServiceCtx context)
public long GetAudioRendererWorkBufferSize(ServiceCtx Context)
{
AudioRendererParameter Params = GetAudioRendererParameter(context);
AudioRendererParameter Params = GetAudioRendererParameter(Context);
int revision = (Params.Revision - Rev0Magic) >> 24;
int Revision = (Params.Revision - Rev0Magic) >> 24;
if (revision <= Rev)
if (Revision <= Rev)
{
bool isSplitterSupported = revision >= 3;
bool IsSplitterSupported = Revision >= 3;
long size;
long Size;
size = IntUtils.AlignUp(Params.Unknown8 * 4, 64);
size += Params.MixCount * 0x400;
size += (Params.MixCount + 1) * 0x940;
size += Params.VoiceCount * 0x3F0;
size += IntUtils.AlignUp((Params.MixCount + 1) * 8, 16);
size += IntUtils.AlignUp(Params.VoiceCount * 8, 16);
size += IntUtils.AlignUp(
Size = IntUtils.AlignUp(Params.Unknown8 * 4, 64);
Size += Params.MixCount * 0x400;
Size += (Params.MixCount + 1) * 0x940;
Size += Params.VoiceCount * 0x3F0;
Size += IntUtils.AlignUp((Params.MixCount + 1) * 8, 16);
Size += IntUtils.AlignUp(Params.VoiceCount * 8, 16);
Size += IntUtils.AlignUp(
((Params.SinkCount + Params.MixCount) * 0x3C0 + Params.SampleCount * 4) *
(Params.Unknown8 + 6), 64);
size += (Params.SinkCount + Params.MixCount) * 0x2C0;
size += (Params.EffectCount + Params.VoiceCount * 4) * 0x30 + 0x50;
Size += (Params.SinkCount + Params.MixCount) * 0x2C0;
Size += (Params.EffectCount + Params.VoiceCount * 4) * 0x30 + 0x50;
if (isSplitterSupported)
if (IsSplitterSupported)
{
size += IntUtils.AlignUp((
Size += IntUtils.AlignUp((
NodeStatesGetWorkBufferSize(Params.MixCount + 1) +
EdgeMatrixGetWorkBufferSize(Params.MixCount + 1)), 16);
size += Params.SplitterDestinationDataCount * 0xE0;
size += Params.SplitterCount * 0x20;
size += IntUtils.AlignUp(Params.SplitterDestinationDataCount * 4, 16);
Size += Params.SplitterDestinationDataCount * 0xE0;
Size += Params.SplitterCount * 0x20;
Size += IntUtils.AlignUp(Params.SplitterDestinationDataCount * 4, 16);
}
size = Params.EffectCount * 0x4C0 +
Size = Params.EffectCount * 0x4C0 +
Params.SinkCount * 0x170 +
Params.VoiceCount * 0x100 +
IntUtils.AlignUp(size, 64) + 0x40;
IntUtils.AlignUp(Size, 64) + 0x40;
if (Params.PerformanceManagerCount >= 1)
{
size += (((Params.EffectCount +
Size += (((Params.EffectCount +
Params.SinkCount +
Params.VoiceCount +
Params.MixCount + 1) * 16 + 0x658) *
(Params.PerformanceManagerCount + 1) + 0x13F) & ~0x3FL;
}
size = (size + 0x1907D) & ~0xFFFL;
Size = (Size + 0x1907D) & ~0xFFFL;
context.ResponseData.Write(size);
Context.ResponseData.Write(Size);
Logger.PrintDebug(LogClass.ServiceAudio, $"WorkBufferSize is 0x{size:x16}.");
Logger.PrintDebug(LogClass.ServiceAudio, $"WorkBufferSize is 0x{Size:x16}.");
return 0;
}
else
{
context.ResponseData.Write(0L);
Context.ResponseData.Write(0L);
Logger.PrintWarning(LogClass.ServiceAudio, $"Library Revision 0x{Params.Revision:x8} is not supported!");
@@ -117,71 +117,71 @@ namespace Ryujinx.HLE.HOS.Services.Aud
}
}
private AudioRendererParameter GetAudioRendererParameter(ServiceCtx context)
private AudioRendererParameter GetAudioRendererParameter(ServiceCtx Context)
{
AudioRendererParameter Params = new AudioRendererParameter();
Params.SampleRate = context.RequestData.ReadInt32();
Params.SampleCount = context.RequestData.ReadInt32();
Params.Unknown8 = context.RequestData.ReadInt32();
Params.MixCount = context.RequestData.ReadInt32();
Params.VoiceCount = context.RequestData.ReadInt32();
Params.SinkCount = context.RequestData.ReadInt32();
Params.EffectCount = context.RequestData.ReadInt32();
Params.PerformanceManagerCount = context.RequestData.ReadInt32();
Params.VoiceDropEnable = context.RequestData.ReadInt32();
Params.SplitterCount = context.RequestData.ReadInt32();
Params.SplitterDestinationDataCount = context.RequestData.ReadInt32();
Params.Unknown2C = context.RequestData.ReadInt32();
Params.Revision = context.RequestData.ReadInt32();
Params.SampleRate = Context.RequestData.ReadInt32();
Params.SampleCount = Context.RequestData.ReadInt32();
Params.Unknown8 = Context.RequestData.ReadInt32();
Params.MixCount = Context.RequestData.ReadInt32();
Params.VoiceCount = Context.RequestData.ReadInt32();
Params.SinkCount = Context.RequestData.ReadInt32();
Params.EffectCount = Context.RequestData.ReadInt32();
Params.PerformanceManagerCount = Context.RequestData.ReadInt32();
Params.VoiceDropEnable = Context.RequestData.ReadInt32();
Params.SplitterCount = Context.RequestData.ReadInt32();
Params.SplitterDestinationDataCount = Context.RequestData.ReadInt32();
Params.Unknown2C = Context.RequestData.ReadInt32();
Params.Revision = Context.RequestData.ReadInt32();
return Params;
}
private static int NodeStatesGetWorkBufferSize(int value)
private static int NodeStatesGetWorkBufferSize(int Value)
{
int result = IntUtils.AlignUp(value, 64);
int Result = IntUtils.AlignUp(Value, 64);
if (result < 0)
if (Result < 0)
{
result |= 7;
Result |= 7;
}
return 4 * (value * value) + 0x12 * value + 2 * (result / 8);
return 4 * (Value * Value) + 0x12 * Value + 2 * (Result / 8);
}
private static int EdgeMatrixGetWorkBufferSize(int value)
private static int EdgeMatrixGetWorkBufferSize(int Value)
{
int result = IntUtils.AlignUp(value * value, 64);
int Result = IntUtils.AlignUp(Value * Value, 64);
if (result < 0)
if (Result < 0)
{
result |= 7;
Result |= 7;
}
return result / 8;
return Result / 8;
}
// GetAudioDeviceService(nn::applet::AppletResourceUserId) -> object<nn::audio::detail::IAudioDevice>
public long GetAudioDeviceService(ServiceCtx context)
public long GetAudioDeviceService(ServiceCtx Context)
{
long appletResourceUserId = context.RequestData.ReadInt64();
long AppletResourceUserId = Context.RequestData.ReadInt64();
MakeObject(context, new IAudioDevice(context.Device.System));
MakeObject(Context, new IAudioDevice(Context.Device.System));
return 0;
}
// GetAudioDeviceServiceWithRevisionInfo(nn::applet::AppletResourceUserId, u32) -> object<nn::audio::detail::IAudioDevice>
private long GetAudioDeviceServiceWithRevisionInfo(ServiceCtx context)
private long GetAudioDeviceServiceWithRevisionInfo(ServiceCtx Context)
{
long appletResourceUserId = context.RequestData.ReadInt64();
int revisionInfo = context.RequestData.ReadInt32();
long AppletResourceUserId = Context.RequestData.ReadInt64();
int RevisionInfo = Context.RequestData.ReadInt32();
Logger.PrintStub(LogClass.ServiceAudio, $"Stubbed. AppletResourceUserId: {appletResourceUserId} - " +
$"RevisionInfo: {revisionInfo}");
Logger.PrintStub(LogClass.ServiceAudio, $"Stubbed. AppletResourceUserId: {AppletResourceUserId} - " +
$"RevisionInfo: {RevisionInfo}");
return GetAudioDeviceService(context);
return GetAudioDeviceService(Context);
}
}
}


@@ -10,80 +10,80 @@ namespace Ryujinx.HLE.HOS.Services.Aud
{
private const int FixedSampleRate = 48000;
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
private int _sampleRate;
private int _channelsCount;
private int SampleRate;
private int ChannelsCount;
private OpusDecoder _decoder;
private OpusDecoder Decoder;
public IHardwareOpusDecoder(int sampleRate, int channelsCount)
public IHardwareOpusDecoder(int SampleRate, int ChannelsCount)
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, DecodeInterleaved },
{ 4, DecodeInterleavedWithPerf }
};
_sampleRate = sampleRate;
_channelsCount = channelsCount;
this.SampleRate = SampleRate;
this.ChannelsCount = ChannelsCount;
_decoder = new OpusDecoder(FixedSampleRate, channelsCount);
Decoder = new OpusDecoder(FixedSampleRate, ChannelsCount);
}
public long DecodeInterleavedWithPerf(ServiceCtx context)
public long DecodeInterleavedWithPerf(ServiceCtx Context)
{
long result = DecodeInterleaved(context);
long Result = DecodeInterleaved(Context);
//TODO: Figure out what this value is.
//According to switchbrew, it is now used.
context.ResponseData.Write(0L);
Context.ResponseData.Write(0L);
return result;
return Result;
}
public long DecodeInterleaved(ServiceCtx context)
public long DecodeInterleaved(ServiceCtx Context)
{
long inPosition = context.Request.SendBuff[0].Position;
long inSize = context.Request.SendBuff[0].Size;
long InPosition = Context.Request.SendBuff[0].Position;
long InSize = Context.Request.SendBuff[0].Size;
if (inSize < 8)
if (InSize < 8)
{
return MakeError(ErrorModule.Audio, AudErr.OpusInvalidInput);
}
long outPosition = context.Request.ReceiveBuff[0].Position;
long outSize = context.Request.ReceiveBuff[0].Size;
long OutPosition = Context.Request.ReceiveBuff[0].Position;
long OutSize = Context.Request.ReceiveBuff[0].Size;
byte[] opusData = context.Memory.ReadBytes(inPosition, inSize);
byte[] OpusData = Context.Memory.ReadBytes(InPosition, InSize);
int processed = ((opusData[0] << 24) |
(opusData[1] << 16) |
(opusData[2] << 8) |
(opusData[3] << 0)) + 8;
int Processed = ((OpusData[0] << 24) |
(OpusData[1] << 16) |
(OpusData[2] << 8) |
(OpusData[3] << 0)) + 8;
if ((uint)processed > (ulong)inSize)
if ((uint)Processed > (ulong)InSize)
{
return MakeError(ErrorModule.Audio, AudErr.OpusInvalidInput);
}
short[] pcm = new short[outSize / 2];
short[] Pcm = new short[OutSize / 2];
int frameSize = pcm.Length / (_channelsCount * 2);
int FrameSize = Pcm.Length / (ChannelsCount * 2);
int samples = _decoder.Decode(opusData, 0, opusData.Length, pcm, 0, frameSize);
int Samples = Decoder.Decode(OpusData, 0, OpusData.Length, Pcm, 0, FrameSize);
foreach (short sample in pcm)
foreach (short Sample in Pcm)
{
context.Memory.WriteInt16(outPosition, sample);
Context.Memory.WriteInt16(OutPosition, Sample);
outPosition += 2;
OutPosition += 2;
}
context.ResponseData.Write(processed);
context.ResponseData.Write(samples);
Context.ResponseData.Write(Processed);
Context.ResponseData.Write(Samples);
return 0;
}


@@ -5,68 +5,68 @@ namespace Ryujinx.HLE.HOS.Services.Aud
{
class IHardwareOpusDecoderManager : IpcService
{
private Dictionary<int, ServiceProcessRequest> _commands;
private Dictionary<int, ServiceProcessRequest> m_Commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => _commands;
public override IReadOnlyDictionary<int, ServiceProcessRequest> Commands => m_Commands;
public IHardwareOpusDecoderManager()
{
_commands = new Dictionary<int, ServiceProcessRequest>
m_Commands = new Dictionary<int, ServiceProcessRequest>()
{
{ 0, Initialize },
{ 1, GetWorkBufferSize }
};
}
public long Initialize(ServiceCtx context)
public long Initialize(ServiceCtx Context)
{
int sampleRate = context.RequestData.ReadInt32();
int channelsCount = context.RequestData.ReadInt32();
int SampleRate = Context.RequestData.ReadInt32();
int ChannelsCount = Context.RequestData.ReadInt32();
MakeObject(context, new IHardwareOpusDecoder(sampleRate, channelsCount));
MakeObject(Context, new IHardwareOpusDecoder(SampleRate, ChannelsCount));
return 0;
}
public long GetWorkBufferSize(ServiceCtx context)
public long GetWorkBufferSize(ServiceCtx Context)
{
//Note: The sample rate is ignored because it is fixed to 48KHz.
int sampleRate = context.RequestData.ReadInt32();
int channelsCount = context.RequestData.ReadInt32();
int SampleRate = Context.RequestData.ReadInt32();
int ChannelsCount = Context.RequestData.ReadInt32();
context.ResponseData.Write(GetOpusDecoderSize(channelsCount));
Context.ResponseData.Write(GetOpusDecoderSize(ChannelsCount));
return 0;
}
private static int GetOpusDecoderSize(int channelsCount)
private static int GetOpusDecoderSize(int ChannelsCount)
{
const int silkDecoderSize = 0x2198;
const int SilkDecoderSize = 0x2198;
if (channelsCount < 1 || channelsCount > 2)
if (ChannelsCount < 1 || ChannelsCount > 2)
{
return 0;
}
int celtDecoderSize = GetCeltDecoderSize(channelsCount);
int CeltDecoderSize = GetCeltDecoderSize(ChannelsCount);
int opusDecoderSize = (channelsCount * 0x800 + 0x4807) & -0x800 | 0x50;
int OpusDecoderSize = (ChannelsCount * 0x800 + 0x4807) & -0x800 | 0x50;
return opusDecoderSize + silkDecoderSize + celtDecoderSize;
return OpusDecoderSize + SilkDecoderSize + CeltDecoderSize;
}
private static int GetCeltDecoderSize(int channelsCount)
private static int GetCeltDecoderSize(int ChannelsCount)
{
const int decodeBufferSize = 0x2030;
const int celtDecoderSize = 0x58;
const int celtSigSize = 0x4;
const int overlap = 120;
const int eBandsCount = 21;
const int DecodeBufferSize = 0x2030;
const int CeltDecoderSize = 0x58;
const int CeltSigSize = 0x4;
const int Overlap = 120;
const int EBandsCount = 21;
return (decodeBufferSize + overlap * 4) * channelsCount +
eBandsCount * 16 +
celtDecoderSize +
celtSigSize;
return (DecodeBufferSize + Overlap * 4) * ChannelsCount +
EBandsCount * 16 +
CeltDecoderSize +
CeltSigSize;
}
}
}
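For reference, plugging a stereo stream (ChannelsCount = 2) into the size helpers above gives:

    // GetCeltDecoderSize(2) = (0x2030 + 120 * 4) * 2 + 21 * 16 + 0x58 + 0x4  = 0x45CC
    // OpusDecoderSize       = (2 * 0x800 + 0x4807) & -0x800 | 0x50           = 0x5850
    // GetOpusDecoderSize(2) = OpusDecoderSize + SilkDecoderSize + CeltDecoderSize
    //                       = 0x5850 + 0x2198 + 0x45CC                       = 0xBFB4 (49,076 bytes)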