mirror of
https://git.naxdy.org/Mirror/Ryujinx.git
synced 2024-11-15 01:25:25 +00:00
a64fee29dc
* Flush in the middle of long command buffers. * Vulkan: add situational "Fast Flush" mode The AutoFlushCounter class was added to periodically flush Vulkan command buffers throughout a frame, which reduces latency to the GPU as commands are submitted and processed much sooner. This was done by allowing command buffers to flush when framebuffer attachments changed. However, some games have incredibly long render passes with a large number of draws, and really aggressive data access that forces GPU sync. The Vulkan backend could potentially end up building a single command buffer for 4-5ms if a pass has enough draws, such as in BOTW. In the scenario where sync is waited on immediately after submission, this would have to wait for the completion of a much longer command buffer than usual. The solution is to force command buffer submission periodically in a "fast flush" mode. This will end up splitting render passes, but it will only enable if sync is aggressive enough. This should improve performance in GPU limited scenarios, or in games that aggressively wait on synchronization. In some games, it may only kick in when res scaling. It won't trigger in games like SMO where sync is not an issue. Improves performance in Pokemon Scarlet/Violet (res scaled) and BOTW (in general). * Add conversions in milliseconds next to flush timers.
207 lines
5.9 KiB
C#
207 lines
5.9 KiB
C#
using Ryujinx.Common.Logging;
|
|
using Silk.NET.Vulkan;
|
|
using System.Collections.Generic;
|
|
using System.Diagnostics;
|
|
using System.Linq;
|
|
|
|
namespace Ryujinx.Graphics.Vulkan
|
|
{
|
|
class SyncManager
{
    /// <summary>
    /// Represents a single sync point: the set of fences to wait on, the flush
    /// generation it was created on, and whether it has been seen as signalled.
    /// </summary>
    private class SyncHandle
    {
        public ulong ID;
        public MultiFenceHolder Waitable;
        public ulong FlushId;
        public bool Signalled;

        public bool NeedsFlush(ulong currentFlushId)
        {
            // Signed, wraparound-safe comparison: this handle still needs a flush
            // when the flush generation it was created on has not been submitted yet.
            return (long)(FlushId - currentFlushId) >= 0;
        }
    }

    // ID below which every handle has been signalled and removed by Cleanup().
    private ulong _firstHandle;

    private readonly VulkanRenderer _gd;
    private readonly Device _device;
    private readonly List<SyncHandle> _handles;

    // Incremented on every registered command buffer flush; compared against
    // SyncHandle.FlushId to tell whether a sync point's commands were submitted.
    private ulong _flushId;

    // Accumulated host time (Stopwatch ticks) spent blocking inside Wait().
    private long _waitTicks;

    public SyncManager(VulkanRenderer gd, Device device)
    {
        _gd = gd;
        _device = device;
        _handles = new List<SyncHandle>();
    }

    /// <summary>
    /// Records that a command buffer flush has occurred.
    /// </summary>
    public void RegisterFlush()
    {
        _flushId++;
    }

    /// <summary>
    /// Creates a new sync point with the given ID.
    /// </summary>
    /// <param name="id">ID of the new sync point</param>
    /// <param name="strict">True to force all pending commands to be submitted immediately</param>
    public void Create(ulong id, bool strict)
    {
        ulong flushId = _flushId;
        MultiFenceHolder waitable = new MultiFenceHolder();

        if (strict || _gd.InterruptAction == null)
        {
            _gd.FlushAllCommands();
            _gd.CommandBufferPool.AddWaitable(waitable);
        }
        else
        {
            // Don't flush commands, instead wait for the current command buffer to finish.
            // If this sync is waited on before the command buffer is submitted, interrupt the gpu thread and flush it manually.

            _gd.CommandBufferPool.AddInUseWaitable(waitable);
        }

        SyncHandle handle = new SyncHandle
        {
            ID = id,
            Waitable = waitable,
            FlushId = flushId
        };

        lock (_handles)
        {
            _handles.Add(handle);
        }
    }

    /// <summary>
    /// Gets the ID of the most recent sync point that the GPU has signalled.
    /// </summary>
    /// <returns>ID of the latest signalled sync point, or the last removed ID if none are signalled</returns>
    public ulong GetCurrent()
    {
        lock (_handles)
        {
            ulong lastHandle = _firstHandle;

            foreach (SyncHandle handle in _handles)
            {
                lock (handle)
                {
                    if (handle.Waitable == null)
                    {
                        continue;
                    }

                    if (handle.ID > lastHandle)
                    {
                        // Poll the fences without blocking (zero timeout).
                        bool signaled = handle.Signalled || handle.Waitable.WaitForFences(_gd.Api, _device, 0);
                        if (signaled)
                        {
                            lastHandle = handle.ID;
                            handle.Signalled = true;
                        }
                    }
                }
            }

            return lastHandle;
        }
    }

    /// <summary>
    /// Blocks until the sync point with the given ID is signalled, first forcing
    /// a flush if its commands were never submitted. Logs an error and continues
    /// if the fences fail to signal within one second.
    /// </summary>
    /// <param name="id">ID of the sync point to wait on</param>
    public void Wait(ulong id)
    {
        SyncHandle result = null;

        lock (_handles)
        {
            if ((long)(_firstHandle - id) > 0)
            {
                return; // The handle has already been signalled or deleted.
            }

            foreach (SyncHandle handle in _handles)
            {
                if (handle.ID == id)
                {
                    result = handle;
                    break;
                }
            }
        }

        if (result != null)
        {
            lock (result)
            {
                if (result.Waitable == null)
                {
                    // Already cleaned up, so it must have been signalled.
                    return;
                }

                long beforeTicks = Stopwatch.GetTimestamp();

                if (result.NeedsFlush(_flushId))
                {
                    // The commands this sync point depends on were never submitted.
                    // Interrupt the GPU thread to flush them; re-check inside the
                    // interrupt in case a flush happened in the meantime.
                    _gd.InterruptAction(() =>
                    {
                        if (result.NeedsFlush(_flushId))
                        {
                            _gd.FlushAllCommands();
                        }
                    });
                }

                // 1000000000 ns == 1000 ms.
                bool signaled = result.Signalled || result.Waitable.WaitForFences(_gd.Api, _device, 1000000000);

                if (!signaled)
                {
                    Logger.Error?.PrintMsg(LogClass.Gpu, $"VK Sync Object {result.ID} failed to signal within 1000ms. Continuing...");
                }
                else
                {
                    _waitTicks += Stopwatch.GetTimestamp() - beforeTicks;
                    result.Signalled = true;
                }
            }
        }
    }

    /// <summary>
    /// Removes sync points that have already been signalled, oldest first,
    /// stopping at the first one that is unsignalled or not yet submitted.
    /// </summary>
    public void Cleanup()
    {
        // Iterate through handles and remove any that have already been signalled.

        while (true)
        {
            SyncHandle first = null;
            lock (_handles)
            {
                first = _handles.FirstOrDefault();
            }

            // A handle whose commands were never flushed cannot be signalled,
            // and neither can any handle created after it.
            if (first == null || first.NeedsFlush(_flushId))
            {
                break;
            }

            // Poll without blocking (zero timeout).
            // NOTE(review): Waitable is read here without holding first's lock;
            // presumably Cleanup is only invoked from a single thread — confirm.
            bool signaled = first.Waitable.WaitForFences(_gd.Api, _device, 0);
            if (signaled)
            {
                // Delete the sync object.
                lock (_handles)
                {
                    lock (first)
                    {
                        _firstHandle = first.ID + 1;
                        _handles.RemoveAt(0);
                        first.Waitable = null;
                    }
                }
            }
            else
            {
                // This sync handle and any following have not been reached yet.
                break;
            }
        }
    }

    /// <summary>
    /// Returns the host time spent blocking in <see cref="Wait"/> since the
    /// last call, in Stopwatch ticks, and resets the counter.
    /// </summary>
    /// <returns>Accumulated wait time in Stopwatch ticks</returns>
    public long GetAndResetWaitTicks()
    {
        long result = _waitTicks;
        _waitTicks = 0;

        return result;
    }
}
|
|
}
|