mirror of
https://git.naxdy.org/Mirror/Ryujinx.git
synced 2024-12-28 19:43:04 +00:00
492a046335
* Initial implementation of buffer mirrors Generally slower right now, goal is to reduce render passes in games that do inline updates Fix support buffer mirrors Reintroduce vertex buffer mirror Add storage buffer support Optimisation part 1 More optimisation Avoid useless data copies. Remove unused cbIndex stuff Properly set write flag for storage buffers. Fix minor issues Not sure why this was here. Fix BufferRangeList Fix some big issues Align storage buffers rather than getting full buffer as a range Improves mirrorability of read-only storage buffers Increase staging buffer size, as it now contains mirrors Fix some issues with buffers not updating Fix buffer SetDataUnchecked offset for one of the paths when using mirrors Fix buffer mirrors interaction with buffer textures Fix mirror rebinding Move GetBuffer calls on indirect draws before BeginRenderPass to avoid draws without render pass Fix mirrors rebase Fix rebase 2023 * Fix crash when using stale vertex buffer Similar to `Get` with a size that's too large, just treat it as a clamp. * Explicitly set support buffer as mirrorable * Address feedback * Remove unused fragment of MVK workaround * Replace logging for staging buffer OOM * Address format issues * Address more format issues * Mini cleanup * Address more things * Rename BufferRangeList * Support bounding range for ClearMirrors and UploadPendingData * Add maximum size for vertex buffer mirrors * Enable index buffer mirrors Enabled on all platforms for the IbStreamer. * Feedback * Remove mystery BufferCache change Probably macos related? * Fix mirrors not creating when staging buffer is empty. * Change log level to debug
82 lines
2.5 KiB
C#
82 lines
2.5 KiB
C#
namespace Ryujinx.Graphics.Vulkan
{
    /// <summary>
    /// Tracks which regions of a buffer have been read or written by each in-flight
    /// command buffer, at a fixed byte granularity. Each command buffer owns a
    /// contiguous section of the bitmap holding two bit ranges: one for reads,
    /// one for writes.
    /// </summary>
    internal class BufferUsageBitmap
    {
        private readonly BitMap _bitmap;
        private readonly int _size;
        private readonly int _granularity;
        private readonly int _bits;
        private readonly int _writeBitOffset;

        private readonly int _intsPerCb;
        private readonly int _bitsPerCb;

        /// <summary>
        /// Creates a new usage bitmap.
        /// </summary>
        /// <param name="size">Size of the tracked buffer, in bytes</param>
        /// <param name="granularity">Number of bytes covered by a single bit</param>
        public BufferUsageBitmap(int size, int granularity)
        {
            _size = size;
            _granularity = granularity;

            // There are two sets of bits - one for read tracking, and the other for write.
            int bits = (size + (granularity - 1)) / granularity;
            _writeBitOffset = bits;
            _bits = bits << 1;

            // Round each command buffer's section up to a whole number of ints, so a
            // section can be cleared with ClearInt without touching its neighbours.
            _intsPerCb = (_bits + (BitMap.IntSize - 1)) / BitMap.IntSize;
            _bitsPerCb = _intsPerCb * BitMap.IntSize;

            _bitmap = new BitMap(_bitsPerCb * CommandBufferPool.MaxCommandBuffers);
        }

        /// <summary>
        /// Marks a byte range of the buffer as used by the given command buffer.
        /// </summary>
        /// <param name="cbIndex">Index of the command buffer that uses the range</param>
        /// <param name="offset">Start offset of the range, in bytes</param>
        /// <param name="size">Size of the range, in bytes</param>
        /// <param name="write">True to record a write, false to record a read</param>
        public void Add(int cbIndex, int offset, int size, bool write)
        {
            if (size == 0)
            {
                return;
            }

            // Some usages can be out of bounds (vertex buffer on amd), so bound if necessary.
            if (offset + size > _size)
            {
                size = _size - offset;

                // If the offset itself lies beyond the buffer, there is nothing to track;
                // proceeding would compute bit indices outside this cb's section.
                if (size <= 0)
                {
                    return;
                }
            }

            int cbBase = cbIndex * _bitsPerCb + (write ? _writeBitOffset : 0);
            int start = cbBase + offset / _granularity;
            int end = cbBase + (offset + size - 1) / _granularity;

            _bitmap.SetRange(start, end);
        }

        /// <summary>
        /// Checks if a byte range of the buffer was used by the given command buffer.
        /// </summary>
        /// <param name="cbIndex">Index of the command buffer to check</param>
        /// <param name="offset">Start offset of the range, in bytes</param>
        /// <param name="size">Size of the range, in bytes</param>
        /// <param name="write">True to check recorded writes, false to check reads</param>
        /// <returns>True if any bit in the range is set, false otherwise</returns>
        public bool OverlapsWith(int cbIndex, int offset, int size, bool write = false)
        {
            if (size == 0)
            {
                return false;
            }

            // Bound out of range queries, consistent with Add, so that bit indices
            // never spill into another command buffer's section of the bitmap.
            if (offset + size > _size)
            {
                size = _size - offset;

                if (size <= 0)
                {
                    return false;
                }
            }

            int cbBase = cbIndex * _bitsPerCb + (write ? _writeBitOffset : 0);
            int start = cbBase + offset / _granularity;
            int end = cbBase + (offset + size - 1) / _granularity;

            return _bitmap.IsSet(start, end);
        }

        /// <summary>
        /// Checks if a byte range of the buffer was used by any command buffer.
        /// </summary>
        /// <param name="offset">Start offset of the range, in bytes</param>
        /// <param name="size">Size of the range, in bytes</param>
        /// <param name="write">True to check recorded writes, false to check reads</param>
        /// <returns>True if any command buffer used the range, false otherwise</returns>
        public bool OverlapsWith(int offset, int size, bool write)
        {
            for (int i = 0; i < CommandBufferPool.MaxCommandBuffers; i++)
            {
                if (OverlapsWith(i, offset, size, write))
                {
                    return true;
                }
            }

            return false;
        }

        /// <summary>
        /// Clears all usage (reads and writes) recorded for the given command buffer.
        /// </summary>
        /// <param name="cbIndex">Index of the command buffer to clear</param>
        public void Clear(int cbIndex)
        {
            _bitmap.ClearInt(cbIndex * _intsPerCb, (cbIndex + 1) * _intsPerCb - 1);
        }
    }
}