using OpenTK.Graphics.OpenGL;
using Ryujinx.Graphics.GAL;
using System;

namespace Ryujinx.Graphics.OpenGL
{
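    /// <summary>
    /// Helper methods for creating, copying, reading back and updating OpenGL buffer
    /// objects referenced through GAL <see cref="BufferHandle"/> values.
    /// </summary>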
    static class Buffer
    {
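        /// <summary>
        /// Fills a range of the buffer with a repeating 32-bit value, using
        /// glClearBufferSubData with an RGBA8UI format.
        /// </summary>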
        public static void Clear(BufferHandle destination, int offset, int size, uint value)
        {
            GL.BindBuffer(BufferTarget.CopyWriteBuffer, destination.ToInt32());

            unsafe
            {
                uint* valueArr = stackalloc uint[1];

                valueArr[0] = value;

                GL.ClearBufferSubData(
                    BufferTarget.CopyWriteBuffer,
                    PixelInternalFormat.Rgba8ui,
                    (IntPtr)offset,
                    (IntPtr)size,
                    PixelFormat.RgbaInteger,
                    PixelType.UnsignedByte,
                    (IntPtr)valueArr);
            }
        }
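
        /// <summary>
        /// Generates a new buffer object name without allocating any storage for it.
        /// </summary>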
        public static BufferHandle Create()
        {
            return Handle.FromInt32<BufferHandle>(GL.GenBuffer());
        }
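
        /// <summary>
        /// Creates a buffer object and allocates <paramref name="size"/> bytes of
        /// uninitialized storage with the DynamicDraw usage hint.
        /// </summary>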
        public static BufferHandle Create(int size)
        {
            int handle = GL.GenBuffer();

            GL.BindBuffer(BufferTarget.CopyWriteBuffer, handle);
            GL.BufferData(BufferTarget.CopyWriteBuffer, size, IntPtr.Zero, BufferUsageHint.DynamicDraw);

            return Handle.FromInt32<BufferHandle>(handle);
        }
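
        /// <summary>
        /// Creates a buffer with immutable storage that can be persistently and
        /// coherently mapped for reading on the host.
        /// </summary>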
        public static BufferHandle CreatePersistent(int size)
        {
            int handle = GL.GenBuffer();

            GL.BindBuffer(BufferTarget.CopyWriteBuffer, handle);
            GL.BufferStorage(BufferTarget.CopyWriteBuffer, size, IntPtr.Zero,
                BufferStorageFlags.MapPersistentBit |
                BufferStorageFlags.MapCoherentBit |
                BufferStorageFlags.ClientStorageBit |
                BufferStorageFlags.MapReadBit);

            return Handle.FromInt32<BufferHandle>(handle);
        }
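
        /// <summary>
        /// Copies <paramref name="size"/> bytes from the source buffer to the
        /// destination buffer using glCopyBufferSubData.
        /// </summary>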
        public static void Copy(BufferHandle source, BufferHandle destination, int srcOffset, int dstOffset, int size)
        {
            GL.BindBuffer(BufferTarget.CopyReadBuffer, source.ToInt32());
            GL.BindBuffer(BufferTarget.CopyWriteBuffer, destination.ToInt32());

            GL.CopyBufferSubData(
                BufferTarget.CopyReadBuffer,
                BufferTarget.CopyWriteBuffer,
                (IntPtr)srcOffset,
                (IntPtr)dstOffset,
                (IntPtr)size);
        }
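
        /// <summary>
        /// Reads a range of the buffer back to the host. The data comes from a persistent
        /// mapping when one exists for the buffer, from the default persistent buffer when
        /// persistent flush is supported, or from glGetBufferSubData into a reusable host array.
        /// </summary>
        /// <remarks>
        /// The returned span is only valid until the next read request from the same host thread.
        /// </remarks>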
        public static unsafe PinnedSpan<byte> GetData(OpenGLRenderer renderer, BufferHandle buffer, int offset, int size)
        {
            // Data in the persistent buffer and host array is guaranteed to be available
            // until the next time the host thread requests data.

            if (renderer.PersistentBuffers.TryGet(buffer, out IntPtr ptr))
            {
                return new PinnedSpan<byte>(IntPtr.Add(ptr, offset).ToPointer(), size);
            }
            else if (HwCapabilities.UsePersistentBufferForFlush)
            {
                return PinnedSpan<byte>.UnsafeFromSpan(renderer.PersistentBuffers.Default.GetBufferData(buffer, offset, size));
            }
            else
            {
                IntPtr target = renderer.PersistentBuffers.Default.GetHostArray(size);

                GL.BindBuffer(BufferTarget.CopyReadBuffer, buffer.ToInt32());

                GL.GetBufferSubData(BufferTarget.CopyReadBuffer, (IntPtr)offset, size, target);

                return new PinnedSpan<byte>(target.ToPointer(), size);
            }
        }
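
        /// <summary>
        /// Reallocates the buffer storage with the given size, discarding its previous contents.
        /// </summary>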
        public static void Resize(BufferHandle handle, int size)
        {
            GL.BindBuffer(BufferTarget.CopyWriteBuffer, handle.ToInt32());
            GL.BufferData(BufferTarget.CopyWriteBuffer, size, IntPtr.Zero, BufferUsageHint.StreamCopy);
        }
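
        /// <summary>
        /// Uploads the given data to the buffer at the specified byte offset.
        /// </summary>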
        public static void SetData(BufferHandle buffer, int offset, ReadOnlySpan<byte> data)
        {
            GL.BindBuffer(BufferTarget.CopyWriteBuffer, buffer.ToInt32());

            unsafe
            {
                fixed (byte* ptr = data)
                {
                    GL.BufferSubData(BufferTarget.CopyWriteBuffer, (IntPtr)offset, data.Length, (IntPtr)ptr);
                }
            }
        }
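
        /// <summary>
        /// Deletes the buffer object.
        /// </summary>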
        public static void Delete(BufferHandle buffer)
        {
            GL.DeleteBuffer(buffer.ToInt32());
        }
    }
}