Back to the origins: Make memory manager take guest PA rather than host address once again

gdkchan 2021-06-21 17:23:21 -03:00 committed by gdk
parent f3bfd799e1
commit b4b7d4723e
12 changed files with 180 additions and 329 deletions
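
This commit makes the software MemoryManager and AddressSpaceManager translate guest physical addresses themselves: callers of Map now pass a guest PA (an offset into the DRAM backing MemoryBlock) instead of a pre-resolved host pointer, and the managers receive the backing MemoryBlock at construction time. A minimal call-site sketch, with illustrative sizes and addresses, assuming the constructor shown in the diff below:

// Hypothetical usage sketch; the sizes and addresses are illustrative.
var backingMemory = new MemoryBlock(4UL * 1024 * 1024 * 1024);   // DRAM backing block
var memoryManager = new MemoryManager(backingMemory, 1UL << 39); // new constructor takes the block

// Before this commit the caller resolved a host pointer itself, e.g.:
//     memoryManager.Map(va, backingMemory.GetPointer(pa, size), size);
// After it, the guest physical address is passed directly and the manager
// translates it through the backing block internally:
memoryManager.Map(va: 0x1_0000, pa: 0x0, size: 0x1_0000);
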

View file

@ -25,6 +25,7 @@ namespace Ryujinx.Cpu
private const int PointerTagBit = 62;
private readonly MemoryBlock _backingMemory;
private readonly InvalidAccessHandler _invalidAccessHandler;
/// <summary>
@ -50,10 +51,12 @@ namespace Ryujinx.Cpu
/// <summary>
/// Creates a new instance of the memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
/// <param name="invalidAccessHandler">Optional function to handle invalid memory accesses</param>
public MemoryManager(ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
public MemoryManager(MemoryBlock backingMemory, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler = null)
{
_backingMemory = backingMemory;
_invalidAccessHandler = invalidAccessHandler;
ulong asSize = PageSize;
@ -73,18 +76,19 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public void Map(ulong va, nuint hostAddress, ulong size)
public void Map(ulong va, ulong pa, ulong size)
{
AssertValidAddressAndSize(va, size);
ulong remainingSize = size;
ulong oVa = va;
ulong oPa = pa;
while (remainingSize != 0)
{
_pageTable.Write((va / PageSize) * PteSize, hostAddress);
_pageTable.Write((va / PageSize) * PteSize, PaToPte(pa));
va += PageSize;
hostAddress += PageSize;
pa += PageSize;
remainingSize -= PageSize;
}
Tracking.Map(oVa, size);
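
Each page gets its own entry: the loop writes PaToPte(pa) at offset (va / PageSize) * PteSize and advances both addresses by one page. A self-contained sketch of the entry indexing, assuming the 4 KiB pages and 8-byte (ulong) entries used by this page table:

// Assumed constants matching the software page table: 4 KiB pages, 8-byte entries.
const ulong PageSize = 0x1000;
const ulong PteSize = 8;

ulong PteOffset(ulong va) => (va / PageSize) * PteSize;

// Virtual addresses one page apart land in adjacent 8-byte entries:
// PteOffset(0x10000) == 0x80, PteOffset(0x11000) == 0x88.
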
@ -107,7 +111,7 @@ namespace Ryujinx.Cpu
ulong remainingSize = size;
while (remainingSize != 0)
{
_pageTable.Write((va / PageSize) * PteSize, (nuint)0);
_pageTable.Write((va / PageSize) * PteSize, 0UL);
va += PageSize;
remainingSize -= PageSize;
@ -177,7 +181,7 @@ namespace Ryujinx.Cpu
if (IsContiguousAndMapped(va, data.Length))
{
data.CopyTo(GetHostSpanContiguous(va, data.Length));
data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
}
else
{
@ -185,18 +189,22 @@ namespace Ryujinx.Cpu
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
}
}
}
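
When the destination is not physically contiguous, the write is split per page: the first copy covers the unaligned head up to the next page boundary, and the loop then copies at most one page at a time, translating each page's guest PA separately. A standalone sketch of the same chunking arithmetic, independent of the Ryujinx types:

using System;
using System.Collections.Generic;

static class PageChunking
{
    const int PageSize = 0x1000;   // assumed 4 KiB pages
    const int PageMask = PageSize - 1;

    // Yields the (offsetInData, length) pairs a page-by-page copy would use.
    public static IEnumerable<(int Offset, int Length)> Chunks(ulong va, int dataLength)
    {
        int offset = 0;
        int size = 0;

        if ((va & (ulong)PageMask) != 0)
        {
            // Unaligned head: copy up to the next page boundary.
            size = Math.Min(dataLength, PageSize - (int)(va & (ulong)PageMask));
            yield return (0, size);
            offset += size;
        }

        for (; offset < dataLength; offset += size)
        {
            // Full pages, plus a possible partial tail.
            size = Math.Min(dataLength - offset, PageSize);
            yield return (offset, size);
        }
    }
}
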
@ -224,7 +232,7 @@ namespace Ryujinx.Cpu
if (IsContiguousAndMapped(va, size))
{
return GetHostSpanContiguous(va, size);
return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
}
else
{
@ -251,7 +259,7 @@ namespace Ryujinx.Cpu
SignalMemoryTracking(va, (ulong)size, true);
}
return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
}
else
{
@ -264,7 +272,7 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
public ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
@ -273,7 +281,7 @@ namespace Ryujinx.Cpu
SignalMemoryTracking(va, (ulong)Unsafe.SizeOf<T>(), true);
return ref *(T*)GetHostAddress(va);
return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
}
/// <summary>
@ -315,7 +323,7 @@ namespace Ryujinx.Cpu
return false;
}
if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return false;
}
@ -327,11 +335,11 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
return Enumerable.Empty<MemoryRange>();
}
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
@ -341,9 +349,9 @@ namespace Ryujinx.Cpu
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<HostMemoryRange>();
var regions = new List<MemoryRange>();
nuint regionStart = GetHostAddress(va);
ulong regionStart = GetPhysicalAddressInternal(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
@ -353,12 +361,12 @@ namespace Ryujinx.Cpu
return null;
}
nuint newHostAddress = GetHostAddress(va + PageSize);
ulong newPa = GetPhysicalAddressInternal(va + PageSize);
if (GetHostAddress(va) + PageSize != newHostAddress)
if (GetPhysicalAddressInternal(va) + PageSize != newPa)
{
regions.Add(new HostMemoryRange(regionStart, regionSize));
regionStart = newHostAddress;
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
@ -366,7 +374,7 @@ namespace Ryujinx.Cpu
regionSize += PageSize;
}
regions.Add(new HostMemoryRange(regionStart, regionSize));
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
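
GetPhysicalRegions walks the mapping one page at a time and merges pages whose guest physical addresses are contiguous into a single MemoryRange; if any page in the range is unmapped it returns null. A hedged usage sketch, assuming memoryManager, va and size are valid and that MemoryRange exposes Address and Size as used elsewhere in this diff:

// Hypothetical caller; null is returned when part of [va, va + size) is unmapped.
IEnumerable<MemoryRange> regions = memoryManager.GetPhysicalRegions(va, size);

if (regions != null)
{
    foreach (MemoryRange region in regions)
    {
        Console.WriteLine($"guest PA 0x{region.Address:X} .. 0x{region.Address + region.Size:X}");
    }
}
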
@ -386,18 +394,22 @@ namespace Ryujinx.Cpu
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
}
}
catch (InvalidMemoryRegionException)
@ -446,7 +458,7 @@ namespace Ryujinx.Cpu
return false;
}
return _pageTable.Read<nuint>((va / PageSize) * PteSize) != 0;
return _pageTable.Read<ulong>((va / PageSize) * PteSize) != 0;
}
private bool ValidateAddress(ulong va)
@ -480,37 +492,20 @@ namespace Ryujinx.Cpu
}
}
/// <summary>
/// Get a span representing the given virtual address and size range in host memory.
/// This function assumes that the requested virtual memory region is contiguous.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>A span representing the given virtual range in host memory</returns>
/// <exception cref="InvalidMemoryRegionException">Thrown when the base virtual address is not mapped</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
private ulong GetPhysicalAddress(ulong va)
{
return new Span<byte>((void*)GetHostAddress(va), size);
}
/// <summary>
/// Get the host address for a given virtual address, using the page table.
/// </summary>
/// <param name="va">Virtual address</param>
/// <returns>The corresponding host address for the given virtual address</returns>
/// <exception cref="InvalidMemoryRegionException">Thrown when the virtual address is not mapped</exception>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private nuint GetHostAddress(ulong va)
{
nuint pageBase = _pageTable.Read<nuint>((va / PageSize) * PteSize) & unchecked((nuint)0xffff_ffff_ffffUL);
if (pageBase == 0)
// We return -1L if the virtual address is invalid or unmapped.
if (!ValidateAddress(va) || !IsMapped(va))
{
ThrowInvalidMemoryRegionException($"Not mapped: va=0x{va:X16}");
return ulong.MaxValue;
}
return pageBase + (nuint)(va & PageMask);
return GetPhysicalAddressInternal(va);
}
private ulong GetPhysicalAddressInternal(ulong va)
{
return PteToPa(_pageTable.Read<ulong>((va / PageSize) * PteSize) & ~(0xffffUL << 48)) + (va & PageMask);
}
/// <inheritdoc/>
@ -604,6 +599,16 @@ namespace Ryujinx.Cpu
}
}
private ulong PaToPte(ulong pa)
{
return (ulong)_backingMemory.GetPointer(pa, PageSize);
}
private ulong PteToPa(ulong pte)
{
return (ulong)((long)pte - _backingMemory.Pointer.ToInt64());
}
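
The page table entries still hold host pointers internally: PaToPte adds the backing MemoryBlock's base pointer to the guest PA, and PteToPa subtracts it again. GetPhysicalAddressInternal masks the entry with ~(0xffffUL << 48) first, clearing the upper 16 bits where the tracking code stores its tag (PointerTagBit = 62) before converting back. A self-contained round-trip sketch with a made-up backing base address:

// Illustration only; BackingBase stands in for the MemoryBlock's host base pointer.
const ulong BackingBase = 0x7f40_0000_0000UL;
const ulong PointerTagMask = 0xffffUL << 48;          // bits 48-63, includes tag bit 62

ulong PaToPte(ulong guestPa) => BackingBase + guestPa; // host pointer stored in the PTE
ulong PteToPa(ulong pte) => pte - BackingBase;         // back to the guest PA

ulong pa = 0x0123_4000;
ulong pte = PaToPte(pa) | (1UL << 62);                 // entry later tagged by tracking
ulong recovered = PteToPa(pte & ~PointerTagMask);      // recovered == pa
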
/// <summary>
/// Disposes of resources used by the memory manager.
/// </summary>

View file

@ -136,7 +136,7 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public void Map(ulong va, nuint hostAddress, ulong size)
public void Map(ulong va, ulong pa, ulong size)
{
AssertValidAddressAndSize(va, size);
@ -389,16 +389,9 @@ namespace Ryujinx.Cpu
}
/// <inheritdoc/>
public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
}
AssertMapped(va, size);
return new HostMemoryRange[] { new HostMemoryRange(_addressSpaceMirror.GetPointer(va, size), size) };
throw new NotImplementedException();
}
/// <inheritdoc/>

View file

@ -24,7 +24,7 @@ namespace Ryujinx.HLE.HOS
switch (mode)
{
case MemoryManagerMode.SoftwarePageTable:
return new ArmProcessContext<MemoryManager>(pid, _gpu, new MemoryManager(addressSpaceSize, invalidAccessHandler), for64Bit);
return new ArmProcessContext<MemoryManager>(pid, _gpu, new MemoryManager(context.Memory, addressSpaceSize, invalidAccessHandler), for64Bit);
case MemoryManagerMode.HostMapped:
case MemoryManagerMode.HostMappedUnsafe:

View file

@ -1,10 +1,7 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@ -20,9 +17,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
return _cpuMemory.GetPhysicalRegions(va, size);
var ranges = _cpuMemory.GetPhysicalRegions(va, size);
foreach (var range in ranges)
{
pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
}
}
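
The CPU memory manager now reports DRAM-relative physical addresses (offsets into the backing block), while the kernel's KPageList tracks guest physical addresses, so this override re-bases each range by DramMemoryMap.DramBase and converts its size to a page count; the reverse adjustment appears further down, where _cpuMemory.Map is called with srcPa - DramMemoryMap.DramBase. A small sketch of the address arithmetic, with an assumed DramBase value:

// DramBase value assumed for illustration; the real constant lives in DramMemoryMap.
const ulong DramBase = 0x80000000UL;
const ulong PageSize = 0x1000;

// CPU-side physical region -> kernel page list entry.
ulong cpuPa = 0x0123_4000;              // offset into the backing MemoryBlock
ulong kernelPa = cpuPa + DramBase;      // address as tracked by the kernel
ulong pagesCount = 0x8000 / PageSize;   // range.Size / PageSize

// Kernel physical address -> CPU-side mapping call.
ulong backToCpuPa = kernelPa - DramBase; // what gets passed to _cpuMemory.Map
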
/// <inheritdoc/>
@ -34,7 +35,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
var srcRanges = GetPhysicalRegions(src, pagesCount * PageSize);
KPageList pageList = new KPageList();
GetPhysicalRegions(src, pagesCount * PageSize, pageList);
KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None);
@ -43,7 +45,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return result;
}
result = MapPages(dst, srcRanges, newDstPermission);
result = MapPages(dst, pageList, newDstPermission, false, 0);
if (result != KernelResult.Success)
{
@ -59,10 +61,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong size = pagesCount * PageSize;
var srcRanges = GetPhysicalRegions(src, size);
var dstRanges = GetPhysicalRegions(dst, size);
KPageList srcPageList = new KPageList();
KPageList dstPageList = new KPageList();
if (!dstRanges.SequenceEqual(srcRanges))
GetPhysicalRegions(src, size, srcPageList);
GetPhysicalRegions(dst, size, dstPageList);
if (!dstPageList.IsEqual(srcPageList))
{
return KernelResult.InvalidMemRange;
}
@ -78,7 +83,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (result != KernelResult.Success)
{
KernelResult mapResult = MapPages(dst, dstRanges, oldDstPermission);
KernelResult mapResult = MapPages(dst, dstPageList, oldDstPermission, false, 0);
Debug.Assert(mapResult == KernelResult.Success);
}
@ -92,7 +97,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
_cpuMemory.Map(dstVa, Context.Memory.GetPointer(srcPa - DramMemoryMap.DramBase, size), size);
_cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
{
@ -121,7 +126,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Commit(addr, size);
_cpuMemory.Map(currentVa, Context.Memory.GetPointer(addr, size), size);
_cpuMemory.Map(currentVa, addr, size);
if (shouldFillPages)
{
@ -136,33 +141,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
{
ulong currentVa = address;
foreach (var range in ranges)
{
ulong size = range.Size;
ulong pa = GetDramAddressFromHostAddress(range.Address);
if (pa != ulong.MaxValue)
{
pa += DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
Context.MemoryManager.IncrementPagesReferenceCount(pa, size / PageSize);
}
}
_cpuMemory.Map(currentVa, range.Address, size);
currentVa += size;
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
@ -172,13 +150,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
foreach (var region in regions)
{
ulong pa = GetDramAddressFromHostAddress(region.Address);
if (pa == ulong.MaxValue)
{
continue;
}
pa += DramMemoryMap.DramBase;
ulong pa = region.Address + DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
pagesToClose.AddRange(pa, region.Size / PageSize);
@ -217,15 +189,5 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
_cpuMemory.Write(va, data);
}
private ulong GetDramAddressFromHostAddress(nuint hostAddress)
{
if (hostAddress < (nuint)(ulong)Context.Memory.Pointer || hostAddress >= (nuint)((ulong)Context.Memory.Pointer + Context.Memory.Size))
{
return ulong.MaxValue;
}
return hostAddress - (ulong)Context.Memory.Pointer;
}
}
}

View file

@ -380,7 +380,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
public KernelResult UnmapPages(ulong address, ulong pagesCount, IEnumerable<HostMemoryRange> ranges, MemoryState stateExpected)
public KernelResult UnmapPages(ulong address, ulong pagesCount, KPageList pageList, MemoryState stateExpected)
{
ulong size = pagesCount * PageSize;
@ -405,9 +405,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
lock (_blockManager)
{
var currentRanges = GetPhysicalRegions(address, size);
KPageList currentPageList = new KPageList();
if (!currentRanges.SequenceEqual(ranges))
GetPhysicalRegions(address, size, currentPageList);
if (!currentPageList.IsEqual(pageList))
{
return KernelResult.InvalidMemRange;
}
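
UnmapPages now takes the expected backing pages as a KPageList and compares it against the pages currently mapped at the address; if they differ, the unmap is rejected with InvalidMemRange. A hedged caller sketch, mirroring how KSharedMemory and KTransferMemory use it together with SharedMemoryStorage.GetPageList() later in this commit:

// Hypothetical caller; `storage` is a SharedMemoryStorage and `memoryManager`
// a KPageTableBase, as in the shared/transfer memory paths of this commit.
KPageList pageList = storage.GetPageList();

KernelResult result = memoryManager.UnmapPages(address, pagesCount, pageList, MemoryState.SharedMemory);

if (result == KernelResult.InvalidMemRange)
{
    // The pages currently mapped at `address` do not match the expected page list.
}
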
@ -1828,7 +1830,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong alignedSize = endAddrTruncated - addressRounded;
KernelResult result = MapPages(currentVa, srcPageTable.GetPhysicalRegions(addressRounded, alignedSize), permission);
KPageList pageList = new KPageList();
srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
KernelResult result = MapPages(currentVa, pageList, permission);
if (result != KernelResult.Success)
{
@ -2041,7 +2046,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed);
}
public KernelResult BorrowTransferMemory(List<HostMemoryRange> ranges, ulong address, ulong size, KMemoryPermission permission)
public KernelResult BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission)
{
return SetAttributesAndChangePermission(
address,
@ -2054,7 +2059,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.None,
permission,
MemoryAttribute.Borrowed,
ranges);
pageList);
}
private KernelResult SetAttributesAndChangePermission(
@ -2068,7 +2073,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute attributeExpected,
KMemoryPermission newPermission,
MemoryAttribute attributeSetMask,
List<HostMemoryRange> ranges = null)
KPageList pageList = null)
{
if (address + size <= address || !InsideAddrSpace(address, size))
{
@ -2093,7 +2098,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong pagesCount = size / PageSize;
ranges?.AddRange(GetPhysicalRegions(address, size));
if (pageList != null)
{
GetPhysicalRegions(address, pagesCount * PageSize, pageList);
}
if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
{
@ -2143,7 +2151,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed);
}
public KernelResult UnborrowTransferMemory(ulong address, ulong size, List<HostMemoryRange> ranges)
public KernelResult UnborrowTransferMemory(ulong address, ulong size, KPageList pageList)
{
return ClearAttributesAndChangePermission(
address,
@ -2156,7 +2164,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed,
KMemoryPermission.ReadAndWrite,
MemoryAttribute.Borrowed,
ranges);
pageList);
}
private KernelResult ClearAttributesAndChangePermission(
@ -2170,7 +2178,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute attributeExpected,
KMemoryPermission newPermission,
MemoryAttribute attributeClearMask,
List<HostMemoryRange> ranges = null)
KPageList pageList = null)
{
if (address + size <= address || !InsideAddrSpace(address, size))
{
@ -2195,11 +2203,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong pagesCount = size / PageSize;
if (ranges != null)
if (pageList != null)
{
var currentRanges = GetPhysicalRegions(address, size);
KPageList currentPageList = new KPageList();
if (!currentRanges.SequenceEqual(ranges))
GetPhysicalRegions(address, pagesCount * PageSize, currentPageList);
if (!currentPageList.IsEqual(pageList))
{
return KernelResult.InvalidMemRange;
}
@ -2741,8 +2751,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
protected abstract IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
/// <param name="pageList">Page list where the ranges will be added</param>
protected abstract void GetPhysicalRegions(ulong va, ulong size, KPageList pageList);
/// <summary>
/// Gets a read-only span of data from CPU mapped memory.
@ -2803,16 +2813,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <returns>Result of the mapping operation</returns>
protected abstract KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
/// <summary>
/// Maps a region of memory into the specified host memory ranges.
/// </summary>
/// <param name="address">Destination virtual address that should be mapped</param>
/// <param name="ranges">Ranges of host memory that should be mapped</param>
/// <param name="permission">Permission of the region to be mapped</param>
/// <returns>Result of the mapping operation</returns>
/// <exception cref="NotSupportedException">The implementation does not support memory aliasing</exception>
protected abstract KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission);
/// <summary>
/// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
/// </summary>

View file

@ -21,9 +21,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
return _cpuMemory.GetPhysicalRegions(va, size);
throw new NotImplementedException();
}
/// <inheritdoc/>
@ -97,12 +97,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
{
throw new NotSupportedException();
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{

View file

@ -60,11 +60,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return result;
}
public KernelResult UnmapFromProcess(
KPageTableBase memoryManager,
ulong address,
ulong size,
KProcess process)
public KernelResult UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
{
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
@ -76,9 +72,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidSize;
}
var ranges = _storage.GetRanges();
return memoryManager.UnmapPages(address, pagesCount, ranges, MemoryState.SharedMemory);
return memoryManager.UnmapPages(address, pagesCount, pageList, MemoryState.SharedMemory);
}
}
}

View file

@ -1,9 +1,7 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@ -14,7 +12,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
// TODO: Remove when we no longer need to read it from the owner directly.
public KProcess Creator => _creator;
private readonly List<HostMemoryRange> _ranges;
private readonly KPageList _pageList;
private readonly SharedMemoryStorage _storage;
@ -28,7 +26,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public KTransferMemory(KernelContext context) : base(context)
{
_ranges = new List<HostMemoryRange>();
_pageList = new KPageList();
}
public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
@ -46,7 +44,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
_creator = creator;
KernelResult result = creator.MemoryManager.BorrowTransferMemory(_ranges, address, size, permission);
KernelResult result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
if (result != KernelResult.Success)
{
@ -127,11 +125,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidSize;
}
var ranges = _storage.GetRanges();
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
KernelResult result = memoryManager.UnmapPages(address, pagesCount, ranges, state);
KernelResult result = memoryManager.UnmapPages(address, pagesCount, pageList, state);
if (result == KernelResult.Success)
{
@ -145,7 +141,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
if (_hasBeenInitialized)
{
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _ranges) != KernelResult.Success)
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != KernelResult.Success)
{
throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
}

View file

@ -73,28 +73,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
public IEnumerable<HostMemoryRange> GetRanges()
{
if (_borrowerMemory == null)
{
var ranges = new List<HostMemoryRange>();
foreach (KPageNode pageNode in _pageList)
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
ranges.Add(new HostMemoryRange(_context.Memory.GetPointer(address, size), size));
}
return ranges;
}
else
{
return _borrowerMemory.GetPhysicalRegions(_borrowerVa, _size);
}
}
public KPageList GetPageList()
{
return _pageList;

View file

@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Process
{
public IProcessContext Create(KernelContext context, long pid, ulong addressSpaceSize, InvalidAccessHandler invalidAccessHandler, bool for64Bit)
{
return new ProcessContext(new AddressSpaceManager(addressSpaceSize));
return new ProcessContext(new AddressSpaceManager(context.Memory, addressSpaceSize));
}
}
}

View file

@ -13,9 +13,9 @@ namespace Ryujinx.Memory
/// </summary>
public sealed class AddressSpaceManager : IVirtualMemoryManager, IWritableBlock
{
public const int PageBits = PageTable<nuint>.PageBits;
public const int PageSize = PageTable<nuint>.PageSize;
public const int PageMask = PageTable<nuint>.PageMask;
public const int PageBits = PageTable<ulong>.PageBits;
public const int PageSize = PageTable<ulong>.PageSize;
public const int PageMask = PageTable<ulong>.PageMask;
/// <summary>
/// Address space width in bits.
@ -24,14 +24,15 @@ namespace Ryujinx.Memory
private readonly ulong _addressSpaceSize;
private readonly PageTable<nuint> _pageTable;
private readonly MemoryBlock _backingMemory;
private readonly PageTable<ulong> _pageTable;
/// <summary>
/// Creates a new instance of the memory manager.
/// </summary>
/// <param name="backingMemory">Physical backing memory where virtual memory will be mapped to</param>
/// <param name="addressSpaceSize">Size of the address space</param>
public AddressSpaceManager(ulong addressSpaceSize)
public AddressSpaceManager(MemoryBlock backingMemory, ulong addressSpaceSize)
{
ulong asSize = PageSize;
int asBits = PageBits;
@ -44,37 +45,26 @@ namespace Ryujinx.Memory
AddressSpaceBits = asBits;
_addressSpaceSize = asSize;
_pageTable = new PageTable<nuint>();
_backingMemory = backingMemory;
_pageTable = new PageTable<ulong>();
}
/// <summary>
/// Maps a virtual memory range into a physical memory range.
/// </summary>
/// <remarks>
/// Addresses and size must be page aligned.
/// </remarks>
/// <param name="va">Virtual memory address</param>
/// <param name="hostAddress">Physical memory address</param>
/// <param name="size">Size to be mapped</param>
public void Map(ulong va, nuint hostAddress, ulong size)
/// <inheritdoc/>
public void Map(ulong va, ulong pa, ulong size)
{
AssertValidAddressAndSize(va, size);
while (size != 0)
{
_pageTable.Map(va, hostAddress);
_pageTable.Map(va, pa);
va += PageSize;
hostAddress += PageSize;
pa += PageSize;
size -= PageSize;
}
}
/// <summary>
/// Unmaps a previously mapped range of virtual memory.
/// </summary>
/// <param name="va">Virtual address of the range to be unmapped</param>
/// <param name="size">Size of the range to be unmapped</param>
/// <inheritdoc/>
public void Unmap(ulong va, ulong size)
{
AssertValidAddressAndSize(va, size);
@ -88,47 +78,25 @@ namespace Ryujinx.Memory
}
}
/// <summary>
/// Reads data from mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data being read</typeparam>
/// <param name="va">Virtual address of the data in memory</param>
/// <returns>The data</returns>
/// <exception cref="InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public T Read<T>(ulong va) where T : unmanaged
{
return MemoryMarshal.Cast<byte, T>(GetSpan(va, Unsafe.SizeOf<T>()))[0];
}
/// <summary>
/// Reads data from mapped memory.
/// </summary>
/// <param name="va">Virtual address of the data in memory</param>
/// <param name="data">Span to store the data being read into</param>
/// <exception cref="InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public void Read(ulong va, Span<byte> data)
{
ReadImpl(va, data);
}
/// <summary>
/// Writes data to mapped memory.
/// </summary>
/// <typeparam name="T">Type of the data being written</typeparam>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="value">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public void Write<T>(ulong va, T value) where T : unmanaged
{
Write(va, MemoryMarshal.Cast<T, byte>(MemoryMarshal.CreateSpan(ref value, 1)));
}
/// <summary>
/// Writes data to mapped memory.
/// </summary>
/// <param name="va">Virtual address to write the data into</param>
/// <param name="data">Data to be written</param>
/// <exception cref="InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public void Write(ulong va, ReadOnlySpan<byte> data)
{
if (data.Length == 0)
@ -140,7 +108,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, data.Length))
{
data.CopyTo(GetHostSpanContiguous(va, data.Length));
data.CopyTo(_backingMemory.GetSpan(GetPhysicalAddressInternal(va), data.Length));
}
else
{
@ -148,34 +116,27 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
data.Slice(0, size).CopyTo(GetHostSpanContiguous(va, size));
data.Slice(0, size).CopyTo(_backingMemory.GetSpan(pa, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
data.Slice(offset, size).CopyTo(GetHostSpanContiguous(va + (ulong)offset, size));
data.Slice(offset, size).CopyTo(_backingMemory.GetSpan(pa, size));
}
}
}
/// <summary>
/// Gets a read-only span of data from mapped memory.
/// </summary>
/// <remarks>
/// This may perform an allocation if the data is not contiguous in memory.
/// For this reason, the span is read-only; you cannot modify the data.
/// </remarks>
/// <param name="va">Virtual address of the data</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if read tracking is triggered on the span</param>
/// <returns>A read-only span of the data</returns>
/// <exception cref="InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public ReadOnlySpan<byte> GetSpan(ulong va, int size, bool tracked = false)
{
if (size == 0)
@ -185,7 +146,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
return GetHostSpanContiguous(va, size);
return _backingMemory.GetSpan(GetPhysicalAddressInternal(va), size);
}
else
{
@ -197,19 +158,7 @@ namespace Ryujinx.Memory
}
}
/// <summary>
/// Gets a region of memory that can be written to.
/// </summary>
/// <remarks>
/// If the requested region is not contiguous in physical memory,
/// this will perform an allocation, and flush the data (writing it
/// back to the backing memory) on disposal.
/// </remarks>
/// <param name="va">Virtual address of the data</param>
/// <param name="size">Size of the data</param>
/// <param name="tracked">True if write tracking is triggered on the span</param>
/// <returns>A writable region of memory containing the data</returns>
/// <exception cref="InvalidMemoryRegionException">Thrown for unhandled invalid or unmapped memory accesses</exception>
/// <inheritdoc/>
public unsafe WritableRegion GetWritableRegion(ulong va, int size, bool tracked = false)
{
if (size == 0)
@ -219,7 +168,7 @@ namespace Ryujinx.Memory
if (IsContiguousAndMapped(va, size))
{
return new WritableRegion(null, va, new NativeMemoryManager<byte>((byte*)GetHostAddress(va), size).Memory);
return new WritableRegion(null, va, _backingMemory.GetMemory(GetPhysicalAddressInternal(va), size));
}
else
{
@ -231,33 +180,18 @@ namespace Ryujinx.Memory
}
}
/// <summary>
/// Gets a reference for the given type at the specified virtual memory address.
/// </summary>
/// <remarks>
/// The data must be located at a contiguous memory region.
/// </remarks>
/// <typeparam name="T">Type of the data to get the reference</typeparam>
/// <param name="va">Virtual address of the data</param>
/// <returns>A reference to the data in memory</returns>
/// <exception cref="MemoryNotContiguousException">Thrown if the specified memory region is not contiguous in physical memory</exception>
public unsafe ref T GetRef<T>(ulong va) where T : unmanaged
/// <inheritdoc/>
public ref T GetRef<T>(ulong va) where T : unmanaged
{
if (!IsContiguous(va, Unsafe.SizeOf<T>()))
{
ThrowMemoryNotContiguous();
}
return ref *(T*)GetHostAddress(va);
return ref _backingMemory.GetRef<T>(GetPhysicalAddressInternal(va));
}
/// <summary>
/// Computes the number of pages in a virtual address range.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <param name="startVa">The virtual address of the beginning of the first page</param>
/// <remarks>This function does not differentiate between allocated and unallocated pages.</remarks>
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private int GetPagesCount(ulong va, uint size, out ulong startVa)
{
@ -290,7 +224,7 @@ namespace Ryujinx.Memory
return false;
}
if (GetHostAddress(va) + PageSize != GetHostAddress(va + PageSize))
if (GetPhysicalAddressInternal(va) + PageSize != GetPhysicalAddressInternal(va + PageSize))
{
return false;
}
@ -301,18 +235,12 @@ namespace Ryujinx.Memory
return true;
}
/// <summary>
/// Gets the physical regions that make up the given virtual address region.
/// If any part of the virtual region is unmapped, null is returned.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
public IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
/// <inheritdoc/>
public IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
if (size == 0)
{
return Enumerable.Empty<HostMemoryRange>();
return Enumerable.Empty<MemoryRange>();
}
if (!ValidateAddress(va) || !ValidateAddressAndSize(va, size))
@ -322,9 +250,9 @@ namespace Ryujinx.Memory
int pages = GetPagesCount(va, (uint)size, out va);
var regions = new List<HostMemoryRange>();
var regions = new List<MemoryRange>();
nuint regionStart = GetHostAddress(va);
ulong regionStart = GetPhysicalAddressInternal(va);
ulong regionSize = PageSize;
for (int page = 0; page < pages - 1; page++)
@ -334,12 +262,12 @@ namespace Ryujinx.Memory
return null;
}
nuint newHostAddress = GetHostAddress(va + PageSize);
ulong newPa = GetPhysicalAddressInternal(va + PageSize);
if (GetHostAddress(va) + PageSize != newHostAddress)
if (GetPhysicalAddressInternal(va) + PageSize != newPa)
{
regions.Add(new HostMemoryRange(regionStart, regionSize));
regionStart = newHostAddress;
regions.Add(new MemoryRange(regionStart, regionSize));
regionStart = newPa;
regionSize = 0;
}
@ -347,7 +275,7 @@ namespace Ryujinx.Memory
regionSize += PageSize;
}
regions.Add(new HostMemoryRange(regionStart, regionSize));
regions.Add(new MemoryRange(regionStart, regionSize));
return regions;
}
@ -365,26 +293,26 @@ namespace Ryujinx.Memory
if ((va & PageMask) != 0)
{
ulong pa = GetPhysicalAddressInternal(va);
size = Math.Min(data.Length, PageSize - (int)(va & PageMask));
GetHostSpanContiguous(va, size).CopyTo(data.Slice(0, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(0, size));
offset += size;
}
for (; offset < data.Length; offset += size)
{
ulong pa = GetPhysicalAddressInternal(va + (ulong)offset);
size = Math.Min(data.Length - offset, PageSize);
GetHostSpanContiguous(va + (ulong)offset, size).CopyTo(data.Slice(offset, size));
_backingMemory.GetSpan(pa, size).CopyTo(data.Slice(offset, size));
}
}
/// <summary>
/// Checks if the page at a given virtual address is mapped.
/// </summary>
/// <param name="va">Virtual address to check</param>
/// <returns>True if the address is mapped, false otherwise</returns>
/// <inheritdoc/>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool IsMapped(ulong va)
{
@ -396,12 +324,7 @@ namespace Ryujinx.Memory
return _pageTable.Read(va) != 0;
}
/// <summary>
/// Checks if a memory range is mapped.
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range in bytes</param>
/// <returns>True if the entire range is mapped, false otherwise</returns>
/// <inheritdoc/>
public bool IsRangeMapped(ulong va, ulong size)
{
if (size == 0UL)
@ -460,14 +383,20 @@ namespace Ryujinx.Memory
}
}
private unsafe Span<byte> GetHostSpanContiguous(ulong va, int size)
private ulong GetPhysicalAddress(ulong va)
{
return new Span<byte>((void*)GetHostAddress(va), size);
// We return -1L if the virtual address is invalid or unmapped.
if (!ValidateAddress(va) || !IsMapped(va))
{
return ulong.MaxValue;
}
return GetPhysicalAddressInternal(va);
}
private nuint GetHostAddress(ulong va)
private ulong GetPhysicalAddressInternal(ulong va)
{
return _pageTable.Read(va) + (nuint)(va & PageMask);
return _pageTable.Read(va) + (va & PageMask);
}
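
Unlike MemoryManager above, AddressSpaceManager's PageTable<ulong> stores the guest PA directly, so there is no PaToPte/PteToPa step and no tag bits to mask: translation is just the page's stored PA plus the offset within the page. A self-contained illustration using a plain dictionary in place of PageTable<ulong>:

using System.Collections.Generic;

const ulong PageSize = 0x1000;
const ulong PageMask = PageSize - 1;

// One guest-PA entry per mapped virtual page (stand-in for PageTable<ulong>).
var pageTable = new Dictionary<ulong, ulong>
{
    [0x0001_0000] = 0x0004_0000,
};

ulong Translate(ulong va) => pageTable[va & ~PageMask] + (va & PageMask);

// Translate(0x0001_0123) == 0x0004_0123
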
/// <summary>

View file

@ -13,9 +13,9 @@ namespace Ryujinx.Memory
/// Addresses and size must be page aligned.
/// </remarks>
/// <param name="va">Virtual memory address</param>
/// <param name="hostAddress">Pointer where the region should be mapped to</param>
/// <param name="pa">Physical memory address where the region should be mapped to</param>
/// <param name="size">Size to be mapped</param>
void Map(ulong va, nuint hostAddress, ulong size);
void Map(ulong va, ulong pa, ulong size);
/// <summary>
/// Unmaps a previously mapped range of virtual memory.
@ -111,7 +111,7 @@ namespace Ryujinx.Memory
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
IEnumerable<MemoryRange> GetPhysicalRegions(ulong va, ulong size);
/// <summary>
/// Checks if the page at a given CPU virtual address is mapped.