using Ryujinx.Common;
using Ryujinx.Common.Logging;
using Ryujinx.Cpu.Signal;
using Ryujinx.Memory;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Runtime.InteropServices;
using System.Threading;
using static Ryujinx.Cpu.MemoryEhMeilleure;

namespace ARMeilleure.Common
{
    /// <summary>
    /// Represents a table mapping guest addresses to values.
    /// </summary>
    /// <typeparam name="TEntry">Type of the value</typeparam>
    public unsafe class AddressTable<TEntry> : IAddressTable<TEntry> where TEntry : unmanaged
    {
        /// <summary>
        /// If true, the sparse 2-level table should be used to improve performance.
        /// If false, the platform doesn't properly support it, or will be negatively impacted.
        /// </summary>
        public static bool UseSparseTable => true;

        private readonly struct AddressTablePage
        {
            public readonly bool IsSparse;
            public readonly IntPtr Address;

            public AddressTablePage(bool isSparse, IntPtr address)
            {
                IsSparse = isSparse;
                Address = address;
            }
        }

        /// <summary>
        /// A sparsely mapped block of memory with a signal handler to map pages as they're accessed.
        /// </summary>
        private readonly struct TableSparseBlock : IDisposable
        {
            public readonly SparseMemoryBlock Block;
            public readonly TrackingEventDelegate TrackingEvent;

            public TableSparseBlock(ulong size, Action<IntPtr> ensureMapped, PageInitDelegate pageInit)
            {
                var block = new SparseMemoryBlock(size, pageInit, null);

                // Commit the page containing the faulting address when the signal handler fires.
                TrackingEvent = (ulong address, ulong _, bool write) =>
                {
                    Logger.Error?.PrintMsg(LogClass.Cpu, "Triggered from exception");

                    ulong pointer = (ulong)block.Block.Pointer + address;

                    ensureMapped((IntPtr)pointer);

                    return pointer;
                };

                bool added = NativeSignalHandler.AddTrackedRegion(
                    (nuint)block.Block.Pointer,
                    (nuint)(block.Block.Pointer + (IntPtr)block.Block.Size),
                    Marshal.GetFunctionPointerForDelegate(TrackingEvent));

                if (!added)
                {
                    throw new InvalidOperationException("Number of allowed tracked regions exceeded.");
                }

                Block = block;
            }

            public void Dispose()
            {
                NativeSignalHandler.RemoveTrackedRegion((nuint)Block.Block.Pointer);

                Block.Dispose();
            }
        }

        private bool _disposed;
        private TEntry** _table;
        private readonly List<AddressTablePage> _pages;
        private TEntry _fill;

        private readonly bool _sparse;
        private readonly MemoryBlock _sparseFill;
        private readonly SparseMemoryBlock _fillBottomLevel;
        private readonly TEntry* _fillBottomLevelPtr;

        private readonly List<TableSparseBlock> _sparseReserved;
        private readonly ReaderWriterLockSlim _sparseLock;

        private ulong _sparseBlockSize;
        private ulong _sparseReservedOffset;

        /// <inheritdoc/>
        public ulong Mask { get; }

        /// <inheritdoc/>
        public AddressTableLevel[] Levels { get; }

        /// <inheritdoc/>
        public TEntry Fill
        {
            get
            {
                return _fill;
            }
            set
            {
                UpdateFill(value);
            }
        }

        /// <inheritdoc/>
        public IntPtr Base
        {
            get
            {
                ObjectDisposedException.ThrowIf(_disposed, this);

                lock (_pages)
                {
                    return (IntPtr)GetRootPage();
                }
            }
        }
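        // Illustrative sketch (an assumption, not part of this file): each AddressTableLevel
        // describes one bit slice of the guest address, and a lookup walks the table one level
        // at a time. For a hypothetical 2-level layout { Index = 16, Length = 23 } then
        // { Index = 2, Length = 14 }, a lookup would decompose roughly as:
        //
        //     int top    = Levels[0].GetValue(address); // bits [16, 39) select the root entry
        //     int bottom = Levels[1].GetValue(address); // bits [2, 16)  select the leaf entry
        //     TEntry entry = root[top][bottom];         // `root` is Base viewed as a TEntry**
        //
        // The level values and names above are hypothetical; the real presets for A32/A64 come
        // from AddressTablePresets.GetArmPreset.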
        /// <summary>
        /// Constructs a new instance of the <see cref="AddressTable{TEntry}"/> class with the
        /// specified list of <see cref="AddressTableLevel"/>.
        /// </summary>
        /// <param name="levels">Levels for the address table</param>
        /// <param name="sparse">True if the bottom page should be sparsely mapped</param>
        /// <exception cref="ArgumentNullException"><paramref name="levels"/> is null</exception>
        /// <exception cref="ArgumentException">Length of <paramref name="levels"/> is less than 2</exception>
        public AddressTable(AddressTableLevel[] levels, bool sparse)
        {
            ArgumentNullException.ThrowIfNull(levels);

            if (levels.Length < 2)
            {
                throw new ArgumentException("Table must be at least 2 levels deep.", nameof(levels));
            }

            _pages = new List<AddressTablePage>(capacity: 16);

            Levels = levels;
            Mask = 0;

            foreach (var level in Levels)
            {
                Mask |= level.Mask;
            }

            _sparse = sparse;

            if (sparse)
            {
                // If the address table is sparse, allocate a fill block.
                _sparseFill = new MemoryBlock(65536, MemoryAllocationFlags.Mirrorable);

                ulong bottomLevelSize = (1ul << levels.Last().Length) * (ulong)sizeof(TEntry);

                _fillBottomLevel = new SparseMemoryBlock(bottomLevelSize, null, _sparseFill);
                _fillBottomLevelPtr = (TEntry*)_fillBottomLevel.Block.Pointer;

                _sparseReserved = new List<TableSparseBlock>();
                _sparseLock = new ReaderWriterLockSlim();

                _sparseBlockSize = bottomLevelSize;
            }
        }

        /// <summary>
        /// Create an <see cref="AddressTable{TEntry}"/> instance for an ARM function table.
        /// Selects the best table structure for A32/A64, taking into account whether sparse mapping is supported.
        /// </summary>
        /// <param name="for64Bits">True if the guest is A64, false otherwise</param>
        /// <returns>An <see cref="AddressTable{TEntry}"/> for ARM function lookup</returns>
        public static AddressTable<TEntry> CreateForArm(bool for64Bits)
        {
            bool sparse = UseSparseTable;

            return new AddressTable<TEntry>(AddressTablePresets.GetArmPreset(for64Bits, sparse), sparse);
        }

        /// <summary>
        /// Update the fill value for the bottom level of the table.
        /// </summary>
        /// <param name="fillValue">New fill value</param>
        private void UpdateFill(TEntry fillValue)
        {
            if (_sparseFill != null)
            {
                Span<byte> span = _sparseFill.GetSpan(0, (int)_sparseFill.Size);
                MemoryMarshal.Cast<byte, TEntry>(span).Fill(fillValue);
            }

            _fill = fillValue;
        }

        /// <summary>
        /// Signal that the given code range exists.
        /// </summary>
        /// <param name="address">Start address of the code range</param>
        /// <param name="size">Size of the code range</param>
        public void SignalCodeRange(ulong address, ulong size)
        {
            AddressTableLevel bottom = Levels.Last();
            ulong bottomLevelEntries = 1ul << bottom.Length;

            ulong entryIndex = address >> bottom.Index;
            ulong entries = size >> bottom.Index;
            entries += entryIndex - BitUtils.AlignDown(entryIndex, bottomLevelEntries);

            _sparseBlockSize = Math.Max(_sparseBlockSize, BitUtils.AlignUp(entries, bottomLevelEntries) * (ulong)sizeof(TEntry));
        }

        /// <inheritdoc/>
        public bool IsValid(ulong address)
        {
            return (address & ~Mask) == 0;
        }

        /// <inheritdoc/>
        public ref TEntry GetValue(ulong address)
        {
            ObjectDisposedException.ThrowIf(_disposed, this);

            if (!IsValid(address))
            {
                throw new ArgumentException($"Address 0x{address:X} is not mapped onto the table.", nameof(address));
            }

            lock (_pages)
            {
                TEntry* page = GetPage(address);

                int index = Levels[^1].GetValue(address);

                EnsureMapped((IntPtr)(page + index));

                return ref page[index];
            }
        }

        /// <summary>
        /// Gets the leaf page for the specified guest <paramref name="address"/>.
        /// </summary>
        /// <param name="address">Guest address</param>
        /// <returns>Leaf page for the specified guest <paramref name="address"/></returns>
        private TEntry* GetPage(ulong address)
        {
            TEntry** page = GetRootPage();

            for (int i = 0; i < Levels.Length - 1; i++)
            {
                ref AddressTableLevel level = ref Levels[i];
                ref TEntry* nextPage = ref page[level.GetValue(address)];

                if (nextPage == null || nextPage == _fillBottomLevelPtr)
                {
                    ref AddressTableLevel nextLevel = ref Levels[i + 1];

                    if (i == Levels.Length - 2)
                    {
                        nextPage = (TEntry*)Allocate(1 << nextLevel.Length, Fill, leaf: true);
                    }
                    else
                    {
                        nextPage = (TEntry*)Allocate(1 << nextLevel.Length, GetFillValue(i), leaf: false);
                    }
                }

                page = (TEntry**)nextPage;
            }

            return (TEntry*)page;
        }
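        // Illustrative flow (an assumption inferred from TableSparseBlock above): sparse leaf
        // pages are reserved but not committed, so an entry can be touched via two paths:
        //
        //     GetValue(address) -> EnsureMapped(entryPtr)      // proactive commit, read lock held
        //
        //     stray access      -> access violation / SIGSEGV
        //                       -> NativeSignalHandler tracked region
        //                       -> TableSparseBlock.TrackingEvent
        //                       -> EnsureMapped(faultPtr)      // commit driven by the fault
        //
        // Either path ends in SparseMemoryBlock.EnsureMapped, which maps the page and lets
        // InitLeafPage fill it with the current fill value.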
        /// <summary>
        /// Ensure the given pointer is committed if it lies within one of the sparse reservations.
        /// </summary>
        /// <param name="ptr">Pointer into the table</param>
        private void EnsureMapped(IntPtr ptr)
        {
            if (_sparse)
            {
                // Check sparse allocations to see if the pointer is in any of them.
                // Ensure the page is committed if there's a match.

                _sparseLock.EnterReadLock();

                try
                {
                    foreach (TableSparseBlock reserved in _sparseReserved)
                    {
                        SparseMemoryBlock sparse = reserved.Block;

                        if (ptr >= sparse.Block.Pointer && ptr < sparse.Block.Pointer + (IntPtr)sparse.Block.Size)
                        {
                            sparse.EnsureMapped((ulong)(ptr - sparse.Block.Pointer));

                            break;
                        }
                    }
                }
                finally
                {
                    _sparseLock.ExitReadLock();
                }
            }
        }

        /// <summary>
        /// Get the fill value for a non-leaf level of the table.
        /// </summary>
        /// <param name="level">Level to get the fill value for</param>
        /// <returns>The fill value</returns>
        private IntPtr GetFillValue(int level)
        {
            if (_fillBottomLevel != null && level == Levels.Length - 2)
            {
                return (IntPtr)_fillBottomLevelPtr;
            }
            else
            {
                return IntPtr.Zero;
            }
        }

        /// <summary>
        /// Lazily initialize and get the root page of the <see cref="AddressTable{TEntry}"/>.
        /// </summary>
        /// <returns>Root page of the <see cref="AddressTable{TEntry}"/></returns>
        private TEntry** GetRootPage()
        {
            if (_table == null)
            {
                _table = (TEntry**)Allocate(1 << Levels[0].Length, GetFillValue(0), leaf: false);
            }

            return _table;
        }

        private int initedSize = 0;
        private int reservedSize = 0;

        /// <summary>
        /// Initialize a leaf page with the fill value.
        /// </summary>
        /// <param name="page">Page to initialize</param>
        private void InitLeafPage(Span<byte> page)
        {
            MemoryMarshal.Cast<byte, TEntry>(page).Fill(_fill);

            initedSize += page.Length;

            Logger.Info?.PrintMsg(LogClass.Cpu, $"Using memory {initedSize}/{reservedSize} bytes");
        }

        /// <summary>
        /// Reserve a new sparse block of the current sparse block size.
        /// </summary>
        /// <returns>The reserved block</returns>
        private TableSparseBlock ReserveNewSparseBlock()
        {
            var block = new TableSparseBlock(_sparseBlockSize, EnsureMapped, InitLeafPage);

            _sparseReserved.Add(block);
            _sparseReservedOffset = 0;

            return block;
        }

        /// <summary>
        /// Allocates a block of memory of the specified type and length.
        /// </summary>
        /// <typeparam name="T">Type of elements</typeparam>
        /// <param name="length">Number of elements</param>
        /// <param name="fill">Fill value</param>
        /// <param name="leaf"><see langword="true"/> if leaf; otherwise <see langword="false"/></param>
        /// <returns>Allocated block</returns>
        private IntPtr Allocate<T>(int length, T fill, bool leaf) where T : unmanaged
        {
            var size = sizeof(T) * length;

            reservedSize += size;

            AddressTablePage page;

            if (_sparse && leaf)
            {
                _sparseLock.EnterWriteLock();

                SparseMemoryBlock block;

                if (_sparseReserved.Count == 0)
                {
                    block = ReserveNewSparseBlock().Block;
                }
                else
                {
                    block = _sparseReserved.Last().Block;

                    if (_sparseReservedOffset == block.Block.Size)
                    {
                        block = ReserveNewSparseBlock().Block;
                    }
                }

                page = new AddressTablePage(true, block.Block.Pointer + (IntPtr)_sparseReservedOffset);

                _sparseReservedOffset += (ulong)size;

                _sparseLock.ExitWriteLock();
            }
            else
            {
                var address = (IntPtr)NativeAllocator.Instance.Allocate((uint)size);
                page = new AddressTablePage(false, address);

                var span = new Span<T>((void*)page.Address, length);
                span.Fill(fill);
            }

            _pages.Add(page);

            //TranslatorEventSource.Log.AddressTableAllocated(size, leaf);

            return page.Address;
        }

        /// <summary>
        /// Releases all resources used by the <see cref="AddressTable{TEntry}"/> instance.
        /// </summary>
        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        /// <summary>
        /// Releases all unmanaged and optionally managed resources used by the
        /// <see cref="AddressTable{TEntry}"/> instance.
        /// </summary>
        /// <param name="disposing"><see langword="true"/> to dispose managed resources also; otherwise just unmanaged resources</param>
        protected virtual void Dispose(bool disposing)
        {
            if (!_disposed)
            {
                foreach (var page in _pages)
                {
                    if (!page.IsSparse)
                    {
                        Marshal.FreeHGlobal(page.Address);
                    }
                }

                if (_sparse)
                {
                    foreach (TableSparseBlock block in _sparseReserved)
                    {
                        block.Dispose();
                    }

                    _sparseReserved.Clear();

                    _fillBottomLevel.Dispose();
                    _sparseFill.Dispose();
                    _sparseLock.Dispose();
                }

                _disposed = true;
            }
        }

        /// <summary>
        /// Frees resources used by the <see cref="AddressTable{TEntry}"/> instance.
        /// </summary>
        ~AddressTable()
        {
            Dispose(false);
        }
    }
}
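// Example usage (illustrative sketch; `funcPtr` is a hypothetical value produced by a translator,
// and addresses must satisfy IsValid for the chosen preset):
//
//     using var table = AddressTable<ulong>.CreateForArm(for64Bits: true);
//
//     table.Fill = 0;                                // value observed for entries never written
//     table.SignalCodeRange(0x8000_0000, 0x10_0000); // size hint for sparse leaf reservations
//
//     ref ulong entry = ref table.GetValue(0x8000_1234);
//     entry = funcPtr;                               // publish a translated-function pointer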