Support memory aliasing (#2954)

* Back to the origins: Make memory manager take guest PA rather than host address once again

* Direct mapping with alias support on Windows (see the aliasing sketch after the changed-files summary below)

* Fix issues and remove more of the emulated shared memory

* Linux support

* Make shared and transfer memory not depend on SharedMemoryStorage

* More efficient view mapping on Windows (no longer restricted to 4KB pages at a time)

* Handle potential access violations caused by partial unmap

* Implement host mapping using shared memory on Linux

* Add new GetPhysicalAddressChecked method, used to ensure the virtual address is mapped before address translation

Also align GetRef behaviour with software memory manager

* We don't need a mirrorable memory block for software memory manager mode

* Disable memory aliasing tests while we don't have shared memory support on Mac

* Shared memory & SIGBUS handler for macOS

* Fix typo + nits + re-enable memory tests

* Set MAP_JIT_DARWIN on x86 Mac too

* Add back the address space mirror

* Only set MAP_JIT_DARWIN if we are mapping as executable

* Disable aliasing tests again (they still fail on Mac)

* Fix UnmapView4KB (by not casting size to int)

* Use ref counting on memory blocks to delay closing the shared memory handle until all blocks using it are disposed (pattern sketched below, after the KPageTable.cs diff)

* Address PR feedback

* Make RO hold a reference to the guest process memory manager to avoid early disposal

Co-authored-by: nastys <nastys@users.noreply.github.com>
gdkchan authored on 2022-05-02 20:30:02 -03:00, committed by GitHub
parent 4a892fbdc9, commit 95017b8c66
41 changed files with 2373 additions and 2155 deletions
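
The commit message above boils down to one idea: guest physical memory is now backed by a host shared-memory object, so several virtual ranges can be mapped as views of the same backing pages (aliasing) instead of being emulated by copying. A minimal sketch of that technique in C#, using .NET's MemoryMappedFile rather than Ryujinx's actual MemoryBlock API (all names here are illustrative):

using System;
using System.IO.MemoryMappedFiles;

class AliasingSketch
{
    static void Main()
    {
        // One anonymous shared-memory backing stands in for "guest physical memory".
        using var backing = MemoryMappedFile.CreateNew(null, 0x1000);

        // Two views of the same offset play the role of two virtual mappings
        // that alias the same physical pages.
        using var viewA = backing.CreateViewAccessor(0, 0x1000);
        using var viewB = backing.CreateViewAccessor(0, 0x1000);

        viewA.Write(0x10, 0xCAFEBABEu);      // write through the first alias
        uint value = viewB.ReadUInt32(0x10); // the second alias sees the same bytes
        Console.WriteLine($"0x{value:X8}");  // prints 0xCAFEBABE
    }
}

The real implementation creates and tears down such views at page granularity through platform-specific calls (view mapping on Windows, shared memory on Linux and macOS, per the bullets above), but the principle is the same: a write through one mapping is immediately visible through every other mapping of the same backing pages.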

Ryujinx.HLE/HOS/Kernel/Memory/KPageTable.cs

@@ -1,10 +1,7 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -12,17 +9,19 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
private readonly IVirtualMemoryManager _cpuMemory;
public override bool SupportsMemoryAliasing => true;
public KPageTable(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
protected override void GetPhysicalRegions(ulong va, ulong size, KPageList pageList)
{
return _cpuMemory.GetPhysicalRegions(va, size);
var ranges = _cpuMemory.GetPhysicalRegions(va, size);
foreach (var range in ranges)
{
pageList.AddRange(range.Address + DramMemoryMap.DramBase, range.Size / PageSize);
}
}
/// <inheritdoc/>
@@ -34,7 +33,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
var srcRanges = GetPhysicalRegions(src, pagesCount * PageSize);
KPageList pageList = new KPageList();
GetPhysicalRegions(src, pagesCount * PageSize, pageList);
KernelResult result = Reprotect(src, pagesCount, KMemoryPermission.None);
@@ -43,7 +43,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return result;
}
result = MapPages(dst, srcRanges, newDstPermission);
result = MapPages(dst, pageList, newDstPermission, false, 0);
if (result != KernelResult.Success)
{
@@ -59,10 +59,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong size = pagesCount * PageSize;
var srcRanges = GetPhysicalRegions(src, size);
var dstRanges = GetPhysicalRegions(dst, size);
KPageList srcPageList = new KPageList();
KPageList dstPageList = new KPageList();
if (!dstRanges.SequenceEqual(srcRanges))
GetPhysicalRegions(src, size, srcPageList);
GetPhysicalRegions(dst, size, dstPageList);
if (!dstPageList.IsEqual(srcPageList))
{
return KernelResult.InvalidMemRange;
}
@@ -78,7 +81,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
if (result != KernelResult.Success)
{
KernelResult mapResult = MapPages(dst, dstRanges, oldDstPermission);
KernelResult mapResult = MapPages(dst, dstPageList, oldDstPermission, false, 0);
Debug.Assert(mapResult == KernelResult.Success);
}
@@ -92,7 +95,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Commit(srcPa - DramMemoryMap.DramBase, size);
_cpuMemory.Map(dstVa, Context.Memory.GetPointer(srcPa - DramMemoryMap.DramBase, size), size);
_cpuMemory.Map(dstVa, srcPa - DramMemoryMap.DramBase, size);
if (DramMemoryMap.IsHeapPhysicalAddress(srcPa))
{
@@ -121,7 +124,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
Context.Memory.Commit(addr, size);
_cpuMemory.Map(currentVa, Context.Memory.GetPointer(addr, size), size);
_cpuMemory.Map(currentVa, addr, size);
if (shouldFillPages)
{
@@ -136,33 +139,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
{
ulong currentVa = address;
foreach (var range in ranges)
{
ulong size = range.Size;
ulong pa = GetDramAddressFromHostAddress(range.Address);
if (pa != ulong.MaxValue)
{
pa += DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
Context.MemoryManager.IncrementPagesReferenceCount(pa, size / PageSize);
}
}
_cpuMemory.Map(currentVa, range.Address, size);
currentVa += size;
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
@@ -172,13 +148,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
foreach (var region in regions)
{
ulong pa = GetDramAddressFromHostAddress(region.Address);
if (pa == ulong.MaxValue)
{
continue;
}
pa += DramMemoryMap.DramBase;
ulong pa = region.Address + DramMemoryMap.DramBase;
if (DramMemoryMap.IsHeapPhysicalAddress(pa))
{
pagesToClose.AddRange(pa, region.Size / PageSize);
@@ -217,15 +187,5 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
_cpuMemory.Write(va, data);
}
private ulong GetDramAddressFromHostAddress(nuint hostAddress)
{
if (hostAddress < (nuint)(ulong)Context.Memory.Pointer || hostAddress >= (nuint)((ulong)Context.Memory.Pointer + Context.Memory.Size))
{
return ulong.MaxValue;
}
return hostAddress - (ulong)Context.Memory.Pointer;
}
}
}
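
The page table above reference-counts heap pages when they gain an extra mapping and queues them for release on unmap (IncrementPagesReferenceCount / pagesToClose). Per the commit message, the same idea is applied to the host memory blocks: blocks that map views of a shared memory object ref-count it, so the handle is closed only once the last block is disposed. A minimal sketch of that pattern, with hypothetical names rather than Ryujinx's actual types:

using System;
using System.IO.MemoryMappedFiles;
using System.Threading;

// Hypothetical ref-counted owner of a shared memory handle (illustration only).
sealed class SharedMemoryOwner : IDisposable
{
    private readonly MemoryMappedFile _sharedMemory;
    private int _referenceCount = 1; // the creator holds the initial reference

    public SharedMemoryOwner(long capacity)
    {
        _sharedMemory = MemoryMappedFile.CreateNew(null, capacity);
    }

    // Every block that maps a view takes a reference on the backing object.
    public MemoryMappedViewAccessor CreateView(long offset, long size)
    {
        Interlocked.Increment(ref _referenceCount);
        return _sharedMemory.CreateViewAccessor(offset, size);
    }

    // Called by the creator's Dispose and by each view-owning block when it is disposed.
    public void DecrementReferenceCount()
    {
        if (Interlocked.Decrement(ref _referenceCount) == 0)
        {
            _sharedMemory.Dispose(); // close the handle only after the last user is gone
        }
    }

    public void Dispose() => DecrementReferenceCount();
}

A block that obtains a view through CreateView pairs it with a call to DecrementReferenceCount in its own Dispose, so the order in which the blocks and the owner are disposed no longer matters.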

Ryujinx.HLE/HOS/Kernel/Memory/KPageTableBase.cs

@@ -1,11 +1,9 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -73,8 +71,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private MersenneTwister _randomNumberGenerator;
public abstract bool SupportsMemoryAliasing { get; }
private MemoryFillValue _heapFillValue;
private MemoryFillValue _ipcFillValue;
@@ -305,7 +301,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
TlsIoRegionStart = tlsIoRegion.Start;
TlsIoRegionEnd = tlsIoRegion.End;
// TODO: Check kernel configuration via secure monitor call when implemented to set memory fill values.
// TODO: Check kernel configuration via secure monitor call when implemented to set memory fill values.
_currentHeapAddr = HeapRegionStart;
_heapCapacity = 0;
@@ -380,8 +376,9 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
public KernelResult UnmapPages(ulong address, ulong pagesCount, IEnumerable<HostMemoryRange> ranges, MemoryState stateExpected)
public KernelResult UnmapPages(ulong address, KPageList pageList, MemoryState stateExpected)
{
ulong pagesCount = pageList.GetPagesCount();
ulong size = pagesCount * PageSize;
ulong endAddr = address + size;
@@ -405,9 +402,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
lock (_blockManager)
{
var currentRanges = GetPhysicalRegions(address, size);
KPageList currentPageList = new KPageList();
if (!currentRanges.SequenceEqual(ranges))
GetPhysicalRegions(address, size, currentPageList);
if (!currentPageList.IsEqual(pageList))
{
return KernelResult.InvalidMemRange;
}
@@ -1690,11 +1689,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
bool send,
out ulong dst)
{
if (!SupportsMemoryAliasing)
{
throw new NotSupportedException("Memory aliasing not supported, can't map IPC buffers.");
}
dst = 0;
if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
@@ -1828,7 +1822,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong alignedSize = endAddrTruncated - addressRounded;
KernelResult result = MapPages(currentVa, srcPageTable.GetPhysicalRegions(addressRounded, alignedSize), permission);
KPageList pageList = new KPageList();
srcPageTable.GetPhysicalRegions(addressRounded, alignedSize, pageList);
KernelResult result = MapPages(currentVa, pageList, permission);
if (result != KernelResult.Success)
{
@@ -2041,7 +2038,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed);
}
public KernelResult BorrowTransferMemory(List<HostMemoryRange> ranges, ulong address, ulong size, KMemoryPermission permission)
public KernelResult BorrowTransferMemory(KPageList pageList, ulong address, ulong size, KMemoryPermission permission)
{
return SetAttributesAndChangePermission(
address,
@@ -2054,7 +2051,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.None,
permission,
MemoryAttribute.Borrowed,
ranges);
pageList);
}
private KernelResult SetAttributesAndChangePermission(
@@ -2068,7 +2065,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute attributeExpected,
KMemoryPermission newPermission,
MemoryAttribute attributeSetMask,
List<HostMemoryRange> ranges = null)
KPageList pageList = null)
{
if (address + size <= address || !InsideAddrSpace(address, size))
{
@@ -2093,7 +2090,10 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong pagesCount = size / PageSize;
ranges?.AddRange(GetPhysicalRegions(address, size));
if (pageList != null)
{
GetPhysicalRegions(address, pagesCount * PageSize, pageList);
}
if (!_slabManager.CanAllocate(MaxBlocksNeededForInsertion))
{
@@ -2143,7 +2143,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed);
}
public KernelResult UnborrowTransferMemory(ulong address, ulong size, List<HostMemoryRange> ranges)
public KernelResult UnborrowTransferMemory(ulong address, ulong size, KPageList pageList)
{
return ClearAttributesAndChangePermission(
address,
@@ -2156,7 +2156,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute.Borrowed,
KMemoryPermission.ReadAndWrite,
MemoryAttribute.Borrowed,
ranges);
pageList);
}
private KernelResult ClearAttributesAndChangePermission(
@@ -2170,7 +2170,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryAttribute attributeExpected,
KMemoryPermission newPermission,
MemoryAttribute attributeClearMask,
List<HostMemoryRange> ranges = null)
KPageList pageList = null)
{
if (address + size <= address || !InsideAddrSpace(address, size))
{
@@ -2195,11 +2195,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
ulong pagesCount = size / PageSize;
if (ranges != null)
if (pageList != null)
{
var currentRanges = GetPhysicalRegions(address, size);
KPageList currentPageList = new KPageList();
if (!currentRanges.SequenceEqual(ranges))
GetPhysicalRegions(address, pagesCount * PageSize, currentPageList);
if (!currentPageList.IsEqual(pageList))
{
return KernelResult.InvalidMemRange;
}
@@ -2741,8 +2743,8 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// </summary>
/// <param name="va">Virtual address of the range</param>
/// <param name="size">Size of the range</param>
/// <returns>Array of physical regions</returns>
protected abstract IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size);
/// <param name="pageList">Page list where the ranges will be added</param>
protected abstract void GetPhysicalRegions(ulong va, ulong size, KPageList pageList);
/// <summary>
/// Gets a read-only span of data from CPU mapped memory.
@@ -2803,16 +2805,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
/// <returns>Result of the mapping operation</returns>
protected abstract KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages = false, byte fillValue = 0);
/// <summary>
/// Maps a region of memory into the specified host memory ranges.
/// </summary>
/// <param name="address">Destination virtual address that should be mapped</param>
/// <param name="ranges">Ranges of host memory that should be mapped</param>
/// <param name="permission">Permission of the region to be mapped</param>
/// <returns>Result of the mapping operation</returns>
/// <exception cref="NotSupportedException">The implementation does not support memory aliasing</exception>
protected abstract KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission);
/// <summary>
/// Unmaps a region of memory that was previously mapped with one of the page mapping methods.
/// </summary>

Ryujinx.HLE/HOS/Kernel/Memory/KPageTableHostMapped.cs (file deleted)

@@ -1,139 +0,0 @@
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System.Linq;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KPageTableHostMapped : KPageTableBase
{
private const int CopyChunckSize = 0x100000;
private readonly IVirtualMemoryManager _cpuMemory;
public override bool SupportsMemoryAliasing => false;
public KPageTableHostMapped(KernelContext context, IVirtualMemoryManager cpuMemory) : base(context)
{
_cpuMemory = cpuMemory;
}
/// <inheritdoc/>
protected override IEnumerable<HostMemoryRange> GetPhysicalRegions(ulong va, ulong size)
{
return _cpuMemory.GetPhysicalRegions(va, size);
}
/// <inheritdoc/>
protected override ReadOnlySpan<byte> GetSpan(ulong va, int size)
{
return _cpuMemory.GetSpan(va, size);
}
/// <inheritdoc/>
protected override KernelResult MapMemory(ulong src, ulong dst, ulong pagesCount, KMemoryPermission oldSrcPermission, KMemoryPermission newDstPermission)
{
ulong size = pagesCount * PageSize;
_cpuMemory.Map(dst, 0, size);
ulong currentSize = size;
while (currentSize > 0)
{
ulong copySize = Math.Min(currentSize, CopyChunckSize);
_cpuMemory.Write(dst, _cpuMemory.GetSpan(src, (int)copySize));
currentSize -= copySize;
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult UnmapMemory(ulong dst, ulong src, ulong pagesCount, KMemoryPermission oldDstPermission, KMemoryPermission newSrcPermission)
{
ulong size = pagesCount * PageSize;
// TODO: Validation.
ulong currentSize = size;
while (currentSize > 0)
{
ulong copySize = Math.Min(currentSize, CopyChunckSize);
_cpuMemory.Write(src, _cpuMemory.GetSpan(dst, (int)copySize));
currentSize -= copySize;
}
_cpuMemory.Unmap(dst, size);
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong dstVa, ulong pagesCount, ulong srcPa, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
_cpuMemory.Map(dstVa, 0, pagesCount * PageSize);
if (shouldFillPages)
{
_cpuMemory.Fill(dstVa, pagesCount * PageSize, fillValue);
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, KPageList pageList, KMemoryPermission permission, bool shouldFillPages, byte fillValue)
{
ulong pagesCount = pageList.GetPagesCount();
_cpuMemory.Map(address, 0, pagesCount * PageSize);
if (shouldFillPages)
{
_cpuMemory.Fill(address, pagesCount * PageSize, fillValue);
}
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult MapPages(ulong address, IEnumerable<HostMemoryRange> ranges, KMemoryPermission permission)
{
throw new NotSupportedException();
}
/// <inheritdoc/>
protected override KernelResult Unmap(ulong address, ulong pagesCount)
{
_cpuMemory.Unmap(address, pagesCount * PageSize);
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult Reprotect(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return KernelResult.Success;
}
/// <inheritdoc/>
protected override KernelResult ReprotectWithAttributes(ulong address, ulong pagesCount, KMemoryPermission permission)
{
// TODO.
return KernelResult.Success;
}
/// <inheritdoc/>
protected override void SignalMemoryTracking(ulong va, ulong size, bool write)
{
_cpuMemory.SignalMemoryTracking(va, size, write);
}
/// <inheritdoc/>
protected override void Write(ulong va, ReadOnlySpan<byte> data)
{
_cpuMemory.Write(va, data);
}
}
}

Ryujinx.HLE/HOS/Kernel/Memory/KSharedMemory.cs

@@ -6,7 +6,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
class KSharedMemory : KAutoObject
{
private readonly SharedMemoryStorage _storage;
private readonly KPageList _pageList;
private readonly ulong _ownerPid;
@@ -20,7 +20,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
KMemoryPermission ownerPermission,
KMemoryPermission userPermission) : base(context)
{
_storage = storage;
_pageList = storage.GetPageList();
_ownerPid = ownerPid;
_ownerPermission = ownerPermission;
_userPermission = userPermission;
@@ -33,10 +33,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
KProcess process,
KMemoryPermission permission)
{
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
if (pageList.GetPagesCount() != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
@@ -50,35 +47,17 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
return KernelResult.InvalidPermission;
}
KernelResult result = memoryManager.MapPages(address, pageList, MemoryState.SharedMemory, permission);
if (result == KernelResult.Success && !memoryManager.SupportsMemoryAliasing)
{
_storage.Borrow(process, address);
}
return result;
return memoryManager.MapPages(address, _pageList, MemoryState.SharedMemory, permission);
}
public KernelResult UnmapFromProcess(
KPageTableBase memoryManager,
ulong address,
ulong size,
KProcess process)
public KernelResult UnmapFromProcess(KPageTableBase memoryManager, ulong address, ulong size, KProcess process)
{
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
ulong pagesCount = pageList.GetPagesCount();
if (pagesCount != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
var ranges = _storage.GetRanges();
return memoryManager.UnmapPages(address, pagesCount, ranges, MemoryState.SharedMemory);
return memoryManager.UnmapPages(address, _pageList, MemoryState.SharedMemory);
}
}
}

Ryujinx.HLE/HOS/Kernel/Memory/KTransferMemory.cs

@@ -1,9 +1,7 @@
using Ryujinx.Common;
using Ryujinx.HLE.HOS.Kernel.Common;
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -14,9 +12,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
// TODO: Remove when we no longer need to read it from the owner directly.
public KProcess Creator => _creator;
private readonly List<HostMemoryRange> _ranges;
private readonly SharedMemoryStorage _storage;
private readonly KPageList _pageList;
public ulong Address { get; private set; }
public ulong Size { get; private set; }
@@ -28,12 +24,12 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public KTransferMemory(KernelContext context) : base(context)
{
_ranges = new List<HostMemoryRange>();
_pageList = new KPageList();
}
public KTransferMemory(KernelContext context, SharedMemoryStorage storage) : base(context)
{
_storage = storage;
_pageList = storage.GetPageList();
Permission = KMemoryPermission.ReadAndWrite;
_hasBeenInitialized = true;
@@ -46,7 +42,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
_creator = creator;
KernelResult result = creator.MemoryManager.BorrowTransferMemory(_ranges, address, size, permission);
KernelResult result = creator.MemoryManager.BorrowTransferMemory(_pageList, address, size, permission);
if (result != KernelResult.Success)
{
@@ -71,15 +67,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
KProcess process,
KMemoryPermission permission)
{
if (_storage == null)
{
throw new NotImplementedException();
}
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
if (pageList.GetPagesCount() != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
@@ -91,16 +79,11 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
KernelResult result = memoryManager.MapPages(address, pageList, state, KMemoryPermission.ReadAndWrite);
KernelResult result = memoryManager.MapPages(address, _pageList, state, KMemoryPermission.ReadAndWrite);
if (result == KernelResult.Success)
{
_isMapped = true;
if (!memoryManager.SupportsMemoryAliasing)
{
_storage.Borrow(process, address);
}
}
return result;
@@ -112,26 +95,14 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
ulong size,
KProcess process)
{
if (_storage == null)
{
throw new NotImplementedException();
}
ulong pagesCountRounded = BitUtils.DivRoundUp(size, KPageTableBase.PageSize);
var pageList = _storage.GetPageList();
ulong pagesCount = pageList.GetPagesCount();
if (pagesCount != pagesCountRounded)
if (_pageList.GetPagesCount() != BitUtils.DivRoundUp(size, KPageTableBase.PageSize))
{
return KernelResult.InvalidSize;
}
var ranges = _storage.GetRanges();
MemoryState state = Permission == KMemoryPermission.None ? MemoryState.TransferMemoryIsolated : MemoryState.TransferMemory;
KernelResult result = memoryManager.UnmapPages(address, pagesCount, ranges, state);
KernelResult result = memoryManager.UnmapPages(address, _pageList, state);
if (result == KernelResult.Success)
{
@@ -145,7 +116,7 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
{
if (_hasBeenInitialized)
{
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _ranges) != KernelResult.Success)
if (!_isMapped && _creator.MemoryManager.UnborrowTransferMemory(Address, Size, _pageList) != KernelResult.Success)
{
throw new InvalidOperationException("Unexpected failure restoring transfer memory attributes.");
}

Ryujinx.HLE/HOS/Kernel/Memory/SharedMemoryStorage.cs

@@ -1,8 +1,4 @@
using Ryujinx.HLE.HOS.Kernel.Process;
using Ryujinx.Memory;
using Ryujinx.Memory.Range;
using System;
using System.Collections.Generic;
using System;
namespace Ryujinx.HLE.HOS.Kernel.Memory
{
@@ -12,9 +8,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
private readonly KPageList _pageList;
private readonly ulong _size;
private IVirtualMemoryManager _borrowerMemory;
private ulong _borrowerVa;
public SharedMemoryStorage(KernelContext context, KPageList pageList)
{
_context = context;
@@ -29,24 +22,6 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
}
}
public void Borrow(KProcess dstProcess, ulong va)
{
ulong currentOffset = 0;
foreach (KPageNode pageNode in _pageList)
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
dstProcess.CpuMemory.Write(va + currentOffset, _context.Memory.GetSpan(address + currentOffset, (int)size));
currentOffset += size;
}
_borrowerMemory = dstProcess.CpuMemory;
_borrowerVa = va;
}
public void ZeroFill()
{
for (ulong offset = 0; offset < _size; offset += sizeof(ulong))
@@ -57,42 +32,13 @@ namespace Ryujinx.HLE.HOS.Kernel.Memory
public ref T GetRef<T>(ulong offset) where T : unmanaged
{
if (_borrowerMemory == null)
if (_pageList.Nodes.Count == 1)
{
if (_pageList.Nodes.Count == 1)
{
ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase;
return ref _context.Memory.GetRef<T>(address + offset);
}
throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
ulong address = _pageList.Nodes.First.Value.Address - DramMemoryMap.DramBase;
return ref _context.Memory.GetRef<T>(address + offset);
}
else
{
return ref _borrowerMemory.GetRef<T>(_borrowerVa + offset);
}
}
public IEnumerable<HostMemoryRange> GetRanges()
{
if (_borrowerMemory == null)
{
var ranges = new List<HostMemoryRange>();
foreach (KPageNode pageNode in _pageList)
{
ulong address = pageNode.Address - DramMemoryMap.DramBase;
ulong size = pageNode.PagesCount * KPageTableBase.PageSize;
ranges.Add(new HostMemoryRange(_context.Memory.GetPointer(address, size), size));
}
return ranges;
}
else
{
return _borrowerMemory.GetPhysicalRegions(_borrowerVa, _size);
}
throw new NotImplementedException("Non-contiguous shared memory is not yet supported.");
}
public KPageList GetPageList()