From 649f308d4b9eb8b17f73ff0182486a48c4b58362 Mon Sep 17 00:00:00 2001
From: fincs
Date: Sun, 20 Dec 2020 12:53:16 +0100
Subject: [PATCH] =?UTF-8?q?Delete=20old=20virtmem=20API=20=F0=9F=A6=80?=
 =?UTF-8?q?=F0=9F=A6=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 nx/include/switch/kernel/virtmem.h | 16 ---------
 nx/source/kernel/virtmem.c         | 55 ------------------------------
 2 files changed, 71 deletions(-)

diff --git a/nx/include/switch/kernel/virtmem.h b/nx/include/switch/kernel/virtmem.h
index 0a56d075..24115305 100644
--- a/nx/include/switch/kernel/virtmem.h
+++ b/nx/include/switch/kernel/virtmem.h
@@ -10,22 +10,6 @@
 /// Address space reservation type (see \ref virtmemAddReservation)
 typedef struct VirtmemReservation VirtmemReservation;
 
-/**
- * @brief Reserves a slice of general purpose address space sequentially.
- * @param size Desired size of the slice (rounded up to page alignment).
- * @return Pointer to the slice of address space.
- * @deprecated This function is prone to race conditions, please use \ref virtmemFindAslr or \ref virtmemFindCodeMemory (and, if necessary, \ref virtmemAddReservation) instead.
- */
-void* DEPRECATED virtmemReserve(size_t size);
-
-/**
- * @brief Relinquishes a slice of address space reserved with virtmemReserve (currently no-op).
- * @param addr Pointer to the slice.
- * @param size Size of the slice.
- * @deprecated This function is a companion of \ref virtmemReserve which is deprecated.
- */
-void DEPRECATED virtmemFree(void* addr, size_t size);
-
 /// Locks the virtual memory manager mutex.
 void virtmemLock(void);
 
diff --git a/nx/source/kernel/virtmem.c b/nx/source/kernel/virtmem.c
index c07b6f31..650a7332 100644
--- a/nx/source/kernel/virtmem.c
+++ b/nx/source/kernel/virtmem.c
@@ -30,7 +30,6 @@
 static MemRegion g_StackRegion;
 
 static VirtmemReservation *g_Reservations;
-static uintptr_t g_SequentialAddr;
 static bool g_IsLegacyKernel;
 
 static Result _memregionInitWithInfo(MemRegion* r, InfoType id0_addr, InfoType id0_sz) {
@@ -195,60 +194,6 @@ void virtmemSetup(void) {
     }
 }
 
-void* virtmemReserve(size_t size) {
-    // Page align the size
-    size = (size + 0xFFF) &~ 0xFFF;
-
-    // Main allocation loop
-    mutexLock(&g_VirtmemMutex);
-    uintptr_t cur_addr = g_SequentialAddr;
-    void* ret = NULL;
-    for (;;) {
-        // Roll over if we reached the end.
-        if (!_memregionIsInside(&g_AslrRegion, cur_addr, cur_addr + size))
-            cur_addr = g_AslrRegion.start;
-
-        // Avoid mapping within the alias region.
-        if (_memregionOverlaps(&g_AliasRegion, cur_addr, cur_addr + size)) {
-            cur_addr = g_AliasRegion.end;
-            continue;
-        }
-
-        // Avoid mapping within the heap region.
-        if (_memregionOverlaps(&g_HeapRegion, cur_addr, cur_addr + size)) {
-            cur_addr = g_HeapRegion.end;
-            continue;
-        }
-
-        // Avoid mapping within the stack region.
-        if (_memregionOverlaps(&g_StackRegion, cur_addr, cur_addr + size)) {
-            cur_addr = g_StackRegion.end;
-            continue;
-        }
-
-        // Avoid mapping in areas that are already used.
-        if (_memregionIsMapped(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
-            continue;
-
-        // Avoid mapping in areas that are reserved.
-        if (_memregionIsReserved(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
-            continue;
-
-        // We found a suitable address for the block.
-        g_SequentialAddr = cur_addr + size + SEQUENTIAL_GUARD_REGION_SIZE;
-        ret = (void*)cur_addr;
-        break;
-    }
-    mutexUnlock(&g_VirtmemMutex);
-
-    return ret;
-}
-
-void virtmemFree(void* addr, size_t size) {
-    IGNORE_ARG(addr);
-    IGNORE_ARG(size);
-}
-
 void virtmemLock(void) {
     mutexLock(&g_VirtmemMutex);
 }
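For reference, a minimal migration sketch for callers that previously used virtmemReserve(), built on the replacement API named in the deprecation note (virtmemLock, virtmemFindAslr, virtmemAddReservation). The helper name, the virtmemUnlock/virtmemRemoveReservation calls, and the 0x1000 guard size are illustrative assumptions, not part of this patch.

#include <switch.h>

// Sketch: replacing "void* mem = virtmemReserve(size);" with the race-free API.
// Assumes virtmemFindAslr(size, guard_size) and virtmemAddReservation(mem, size)
// as declared in the current virtmem.h; adjust the guard size to taste.
static void* reserve_aslr_space(size_t size, VirtmemReservation** out_rv)
{
    *out_rv = NULL;
    size = (size + 0xFFF) &~ 0xFFF;                  // page align, like the old code did
    virtmemLock();                                   // take the virtual memory manager mutex
    void* addr = virtmemFindAslr(size, 0x1000);      // search the ASLR region for a free slice (guard size assumed)
    if (addr)
        *out_rv = virtmemAddReservation(addr, size); // reserve it so concurrent searches skip it
    virtmemUnlock();
    return addr;
}

// Later, once whatever was mapped at the address has been unmapped, the caller
// would release the slice with virtmemRemoveReservation(*out_rv).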