Mirror of https://github.com/switchbrew/libnx.git

Delete old virtmem API 🦀🦀

Commit 649f308d4b (parent 637dd12b2d)
@@ -10,22 +10,6 @@
/// Address space reservation type (see \ref virtmemAddReservation)
typedef struct VirtmemReservation VirtmemReservation;

/**
 * @brief Reserves a slice of general purpose address space sequentially.
 * @param size Desired size of the slice (rounded up to page alignment).
 * @return Pointer to the slice of address space.
 * @deprecated This function is prone to race conditions, please use \ref virtmemFindAslr or \ref virtmemFindCodeMemory (and, if necessary, \ref virtmemAddReservation) instead.
 */
void* DEPRECATED virtmemReserve(size_t size);

/**
 * @brief Relinquishes a slice of address space reserved with virtmemReserve (currently no-op).
 * @param addr Pointer to the slice.
 * @param size Size of the slice.
 * @deprecated This function is a companion of \ref virtmemReserve which is deprecated.
 */
void DEPRECATED virtmemFree(void* addr, size_t size);

/// Locks the virtual memory manager mutex.
void virtmemLock(void);
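The deprecation notes above point callers at the newer lock/find/reserve API instead of the removed pair. Below is a minimal migration sketch, assuming the virtmemLock/virtmemUnlock, virtmemFindAslr, virtmemAddReservation and virtmemRemoveReservation signatures from current libnx headers; the guard size, helper names and error handling are illustrative assumptions, not part of this commit.

// Hedged sketch: replacing the removed virtmemReserve/virtmemFree pair with the
// lock + find + reserve flow the deprecation notes recommend. Signatures are
// assumed from current libnx; check the headers you actually build against.
#include <switch.h>

static void* reserve_aslr_slice(size_t size, VirtmemReservation** out_rv) {
    void* addr = NULL;
    virtmemLock();                                   // hold the VMM mutex while choosing an address
    addr = virtmemFindAslr(size, 0x1000);            // find a free ASLR-region slice (guard size assumed)
    if (addr)
        *out_rv = virtmemAddReservation(addr, size); // keep the slice from being handed out again
    virtmemUnlock();
    return addr;
}

static void release_aslr_slice(VirtmemReservation* rv) {
    virtmemLock();
    virtmemRemoveReservation(rv);                    // counterpart of the old (no-op) virtmemFree
    virtmemUnlock();
}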
@@ -30,7 +30,6 @@ static MemRegion g_StackRegion;

static VirtmemReservation *g_Reservations;

static uintptr_t g_SequentialAddr;
static bool g_IsLegacyKernel;

static Result _memregionInitWithInfo(MemRegion* r, InfoType id0_addr, InfoType id0_sz) {
@@ -195,60 +194,6 @@ void virtmemSetup(void) {
    }
}

void* virtmemReserve(size_t size) {
    // Page align the size
    size = (size + 0xFFF) &~ 0xFFF;

    // Main allocation loop
    mutexLock(&g_VirtmemMutex);
    uintptr_t cur_addr = g_SequentialAddr;
    void* ret = NULL;
    for (;;) {
        // Roll over if we reached the end.
        if (!_memregionIsInside(&g_AslrRegion, cur_addr, cur_addr + size))
            cur_addr = g_AslrRegion.start;

        // Avoid mapping within the alias region.
        if (_memregionOverlaps(&g_AliasRegion, cur_addr, cur_addr + size)) {
            cur_addr = g_AliasRegion.end;
            continue;
        }

        // Avoid mapping within the heap region.
        if (_memregionOverlaps(&g_HeapRegion, cur_addr, cur_addr + size)) {
            cur_addr = g_HeapRegion.end;
            continue;
        }

        // Avoid mapping within the stack region.
        if (_memregionOverlaps(&g_StackRegion, cur_addr, cur_addr + size)) {
            cur_addr = g_StackRegion.end;
            continue;
        }

        // Avoid mapping in areas that are already used.
        if (_memregionIsMapped(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
            continue;

        // Avoid mapping in areas that are reserved.
        if (_memregionIsReserved(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
            continue;

        // We found a suitable address for the block.
        g_SequentialAddr = cur_addr + size + SEQUENTIAL_GUARD_REGION_SIZE;
        ret = (void*)cur_addr;
        break;
    }
    mutexUnlock(&g_VirtmemMutex);

    return ret;
}

void virtmemFree(void* addr, size_t size) {
    IGNORE_ARG(addr);
    IGNORE_ARG(size);
}

void virtmemLock(void) {
    mutexLock(&g_VirtmemMutex);
}
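The removed implementation picked an address under g_VirtmemMutex but released the mutex before the caller could map anything at that address, which is the race the deprecation note refers to. A minimal sketch of the replacement pattern, assuming current libnx signatures for virtmemLock/virtmemUnlock, virtmemFindAslr and svcMapSharedMemory; the handle, size, guard size and error handling are placeholders for illustration:

// Hedged sketch of the pattern that supersedes virtmemReserve: keep the virtmem
// mutex held across both the address search and the syscall that actually maps
// something there, so no other thread can claim the same range in between.
#include <switch.h>

Result map_shared_block(Handle shmem_handle, size_t size, void** out_addr) {
    Result rc;
    virtmemLock();                                 // same mutex virtmemReserve used, but held across the map
    void* addr = virtmemFindAslr(size, 0x1000);    // pick a free ASLR-region slice (guard size assumed)
    rc = addr ? svcMapSharedMemory(shmem_handle, addr, size, Perm_Rw)
              : MAKERESULT(Module_Libnx, LibnxError_OutOfMemory);
    virtmemUnlock();                               // safe to unlock: the range is now really mapped
    if (R_SUCCEEDED(rc))
        *out_addr = addr;
    return rc;
}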