Delete old virtmem API 🦀🦀

This commit is contained in:
fincs 2020-12-20 12:53:16 +01:00
parent 637dd12b2d
commit 649f308d4b
No known key found for this signature in database
GPG Key ID: 62C7609ADA219C60
2 changed files with 0 additions and 71 deletions

View File

@@ -10,22 +10,6 @@
/// Address space reservation type (see \ref virtmemAddReservation)
typedef struct VirtmemReservation VirtmemReservation;
/**
* @brief Reserves a slice of general purpose address space sequentially.
* @param size Desired size of the slice (rounded up to page alignment).
* @return Pointer to the slice of address space.
* @deprecated This function is prone to race conditions, please use \ref virtmemFindAslr or \ref virtmemFindCodeMemory (and, if necessary, \ref virtmemAddReservation) instead.
*/
void* DEPRECATED virtmemReserve(size_t size);
/**
* @brief Relinquishes a slice of address space reserved with virtmemReserve (currently no-op).
* @param addr Pointer to the slice.
* @param size Size of the slice.
* @deprecated This function is a companion of \ref virtmemReserve which is deprecated.
*/
void DEPRECATED virtmemFree(void* addr, size_t size);
/// Locks the virtual memory manager mutex.
void virtmemLock(void);

View File

@@ -30,7 +30,6 @@ static MemRegion g_StackRegion;
static VirtmemReservation *g_Reservations;
static uintptr_t g_SequentialAddr;
static bool g_IsLegacyKernel;
static Result _memregionInitWithInfo(MemRegion* r, InfoType id0_addr, InfoType id0_sz) {
@@ -195,60 +194,6 @@ void virtmemSetup(void) {
}
}
/**
 * @brief Sequentially reserves a slice of general purpose address space.
 * @param size Desired size of the slice; rounded up to page alignment.
 * @return Pointer to the reserved slice of address space.
 * @note Deprecated: the sequential cursor makes this racy against concurrent
 *       mappers; virtmemFindAslr/virtmemFindCodeMemory are the replacements.
 */
void* virtmemReserve(size_t size) {
    // Round the requested size up to a whole number of pages (0x1000 bytes).
    size = (size + 0xFFF) &~ 0xFFF;

    mutexLock(&g_VirtmemMutex);

    uintptr_t candidate = g_SequentialAddr;
    void* result = NULL;

    for (;;) {
        // Wrap back to the start of the ASLR region when the candidate
        // range runs past its end.
        if (!_memregionIsInside(&g_AslrRegion, candidate, candidate + size))
            candidate = g_AslrRegion.start;

        // Skip past the alias, heap and stack regions if the candidate
        // range would overlap any of them.
        if (_memregionOverlaps(&g_AliasRegion, candidate, candidate + size)) {
            candidate = g_AliasRegion.end;
            continue;
        }
        if (_memregionOverlaps(&g_HeapRegion, candidate, candidate + size)) {
            candidate = g_HeapRegion.end;
            continue;
        }
        if (_memregionOverlaps(&g_StackRegion, candidate, candidate + size)) {
            candidate = g_StackRegion.end;
            continue;
        }

        // Skip ranges that are already mapped or reserved; on a conflict
        // these helpers advance the candidate past the offending area.
        if (_memregionIsMapped(candidate, candidate + size, SEQUENTIAL_GUARD_REGION_SIZE, &candidate))
            continue;
        if (_memregionIsReserved(candidate, candidate + size, SEQUENTIAL_GUARD_REGION_SIZE, &candidate))
            continue;

        // Suitable range found: advance the sequential cursor past it
        // (plus a guard gap) and hand the range back to the caller.
        g_SequentialAddr = candidate + size + SEQUENTIAL_GUARD_REGION_SIZE;
        result = (void*)candidate;
        break;
    }

    mutexUnlock(&g_VirtmemMutex);
    return result;
}
/**
 * @brief Relinquishes a slice reserved with virtmemReserve.
 * @param addr Pointer to the slice (unused).
 * @param size Size of the slice (unused).
 * @note Currently a no-op: reservations made by virtmemReserve are not
 *       tracked individually, so there is nothing to release.
 */
void virtmemFree(void* addr, size_t size) {
    (void)addr;
    (void)size;
}
// Acquires the virtual memory manager mutex (g_VirtmemMutex), serializing
// access to the manager's shared state for the caller.
void virtmemLock(void) {
mutexLock(&g_VirtmemMutex);
}