virtmem: Add memory reservation system

jit: Use virtmemFindCodeMemory on both codepaths; use virtmem reservations
in order to support the JitType_SetProcessMemoryPermission codepath
fincs 2020-10-30 16:07:38 +01:00
parent 490d96e6eb
commit 8310f438ca
4 changed files with 106 additions and 13 deletions

View File

@@ -6,6 +6,7 @@
  */
 #pragma once
 #include "../types.h"
+#include "virtmem.h"
 
 /// JIT implementation type.
 typedef enum {
@@ -21,7 +22,10 @@ typedef struct {
     void* rx_addr;
     void* rw_addr;
     bool is_executable;
-    Handle handle;
+    union {
+        Handle handle;
+        VirtmemReservation* rv;
+    };
 } Jit;
 
 /**

View File

@@ -7,6 +7,9 @@
 #pragma once
 #include "../types.h"
 
+/// Address space reservation type (see \ref virtmemAddReservation)
+typedef struct VirtmemReservation VirtmemReservation;
+
 /**
  * @brief Reserves a slice of general purpose address space sequentially.
  * @param size Desired size of the slice (rounded up to page alignment).
@@ -53,3 +56,19 @@ void* virtmemFindStack(size_t size, size_t guard_size);
  * @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
  */
 void* virtmemFindCodeMemory(size_t size, size_t guard_size);
+
+/**
+ * @brief Reserves a range of memory address space.
+ * @param mem Pointer to the address space slice.
+ * @param size Size of the slice.
+ * @return Pointer to a reservation object, or NULL on failure.
+ * @remark This function is intended to be used in lieu of a memory map operation when the memory won't be mapped straight away.
+ * @note The virtual memory manager mutex must be held during the find-and-reserve process (see \ref virtmemLock and \ref virtmemUnlock).
+ */
+VirtmemReservation* virtmemAddReservation(void* mem, size_t size);
+
+/**
+ * @brief Releases a memory address space reservation.
+ * @param rv Reservation to release.
+ */
+void virtmemRemoveReservation(VirtmemReservation* rv);
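
As a usage note (not part of this commit's diff): the find-and-reserve steps are meant to run inside a single virtmemLock()/virtmemUnlock() critical section, and the reservation must likewise be released under the lock. A minimal caller-side sketch, with hypothetical function names (reserveCodeRegion/releaseCodeRegion are illustrative, not libnx API):

// Hypothetical usage sketch of the reservation API declared above.
#include <switch.h>

static VirtmemReservation* g_rv;

void* reserveCodeRegion(size_t size)
{
    virtmemLock();
    void* addr = virtmemFindCodeMemory(size, 0x1000);        // pick a free spot (with guard pages)
    g_rv = addr ? virtmemAddReservation(addr, size) : NULL;  // pin it without mapping anything yet
    virtmemUnlock();
    return g_rv ? addr : NULL;
}

void releaseCodeRegion(void)
{
    virtmemLock();
    virtmemRemoveReservation(g_rv); // the virtmem lock must also be held here
    virtmemUnlock();
}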

View File

@@ -46,8 +46,15 @@ Result jitCreate(Jit* j, size_t size)
     switch (j->type)
     {
     case JitType_SetProcessMemoryPermission:
-        j->rx_addr = virtmemReserve(j->size);
+        virtmemLock();
+        j->rx_addr = virtmemFindCodeMemory(j->size, 0x1000);
         j->rw_addr = j->src_addr;
+        j->rv = virtmemAddReservation(j->rx_addr, j->size);
+        virtmemUnlock();
+
+        if (!j->rv) {
+            rc = MAKERESULT(Module_Libnx, LibnxError_OutOfMemory);
+        }
         break;
 
     case JitType_CodeMemory:
@@ -55,14 +62,14 @@ Result jitCreate(Jit* j, size_t size)
         if (R_SUCCEEDED(rc))
         {
             virtmemLock();
-            j->rw_addr = virtmemFindAslr(j->size, 0x1000);
+            j->rw_addr = virtmemFindCodeMemory(j->size, 0x1000);
             rc = svcControlCodeMemory(j->handle, CodeMapOperation_MapOwner, j->rw_addr, j->size, Perm_Rw);
             virtmemUnlock();
 
             if (R_SUCCEEDED(rc))
             {
                 virtmemLock();
-                j->rx_addr = virtmemFindAslr(j->size, 0x1000);
+                j->rx_addr = virtmemFindCodeMemory(j->size, 0x1000);
                 rc = svcControlCodeMemory(j->handle, CodeMapOperation_MapSlave, j->rx_addr, j->size, Perm_Rx);
                 virtmemUnlock();
 
@@ -151,7 +158,9 @@ Result jitClose(Jit* j)
         rc = jitTransitionToWritable(j);
 
         if (R_SUCCEEDED(rc)) {
-            virtmemFree(j->rx_addr, j->size);
+            virtmemLock();
+            virtmemRemoveReservation(j->rv);
+            virtmemUnlock();
        }
 
        break;
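
The JIT transition functions are not touched by this diff, so the step that eventually consumes the reserved rx_addr is not shown here. For context, a hedged sketch of what the JitType_SetProcessMemoryPermission path is expected to do with it later (assuming the svcMapProcessCodeMemory and svcSetProcessMemoryPermission syscalls; the real jitTransitionToExecutable implementation may differ). The reservation only has to keep rx_addr free from other virtmem users until this mapping is made:

// Sketch only (assumed flow, not taken from this diff): materializing the
// reserved rx_addr once the JIT buffer needs to become executable.
Result exampleTransitionToExecutable(Jit* j)
{
    // Map the RW source pages at the reserved address as process code memory.
    Result rc = svcMapProcessCodeMemory(envGetOwnProcessHandle(),
                                        (u64)j->rx_addr, (u64)j->rw_addr, j->size);
    if (R_FAILED(rc))
        return rc;

    // Then make the new mapping read-execute.
    return svcSetProcessMemoryPermission(envGetOwnProcessHandle(),
                                         (u64)j->rx_addr, j->size, Perm_Rx);
}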

View File

@@ -5,6 +5,7 @@
 #include "kernel/virtmem.h"
 #include "kernel/random.h"
 #include "runtime/diag.h"
+#include <stdlib.h>
 
 #define SEQUENTIAL_GUARD_REGION_SIZE 0x1000
 #define RANDOM_MAX_ATTEMPTS 0x200
@@ -14,6 +15,12 @@ typedef struct {
     uintptr_t end;
 } MemRegion;
 
+struct VirtmemReservation {
+    VirtmemReservation *next;
+    VirtmemReservation *prev;
+    MemRegion region;
+};
+
 static Mutex g_VirtmemMutex;
 
 static MemRegion g_AliasRegion;
@@ -21,6 +28,8 @@ static MemRegion g_HeapRegion;
 static MemRegion g_AslrRegion;
 static MemRegion g_StackRegion;
 
+static VirtmemReservation *g_Reservations;
+
 static uintptr_t g_SequentialAddr;
 static bool g_IsLegacyKernel;
 
@@ -54,7 +63,7 @@ NX_INLINE bool _memregionOverlaps(MemRegion* r, uintptr_t start, uintptr_t end)
     return start < r->end && r->start < end;
 }
 
-NX_INLINE bool _memregionIsUnmapped(uintptr_t start, uintptr_t end, uintptr_t guard, uintptr_t* out_end) {
+NX_INLINE bool _memregionIsMapped(uintptr_t start, uintptr_t end, uintptr_t guard, uintptr_t* out_end) {
     // Adjust start/end by the desired guard size.
     start -= guard;
     end += guard;
@@ -66,14 +75,30 @@ NX_INLINE bool _memregionIsUnmapped(uintptr_t start, uintptr_t end, uintptr_t guard, uintptr_t* out_end) {
     if (R_FAILED(rc))
         diagAbortWithResult(MAKERESULT(Module_Libnx, LibnxError_BadQueryMemory));
 
-    // Return error if there's anything mapped.
+    // Return true if there's anything mapped.
     uintptr_t memend = meminfo.addr + meminfo.size;
     if (meminfo.type != MemType_Unmapped || end > memend) {
         if (out_end) *out_end = memend + guard;
-        return false;
+        return true;
     }
 
-    return true;
+    return false;
+}
+
+NX_INLINE bool _memregionIsReserved(uintptr_t start, uintptr_t end, uintptr_t guard, uintptr_t* out_end) {
+    // Adjust start/end by the desired guard size.
+    start -= guard;
+    end += guard;
+
+    // Go through each reservation and check if any of them overlap the desired address range.
+    for (VirtmemReservation *rv = g_Reservations; rv; rv = rv->next) {
+        if (_memregionOverlaps(&rv->region, start, end)) {
+            if (out_end) *out_end = rv->region.end + guard;
+            return true;
+        }
+    }
+
+    return false;
 }
 
 static void* _memregionFindRandom(MemRegion* r, size_t size, size_t guard_size) {
@@ -107,9 +132,16 @@ static void* _memregionFindRandom(MemRegion* r, size_t size, size_t guard_size)
             break;
         }
 
-        // Check that the desired memory range is unmapped.
-        if (_memregionIsUnmapped(cur_addr, cur_addr + size, guard_size, NULL))
-            return (void*)cur_addr; // we found a suitable address!
+        // Check that there isn't anything mapped at the desired memory range.
+        if (_memregionIsMapped(cur_addr, cur_addr + size, guard_size, NULL))
+            continue;
+
+        // Check that the desired memory range doesn't overlap any reservations.
+        if (_memregionIsReserved(cur_addr, cur_addr + size, guard_size, NULL))
+            continue;
+
+        // We found a suitable address!
+        return (void*)cur_addr;
     }
 
     return NULL;
@@ -195,7 +227,11 @@ void* virtmemReserve(size_t size) {
         }
 
         // Avoid mapping in areas that are already used.
-        if (!_memregionIsUnmapped(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
+        if (_memregionIsMapped(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
+            continue;
+
+        // Avoid mapping in areas that are reserved.
+        if (_memregionIsReserved(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
            continue;
 
         // We found a suitable address for the block.
@@ -236,3 +272,28 @@ void* virtmemFindCodeMemory(size_t size, size_t guard_size) {
     // [1.0.0] requires CodeMemory to be mapped within the stack region.
     return _memregionFindRandom(g_IsLegacyKernel ? &g_StackRegion : &g_AslrRegion, size, guard_size);
 }
+
+VirtmemReservation* virtmemAddReservation(void* mem, size_t size) {
+    if (!mutexIsLockedByCurrentThread(&g_VirtmemMutex)) return NULL;
+    VirtmemReservation* rv = (VirtmemReservation*)malloc(sizeof(VirtmemReservation));
+    if (rv) {
+        rv->region.start = (uintptr_t)mem;
+        rv->region.end = rv->region.start + size;
+        rv->next = g_Reservations;
+        rv->prev = NULL;
+        g_Reservations = rv;
+        if (rv->next)
+            rv->next->prev = rv;
+    }
+    return rv;
+}
+
+void virtmemRemoveReservation(VirtmemReservation* rv) {
+    if (!mutexIsLockedByCurrentThread(&g_VirtmemMutex)) return;
+    if (rv->next)
+        rv->next->prev = rv->prev;
+    if (rv->prev)
+        rv->prev->next = rv->next;
+    else
+        g_Reservations = rv->next;
+}
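
A note on the overlap test used by _memregionIsReserved: the queried range is first widened by the guard size on both sides, then checked against each reservation with the half-open interval test from _memregionOverlaps (start < r->end && r->start < end). A small standalone example with made-up addresses:

// Worked example (hypothetical addresses) of the guard-adjusted overlap check.
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    // Existing reservation: [0x10000, 0x20000)
    uintptr_t rv_start = 0x10000, rv_end = 0x20000;

    // Candidate range [0x20000, 0x21000) with a 0x1000-byte guard on each side
    uintptr_t start = 0x20000 - 0x1000; // 0x1F000
    uintptr_t end   = 0x21000 + 0x1000; // 0x22000

    // Same predicate as _memregionOverlaps (half-open intervals)
    int overlaps = (start < rv_end) && (rv_start < end);

    // Prints 1: the candidate is rejected because its guard page would touch
    // the reservation, even though the two ranges themselves don't intersect.
    printf("overlaps = %d\n", overlaps);
    return 0;
}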