virtmem: Major rewrite to support ASLR, see details:

- Added virtmemLock/Unlock, needed for atomic find-and-map operations
- Added virtmemFindAslr, which looks for a random free area in AslrRegion
  - virtmemReserve still exists for legacy callers that rely on sequential
    allocation; it takes the mutex internally, so concurrent uses do not race
- Added virtmemFindStack, which searches within StackRegion instead
  - Removed virtmemReserveStack/FreeStack
- Changed shmem/thread/tmem/codememory-jit to use the new virtmem API (a usage sketch follows after this list)
  - Legacy jit still uses virtmemReserve
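The pattern all of these callers now follow is sketched below in plain C: hold the virtual memory manager mutex across the whole find-and-map sequence so no other thread can claim the address in between. This is only an illustrative sketch modeled on the shmemMap change in this diff; the helper name map_shared_memory_aslr is hypothetical, and the 0x1000 guard size is simply the value the commit uses.

#include <switch.h> // libnx umbrella header (sketch only)

// Hypothetical helper mirroring the shmemMap change: find a random free
// slice and map a shared memory object into it while holding the mutex.
static Result map_shared_memory_aslr(SharedMemory* s) {
    virtmemLock();                                  // lock around the find-and-map pair
    void* addr = virtmemFindAslr(s->size, 0x1000);  // random free slice + 0x1000 guard pages
    Result rc = svcMapSharedMemory(s->handle, addr, s->size, s->perm);
    virtmemUnlock();
    if (R_SUCCEEDED(rc))
        s->map_addr = addr;  // on failure there is nothing to free, unlike with virtmemReserve
    return rc;
}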
fincs 2020-08-06 00:45:36 +02:00
parent e655b48c41
commit a05a44fca8
6 changed files with 202 additions and 179 deletions

View File

@@ -8,9 +8,9 @@
#include "../types.h"
/**
* @brief Reserves a slice of general purpose address space.
* @param size The size of the slice of address space that will be reserved (rounded up to page alignment).
* @return Pointer to the slice of address space, or NULL on failure.
* @brief Reserves a slice of general purpose address space sequentially.
* @param size Desired size of the slice (rounded up to page alignment).
* @return Pointer to the slice of address space, or NULL on failure.
*/
void* virtmemReserve(size_t size);
@@ -21,16 +21,26 @@ void* virtmemReserve(size_t size);
*/
void virtmemFree(void* addr, size_t size);
/**
* @brief Reserves a slice of address space inside the stack memory mapping region (for use with svcMapMemory).
* @param size The size of the slice of address space that will be reserved (rounded up to page alignment).
* @return Pointer to the slice of address space, or NULL on failure.
*/
void* virtmemReserveStack(size_t size);
/// Locks the virtual memory manager mutex.
void virtmemLock(void);
/// Unlocks the virtual memory manager mutex.
void virtmemUnlock(void);
/**
* @brief Relinquishes a slice of address space reserved with virtmemReserveStack (currently no-op).
* @param addr Pointer to the slice.
* @param size Size of the slice.
* @brief Finds a random slice of free general purpose address space.
* @param size Desired size of the slice (rounded up to page alignment).
* @param guard_size Desired size of the unmapped guard areas surrounding the slice (rounded up to page alignment).
* @return Pointer to the slice of address space, or NULL on failure.
* @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
*/
void virtmemFreeStack(void* addr, size_t size);
void* virtmemFindAslr(size_t size, size_t guard_size);
/**
* @brief Finds a random slice of free stack address space.
* @param size Desired size of the slice (rounded up to page alignment).
* @param guard_size Desired size of the unmapped guard areas surrounding the slice (rounded up to page alignment).
* @return Pointer to the slice of address space, or NULL on failure.
* @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
*/
void* virtmemFindStack(size_t size, size_t guard_size);
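To illustrate the note about the mutex, a caller mapping a thread stack mirror would use virtmemFindStack roughly as follows. This is a sketch modeled on the threadCreate change later in this commit; the helper name map_stack_mirror is hypothetical, stack_mem is assumed to be a page-aligned buffer of aligned_sz bytes, and 0x4000 is the guard size threadCreate uses.

#include <switch.h> // libnx umbrella header (sketch only)

// Hypothetical helper: map a mirror of an existing stack buffer at a random
// address inside the stack region, holding the mutex across find-and-map.
static void* map_stack_mirror(void* stack_mem, size_t aligned_sz) {
    virtmemLock();                                        // required while finding and mapping
    void* mirror = virtmemFindStack(aligned_sz, 0x4000);  // random slice in StackRegion
    Result rc = svcMapMemory(mirror, stack_mem, aligned_sz);
    virtmemUnlock();
    return R_SUCCEEDED(rc) ? mirror : NULL;
}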

View File

@@ -16,7 +16,7 @@ Result jitCreate(Jit* j, size_t size)
// On [5.0.0+] this is only usable with a kernel patch, as svcControlCodeMemory now errors if it's used under the same process which owns the object.
// The homebrew loading environment is responsible for hinting the syscalls if they are available/usable for jit.
if (envIsSyscallHinted(0x4B) && envIsSyscallHinted(0x4C)) {
type = JitType_CodeMemory;
}
// Fall back to JitType_SetProcessMemoryPermission if available.
else if (envIsSyscallHinted(0x73) && envIsSyscallHinted(0x77) && envIsSyscallHinted(0x78)
@@ -38,7 +38,6 @@ Result jitCreate(Jit* j, size_t size)
j->type = type;
j->size = size;
j->src_addr = src_addr;
j->rx_addr = virtmemReserve(j->size);
j->handle = INVALID_HANDLE;
j->is_executable = 0;
@@ -47,19 +46,25 @@ Result jitCreate(Jit* j, size_t size)
switch (j->type)
{
case JitType_SetProcessMemoryPermission:
j->rx_addr = virtmemReserve(j->size);
j->rw_addr = j->src_addr;
break;
case JitType_CodeMemory:
j->rw_addr = virtmemReserve(j->size);
rc = svcCreateCodeMemory(&j->handle, j->src_addr, j->size);
if (R_SUCCEEDED(rc))
{
virtmemLock();
j->rw_addr = virtmemFindAslr(j->size, 0x1000);
rc = svcControlCodeMemory(j->handle, CodeMapOperation_MapOwner, j->rw_addr, j->size, Perm_Rw);
virtmemUnlock();
if (R_SUCCEEDED(rc))
{
virtmemLock();
j->rx_addr = virtmemFindAslr(j->size, 0x1000);
rc = svcControlCodeMemory(j->handle, CodeMapOperation_MapSlave, j->rx_addr, j->size, Perm_Rx);
virtmemUnlock();
if (R_FAILED(rc)) {
svcControlCodeMemory(j->handle, CodeMapOperation_UnmapOwner, j->rw_addr, j->size, 0);
@@ -73,7 +78,6 @@ Result jitCreate(Jit* j, size_t size)
}
if (R_FAILED(rc)) {
virtmemFree(j->rw_addr, j->size);
j->rw_addr = NULL;
}
@@ -81,7 +85,6 @@ Result jitCreate(Jit* j, size_t size)
}
if (R_FAILED(rc)) {
virtmemFree(j->rx_addr, j->size);
free(j->src_addr);
j->src_addr = NULL;
}
@@ -156,12 +159,9 @@ Result jitClose(Jit* j)
rc = svcControlCodeMemory(j->handle, CodeMapOperation_UnmapOwner, j->rw_addr, j->size, 0);
if (R_SUCCEEDED(rc)) {
virtmemFree(j->rw_addr, j->size);
rc = svcControlCodeMemory(j->handle, CodeMapOperation_UnmapSlave, j->rx_addr, j->size, 0);
if (R_SUCCEEDED(rc)) {
virtmemFree(j->rx_addr, j->size);
svcCloseHandle(j->handle);
}
}

View File

@@ -34,16 +34,14 @@ Result shmemMap(SharedMemory* s)
if (s->map_addr == NULL)
{
void* addr = virtmemReserve(s->size);
virtmemLock();
void* addr = virtmemFindAslr(s->size, 0x1000);
rc = svcMapSharedMemory(s->handle, addr, s->size, s->perm);
virtmemUnlock();
if (R_SUCCEEDED(rc)) {
s->map_addr = addr;
}
else {
virtmemFree(addr, s->size);
}
}
else {
rc = MAKERESULT(Module_Libnx, LibnxError_AlreadyMapped);
@@ -59,7 +57,6 @@ Result shmemUnmap(SharedMemory* s)
rc = svcUnmapSharedMemory(s->handle, s->map_addr, s->size);
if (R_SUCCEEDED(rc)) {
virtmemFree(s->map_addr, s->size);
s->map_addr = NULL;
}

View File

@@ -124,9 +124,11 @@ Result threadCreate(
}
// Stack size may be unaligned in either case.
virtmemLock();
const size_t aligned_stack_sz = (stack_sz + tls_sz + reent_sz +0xFFF) & ~0xFFF;
void* stack_mirror = virtmemReserveStack(aligned_stack_sz);
void* stack_mirror = virtmemFindStack(aligned_stack_sz, 0x4000);
Result rc = svcMapMemory(stack_mirror, stack_mem, aligned_stack_sz);
virtmemUnlock();
if (R_SUCCEEDED(rc))
{
@@ -179,7 +181,6 @@ Result threadCreate(
}
if (R_FAILED(rc)) {
virtmemFreeStack(stack_mirror, aligned_stack_sz);
if (owns_stack_mem) {
free(stack_mem);
}
@@ -238,7 +239,6 @@ Result threadClose(Thread* t) {
rc = svcUnmapMemory(t->stack_mirror, t->stack_mem, aligned_stack_sz);
if (R_SUCCEEDED(rc)) {
virtmemFreeStack(t->stack_mirror, aligned_stack_sz);
if (t->owns_stack_mem) {
free(t->stack_mem);
}

View File

@@ -71,16 +71,14 @@ Result tmemMap(TransferMemory* t)
if (t->map_addr == NULL)
{
void* addr = virtmemReserve(t->size);
virtmemLock();
void* addr = virtmemFindAslr(t->size, 0x1000);
rc = svcMapTransferMemory(t->handle, addr, t->size, t->perm);
virtmemUnlock();
if (R_SUCCEEDED(rc)) {
t->map_addr = addr;
}
else {
virtmemFree(addr, t->size);
}
}
else {
rc = MAKERESULT(Module_Libnx, LibnxError_AlreadyMapped);
@@ -96,7 +94,6 @@ Result tmemUnmap(TransferMemory* t)
rc = svcUnmapTransferMemory(t->handle, t->map_addr, t->size);
if (R_SUCCEEDED(rc)) {
virtmemFree(t->map_addr, t->size);
t->map_addr = NULL;
}

View File

@@ -4,26 +4,26 @@
#include "kernel/mutex.h"
#include "kernel/svc.h"
#include "kernel/virtmem.h"
#include "kernel/random.h"
#define SEQUENTIAL_GUARD_REGION_SIZE 0x1000
#define RANDOM_MAX_ATTEMPTS 0x200
typedef struct {
u64 start;
u64 end;
} VirtualRegion;
uintptr_t start;
uintptr_t end;
} MemRegion;
enum {
REGION_STACK=0,
REGION_HEAP=1,
REGION_LEGACY_ALIAS=2,
REGION_MAX
};
static Mutex g_VirtmemMutex;
static VirtualRegion g_AddressSpace;
static VirtualRegion g_Region[REGION_MAX];
static u64 g_CurrentAddr;
static u64 g_CurrentMapAddr;
static Mutex g_VirtMemMutex;
static MemRegion g_AliasRegion;
static MemRegion g_HeapRegion;
static MemRegion g_AslrRegion;
static MemRegion g_StackRegion;
static Result _GetRegionFromInfo(VirtualRegion* r, u64 id0_addr, u32 id0_sz) {
static uintptr_t g_SequentialAddr;
static Result _memregionInitWithInfo(MemRegion* r, InfoType id0_addr, InfoType id0_sz) {
u64 base;
Result rc = svcGetInfo(&base, id0_addr, CUR_PROCESS_HANDLE, 0);
@@ -40,172 +40,191 @@ static Result _GetRegionFromInfo(VirtualRegion* r, u64 id0_addr, u32 id0_sz) {
return rc;
}
static inline bool _InRegion(VirtualRegion* r, u64 addr) {
return (addr >= r->start) && (addr < r->end);
static void _memregionInitHardcoded(MemRegion* r, uintptr_t start, uintptr_t end) {
r->start = start;
r->end = end;
}
NX_INLINE bool _memregionIsInside(MemRegion* r, uintptr_t start, uintptr_t end) {
return start >= r->start && end <= r->end;
}
NX_INLINE bool _memregionOverlaps(MemRegion* r, uintptr_t start, uintptr_t end) {
return start < r->end && r->start < end;
}
NX_INLINE bool _memregionIsUnmapped(uintptr_t start, uintptr_t end, uintptr_t guard, uintptr_t* out_end) {
// Adjust start/end by the desired guard size.
start -= guard;
end += guard;
// Query memory properties.
MemoryInfo meminfo;
u32 pageinfo;
Result rc = svcQueryMemory(&meminfo, &pageinfo, start);
if (R_FAILED(rc))
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadQueryMemory));
// Return error if there's anything mapped.
uintptr_t memend = meminfo.addr + meminfo.size;
if (meminfo.type != MemType_Unmapped || end > memend) {
if (out_end) *out_end = memend + guard;
return false;
}
return true;
}
static void* _memregionFindRandom(MemRegion* r, size_t size, size_t guard_size) {
// Page align the sizes.
size = (size + 0xFFF) &~ 0xFFF;
guard_size = (guard_size + 0xFFF) &~ 0xFFF;
// Ensure the requested size isn't greater than the memory region itself...
uintptr_t region_size = r->end - r->start;
if (size > region_size)
return NULL;
// Main allocation loop.
uintptr_t aslr_max_page_offset = (region_size - size) >> 12;
for (unsigned i = 0; i < RANDOM_MAX_ATTEMPTS; i ++) {
// Calculate a random memory range outside reserved areas.
uintptr_t cur_addr;
for (;;) {
uintptr_t page_offset = (uintptr_t)randomGet64() % (aslr_max_page_offset + 1);
cur_addr = (uintptr_t)r->start + (page_offset << 12);
// Avoid mapping within the alias region.
if (_memregionOverlaps(&g_AliasRegion, cur_addr, cur_addr + size))
continue;
// Avoid mapping within the heap region.
if (_memregionOverlaps(&g_HeapRegion, cur_addr, cur_addr + size))
continue;
// Found it.
break;
}
// Check that the desired memory range is unmapped.
if (_memregionIsUnmapped(cur_addr, cur_addr + size, guard_size, NULL))
return (void*)cur_addr; // we found a suitable address!
}
return NULL;
}
void virtmemSetup(void) {
if (R_FAILED(_GetRegionFromInfo(&g_AddressSpace, InfoType_AslrRegionAddress, InfoType_AslrRegionSize))) {
// [1.0.0] doesn't expose address space size so we have to do this dirty hack to detect it.
Result rc;
// Retrieve memory region information for the reserved alias region.
rc = _memregionInitWithInfo(&g_AliasRegion, InfoType_AliasRegionAddress, InfoType_AliasRegionSize);
if (R_FAILED(rc)) {
// Wat.
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_WeirdKernel));
}
// Retrieve memory region information for the reserved heap region.
rc = _memregionInitWithInfo(&g_HeapRegion, InfoType_HeapRegionAddress, InfoType_HeapRegionSize);
if (R_FAILED(rc)) {
// Wat.
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadGetInfo_Heap));
}
// Retrieve memory region information for the aslr/stack regions if available [2.0.0+]
rc = _memregionInitWithInfo(&g_AslrRegion, InfoType_AslrRegionAddress, InfoType_AslrRegionSize);
if (R_SUCCEEDED(rc)) {
rc = _memregionInitWithInfo(&g_StackRegion, InfoType_StackRegionAddress, InfoType_StackRegionSize);
if (R_FAILED(rc))
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadGetInfo_Stack));
}
else {
// [1.0.0] doesn't expose aslr/stack region information so we have to do this dirty hack to detect it.
// Forgive me.
Result rc = svcUnmapMemory((void*) 0xFFFFFFFFFFFFE000ULL, (void*) 0xFFFFFE000ull, 0x1000);
if (rc == 0xD401) {
rc = svcUnmapMemory((void*)0xFFFFFFFFFFFFE000UL, (void*)0xFFFFFE000UL, 0x1000);
if (rc == KERNELRESULT(InvalidMemoryState)) {
// Invalid src-address error means that a valid 36-bit address was rejected.
// Thus we are 32-bit.
g_AddressSpace.start = 0x200000ull;
g_AddressSpace.end = 0x100000000ull;
g_Region[REGION_STACK].start = 0x200000ull;
g_Region[REGION_STACK].end = 0x40000000ull;
_memregionInitHardcoded(&g_AslrRegion, 0x200000ull, 0x100000000ull);
_memregionInitHardcoded(&g_StackRegion, 0x200000ull, 0x40000000ull);
}
else if (rc == 0xDC01) {
else if (rc == KERNELRESULT(InvalidMemoryRange)) {
// Invalid dst-address error means our 36-bit src-address was valid.
// Thus we are 36-bit.
g_AddressSpace.start = 0x8000000ull;
g_AddressSpace.end = 0x1000000000ull;
g_Region[REGION_STACK].start = 0x8000000ull;
g_Region[REGION_STACK].end = 0x80000000ull;
_memregionInitHardcoded(&g_AslrRegion, 0x8000000ull, 0x1000000000ull);
_memregionInitHardcoded(&g_StackRegion, 0x8000000ull, 0x80000000ull);
}
else {
// Wat.
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_WeirdKernel));
}
} else {
if (R_FAILED(_GetRegionFromInfo(&g_Region[REGION_STACK], InfoType_StackRegionAddress, InfoType_StackRegionSize))) {
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadGetInfo_Stack));
}
}
if (R_FAILED(_GetRegionFromInfo(&g_Region[REGION_HEAP], InfoType_HeapRegionAddress, InfoType_HeapRegionSize))) {
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadGetInfo_Heap));
}
_GetRegionFromInfo(&g_Region[REGION_LEGACY_ALIAS], InfoType_AliasRegionAddress, InfoType_AliasRegionSize);
}
void* virtmemReserve(size_t size) {
Result rc;
MemoryInfo meminfo;
u32 pageinfo;
size_t i;
// Page align the size
size = (size + 0xFFF) &~ 0xFFF;
mutexLock(&g_VirtMemMutex);
u64 addr = g_CurrentAddr;
// Main allocation loop
mutexLock(&g_VirtmemMutex);
uintptr_t cur_addr = g_SequentialAddr;
void* ret = NULL;
for (;;) {
// Roll over if we reached the end.
if (!_memregionIsInside(&g_AslrRegion, cur_addr, cur_addr + size))
cur_addr = g_AslrRegion.start;
while (1)
{
// Add a guard page.
addr += 0x1000;
// If we go outside address space, let's go back to start.
if (!_InRegion(&g_AddressSpace, addr)) {
addr = g_AddressSpace.start;
}
// Query information about address.
rc = svcQueryMemory(&meminfo, &pageinfo, addr);
if (R_FAILED(rc)) {
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadQueryMemory));
}
if (meminfo.type != 0) {
// Address is already taken, let's move past it.
addr = meminfo.addr + meminfo.size;
// Avoid mapping within the alias region.
if (_memregionOverlaps(&g_AliasRegion, cur_addr, cur_addr + size)) {
cur_addr = g_AliasRegion.end;
continue;
}
if (addr + size > meminfo.addr + meminfo.size) {
// We can't fit in this region, let's move past it.
addr = meminfo.addr + meminfo.size;
// Avoid mapping within the heap region.
if (_memregionOverlaps(&g_HeapRegion, cur_addr, cur_addr + size)) {
cur_addr = g_HeapRegion.end;
continue;
}
// Check if we end up in a reserved region.
for(i=0; i<REGION_MAX; i++)
{
u64 end = addr + size - 1;
if (_InRegion(&g_Region[i], addr) || _InRegion(&g_Region[i], end)) {
break;
}
}
// Did we?
if (i != REGION_MAX) {
addr = g_Region[i].end;
// Avoid mapping within the stack region.
if (_memregionOverlaps(&g_StackRegion, cur_addr, cur_addr + size)) {
cur_addr = g_StackRegion.end;
continue;
}
// Not in a reserved region, we're good to go!
// Avoid mapping in areas that are already used.
if (!_memregionIsUnmapped(cur_addr, cur_addr + size, SEQUENTIAL_GUARD_REGION_SIZE, &cur_addr))
continue;
// We found a suitable address for the block.
g_SequentialAddr = cur_addr + size + SEQUENTIAL_GUARD_REGION_SIZE;
ret = (void*)cur_addr;
break;
}
mutexUnlock(&g_VirtmemMutex);
g_CurrentAddr = addr + size;
mutexUnlock(&g_VirtMemMutex);
return (void*) addr;
return ret;
}
void virtmemFree(void* addr, size_t size) {
IGNORE_ARG(addr);
IGNORE_ARG(size);
}
void* virtmemReserveStack(size_t size)
{
Result rc;
MemoryInfo meminfo;
u32 pageinfo;
size = (size + 0xFFF) &~ 0xFFF;
mutexLock(&g_VirtMemMutex);
u64 addr = g_CurrentMapAddr;
while (1)
{
// Add a guard page.
addr += 0x1000;
// Make sure we stay inside the reserved map region.
if (!_InRegion(&g_Region[REGION_STACK], addr)) {
addr = g_Region[REGION_STACK].start;
}
// Query information about address.
rc = svcQueryMemory(&meminfo, &pageinfo, addr);
if (R_FAILED(rc)) {
fatalThrow(MAKERESULT(Module_Libnx, LibnxError_BadQueryMemory));
}
if (meminfo.type != 0) {
// Address is already taken, let's move past it.
addr = meminfo.addr + meminfo.size;
continue;
}
if (addr + size > meminfo.addr + meminfo.size) {
// We can't fit in this region, let's move past it.
addr = meminfo.addr + meminfo.size;
continue;
}
break;
}
g_CurrentMapAddr = addr + size;
mutexUnlock(&g_VirtMemMutex);
return (void*) addr;
void virtmemLock(void) {
mutexLock(&g_VirtmemMutex);
}
void virtmemFreeStack(void* addr, size_t size) {
IGNORE_ARG(addr);
IGNORE_ARG(size);
void virtmemUnlock(void) {
mutexUnlock(&g_VirtmemMutex);
}
void* virtmemFindAslr(size_t size, size_t guard_size) {
if (!mutexIsLockedByCurrentThread(&g_VirtmemMutex)) return NULL;
return _memregionFindRandom(&g_AslrRegion, size, guard_size);
}
void* virtmemFindStack(size_t size, size_t guard_size) {
if (!mutexIsLockedByCurrentThread(&g_VirtmemMutex)) return NULL;
return _memregionFindRandom(&g_StackRegion, size, guard_size);
}