mirror of https://github.com/switchbrew/libnx.git
virtmem: Add virtmemFindCodeMemory to work around a quirk in 1.x kernel
parent eecf2a9d32
commit 490d96e6eb
@@ -44,3 +44,12 @@ void* virtmemFindAslr(size_t size, size_t guard_size);
  * @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
  */
 void* virtmemFindStack(size_t size, size_t guard_size);
+
+/**
+ * @brief Finds a random slice of free code memory address space.
+ * @param size Desired size of the slice (rounded up to page alignment).
+ * @param guard_size Desired size of the unmapped guard areas surrounding the slice (rounded up to page alignment).
+ * @return Pointer to the slice of address space, or NULL on failure.
+ * @note The virtual memory manager mutex must be held during the find-and-map process (see \ref virtmemLock and \ref virtmemUnlock).
+ */
+void* virtmemFindCodeMemory(size_t size, size_t guard_size);
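For reference, a minimal usage sketch of the new function (not part of this commit), following the @note above: the virtual memory manager mutex is held across the whole find-and-map step, and the actual mapping call is left as a placeholder since it depends on the caller. The sizes shown are arbitrary examples.

    virtmemLock();                                          // hold the virtmem mutex for the find-and-map process
    void* slice = virtmemFindCodeMemory(0x10000, 0x1000);   // e.g. 64 KiB slice with 4 KiB guard areas
    if (slice != NULL) {
        // ... map code memory at `slice` here, while the mutex is still held ...
    }
    virtmemUnlock();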
@@ -22,6 +22,7 @@ static MemRegion g_AslrRegion;
 static MemRegion g_StackRegion;
 
 static uintptr_t g_SequentialAddr;
+static bool g_IsLegacyKernel;
 
 static Result _memregionInitWithInfo(MemRegion* r, InfoType id0_addr, InfoType id0_sz) {
     u64 base;
@@ -141,6 +142,7 @@ void virtmemSetup(void) {
     else {
         // [1.0.0] doesn't expose aslr/stack region information so we have to do this dirty hack to detect it.
         // Forgive me.
+        g_IsLegacyKernel = true;
         rc = svcUnmapMemory((void*)0xFFFFFFFFFFFFE000UL, (void*)0xFFFFFE000UL, 0x1000);
         if (rc == KERNELRESULT(InvalidMemoryState)) {
             // Invalid src-address error means that a valid 36-bit address was rejected.
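A compressed sketch of the detection trick above, with the interpretation hedged: the visible context only states that an invalid-src-address error means a valid 36-bit address was rejected, so the branch comments below are an assumption about how the result is used.

    // Probe with a src address (0xFFFFFE000) that is only valid on an address space of at least 36 bits.
    Result rc = svcUnmapMemory((void*)0xFFFFFFFFFFFFE000UL, (void*)0xFFFFFE000UL, 0x1000);
    if (rc == KERNELRESULT(InvalidMemoryState)) {
        // src address rejected: presumably the smaller legacy address-space layout
    } else {
        // src address accepted (the call still fails, but for a different reason): the 36-bit layout
    }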
@@ -228,3 +230,9 @@ void* virtmemFindStack(size_t size, size_t guard_size) {
     if (!mutexIsLockedByCurrentThread(&g_VirtmemMutex)) return NULL;
     return _memregionFindRandom(&g_StackRegion, size, guard_size);
 }
+
+void* virtmemFindCodeMemory(size_t size, size_t guard_size) {
+    if (!mutexIsLockedByCurrentThread(&g_VirtmemMutex)) return NULL;
+    // [1.0.0] requires CodeMemory to be mapped within the stack region.
+    return _memregionFindRandom(g_IsLegacyKernel ? &g_StackRegion : &g_AslrRegion, size, guard_size);
+}
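Finally, a hedged sketch of how a consumer such as a JIT might combine the new function with the kernel CodeMemory SVCs; the buffer and size names are illustrative placeholders and error handling is elided.

    Handle code_handle;
    Result rc = svcCreateCodeMemory(&code_handle, rw_src_buffer, code_size);  // rw_src_buffer/code_size: assumed, page-aligned
    if (R_SUCCEEDED(rc)) {
        virtmemLock();                                                        // find-and-map under the virtmem mutex
        void* rx_addr = virtmemFindCodeMemory(code_size, 0x1000);
        if (rx_addr)
            rc = svcControlCodeMemory(code_handle, CodeMapOperation_MapSlave,
                                      rx_addr, code_size, Perm_Rx);           // map the executable view at the found slice
        virtmemUnlock();
    }

On 1.x the returned slice lies inside the stack region, on later kernels inside the ASLR region; either way the caller does not need to distinguish the two.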