Cache version values, fix guard paging of threads on 2.0.0+

plutoo 2018-01-01 22:28:12 +01:00
parent 91afc81ce1
commit 5cad71213b
3 changed files with 35 additions and 15 deletions

View File

@@ -48,8 +48,8 @@ Result threadCreate(
     }
     else {
         // todo: svcMapMemory returns 0xDC01
-        void* stack_mirror = stack;//virtmemReserveMap(stack_sz);
-        //rc = svcMapMemory(stack_mirror, stack, stack_sz);
+        void* stack_mirror = virtmemReserveMap(stack_sz);
+        rc = svcMapMemory(stack_mirror, stack, stack_sz);
 
         if (R_SUCCEEDED(rc))
         {
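This hunk is the guard-paging fix from the commit title: instead of handing the new thread the caller's stack buffer directly, the stack is now mirrored into a reserved map window via svcMapMemory. The previously commented-out call failed with 0xDC01, presumably because the mirror was being placed in the wrong region; the virtmem changes below select the proper region per kernel version. A minimal sketch of the pattern, assuming the threadCreate locals stack and stack_sz from the surrounding code:

    // Sketch of the mirror pattern enabled above; `stack` and `stack_sz`
    // are the page-aligned stack buffer and size from threadCreate.
    void* stack_mirror = virtmemReserveMap(stack_sz);        // find a free window
    Result rc = svcMapMemory(stack_mirror, stack, stack_sz); // create the mirror

    if (R_SUCCEEDED(rc)) {
        // ... start the thread with stack_mirror as its stack ...
    }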

View File

@@ -1,17 +1,35 @@
 // Copyright 2017 plutoo
 #include <switch.h>
 
-bool kernelAbove200() {
-    u64 tmp;
-    return svcGetInfo(&tmp, 12, INVALID_HANDLE, 0) != 0xF001;
+static bool g_IsAbove200;
+static bool g_IsAbove300;
+static bool g_IsAbove400;
+static bool g_HasCached = 0;
+
+static void _CacheValues()
+{
+    // This is actually thread safe, might cache twice but that's fine.
+    if (!g_HasCached)
+    {
+        u64 tmp;
+        g_IsAbove200 = (svcGetInfo(&tmp, 12, INVALID_HANDLE, 0) != 0xF001);
+        g_IsAbove300 = (svcGetInfo(&tmp, 18, INVALID_HANDLE, 0) != 0xF001);
+        g_IsAbove400 = (svcGetInfo(&tmp, 19, INVALID_HANDLE, 0) != 0xF001);
+        g_HasCached = true;
+    }
+}
+
+bool kernelAbove200() {
+    _CacheValues();
+    return g_IsAbove200;
 }
 
 bool kernelAbove300() {
-    u64 tmp;
-    return svcGetInfo(&tmp, 18, INVALID_HANDLE, 0) != 0xF001;
+    _CacheValues();
+    return g_IsAbove300;
 }
 
 bool kernelAbove400() {
-    u64 tmp;
-    return svcGetInfo(&tmp, 19, INVALID_HANDLE, 0) != 0xF001;
+    _CacheValues();
+    return g_IsAbove400;
 }
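Caching turns every check after the first into a load and a branch instead of an svcGetInfo syscall, and the race flagged in the comment is benign: concurrent first callers compute identical values, and g_HasCached only ever transitions to true. A hypothetical call site built on the three public checks:

    #include <switch.h>

    // Hypothetical caller: the first call triggers _CacheValues(),
    // subsequent calls just read the cached booleans.
    static const char* kernelVersionLabel(void)
    {
        if (kernelAbove400()) return "4.0.0+";
        if (kernelAbove300()) return "3.0.0+";
        if (kernelAbove200()) return "2.0.0+";
        return "1.0.0";
    }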

View File

@@ -6,9 +6,9 @@ typedef struct {
 } VirtualRegion;
 
 enum {
-    REGION_MAP=0,
+    REGION_STACK=0,
     REGION_HEAP=1,
-    REGION_UNK=2,
+    REGION_NEW_STACK=2,
     REGION_MAX
 };
@@ -46,7 +46,7 @@ void virtmemSetup() {
         g_AddressSpace.end = 0x100000000ull;
     }
 
-    if (R_FAILED(_GetRegionFromInfo(&g_Region[REGION_MAP], 2, 3))) {
+    if (R_FAILED(_GetRegionFromInfo(&g_Region[REGION_STACK], 2, 3))) {
         fatalSimple(MAKERESULT(MODULE_LIBNX, LIBNX_BADGETINFO));
     }
@@ -56,7 +56,7 @@ void virtmemSetup() {
 
     // Failure is OK, happens on 1.0.0
     // In that case, g_UnkRegion will default to (0, 0).
-    _GetRegionFromInfo(&g_Region[REGION_UNK], 14, 15);
+    _GetRegionFromInfo(&g_Region[REGION_NEW_STACK], 14, 15);
 }
 
 void* virtmemReserve(size_t size) {
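virtmemSetup fills the region table from pairs of svcGetInfo ids: 2/3 describe the original map region (REGION_STACK after the rename) and 14/15 the region introduced in 2.0.0 (REGION_NEW_STACK), which is why that lookup is allowed to fail on 1.0.0. A sketch of what a _GetRegionFromInfo-style helper plausibly looks like, not the verbatim libnx code; the start/end fields match the VirtualRegion struct above:

    // Sketch of a _GetRegionFromInfo-style helper: reads a region's base
    // and size via svcGetInfo and fills in the start/end fields.
    static Result _GetRegionFromInfo(VirtualRegion* r, u64 id_base, u64 id_size)
    {
        u64 base = 0, size = 0;
        Result rc = svcGetInfo(&base, id_base, CUR_PROCESS_HANDLE, 0);

        if (R_SUCCEEDED(rc))
            rc = svcGetInfo(&size, id_size, CUR_PROCESS_HANDLE, 0);

        if (R_SUCCEEDED(rc)) {
            r->start = base;
            r->end   = base + size;
        }

        return rc;
    }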
@@ -137,6 +137,8 @@ void* virtmemReserveMap(size_t size)
     size = (size + 0xFFF) &~ 0xFFF;
 
+    int region_idx = kernelAbove200() ? REGION_NEW_STACK : REGION_STACK;
+
     u64 addr = g_CurrentMapAddr;
 
     while (1)
     {
@@ -144,8 +146,8 @@ void* virtmemReserveMap(size_t size)
         addr += 0x1000;
 
         // Make sure we stay inside the reserved map region.
-        if (!_InRegion(&g_Region[REGION_MAP], addr)) {
-            addr = g_Region[REGION_MAP].start;
+        if (!_InRegion(&g_Region[region_idx], addr)) {
+            addr = g_Region[region_idx].start;
         }
 
         // Query information about address.
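These last two hunks make the scan honor the per-version region choice computed earlier. Filling in the part of the loop the diff elides, a condensed sketch of the whole reservation scan, assuming svcQueryMemory as the free-space probe and MemType_Unmapped as the free marker:

    // Condensed sketch of the reservation scan, under the assumptions
    // stated above; the real loop's bookkeeping may differ.
    u64 addr = g_CurrentMapAddr;

    while (1) {
        addr += 0x1000;

        // Wrap around so we stay inside the selected region.
        if (!_InRegion(&g_Region[region_idx], addr))
            addr = g_Region[region_idx].start;

        // Query information about the candidate address.
        MemoryInfo meminfo;
        u32 pageinfo;
        if (R_FAILED(svcQueryMemory(&meminfo, &pageinfo, addr)))
            return NULL; // assumed failure path

        // Unmapped and big enough: take it and remember the cursor.
        if (meminfo.type == MemType_Unmapped
                && addr + size <= meminfo.addr + meminfo.size) {
            g_CurrentMapAddr = addr + size;
            return (void*) addr;
        }

        // Otherwise jump past the mapping that blocked us and retry.
        // (Handling of a fully exhausted region is elided in this sketch.)
        addr = meminfo.addr + meminfo.size;
    }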