kern: improve resource region size definitions/calculations

This commit is contained in:
Michael Scire 2020-08-17 16:45:41 -07:00
parent 959122a849
commit 5bc3307ffa
5 changed files with 45 additions and 5 deletions

View File

@@ -74,6 +74,10 @@ namespace ams::kern::arch::arm64::init {
static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) { static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address) {
ClearPhysicalMemory(address, PageSize); ClearPhysicalMemory(address, PageSize);
} }
public:
/* Compute the worst-case page-table overhead needed to map a region of the given size. */
/* One page is reserved per L1 block (for its L2 table) and one per L2 block (for its L3 table). */
static consteval size_t GetMaximumOverheadSize(size_t size) {
    const size_t l2_table_pages = util::DivideUp(size, L1BlockSize);
    const size_t l3_table_pages = util::DivideUp(size, L2BlockSize);
    return PageSize * (l2_table_pages + l3_table_pages);
}
private: private:
size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) { size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) {
const KVirtualAddress end_virt_addr = virt_addr + size; const KVirtualAddress end_virt_addr = virt_addr + size;

View File

@@ -20,7 +20,7 @@
namespace ams::kern { namespace ams::kern {
constexpr u32 InitialProcessBinaryMagic = util::FourCC<'I','N','I','1'>::Code; constexpr u32 InitialProcessBinaryMagic = util::FourCC<'I','N','I','1'>::Code;
constexpr size_t InitialProcessBinarySizeMax = 0xC00000; constexpr size_t InitialProcessBinarySizeMax = 12_MB;
struct InitialProcessBinaryHeader { struct InitialProcessBinaryHeader {
u32 magic; u32 magic;

View File

@@ -39,6 +39,19 @@ namespace ams::kern {
constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul; constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul;
constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase; constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
constexpr size_t KernelPageTableHeapSize = init::KInitialPageTable::GetMaximumOverheadSize(8_GB);
constexpr size_t KernelInitialPageHeapSize = 128_KB;
constexpr size_t KernelSlabHeapDataSize = 5_MB;
constexpr size_t KernelSlabHeapGapsSize = 2_MB - 64_KB;
constexpr size_t KernelSlabHeapGapsSizeDeprecated = 2_MB;
constexpr size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSize;
/* NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x860. */
constexpr size_t KernelSlabHeapAdditionalSize = 0x68000;
constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize;
enum KMemoryRegionType : u32 { enum KMemoryRegionType : u32 {
KMemoryRegionAttr_CarveoutProtected = 0x04000000, KMemoryRegionAttr_CarveoutProtected = 0x04000000,
KMemoryRegionAttr_DidKernelMap = 0x08000000, KMemoryRegionAttr_DidKernelMap = 0x08000000,
@@ -544,6 +557,7 @@ namespace ams::kern {
} }
static void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start); static void InitializeLinearMemoryRegionTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start);
static size_t GetResourceRegionSizeForInit();
static NOINLINE auto GetKernelRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); } static NOINLINE auto GetKernelRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); }
static NOINLINE auto GetKernelCodeRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); } static NOINLINE auto GetKernelCodeRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); }

View File

@@ -71,8 +71,12 @@ namespace ams::kern::init {
constexpr size_t SlabCountExtraKThread = 160; constexpr size_t SlabCountExtraKThread = 160;
/* This is used for gaps between the slab allocators. */ namespace test {
constexpr size_t SlabRegionReservedSize = 2_MB - 64_KB;
constexpr size_t RequiredSizeForExtraThreadCount = SlabCountExtraKThread * (sizeof(KThread) + (sizeof(KLinkedListNode) * 17) + (sizeof(KThreadLocalPage) / 8) + sizeof(KEventInfo));
static_assert(RequiredSizeForExtraThreadCount <= KernelSlabHeapAdditionalSize);
}
/* Global to hold our resource counts. */ /* Global to hold our resource counts. */
KSlabResourceCounts g_slab_resource_counts = { KSlabResourceCounts g_slab_resource_counts = {
@@ -121,6 +125,10 @@ namespace ams::kern::init {
} }
} }
/* Return the total size of the randomized gap region between slab allocators. */
/* Firmware 10.0.0 shrank the gap region; earlier firmware uses the deprecated size. */
size_t CalculateSlabHeapGapSize() {
    if (kern::GetTargetFirmware() >= TargetFirmware_10_0_0) {
        return KernelSlabHeapGapsSize;
    } else {
        return KernelSlabHeapGapsSizeDeprecated;
    }
}
size_t CalculateTotalSlabHeapSize() { size_t CalculateTotalSlabHeapSize() {
size_t size = 0; size_t size = 0;
@@ -135,7 +143,7 @@ namespace ams::kern::init {
#undef ADD_SLAB_SIZE #undef ADD_SLAB_SIZE
/* Add the reserved size. */ /* Add the reserved size. */
size += SlabRegionReservedSize; size += CalculateSlabHeapGapSize();
return size; return size;
} }
@@ -175,11 +183,12 @@ namespace ams::kern::init {
} }
/* Create an array to represent the gaps between the slabs. */ /* Create an array to represent the gaps between the slabs. */
const size_t total_gap_size = CalculateSlabHeapGapSize();
size_t slab_gaps[util::size(slab_types)]; size_t slab_gaps[util::size(slab_types)];
for (size_t i = 0; i < util::size(slab_gaps); i++) { for (size_t i = 0; i < util::size(slab_gaps); i++) {
/* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */ /* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */
/* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. */ /* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. */
slab_gaps[i] = KSystemControl::GenerateRandomRange(0, SlabRegionReservedSize); slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
} }
/* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */ /* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */

View File

@@ -168,6 +168,19 @@ namespace ams::kern {
} }
} }
/* Determine the size of the kernel resource region for initialization. */
size_t KMemoryLayout::GetResourceRegionSizeForInit() {
    /* Start from the baseline resource size (page table heap + initial page heap + slab heap). */
    size_t size = KernelResourceSize;

    /* Reserve additional slab heap space when extra threads are enabled. */
    if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) {
        size += KernelSlabHeapAdditionalSize;
    }

    /* Firmware before 10.0.0 uses the larger (deprecated) slab heap gap size; account for the difference. */
    if (kern::GetTargetFirmware() < ams::TargetFirmware_10_0_0) {
        size += (KernelSlabHeapGapsSizeDeprecated - KernelSlabHeapGapsSize);
    }

    return size;
}
namespace init { namespace init {
namespace { namespace {