diff --git a/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 48af48ef..44f6226c 100644 --- a/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -394,6 +394,77 @@ namespace ams::kern::arch::arm64::init { return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1)); } + KPhysicalAddress GetPhysicalAddressOfRandomizedRange(KVirtualAddress virt_addr, size_t size) const { + /* Define tracking variables for ourselves to use. */ + KPhysicalAddress min_phys_addr = Null<KPhysicalAddress>; + KPhysicalAddress max_phys_addr = Null<KPhysicalAddress>; + + /* Ensure the range we're querying is valid. */ + const KVirtualAddress end_virt_addr = virt_addr + size; + if (virt_addr > end_virt_addr) { + MESOSPHERE_INIT_ABORT_UNLESS(size == 0); + return min_phys_addr; + } + + auto UpdateExtents = [&](const KPhysicalAddress block, size_t block_size) ALWAYS_INLINE_LAMBDA { + /* Ensure that we are allowed to have the block here. */ + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), block_size)); + MESOSPHERE_INIT_ABORT_UNLESS(block_size <= GetInteger(end_virt_addr) - GetInteger(virt_addr)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), block_size)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, block_size)); + + const KPhysicalAddress block_end = block + block_size; + + /* We want to update min phys addr when it's 0 or > block. */ + /* This is equivalent in two's complement to (n - 1) >= block. */ + if ((GetInteger(min_phys_addr) - 1) >= GetInteger(block)) { + min_phys_addr = block; + } + + /* Update max phys addr when it's 0 or < block_end. */ + if (GetInteger(max_phys_addr) < GetInteger(block_end) || GetInteger(max_phys_addr) == 0) { + max_phys_addr = block_end; + } + + /* Traverse onwards. 
*/ + virt_addr += block_size; + }; + + while (virt_addr < end_virt_addr) { + L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr); + + /* If an L1 block is mapped, update. */ + if (l1_entry->IsBlock()) { + UpdateExtents(l1_entry->GetBlock(), L1BlockSize); + continue; + } + + /* Not a block, so we must have a table. */ + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable()); + + L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); + if (l2_entry->IsBlock()) { + UpdateExtents(l2_entry->GetBlock(), l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize); + continue; + } + + /* Not a block, so we must have a table. */ + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable()); + + /* We must have a mapped l3 entry to inspect. */ + L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock()); + + UpdateExtents(l3_entry->GetBlock(), l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize); + } + + /* Ensure we got the right range. */ + MESOSPHERE_INIT_ABORT_UNLESS(GetInteger(max_phys_addr) - GetInteger(min_phys_addr) == size); + + /* Write the address that we found. */ + return min_phys_addr; + } + bool IsFree(KVirtualAddress virt_addr, size_t size) { /* Ensure that addresses and sizes are page aligned. 
*/ MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); diff --git a/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index 03311a65..5cd51a91 100644 --- a/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -156,6 +156,7 @@ namespace ams::kern::arch::arm64::cpu { void ClearPageToZeroImpl(void *); void FlushEntireDataCacheSharedForInit(); void FlushEntireDataCacheLocalForInit(); + void StoreEntireCacheForInit(); void FlushEntireDataCache(); diff --git a/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp b/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp index 9ed30784..f1997ecb 100644 --- a/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp +++ b/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp @@ -26,14 +26,14 @@ namespace ams::kern::arch::arm64 { constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... 
*/ } void Lock() { - u32 tmp0, tmp1; + u32 tmp0, tmp1, tmp2; __asm__ __volatile__( " prfm pstl1keep, %[packed_tickets]\n" "1:\n" " ldaxr %w[tmp0], %[packed_tickets]\n" - " add %w[tmp0], %w[tmp0], #0x10000\n" - " stxr %w[tmp1], %w[tmp0], %[packed_tickets]\n" + " add %w[tmp2], %w[tmp0], #0x10000\n" + " stxr %w[tmp1], %w[tmp2], %[packed_tickets]\n" " cbnz %w[tmp1], 1b\n" " \n" " and %w[tmp1], %w[tmp0], #0xFFFF\n" @@ -46,7 +46,7 @@ namespace ams::kern::arch::arm64 { " cmp %w[tmp1], %w[tmp0], lsr #16\n" " b.ne 2b\n" "3:\n" - : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets) + : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [packed_tickets]"+Q"(this->packed_tickets) : : "cc", "memory" ); diff --git a/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp b/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp index f75e4a70..56ef731e 100644 --- a/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp +++ b/libmesosphere/include/mesosphere/kern_k_page_bitmap.hpp @@ -51,11 +51,11 @@ namespace ams::kern { u64 selected = 0; u64 cur_num_bits = BITSIZEOF(bitmap) / 2; - u64 cur_mask = (1ull << cur_num_bits) / 2; + u64 cur_mask = (1ull << cur_num_bits) - 1; while (cur_num_bits) { - const u64 high = (bitmap >> 0) & cur_mask; - const u64 low = (bitmap >> cur_num_bits) & cur_mask; + const u64 low = (bitmap >> 0) & cur_mask; + const u64 high = (bitmap >> cur_num_bits) & cur_mask; bool choose_low; if (high == 0) { diff --git a/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libmesosphere/source/arch/arm64/kern_cpu.cpp index 733ba174..60fd0473 100644 --- a/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -333,6 +333,13 @@ namespace ams::kern::arch::arm64::cpu { return PerformCacheOperationBySetWayLocal(FlushDataCacheLineBySetWayImpl); } + void StoreEntireCacheForInit() { + PerformCacheOperationBySetWayLocal(StoreDataCacheLineBySetWayImpl); + 
PerformCacheOperationBySetWayShared(StoreDataCacheLineBySetWayImpl); + DataSynchronizationBarrierInnerShareable(); + InvalidateEntireInstructionCache(); + } + void FlushEntireDataCache() { return PerformCacheOperationBySetWayShared(FlushDataCacheLineBySetWayImpl); } diff --git a/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp b/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp index 829be5ab..cdeb7790 100644 --- a/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp +++ b/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp @@ -324,7 +324,7 @@ namespace ams::kern::board::nintendo::nx { EnsureRandomGeneratorInitialized(); - return GenerateRandomU64(); + return GenerateRandomU64FromGenerator(); } void KSystemControl::SleepSystem() {