diff --git a/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 805b967d..b653da7b 100644 --- a/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -110,7 +110,7 @@ namespace ams::kern::arch::arm64::init { L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr); /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */ - if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) { + if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) { MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize); virt_addr += L1BlockSize; @@ -126,7 +126,7 @@ namespace ams::kern::arch::arm64::init { /* Table, so check if we're mapped in L2. */ L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); - if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) { + if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) { const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize; MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size)); MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size); @@ -144,7 +144,7 @@ namespace ams::kern::arch::arm64::init { L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); /* L3 must be block or empty. */ - MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty()); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty()); const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? 
L3ContiguousBlockSize : L3BlockSize; MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size)); @@ -164,7 +164,7 @@ namespace ams::kern::arch::arm64::init { L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr); /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */ - if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) { + if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) { MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize); if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) { @@ -182,7 +182,7 @@ namespace ams::kern::arch::arm64::init { /* Table, so check if we're mapped in L2. */ L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); - if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) { + if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) { const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize; MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size)); MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size); @@ -202,7 +202,7 @@ namespace ams::kern::arch::arm64::init { L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); /* L3 must be block or empty. */ - MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty()); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty()); const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? 
L3ContiguousBlockSize : L3BlockSize; MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size)); diff --git a/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp index 6fce69f8..ed32262f 100644 --- a/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp +++ b/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp @@ -122,7 +122,7 @@ namespace ams::kern::arch::arm64 { /* Construct a new attribute. */ constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m) - : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m)) + : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(m)) { /* ... */ } @@ -205,6 +205,7 @@ namespace ams::kern::arch::arm64 { constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; } constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; } + constexpr ALWAYS_INLINE bool IsMappedEmpty() const { return this->GetBits(0, 2) == 0; } constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; } constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; } diff --git a/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp index 32dfb00e..150aa1d1 100644 --- a/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp +++ b/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp @@ -21,12 +21,6 @@ namespace ams::kern::arch::arm64 { m_table = static_cast<L1PageTableEntry *>(tb); m_is_kernel = true; m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; - } - - void 
KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) { m_table = static_cast<L1PageTableEntry *>(tb); m_is_kernel = false; m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; /* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */ PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table }; @@ -68,7 +62,6 @@ namespace ams::kern::arch::arm64 { /* Advance. */ while (true) { /* Advance to the next entry at the current level. */ - ++level_entries[level]; if (!util::IsAligned(reinterpret_cast<uintptr_t>(++level_entries[level]), PageSize)) { break; } @@ -83,6 +76,12 @@ namespace ams::kern::arch::arm64 { } } + void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) { + m_table = static_cast<L1PageTableEntry *>(tb); + m_is_kernel = false; + m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize; + } + L1PageTableEntry *KPageTableImpl::Finalize() { return m_table; }