/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <mesosphere.hpp>

namespace ams::kern::arch::arm64 {

    void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) {
        m_table       = static_cast<L1PageTableEntry *>(tb);
        m_is_kernel   = true;
        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
    }

    void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
        m_table       = static_cast<L1PageTableEntry *>(tb);
        m_is_kernel   = false;
        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
    }

    L1PageTableEntry *KPageTableImpl::Finalize() {
        return m_table;
    }

    // bool KPageTableImpl::ExtractL3Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L3PageTableEntry *l3_entry, KProcessAddress virt_addr) const {
    //     /* Set the L3 entry. */
    //     out_context->l3_entry = l3_entry;
    //
    //     if (l3_entry->IsBlock()) {
    //         /* Set the output entry. */
    //         out_entry->phys_addr = l3_entry->GetBlock() + (virt_addr & (L3BlockSize - 1));
    //         if (l3_entry->IsContiguous()) {
    //             out_entry->block_size = L3ContiguousBlockSize;
    //         } else {
    //             out_entry->block_size = L3BlockSize;
    //         }
    //         out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
    //         out_entry->attr             = 0;
    //
    //         return true;
    //     } else {
    //         out_entry->phys_addr        = Null<KPhysicalAddress>;
    //         out_entry->block_size       = L3BlockSize;
    //         out_entry->sw_reserved_bits = 0;
    //         out_entry->attr             = 0;
    //         return false;
    //     }
    // }
    //
    // bool KPageTableImpl::ExtractL2Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L2PageTableEntry *l2_entry, KProcessAddress virt_addr) const {
    //     /* Set the L2 entry. */
    //     out_context->l2_entry = l2_entry;
    //
    //     if (l2_entry->IsBlock()) {
    //         /* Set the output entry. */
    //         out_entry->phys_addr = l2_entry->GetBlock() + (virt_addr & (L2BlockSize - 1));
    //         if (l2_entry->IsContiguous()) {
    //             out_entry->block_size = L2ContiguousBlockSize;
    //         } else {
    //             out_entry->block_size = L2BlockSize;
    //         }
    //         out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
    //         out_entry->attr             = 0;
    //
    //         /* Set the output context. */
    //         out_context->l3_entry = nullptr;
    //         return true;
    //     } else if (l2_entry->IsTable()) {
    //         return this->ExtractL3Entry(out_entry, out_context, this->GetL3EntryFromTable(GetPageTableVirtualAddress(l2_entry->GetTable()), virt_addr), virt_addr);
    //     } else {
    //         out_entry->phys_addr        = Null<KPhysicalAddress>;
    //         out_entry->block_size       = L2BlockSize;
    //         out_entry->sw_reserved_bits = 0;
    //         out_entry->attr             = 0;
    //
    //         out_context->l3_entry = nullptr;
    //         return false;
    //     }
    // }
    //
    // bool KPageTableImpl::ExtractL1Entry(TraversalEntry *out_entry, TraversalContext *out_context, const L1PageTableEntry *l1_entry, KProcessAddress virt_addr) const {
    //     /* Set the L1 entry. */
    //     out_context->level_entries[EntryLevel_L1] = l1_entry;
    //
    //     if (l1_entry->IsBlock()) {
    //         /* Set the output entry. */
    //         out_entry->phys_addr = l1_entry->GetBlock() + (virt_addr & (L1BlockSize - 1));
    //         if (l1_entry->IsContiguous()) {
    //             out_entry->block_size = L1ContiguousBlockSize;
    //         } else {
    //             out_entry->block_size = L1BlockSize;
    //         }
    //         out_entry->sw_reserved_bits = l1_entry->GetSoftwareReservedBits();
    //
    //         /* Set the output context. */
    //         out_context->l2_entry = nullptr;
    //         out_context->l3_entry = nullptr;
    //         return true;
    //     } else if (l1_entry->IsTable()) {
    //         return this->ExtractL2Entry(out_entry, out_context, this->GetL2EntryFromTable(GetPageTableVirtualAddress(l1_entry->GetTable()), virt_addr), virt_addr);
    //     } else {
    //         out_entry->phys_addr        = Null<KPhysicalAddress>;
    //         out_entry->block_size       = L1BlockSize;
    //         out_entry->sw_reserved_bits = 0;
    //         out_entry->attr             = 0;
    //
    //         out_context->l2_entry = nullptr;
    //         out_context->l3_entry = nullptr;
    //         return false;
    //     }
    // }
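
    /* The traversal API below walks mappings incrementally rather than re-walking from the L1 table for */
    /* every lookup: a TraversalContext caches the entry pointer at each level (level_entries), the level */
    /* currently being examined, and whether that entry is part of a contiguous run, while a TraversalEntry */
    /* describes the block that was found (physical address, block size, software-reserved bits). */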

    bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
        /* Setup invalid defaults. */
        *out_entry   = {};
        *out_context = {};

        /* Validate that we can read the actual entry. */
        const size_t l0_index = GetL0Index(address);
        const size_t l1_index = GetL1Index(address);
        if (m_is_kernel) {
            /* Kernel entries must be accessed via TTBR1. */
            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
                return false;
            }
        } else {
            /* User entries must be accessed with TTBR0. */
            if ((l0_index != 0) || l1_index >= m_num_entries) {
                return false;
            }
        }

        /* Get the L1 entry, and check if it's a table. */
        out_context->level_entries[EntryLevel_L1] = this->GetL1Entry(address);
        if (out_context->level_entries[EntryLevel_L1]->IsMappedTable()) {
            /* Get the L2 entry, and check if it's a table. */
            out_context->level_entries[EntryLevel_L2] = this->GetL2EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L1]->GetTable()), address);
            if (out_context->level_entries[EntryLevel_L2]->IsMappedTable()) {
                /* Get the L3 entry. */
                out_context->level_entries[EntryLevel_L3] = this->GetL3EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L2]->GetTable()), address);

                /* It's either a page or not. */
                out_context->level = EntryLevel_L3;
            } else {
                /* Not an L2 table, so possibly an L2 block. */
                out_context->level = EntryLevel_L2;
            }
        } else {
            /* Not an L1 table, so possibly an L1 block. */
            out_context->level = EntryLevel_L1;
        }

        /* Determine other fields. */
        const auto *pte = out_context->level_entries[out_context->level];

        out_context->is_contiguous = pte->IsContiguous();

        out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
        out_entry->attr             = 0;
        out_entry->phys_addr        = this->GetBlock(pte, out_context->level) + this->GetOffset(address, out_context->level);
        out_entry->block_size       = static_cast<size_t>(1) << (PageBits + LevelBits * out_context->level + 4 * out_context->is_contiguous);

        return out_context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
    }
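
    /* With the 4KB translation granule used here (PageBits = 12, LevelBits = 9, and the contiguous hint */
    /* grouping 16 entries), the block_size computed above is 4KB or 64KB at L3, 2MB or 32MB at L2, and  */
    /* 1GB or 16GB at L1, for non-contiguous and contiguous entries respectively. */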

    bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
        /* Advance entry. */
        auto *cur_pte  = context->level_entries[context->level];
        auto *next_pte = reinterpret_cast<const PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), 0x10 * sizeof(PageTableEntry)) + 0x10 * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));

        /* Set the pte. */
        context->level_entries[context->level] = next_pte;

        /* Advance appropriately. */
        while (context->level < EntryLevel_L1 && util::IsAligned(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), PageSize)) {
            /* Advance the above table by one entry. */
            context->level_entries[context->level + 1]++;
            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
        }

        /* Check if we've hit the end of the L1 table. */
        if (context->level == EntryLevel_L1) {
            if (context->level_entries[EntryLevel_L1] - static_cast<const PageTableEntry *>(m_table) >= m_num_entries) {
                *context   = {};
                *out_entry = {};
                return false;
            }
        }

        /* We may have advanced to a new table, and if we have we should descend. */
        while (context->level > EntryLevel_L3 && context->level_entries[context->level]->IsMappedTable()) {
            context->level_entries[context->level - 1] = GetPointer<const PageTableEntry>(GetPageTableVirtualAddress(context->level_entries[context->level]->GetTable()));
            context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
        }

        const auto *pte = context->level_entries[context->level];

        context->is_contiguous = pte->IsContiguous();

        out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
        out_entry->attr             = 0;
        out_entry->phys_addr        = this->GetBlock(pte, context->level);
        out_entry->block_size       = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);

        return context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
    }
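
    /* Note on the advance logic above: a table occupies one page (512 eight-byte entries), so when the */
    /* entry pointer for the current level becomes page-aligned we have walked off the end of that table */
    /* and must advance the parent entry instead; contiguous runs are stepped over 16 entries at a time. */

    /* Illustrative sketch (hypothetical caller-side code, not part of this file) of how BeginTraversal */
    /* and ContinueTraversal are used together to walk a region block-by-block until an unmapped entry  */
    /* or the end of the table is reached; `impl`, `addr`, and `VisitBlock` are placeholder names.       */
    // TraversalContext context;
    // TraversalEntry   entry;
    // bool mapped = impl.BeginTraversal(std::addressof(entry), std::addressof(context), addr);
    // while (mapped) {
    //     /* entry.phys_addr and entry.block_size describe the current physical block. */
    //     VisitBlock(entry.phys_addr, entry.block_size);
    //     mapped = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
    // }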

    bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
        /* Validate that we can read the actual entry. */
        const size_t l0_index = GetL0Index(address);
        const size_t l1_index = GetL1Index(address);
        if (m_is_kernel) {
            /* Kernel entries must be accessed via TTBR1. */
            if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
                return false;
            }
        } else {
            /* User entries must be accessed with TTBR0. */
            if ((l0_index != 0) || l1_index >= m_num_entries) {
                return false;
            }
        }

        /* Get the L1 entry, and check if it's a table. */
        const PageTableEntry *pte = this->GetL1Entry(address);
        EntryLevel level          = EntryLevel_L1;
        if (pte->IsMappedTable()) {
            /* Get the L2 entry, and check if it's a table. */
            pte   = this->GetL2EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
            level = EntryLevel_L2;
            if (pte->IsMappedTable()) {
                pte   = this->GetL3EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
                level = EntryLevel_L3;
            }
        }

        const bool is_block = level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
        if (is_block) {
            *out = this->GetBlock(pte, level) + this->GetOffset(address, level);
        } else {
            *out = Null<KPhysicalAddress>;
        }

        return is_block;
    }

    void KPageTableImpl::Dump(uintptr_t start, size_t size) const {
        /* If zero size, there's nothing to dump. */
        if (size == 0) {
            return;
        }

        /* Define extents. */
        const uintptr_t end  = start + size;
        const uintptr_t last = end - 1;

        /* Define tracking variables. */
        bool unmapped = false;
        uintptr_t unmapped_start = 0;

        /* Walk the table. */
        uintptr_t cur = start;
        while (cur < end) {
            /* Validate that we can read the actual entry. */
            const size_t l0_index = GetL0Index(cur);
            const size_t l1_index = GetL1Index(cur);
            if (m_is_kernel) {
                /* Kernel entries must be accessed via TTBR1. */
                if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
                    return;
                }
            } else {
                /* User entries must be accessed with TTBR0. */
                if ((l0_index != 0) || l1_index >= m_num_entries) {
                    return;
                }
            }

            /* Try to get from l1 table. */
            const L1PageTableEntry *l1_entry = this->GetL1Entry(cur);
            if (l1_entry->IsBlock()) {
                /* Update. */
                cur = util::AlignDown(cur, L1BlockSize);
                if (unmapped) {
                    unmapped = false;
                    MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, cur - 1);
                }

                /* Print. */
                MESOSPHERE_RELEASE_LOG("%016lx: %016lx PA=%p SZ=1G Mapped=%d UXN=%d PXN=%d Cont=%d nG=%d AF=%d SH=%x RO=%d UA=%d NS=%d AttrIndx=%d NoMerge=%d,%d,%d\n",
                                       cur, *reinterpret_cast<const u64 *>(l1_entry), reinterpret_cast<void *>(GetInteger(l1_entry->GetBlock())),
                                       l1_entry->IsMapped(), l1_entry->IsUserExecuteNever(), l1_entry->IsPrivilegedExecuteNever(), l1_entry->IsContiguous(), !l1_entry->IsGlobal(),
                                       static_cast<int>(l1_entry->GetAccessFlagInteger()), static_cast<unsigned int>(l1_entry->GetShareableInteger()),
                                       l1_entry->IsReadOnly(), l1_entry->IsUserAccessible(), l1_entry->IsNonSecure(), static_cast<int>(l1_entry->GetPageAttributeInteger()),
                                       l1_entry->IsHeadMergeDisabled(), l1_entry->IsHeadAndBodyMergeDisabled(), l1_entry->IsTailMergeDisabled());

                /* Advance. */
                cur += L1BlockSize;
                continue;
            } else if (!l1_entry->IsTable()) {
                /* Update. */
                cur = util::AlignDown(cur, L1BlockSize);
                if (!unmapped) {
                    unmapped_start = cur;
                    unmapped       = true;
                }

                /* Advance. */
                cur += L1BlockSize;
                continue;
            }

            /* Try to get from l2 table. */
            const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, cur);
            if (l2_entry->IsBlock()) {
                /* Update. */
                cur = util::AlignDown(cur, L2BlockSize);
                if (unmapped) {
                    unmapped = false;
                    MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, cur - 1);
                }

                /* Print. */
                MESOSPHERE_RELEASE_LOG("%016lx: %016lx PA=%p SZ=2M Mapped=%d UXN=%d PXN=%d Cont=%d nG=%d AF=%d SH=%x RO=%d UA=%d NS=%d AttrIndx=%d NoMerge=%d,%d,%d\n",
                                       cur, *reinterpret_cast<const u64 *>(l2_entry), reinterpret_cast<void *>(GetInteger(l2_entry->GetBlock())),
                                       l2_entry->IsMapped(), l2_entry->IsUserExecuteNever(), l2_entry->IsPrivilegedExecuteNever(), l2_entry->IsContiguous(), !l2_entry->IsGlobal(),
                                       static_cast<int>(l2_entry->GetAccessFlagInteger()), static_cast<unsigned int>(l2_entry->GetShareableInteger()),
                                       l2_entry->IsReadOnly(), l2_entry->IsUserAccessible(), l2_entry->IsNonSecure(), static_cast<int>(l2_entry->GetPageAttributeInteger()),
                                       l2_entry->IsHeadMergeDisabled(), l2_entry->IsHeadAndBodyMergeDisabled(), l2_entry->IsTailMergeDisabled());

                /* Advance. */
                cur += L2BlockSize;
                continue;
            } else if (!l2_entry->IsTable()) {
                /* Update. */
                cur = util::AlignDown(cur, L2BlockSize);
                if (!unmapped) {
                    unmapped_start = cur;
                    unmapped       = true;
                }

                /* Advance. */
                cur += L2BlockSize;
                continue;
            }

            /* Try to get from l3 table. */
            const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, cur);
            if (l3_entry->IsBlock()) {
                /* Update. */
                cur = util::AlignDown(cur, L3BlockSize);
                if (unmapped) {
                    unmapped = false;
                    MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, cur - 1);
                }

                /* Print. */
                MESOSPHERE_RELEASE_LOG("%016lx: %016lx PA=%p SZ=4K Mapped=%d UXN=%d PXN=%d Cont=%d nG=%d AF=%d SH=%x RO=%d UA=%d NS=%d AttrIndx=%d NoMerge=%d,%d,%d\n",
                                       cur, *reinterpret_cast<const u64 *>(l3_entry), reinterpret_cast<void *>(GetInteger(l3_entry->GetBlock())),
                                       l3_entry->IsMapped(), l3_entry->IsUserExecuteNever(), l3_entry->IsPrivilegedExecuteNever(), l3_entry->IsContiguous(), !l3_entry->IsGlobal(),
                                       static_cast<int>(l3_entry->GetAccessFlagInteger()), static_cast<unsigned int>(l3_entry->GetShareableInteger()),
                                       l3_entry->IsReadOnly(), l3_entry->IsUserAccessible(), l3_entry->IsNonSecure(), static_cast<int>(l3_entry->GetPageAttributeInteger()),
                                       l3_entry->IsHeadMergeDisabled(), l3_entry->IsHeadAndBodyMergeDisabled(), l3_entry->IsTailMergeDisabled());

                /* Advance. */
                cur += L3BlockSize;
                continue;
            } else {
                /* Update. */
                cur = util::AlignDown(cur, L3BlockSize);
                if (!unmapped) {
                    unmapped_start = cur;
                    unmapped       = true;
                }

                /* Advance. */
                cur += L3BlockSize;
                continue;
            }
        }

        /* Print the last unmapped range if necessary. */
        if (unmapped) {
            MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, last);
        }
    }
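
    /* The debug-only count below includes the L1 table itself, each L2 table reachable from a mapped */
    /* L1 table descriptor, and each L3 table reachable from those L2 tables. */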

    size_t KPageTableImpl::CountPageTables() const {
        size_t num_tables = 0;

        #if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
        {
            ++num_tables;
            for (size_t l1_index = 0; l1_index < m_num_entries; ++l1_index) {
                auto &l1_entry = m_table[l1_index];
                if (l1_entry.IsTable()) {
                    ++num_tables;
                    for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
                        auto *l2_entry = GetPointer<L2PageTableEntry>(GetTableEntry(KMemoryLayout::GetLinearVirtualAddress(l1_entry.GetTable()), l2_index));
                        if (l2_entry->IsTable()) {
                            ++num_tables;
                        }
                    }
                }
            }
        }
        #endif

        return num_tables;
    }

}