kern: update KernelLdr for 19.0.0 (new checks, dummy function call).

Also, fix a few very embarrassing mistakes in KernelLdr:
* We have accidentally been mapping the page table region RWX for a few years now.
* My attempt at making initial page tables not use bit 58 was broken in multiple ways.
Michael Scire 2025-04-07 16:46:58 -07:00
parent 4c9b758348
commit 715eff2102
3 changed files with 14 additions and 14 deletions
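For context on the bit 58 remark, here is a minimal sketch (not the project's code) of the distinction behind the IsEmpty() -> IsMappedEmpty() changes below: on ARMv8-A, only bit 0 of a translation table descriptor decides whether it maps anything, so an entry carrying software bits such as bit 58 can still be unmapped even though its raw value is nonzero.

#include <cstdint>

/* Valid bit clear => the hardware ignores every other bit of the descriptor. */
constexpr bool IsDescriptorUnmapped(std::uint64_t entry) {
    return (entry & 0b1) == 0;
}

/* A raw-zero test is stricter and misses unmapped entries that carry software bits. */
constexpr bool IsRawZero(std::uint64_t entry) {
    return entry == 0;
}

static_assert(IsDescriptorUnmapped(UINT64_C(1) << 58));  /* unmapped despite bit 58 set */
static_assert(!IsRawZero(UINT64_C(1) << 58));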

@@ -110,7 +110,7 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
virt_addr += L1BlockSize;
@@ -126,7 +126,7 @@ namespace ams::kern::arch::arm64::init {
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) {
const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
@@ -144,7 +144,7 @@ namespace ams::kern::arch::arm64::init {
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block or empty. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty());
const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
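A note on the advance_size computation in these hunks: with a 4 KiB granule an L2 block maps 2 MiB, and the Contiguous hint groups 16 adjacent entries into a single 32 MiB translation, which is why a contiguous block lets the walk advance 16 entries at once. A self-contained sketch of that arithmetic (the names are stand-ins, not the project's API):

#include <cstddef>

constexpr std::size_t L2BlockSize           = std::size_t(2) << 20;  /* 2 MiB              */
constexpr std::size_t L2ContiguousBlockSize = 16 * L2BlockSize;      /* 16 entries, 32 MiB */

/* Mirror of the advance-size selection: contiguous blocks advance 16 entries at once,
   everything else (including empty entries) advances by a single block. */
constexpr std::size_t AdvanceSizeL2(bool is_block, bool is_contiguous) {
    return (is_block && is_contiguous) ? L2ContiguousBlockSize : L2BlockSize;
}

static_assert(AdvanceSizeL2(true, true)   == (std::size_t(32) << 20));
static_assert(AdvanceSizeL2(false, false) == (std::size_t(2) << 20));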
@@ -164,7 +164,7 @@ namespace ams::kern::arch::arm64::init {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
@@ -182,7 +182,7 @@ namespace ams::kern::arch::arm64::init {
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) {
const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
@@ -202,7 +202,7 @@ namespace ams::kern::arch::arm64::init {
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block or empty. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty());
const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
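The MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(...)) assertions in both walks require the cursor to stay aligned to whatever unit it advances by. util::IsAligned itself lives elsewhere in the tree; the usual power-of-two form it presumably reduces to looks like this:

#include <cstdint>

/* Only meaningful when align is a power of two: all bits below the alignment must be clear. */
constexpr bool IsAlignedPow2(std::uint64_t value, std::uint64_t align) {
    return (value & (align - 1)) == 0;
}

static_assert(IsAlignedPow2(0x40000000, 0x40000000));   /* 1 GiB aligned                  */
static_assert(!IsAlignedPow2(0x40200000, 0x40000000));  /* 2 MiB offset into the gigabyte */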

@@ -122,7 +122,7 @@ namespace ams::kern::arch::arm64 {
/* Construct a new attribute. */
constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
: m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
: m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(m))
{
/* ... */
}
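The visible effect of this hunk is that the constructor no longer ORs ExtensionFlag_Valid into the composed attributes. As an illustration of the composition style only (AF really is bit 10 and SH really occupies bits [9:8] on ARMv8-A; the permission constant is a simplified stand-in, not the project's):

#include <cstdint>

constexpr std::uint64_t AccessFlag_Accessed = UINT64_C(1) << 10;  /* AF, bit 10                         */
constexpr std::uint64_t Shareable_Inner     = UINT64_C(3) << 8;   /* SH = inner shareable, bits [9:8]   */
constexpr std::uint64_t Perm_KernelRW       = UINT64_C(0) << 6;   /* AP[2:1] = 00, EL1 RW (simplified)  */

/* OR the independent fields together; note that nothing here marks the entry valid. */
constexpr std::uint64_t ComposeAttributes(std::uint64_t perm, std::uint64_t share) {
    return perm | AccessFlag_Accessed | share;
}

static_assert((ComposeAttributes(Perm_KernelRW, Shareable_Inner) & 1) == 0);  /* still not valid */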
@@ -205,6 +205,7 @@ namespace ams::kern::arch::arm64 {
constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; }
constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
constexpr ALWAYS_INLINE bool IsMappedEmpty() const { return this->GetBits(0, 2) == 0; }
constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
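Restated in plain ARMv8-A terms, the predicates above decode bits [1:0] of the descriptor: bit 0 is the valid bit, and bit 1 distinguishes a block from a table (or, at level 3, a page) when the entry is valid. A free-function sketch with the same semantics, for illustration only:

#include <cstdint>

constexpr std::uint64_t GetBits(std::uint64_t v, unsigned offset, unsigned count) {
    return (v >> offset) & ((UINT64_C(1) << count) - 1);
}

constexpr bool IsMappedBlock(std::uint64_t e) { return GetBits(e, 0, 2) == 1; }  /* 0b01            */
constexpr bool IsMappedTable(std::uint64_t e) { return GetBits(e, 0, 2) == 3; }  /* 0b11            */
constexpr bool IsMappedEmpty(std::uint64_t e) { return GetBits(e, 0, 2) == 0; }  /* 0b00            */
constexpr bool IsMapped(std::uint64_t e)      { return GetBits(e, 0, 1) != 0; }  /* valid bit alone */

static_assert(IsMappedEmpty(UINT64_C(1) << 58));  /* software bits alone leave an entry unmapped */
static_assert(!IsMapped(UINT64_C(1) << 58));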

@@ -21,12 +21,6 @@ namespace ams::kern::arch::arm64 {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = true;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
}
void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = false;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
/* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
@@ -68,7 +62,6 @@ namespace ams::kern::arch::arm64 {
/* Advance. */
while (true) {
/* Advance to the next entry at the current level. */
++level_entries[level];
if (!util::IsAligned(reinterpret_cast<uintptr_t>(++level_entries[level]), PageSize)) {
break;
}
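The page-alignment test in this loop works because, with a 4 KiB granule, each translation table occupies exactly one page of 512 eight-byte entries: once the incremented entry pointer becomes page-aligned it has stepped past the last entry of its table, and the walk has to pop up a level. A tiny sketch of that check (assumed constant, not the project's code):

#include <cstdint>

constexpr std::uintptr_t PageSize = 0x1000;  /* 4 KiB granule */

/* True when an already-incremented entry pointer has walked off the end of its table. */
constexpr bool SteppedPastTableEnd(std::uintptr_t next_entry_ptr) {
    return (next_entry_ptr % PageSize) == 0;
}

static_assert(SteppedPastTableEnd(0x80002000));   /* first byte of the next page      */
static_assert(!SteppedPastTableEnd(0x80001FF8));  /* still pointing at the last entry */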
@@ -83,6 +76,12 @@ namespace ams::kern::arch::arm64 {
}
}
void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = false;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
}
L1PageTableEntry *KPageTableImpl::Finalize() {
return m_table;
}
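Finally, a hedged sketch of the kind of whole-table walk that the "iterated and modified to ensure KPageTable invariants" comment refers to, written as nested loops rather than the level_entries[] cursor form used in the file, and assuming an identity mapping so the next-level pointer can be dereferenced directly. It is not the project's code:

#include <cstddef>
#include <cstdint>

constexpr std::size_t EntriesPerTable = 512;  /* one 4 KiB table of 8-byte entries */

struct Entry { std::uint64_t raw; };

/* Bits [1:0] == 0b11 marks a table descriptor at levels 1 and 2. */
inline bool IsTableDescriptor(const Entry &e) { return (e.raw & 0b11) == 0b11; }

/* Table descriptors carry the next level's physical address in bits [47:12];
   the identity-mapping assumption is what lets the sketch dereference it directly. */
inline Entry *ChildTable(const Entry &e) {
    return reinterpret_cast<Entry *>(e.raw & UINT64_C(0x0000FFFFFFFFF000));
}

template<typename Visitor>
void WalkThreeLevels(Entry *l1, std::size_t num_l1_entries, Visitor &&visit) {
    for (std::size_t i = 0; i < num_l1_entries; ++i) {
        if (!IsTableDescriptor(l1[i])) { visit(l1[i], 1); continue; }
        Entry *l2 = ChildTable(l1[i]);
        for (std::size_t j = 0; j < EntriesPerTable; ++j) {
            if (!IsTableDescriptor(l2[j])) { visit(l2[j], 2); continue; }
            Entry *l3 = ChildTable(l2[j]);
            for (std::size_t k = 0; k < EntriesPerTable; ++k) {
                visit(l3[k], 3);  /* level 3 entries are pages or empty */
            }
        }
    }
}

A visitor passed to such a walk is the natural place to normalize whatever attributes KInitialPageTable left behind; per the hunks above, that fix-up now runs in InitializeForKernel rather than InitializeForProcess.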