Merge pull request #1 from Atmosphere-NX/master

merge
Zerofo 2021-10-26 08:49:31 +08:00 committed by GitHub
commit 9107b3f378
76 changed files with 21038 additions and 458 deletions

View File

@ -0,0 +1,5 @@
export ATMOSPHERE_DEFINES += -DATMOSPHERE_BOARD_QEMU_VIRT -D__SWITCH__
export ATMOSPHERE_SETTINGS +=
export ATMOSPHERE_CFLAGS +=
export ATMOSPHERE_CXXFLAGS +=
export ATMOSPHERE_ASFLAGS +=

View File

@ -52,6 +52,21 @@ export ATMOSPHERE_OS_NAME := horizon
export ATMOSPHERE_CPU_EXTENSIONS :=
endif
else ifeq ($(ATMOSPHERE_BOARD),qemu-virt)
ifeq ($(ATMOSPHERE_CPU),arm-cortex-a57)
export ATMOSPHERE_ARCH_DIR := arm64
export ATMOSPHERE_BOARD_DIR := qemu/virt
export ATMOSPHERE_OS_DIR := horizon
export ATMOSPHERE_ARCH_NAME := arm64
export ATMOSPHERE_BOARD_NAME := qemu_virt
export ATMOSPHERE_OS_NAME := horizon
export ATMOSPHERE_CPU_EXTENSIONS := arm_crypto_extension aarch64_crypto_extension
endif
endif
ifeq ($(ATMOSPHERE_CPU),arm-cortex-a57)

View File

@ -104,6 +104,18 @@ $(eval $(call ATMOSPHERE_ADD_TARGET, audit, $(TARGET)_audit.a, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_AUDITING" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_release, $(TARGET)_qemu_virt.a, \
ATMOSPHERE_BUILD_SETTINGS="" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_debug, $(TARGET)_qemu_virt_debug.a, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_DEBUGGING" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_audit, $(TARGET)_qemu_virt_audit.a, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_AUDITING" \
))
#---------------------------------------------------------------------------------
-include $(ATMOSPHERE_BOARD_NAME)_$(ATMOSPHERE_ARCH_NAME).mk

View File

@ -30,6 +30,8 @@ namespace ams::kern::arch::arm64::cpu {
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
constexpr inline size_t NumCores = 4;
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
constexpr inline size_t NumCores = 4;
#else
#error "Unknown Board for cpu::NumCores"
#endif
@ -50,6 +52,10 @@ namespace ams::kern::arch::arm64::cpu {
__asm__ __volatile__("dmb sy" ::: "memory");
}
ALWAYS_INLINE void DataMemoryBarrierInnerShareable() {
__asm__ __volatile__("dmb ish" ::: "memory");
}
ALWAYS_INLINE void InstructionMemoryBarrier() {
__asm__ __volatile__("isb" ::: "memory");
}

View File

@ -36,6 +36,10 @@ namespace ams::kern::arch::arm64 {
KInterruptName_SecurePhysicalTimer = 29,
KInterruptName_NonSecurePhysicalTimer = 30,
KInterruptName_LegacyNIrq = 31,
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
KInterruptName_VirtualTimer = 27,
KInterruptName_SecurePhysicalTimer = 29,
KInterruptName_NonSecurePhysicalTimer = 30,
#endif
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)

View File

@ -0,0 +1,95 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern::arch::arm64::smc {
template<int SmcId, bool DisableInterrupt>
void SecureMonitorCall(u64 *buf) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = buf[0];
register u64 x1 asm("x1") = buf[1];
register u64 x2 asm("x2") = buf[2];
register u64 x3 asm("x3") = buf[3];
register u64 x4 asm("x4") = buf[4];
register u64 x5 asm("x5") = buf[5];
register u64 x6 asm("x6") = buf[6];
register u64 x7 asm("x7") = buf[7];
/* Perform the call. */
if constexpr (DisableInterrupt) {
KScopedInterruptDisable di;
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
} else {
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
}
/* Store arguments to output. */
buf[0] = x0;
buf[1] = x1;
buf[2] = x2;
buf[3] = x3;
buf[4] = x4;
buf[5] = x5;
buf[6] = x6;
buf[7] = x7;
}
enum PsciFunction {
PsciFunction_CpuSuspend = 0xC4000001,
PsciFunction_CpuOff = 0x84000002,
PsciFunction_CpuOn = 0xC4000003,
};
template<int SmcId, bool DisableInterrupt>
u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
SecureMonitorCall<SmcId, DisableInterrupt>(args.r);
return args.r[0];
}
template<int SmcId, bool DisableInterrupt>
u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
return PsciCall<SmcId, DisableInterrupt>(PsciFunction_CpuOn, core_id, entrypoint, arg);
}
}
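
Usage sketch (not part of the diff): the call sites later in this change, PowerOnCpu and KSystemControl::Init::CpuOn, invoke this generic wrapper roughly as follows. SmcId_Supervisor (== 1) comes from the board smc header shown further down, and 0 is the standard PSCI SUCCESS return code.

/* Illustrative only; mirrors the call sites shown later in this diff. */
void PowerOnSecondaryCore(u64 core_id, KPhysicalAddress entry_phys_addr, u64 context_id) {
    /* PSCI CPU_ON returns 0 (PSCI_SUCCESS) once the target core has started at the entrypoint. */
    const u64 result = ::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, true>(core_id, GetInteger(entry_phys_addr), context_id);
    MESOSPHERE_ABORT_UNLESS(result == 0);
}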

View File

@ -44,13 +44,13 @@ namespace ams::kern::board::generic {
return ams::kern::svc::ResultNotImplemented();
}
Result ALWAYS_INLINE Map(size_t *out_mapped_size, const KPageGroup &pg, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool refresh_mappings) {
MESOSPHERE_UNUSED(out_mapped_size, pg, device_address, device_perm, refresh_mappings);
Result ALWAYS_INLINE Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
MESOSPHERE_UNUSED(page_table, process_address, size, device_address, device_perm, is_aligned);
return ams::kern::svc::ResultNotImplemented();
}
Result ALWAYS_INLINE Unmap(const KPageGroup &pg, KDeviceVirtualAddress device_address) {
MESOSPHERE_UNUSED(pg, device_address);
Result ALWAYS_INLINE Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
MESOSPHERE_UNUSED(page_table, process_address, size, device_address);
return ams::kern::svc::ResultNotImplemented();
}

View File

@ -15,9 +15,12 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;

View File

@ -15,23 +15,17 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern {
struct InitialProcessBinaryLayout;
}
#include <mesosphere/kern_k_system_control_base.hpp>
namespace ams::kern::board::nintendo::nx {
class KSystemControl {
class KSystemControl : public KSystemControlBase {
public:
class Init {
class Init : public KSystemControlBase::Init {
public:
/* Initialization. */
static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize();
static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
static size_t GetApplicationPoolSize();
@ -40,7 +34,7 @@ namespace ams::kern::board::nintendo::nx {
static u8 GetDebugLogUartPort();
/* Randomness. */
static void GenerateRandomBytes(void *dst, size_t size);
static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
};
public:
@ -50,7 +44,7 @@ namespace ams::kern::board::nintendo::nx {
static NOINLINE u32 GetCreateProcessMemoryPool();
/* Randomness. */
static void GenerateRandomBytes(void *dst, size_t size);
static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
@ -58,23 +52,12 @@ namespace ams::kern::board::nintendo::nx {
static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
static ALWAYS_INLINE u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
u32 v;
ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
return v;
}
static ALWAYS_INLINE void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
u32 v;
ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
}
/* Power management. */
static void SleepSystem();
static NORETURN void StopSystem(void *arg = nullptr);
/* User access. */
static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
/* Secure Memory. */
static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern::board::qemu::virt::impl::cpu {
/* Virtual to Physical core map. */
constexpr inline const s32 VirtualToPhysicalCoreMap[BITSIZEOF(u64)] = {
0, 1, 2, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3,
};
}

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
constexpr inline KPhysicalAddress MainMemoryAddress = 0x40000000;
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
}

View File

@ -0,0 +1,20 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* All architectures must define NumBoardDeviceRegions. */
constexpr inline const auto NumBoardDeviceRegions = 0;
/* UNUSED: .Derive(NumBoardDeviceRegions, 0); */

View File

@ -0,0 +1,28 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_system_control_base.hpp>
namespace ams::kern::board::qemu::virt {
class KSystemControl : public KSystemControlBase {
public:
/* User access. */
static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
};
}

View File

@ -38,4 +38,13 @@
/* at the cost of storing class tokens inside the class object. */
/* However, as of (10/16/2021) KAutoObject has an unused class member */
/* of the right size, and so this does not actually cost any space. */
#define MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST
/* NOTE: This uses currently-reserved bits inside the MapRange capability */
/* in order to support large physical addresses (40-bit instead of 36). */
/* This is toggleable in order to disable it if N ever uses those bits. */
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
//#define MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
#else
#define MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
#endif
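
A sketch of the arithmetic this option enables (compare the KCapabilities::MapRange hunk later in this diff): with 4 KiB pages, a 24-bit Address field covers 2^24 pages, i.e. 36-bit physical addresses; repurposing the 4 reserved bits of MapRangeSize as AddressHigh gives 28 page-number bits, i.e. 40-bit addresses. The 24-bit field width is inferred from those figures and is an assumption here.

/* Hedged sketch; field widths inferred from the 36-bit/40-bit figures above. */
constexpr u64 PageSize    = 0x1000; /* 12 address bits */
constexpr u64 AddressBits = 24;     /* low page-number bits stored in MapRange::Address */

constexpr u64 DecodeMapRangePhysicalAddress(u64 address_field, u64 address_high_field) {
    /* 24 + 4 = 28 page-number bits -> 28 + 12 = 40-bit physical addresses. */
    return (address_field | (address_high_field << AddressBits)) * PageSize;
}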

View File

@ -43,6 +43,8 @@ namespace ams::kern {
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
#define MESOSPHERE_DEBUG_LOG_USE_UART
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#define MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING
#else
#error "Unknown board for Default Debug Log Source"
#endif

View File

@ -82,7 +82,11 @@ namespace ams::kern {
DEFINE_FIELD(Index, Mask, 3);
};
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
static constexpr u64 PhysicalMapAllowedMask = (1ul << 40) - 1;
#else
static constexpr u64 PhysicalMapAllowedMask = (1ul << 36) - 1;
#endif
struct MapRange {
using IdBits = Field<0, CapabilityId<CapabilityType::MapRange> + 1>;
@ -94,9 +98,15 @@ namespace ams::kern {
struct MapRangeSize {
using IdBits = Field<0, CapabilityId<CapabilityType::MapRange> + 1>;
DEFINE_FIELD(Pages, IdBits, 20);
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
DEFINE_FIELD(AddressHigh, Pages, 4);
DEFINE_FIELD(Normal, AddressHigh, 1, bool);
#else
DEFINE_FIELD(Reserved, Pages, 4);
DEFINE_FIELD(Normal, Reserved, 1, bool);
#endif
};
struct MapIoPage {

View File

@ -20,6 +20,8 @@
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
#include <mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp>
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#include <mesosphere/board/qemu/virt/kern_k_memory_layout.hpp>
#else
#error "Unknown board for KMemoryLayout"
#endif
@ -210,13 +212,17 @@ namespace ams::kern {
static NOINLINE auto GetKernelPageTableHeapRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); }
static NOINLINE auto GetKernelInitPageTableRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); }
static NOINLINE auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
static NOINLINE auto GetKernelPoolPartitionRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolPartition); }
static NOINLINE auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
static NOINLINE auto GetKernelSystemPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemPool); }
static NOINLINE auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemNonSecurePool); }
static NOINLINE auto GetKernelAppletPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramAppletPool); }
static NOINLINE auto GetKernelApplicationPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramApplicationPool); }
static NOINLINE bool HasKernelSystemNonSecurePoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramSystemNonSecurePool) != nullptr; }
static NOINLINE bool HasKernelAppletPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramAppletPool) != nullptr; }
static NOINLINE bool HasKernelApplicationPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramApplicationPool) != nullptr; }
static NOINLINE auto GetKernelTraceBufferRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelTraceBuffer); }
};

View File

@ -48,39 +48,20 @@ namespace ams::kern {
}
size_t SelectRandomBit(u64 bitmap) {
u64 selected = 0;
u64 cur_num_bits = BITSIZEOF(bitmap) / 2;
u64 cur_mask = (1ull << cur_num_bits) - 1;
for (size_t cur_num_bits = BITSIZEOF(bitmap) / 2; cur_num_bits != 0; cur_num_bits /= 2) {
const u64 high = (bitmap >> cur_num_bits);
const u64 low = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits)));
while (cur_num_bits) {
const u64 low = (bitmap >> 0) & cur_mask;
const u64 high = (bitmap >> cur_num_bits) & cur_mask;
bool choose_low;
if (high == 0) {
/* If only low val is set, choose low. */
choose_low = true;
} else if (low == 0) {
/* If only high val is set, choose high. */
choose_low = false;
} else {
/* If both are set, choose random. */
choose_low = this->GenerateRandomBit();
}
/* If we chose low, proceed with low. */
if (choose_low) {
bitmap = low;
selected += 0;
} else {
/* Choose high if we have high and (don't have low or select high randomly). */
if (high && (low == 0 || this->GenerateRandomBit())) {
bitmap = high;
selected += cur_num_bits;
} else {
bitmap = low;
selected += 0;
}
/* Proceed. */
cur_num_bits /= 2;
cur_mask >>= cur_num_bits;
}
return selected;
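
The lines above interleave the removed and added versions of SelectRandomBit without +/- markers. For readability, here is the shorter for-loop form untangled; the hunk header (-48,39 +48,20) indicates the function shrinks, so this appears to be the version that remains after the change. The free-function signature and the random_bit callable are adaptations for a self-contained sketch (the original calls this->GenerateRandomBit()).

/* Untangled sketch of the post-change selection loop. */
template<typename RandomBit>
size_t SelectRandomBit(u64 bitmap, RandomBit random_bit) {
    u64 selected = 0;

    for (size_t cur_num_bits = BITSIZEOF(bitmap) / 2; cur_num_bits != 0; cur_num_bits /= 2) {
        const u64 high = (bitmap >> cur_num_bits);
        const u64 low  = (bitmap & (~(UINT64_C(0xFFFFFFFFFFFFFFFF) << cur_num_bits)));

        /* Descend into the half that still has bits set; pick randomly when both halves do. */
        if (high && (low == 0 || random_bit())) {
            bitmap    = high;
            selected += cur_num_bits;
        } else {
            bitmap    = low;
        }
    }

    return selected;
}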

View File

@ -74,6 +74,9 @@ namespace ams::kern {
/* Release an instance of the lock. */
if ((--m_lock_count) == 0) {
/* Perform a memory barrier here. */
cpu::DataMemoryBarrierInnerShareable();
/* We're no longer going to hold the lock. Take note of what cores need scheduling. */
const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();

View File

@ -0,0 +1,110 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
namespace ams::kern {
struct InitialProcessBinaryLayout;
}
namespace ams::kern {
class KSystemControlBase {
protected:
/* Nintendo uses std::mt19937_t for randomness. */
/* To save space (and because mt19337_t isn't secure anyway), */
/* We will use TinyMT. */
static constinit inline bool s_initialized_random_generator;
static constinit inline util::TinyMT s_random_generator{util::ConstantInitialize};
static constinit inline KSpinLock s_random_lock;
public:
class Init {
public:
/* Initialization. */
static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize();
static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
static bool ShouldIncreaseThreadResourceLimit();
static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
static size_t GetApplicationPoolSize();
static size_t GetAppletPoolSize();
static size_t GetMinimumNonSecureSystemPoolSize();
static u8 GetDebugLogUartPort();
/* Randomness. */
static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
};
public:
/* Initialization. */
static NOINLINE void InitializePhase1(bool skip_target_system = false);
static NOINLINE void InitializePhase2();
static NOINLINE u32 GetCreateProcessMemoryPool();
/* Randomness. */
static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
/* Register access. */
static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
static u32 ReadRegisterPrivileged(ams::svc::PhysicalAddress address);
static void WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value);
/* Power management. */
static void SleepSystem();
static NORETURN void StopSystem(void *arg = nullptr);
/* User access. */
#if defined(ATMOSPHERE_ARCH_ARM64)
static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
#endif
/* Secure Memory. */
static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);
protected:
template<typename F>
static ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
/* Handle the case where the difference is too large to represent. */
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
return f();
}
/* Iterate until we get a value in range. */
const u64 range_size = ((max + 1) - min);
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
while (true) {
if (const u64 rnd = f(); rnd < effective_max) {
return min + (rnd % range_size);
}
}
}
/* User access. */
#if defined(ATMOSPHERE_ARCH_ARM64)
static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
#endif
};
}
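
GenerateUniformRange above is rejection sampling: raw 64-bit values at or above effective_max (the largest multiple of range_size that fits in a u64) are discarded and redrawn, so every residue of range_size is equally likely. A worked example for the range [0, 9], using constexpr arithmetic only (not part of the diff):

/* Worked example: range [0, 9], so range_size == 10. */
constexpr u64 U64Max       = std::numeric_limits<u64>::max();      /* 2^64 - 1 */
constexpr u64 RangeSize    = 10;
constexpr u64 EffectiveMax = (U64Max / RangeSize) * RangeSize;      /* 18446744073709551610 */

/* The six raw values EffectiveMax..U64Max would map only to residues 0..5, */
/* so the loop rejects them and draws again.                                */
static_assert(U64Max - EffectiveMax + 1 == 6);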

View File

@ -21,6 +21,7 @@ namespace ams::kern {
class KTargetSystem {
private:
friend class KSystemControlBase;
friend class KSystemControl;
private:
static inline constinit bool s_is_debug_mode;

View File

@ -39,6 +39,16 @@
}
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#include <mesosphere/board/qemu/virt/kern_cpu_map.hpp>
namespace ams::kern::cpu {
using namespace ams::kern::board::qemu::virt::impl::cpu;
}
#else
#error "Unknown board for CPU Map"
#endif

View File

@ -15,6 +15,7 @@
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_system_control_base.hpp>
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
#include <mesosphere/board/nintendo/nx/kern_k_system_control.hpp>
@ -23,6 +24,28 @@
using ams::kern::board::nintendo::nx::KSystemControl;
}
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#include <mesosphere/board/qemu/virt/kern_k_system_control.hpp>
namespace ams::kern {
using ams::kern::board::qemu::virt::KSystemControl;
}
#else
#error "Unknown board for KSystemControl"
#endif
namespace ams::kern {
ALWAYS_INLINE u32 KSystemControlBase::ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
u32 v;
KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
return v;
}
ALWAYS_INLINE void KSystemControlBase::WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
u32 v;
KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
}
}

View File

@ -76,7 +76,7 @@ namespace ams::kern::arch::arm64::cpu {
} else {
m_counter = cpu::GetPerformanceCounter(m_which);
}
DataMemoryBarrier();
DataMemoryBarrierInnerShareable();
m_done = true;
return nullptr;
}
@ -384,11 +384,13 @@ namespace ams::kern::arch::arm64::cpu {
/* Store cache from L1 up to (level of coherence - 1). */
for (int level = 0; level < levels_of_coherency - 1; ++level) {
PerformCacheOperationBySetWayImpl<true>(level, StoreDataCacheLineBySetWayImpl);
DataSynchronizationBarrier();
}
/* Flush cache from (level of coherence - 1) down to L0. */
for (int level = levels_of_coherency; level > 0; --level) {
PerformCacheOperationBySetWayImpl<true>(level - 1, FlushDataCacheLineBySetWayImpl);
DataSynchronizationBarrier();
}
}

View File

@ -1130,7 +1130,7 @@ namespace ams::kern::board::nintendo::nx {
size_t cur_size;
{
/* Get the current contiguous range. */
KPageTableBase::MemoryRange contig_range = {};
KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + mapped_size, size - mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
/* Ensure we close the range when we're done. */
@ -1288,7 +1288,7 @@ namespace ams::kern::board::nintendo::nx {
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* We need to traverse the ranges that make up our mapping, to make sure they're all good. Start by getting a contiguous range. */
KPageTableBase::MemoryRange contig_range = {};
KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), process_address, size))) {
return false;
}

View File

@ -73,7 +73,7 @@ namespace ams::kern::board::nintendo::nx {
void PowerOnCpu(int core_id, KPhysicalAddress entry_phys_addr, u64 context_id) {
/* Request the secure monitor power on the core. */
smc::CpuOn(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, true>(cpu::MultiprocessorAffinityRegisterAccessor().GetCpuOnArgument() | core_id, GetInteger(entry_phys_addr), context_id);
}
void WaitOtherCpuPowerOff() {

View File

@ -21,8 +21,7 @@ namespace ams::kern::board::nintendo::nx {
namespace {
constexpr uintptr_t DramPhysicalAddress = 0x80000000;
constexpr size_t SecureAlignment = 128_KB;
/* Global variables for panic. */
constinit bool g_call_smc_on_panic;
@ -38,22 +37,6 @@ namespace ams::kern::board::nintendo::nx {
constinit KPhysicalAddress g_secure_region_phys_addr = Null<KPhysicalAddress>;
constinit size_t g_secure_region_size = 0;
/* Global variables for randomness. */
/* Nintendo uses std::mt19937_t for randomness. */
/* To save space (and because mt19337_t isn't secure anyway), */
/* We will use TinyMT. */
constinit bool g_initialized_random_generator;
constinit util::TinyMT g_random_generator{util::ConstantInitialize};
constinit KSpinLock g_random_lock;
ALWAYS_INLINE size_t GetRealMemorySizeForInit() {
/* TODO: Move this into a header for the MC in general. */
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
u32 config_value;
MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
return static_cast<size_t>(config_value & 0x3FFF) << 20;
}
ALWAYS_INLINE util::BitPack32 GetKernelConfigurationForInit() {
u64 value = 0;
smc::init::GetConfig(&value, 1, smc::ConfigItem::KernelConfiguration);
@ -86,7 +69,7 @@ namespace ams::kern::board::nintendo::nx {
ALWAYS_INLINE u64 GenerateRandomU64ForInit() {
u64 value;
smc::init::GenerateRandomBytes(&value, sizeof(value));
smc::init::GenerateRandomBytes(std::addressof(value), sizeof(value));
return value;
}
@ -96,27 +79,6 @@ namespace ams::kern::board::nintendo::nx {
return value;
}
ALWAYS_INLINE u64 GenerateRandomU64FromGenerator() {
return g_random_generator.GenerateRandomU64();
}
template<typename F>
ALWAYS_INLINE u64 GenerateUniformRange(u64 min, u64 max, F f) {
/* Handle the case where the difference is too large to represent. */
if (max == std::numeric_limits<u64>::max() && min == std::numeric_limits<u64>::min()) {
return f();
}
/* Iterate until we get a value in range. */
const u64 range_size = ((max + 1) - min);
const u64 effective_max = (std::numeric_limits<u64>::max() / range_size) * range_size;
while (true) {
if (const u64 rnd = f(); rnd < effective_max) {
return min + (rnd % range_size);
}
}
}
ALWAYS_INLINE u64 GetConfigU64(smc::ConfigItem which) {
u64 value;
smc::GetConfig(&value, 1, which);
@ -324,6 +286,14 @@ namespace ams::kern::board::nintendo::nx {
}
/* Initialization. */
size_t KSystemControl::Init::GetRealMemorySize() {
/* TODO: Move this into a header for the MC in general. */
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
u32 config_value;
MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
return static_cast<size_t>(config_value & 0x3FFF) << 20;
}
size_t KSystemControl::Init::GetIntendedMemorySize() {
switch (GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>()) {
case smc::MemorySize_4GB:
@ -336,23 +306,6 @@ namespace ams::kern::board::nintendo::nx {
}
}
KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) {
const size_t real_dram_size = GetRealMemorySizeForInit();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 < real_dram_size) {
return base_address;
} else {
return base_address + ((real_dram_size - intended_dram_size) / 2);
}
}
void KSystemControl::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
*out = {
.address = GetInteger(GetKernelPhysicalBaseAddress(DramPhysicalAddress)) + GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
._08 = 0,
};
}
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
}
@ -424,17 +377,17 @@ namespace ams::kern::board::nintendo::nx {
}
void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
smc::init::CpuOn(core_id, entrypoint, arg);
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0);
}
/* Randomness for Initialization. */
void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) {
MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
smc::init::GenerateRandomBytes(dst, size);
void KSystemControl::Init::GenerateRandom(u64 *dst, size_t count) {
MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
smc::init::GenerateRandomBytes(dst, count * sizeof(u64));
}
u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
return GenerateUniformRange(min, max, GenerateRandomU64ForInit);
return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}
/* System Initialization. */
@ -443,8 +396,8 @@ namespace ams::kern::board::nintendo::nx {
{
u64 seed;
smc::GenerateRandomBytes(std::addressof(seed), sizeof(seed));
g_random_generator.Initialize(reinterpret_cast<u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
g_initialized_random_generator = true;
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
/* Set IsDebugMode. */
@ -483,25 +436,8 @@ namespace ams::kern::board::nintendo::nx {
smc::ConfigureCarveout(0, carveout.GetAddress(), carveout.GetSize());
}
/* System ResourceLimit initialization. */
{
/* Construct the resource limit object. */
KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
KAutoObject::Create<KResourceLimit>(std::addressof(sys_res_limit));
sys_res_limit.Initialize();
/* Set the initial limits. */
const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
const auto &slab_counts = init::GetSlabResourceCounts();
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
/* Reserve system memory. */
MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
}
/* Initialize the system resource limit (and potentially other things). */
KSystemControlBase::InitializePhase1(true);
}
void KSystemControl::InitializePhase2() {
@ -520,11 +456,8 @@ namespace ams::kern::board::nintendo::nx {
g_secure_applet_memory_address = KMemoryLayout::GetLinearVirtualAddress(secure_applet_memory_phys_addr);
}
/* Initialize KTrace. */
if constexpr (IsKTraceEnabled) {
const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
}
/* Initialize KTrace (and potentially other init). */
KSystemControlBase::InitializePhase2();
}
u32 KSystemControl::GetCreateProcessMemoryPool() {
@ -546,29 +479,29 @@ namespace ams::kern::board::nintendo::nx {
}
/* Randomness. */
void KSystemControl::GenerateRandomBytes(void *dst, size_t size) {
MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38);
smc::GenerateRandomBytes(dst, size);
void KSystemControl::GenerateRandom(u64 *dst, size_t count) {
MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
smc::GenerateRandomBytes(dst, count * sizeof(u64));
}
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_random_lock);
KScopedSpinLock lk(s_random_lock);
if (AMS_LIKELY(g_initialized_random_generator)) {
return GenerateUniformRange(min, max, GenerateRandomU64FromGenerator);
if (AMS_LIKELY(s_initialized_random_generator)) {
return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
} else {
return GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
}
}
u64 KSystemControl::GenerateRandomU64() {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_random_lock);
KScopedSpinLock lk(s_random_lock);
if (AMS_LIKELY(g_initialized_random_generator)) {
return GenerateRandomU64FromGenerator();
if (AMS_LIKELY(s_initialized_random_generator)) {
return s_random_generator.GenerateRandomU64();
} else {
return GenerateRandomU64FromSmc();
}
@ -672,52 +605,18 @@ namespace ams::kern::board::nintendo::nx {
}
/* User access. */
void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
/* Get the function id for the current call. */
u64 function_id = args->r[0];
/* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
auto &page_table = GetCurrentProcess().GetPageTable();
auto *bim = page_table.GetBlockInfoManager();
constexpr size_t MaxMappedRegisters = 7;
std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
for (size_t i = 0; i < MaxMappedRegisters; i++) {
const size_t reg_id = i + 1;
if (function_id & (1ul << (8 + reg_id))) {
/* Create and open a new page group for the address. */
KVirtualAddress virt_addr = args->r[reg_id];
if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
/* Translate the virtual address to a physical address. */
const auto it = page_groups[i].begin();
MESOSPHERE_ASSERT(it != page_groups[i].end());
MESOSPHERE_ASSERT(it->GetNumPages() == 1);
args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
} else {
/* If we couldn't map, we should clear the address. */
args->r[reg_id] = 0;
}
}
}
void KSystemControl::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* Invoke the secure monitor. */
smc::CallSecureMonitorFromUser(args);
/* Make sure that we close any pages that we opened. */
for (size_t i = 0; i < MaxMappedRegisters; i++) {
page_groups[i].Close();
}
return smc::CallSecureMonitorFromUser(args);
}
/* Secure Memory. */
size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
if (pool == KMemoryManager::Pool_Applet) {
return 0;
} else {
return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
}
return size;
}
Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {

View File

@ -20,18 +20,27 @@ namespace ams::kern::board::nintendo::nx::smc {
namespace {
struct SecureMonitorArguments {
u64 x[8];
};
enum UserFunctionId : u32 {
UserFunctionId_SetConfig = 0xC3000401,
UserFunctionId_GetConfigUser = 0xC3000002,
UserFunctionId_GetResult = 0xC3000003,
UserFunctionId_GetResultData = 0xC3000404,
UserFunctionId_ModularExponentiate = 0xC3000E05,
UserFunctionId_GenerateRandomBytes = 0xC3000006,
UserFunctionId_GenerateAesKek = 0xC3000007,
UserFunctionId_LoadAesKey = 0xC3000008,
UserFunctionId_ComputeAes = 0xC3000009,
UserFunctionId_GenerateSpecificAesKey = 0xC300000A,
UserFunctionId_ComputeCmac = 0xC300040B,
UserFunctionId_ReencryptDeviceUniqueData = 0xC300D60C,
UserFunctionId_DecryptDeviceUniqueData = 0xC300100D,
UserFunctionId_ModularExponentiateByStorageKey = 0xC300060F,
UserFunctionId_PrepareEsDeviceUniqueKey = 0xC3000610,
UserFunctionId_LoadPreparedAesKey = 0xC3000011,
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
};
enum FunctionId : u32 {
FunctionId_CpuSuspend = 0xC4000001,
FunctionId_CpuOff = 0x84000002,
FunctionId_CpuOn = 0xC4000003,
FunctionId_GetConfig = 0xC3000004,
FunctionId_GenerateRandomBytes = 0xC3000005,
FunctionId_Panic = 0xC3000006,
@ -42,171 +51,60 @@ namespace ams::kern::board::nintendo::nx::smc {
FunctionId_SetConfig = 0xC3000409,
};
void CallPrivilegedSecureMonitorFunction(SecureMonitorArguments &args) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = args.x[0];
register u64 x1 asm("x1") = args.x[1];
register u64 x2 asm("x2") = args.x[2];
register u64 x3 asm("x3") = args.x[3];
register u64 x4 asm("x4") = args.x[4];
register u64 x5 asm("x5") = args.x[5];
register u64 x6 asm("x6") = args.x[6];
register u64 x7 asm("x7") = args.x[7];
/* Actually make the call. */
{
/* Disable interrupts while making the call. */
KScopedInterruptDisable intr_disable;
{
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc #1"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
:
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
/* Store arguments to output. */
args.x[0] = x0;
args.x[1] = x1;
args.x[2] = x2;
args.x[3] = x3;
args.x[4] = x4;
args.x[5] = x5;
args.x[6] = x6;
args.x[7] = x7;
}
}
}
void CallUserSecureMonitorFunction(ams::svc::lp64::SecureMonitorArguments *args) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = args->r[0];
register u64 x1 asm("x1") = args->r[1];
register u64 x2 asm("x2") = args->r[2];
register u64 x3 asm("x3") = args->r[3];
register u64 x4 asm("x4") = args->r[4];
register u64 x5 asm("x5") = args->r[5];
register u64 x6 asm("x6") = args->r[6];
register u64 x7 asm("x7") = args->r[7];
/* Actually make the call. */
{
/* Disable interrupts while making the call. */
KScopedInterruptDisable intr_disable;
{
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
__asm__ __volatile__("smc #0"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
:
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
/* Store arguments to output. */
args->r[0] = x0;
args->r[1] = x1;
args->r[2] = x2;
args->r[3] = x3;
args->r[4] = x4;
args->r[5] = x5;
args->r[6] = x6;
args->r[7] = x7;
}
}
}
void CallPrivilegedSecureMonitorFunctionForInit(SecureMonitorArguments &args) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = args.x[0];
register u64 x1 asm("x1") = args.x[1];
register u64 x2 asm("x2") = args.x[2];
register u64 x3 asm("x3") = args.x[3];
register u64 x4 asm("x4") = args.x[4];
register u64 x5 asm("x5") = args.x[5];
register u64 x6 asm("x6") = args.x[6];
register u64 x7 asm("x7") = args.x[7];
/* Actually make the call. */
__asm__ __volatile__("smc #1"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
:
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Store arguments to output. */
args.x[0] = x0;
args.x[1] = x1;
args.x[2] = x2;
args.x[3] = x3;
args.x[4] = x4;
args.x[5] = x5;
args.x[6] = x6;
args.x[7] = x7;
}
/* Global lock for generate random bytes. */
KSpinLock g_generate_random_lock;
constinit KSpinLock g_generate_random_lock;
}
/* SMC functionality needed for init. */
namespace init {
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg };
CallPrivilegedSecureMonitorFunctionForInit(args);
}
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
CallPrivilegedSecureMonitorFunctionForInit(args);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.x[1 + i];
out[i] = args.r[1 + i];
}
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Call SmcGenerateRandomBytes() */
SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
CallPrivilegedSecureMonitorFunctionForInit(args);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Copy output. */
std::memcpy(dst, std::addressof(args.x[1]), size);
std::memcpy(dst, std::addressof(args.r[1]), size);
}
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value };
CallPrivilegedSecureMonitorFunctionForInit(args);
*out = args.x[1];
return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
*out = args.r[1];
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
}
bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
SecureMonitorArguments args = { FunctionId_GetConfig, static_cast<u32>(config_item) };
CallPrivilegedSecureMonitorFunction(args);
if (static_cast<SmcResult>(args.x[0]) != SmcResult::Success) {
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
if (AMS_UNLIKELY(static_cast<SmcResult>(args.r[0]) != SmcResult::Success)) {
return false;
}
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.x[1 + i];
out[i] = args.r[1 + i];
}
return true;
@ -217,55 +115,58 @@ namespace ams::kern::board::nintendo::nx::smc {
}
bool SetConfig(ConfigItem config_item, u64 value) {
SecureMonitorArguments args = { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value };
CallPrivilegedSecureMonitorFunction(args);
return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
SecureMonitorArguments args = { FunctionId_ReadWriteRegister, address, mask, value };
CallPrivilegedSecureMonitorFunction(args);
*out = static_cast<u32>(args.x[1]);
return static_cast<SmcResult>(args.x[0]) == SmcResult::Success;
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
*out = static_cast<u32>(args.r[1]);
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
SecureMonitorArguments args = { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) };
CallPrivilegedSecureMonitorFunction(args);
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
}
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
SecureMonitorArguments args = { FunctionId_CpuOn, core_id, static_cast<u64>(entrypoint), static_cast<u64>(arg) };
CallPrivilegedSecureMonitorFunction(args);
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Setup for call. */
SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size };
MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0]));
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
/* Make call. */
{
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(g_generate_random_lock);
CallPrivilegedSecureMonitorFunction(args);
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
}
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.x[0]) == SmcResult::Success));
MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
/* Copy output. */
std::memcpy(dst, std::addressof(args.x[1]), size);
std::memcpy(dst, std::addressof(args.r[1]), size);
}
void NORETURN Panic(u32 color) {
SecureMonitorArguments args = { FunctionId_Panic, color };
CallPrivilegedSecureMonitorFunction(args);
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_Panic, color } };
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
AMS_INFINITE_LOOP();
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
CallUserSecureMonitorFunction(args);
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User, true>(args->r);
}
}

View File

@ -15,10 +15,16 @@
*/
#pragma once
#include <mesosphere.hpp>
#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
namespace ams::kern::board::nintendo::nx::smc {
/* Types. */
enum SmcId {
SmcId_User = 0,
SmcId_Supervisor = 1,
};
enum MemorySize {
MemorySize_4GB = 0,
MemorySize_6GB = 1,
@ -105,15 +111,12 @@ namespace ams::kern::board::nintendo::nx::smc {
bool SetConfig(ConfigItem config_item, u64 value);
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
void NORETURN Panic(u32 color);
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
namespace init {
void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
void GenerateRandomBytes(void *dst, size_t size);
bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_secure_monitor.hpp"
namespace ams::kern::board::qemu::virt {
/* User access. */
void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
/* Invoke the secure monitor. */
return smc::CallSecureMonitorFromUser(args);
}
}

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_secure_monitor.hpp"
namespace ams::kern::board::qemu::virt::smc {
namespace {
enum UserFunctionId : u32 {
UserFunctionId_SetConfig = 0xC3000401,
UserFunctionId_GetConfig = 0xC3000002,
UserFunctionId_GetResult = 0xC3000003,
UserFunctionId_GetResultData = 0xC3000404,
UserFunctionId_ModularExponentiate = 0xC3000E05,
UserFunctionId_GenerateRandomBytes = 0xC3000006,
UserFunctionId_GenerateAesKek = 0xC3000007,
UserFunctionId_LoadAesKey = 0xC3000008,
UserFunctionId_ComputeAes = 0xC3000009,
UserFunctionId_GenerateSpecificAesKey = 0xC300000A,
UserFunctionId_ComputeCmac = 0xC300040B,
UserFunctionId_ReencryptDeviceUniqueData = 0xC300D60C,
UserFunctionId_DecryptDeviceUniqueData = 0xC300100D,
UserFunctionId_ModularExponentiateByStorageKey = 0xC300060F,
UserFunctionId_PrepareEsDeviceUniqueKey = 0xC3000610,
UserFunctionId_LoadPreparedAesKey = 0xC3000011,
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
};
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
MESOSPHERE_LOG("Received SMC [%p %p %p %p %p %p %p %p] from %s\n", reinterpret_cast<void *>(args->r[0]), reinterpret_cast<void *>(args->r[1]), reinterpret_cast<void *>(args->r[2]), reinterpret_cast<void *>(args->r[3]), reinterpret_cast<void *>(args->r[4]), reinterpret_cast<void *>(args->r[5]), reinterpret_cast<void *>(args->r[6]), reinterpret_cast<void *>(args->r[7]), GetCurrentProcess().GetName());
switch (args->r[0]) {
case UserFunctionId_GetConfig:
{
switch (static_cast<ConfigItem>(args->r[1])) {
case ConfigItem::ExosphereApiVersion:
args->r[1] = (static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MAJOR & 0xFF) << 56) |
(static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MINOR & 0xFF) << 48) |
(static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MICRO & 0xFF) << 40) |
(static_cast<u64>(13) << 32) |
(static_cast<u64>(GetTargetFirmware()) << 0);
break;
default:
MESOSPHERE_PANIC("Unhandled GetConfig\n");
}
args->r[0] = static_cast<u64>(SmcResult::Success);
}
break;
default:
MESOSPHERE_PANIC("Unhandled SMC [%p %p %p %p %p %p %p %p]", reinterpret_cast<void *>(args->r[0]), reinterpret_cast<void *>(args->r[1]), reinterpret_cast<void *>(args->r[2]), reinterpret_cast<void *>(args->r[3]), reinterpret_cast<void *>(args->r[4]), reinterpret_cast<void *>(args->r[5]), reinterpret_cast<void *>(args->r[6]), reinterpret_cast<void *>(args->r[7]));
}
}
}
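
The packed word above stores the release version in the top three bytes, an extra byte at bits 39..32 (the literal 13, not explained in this diff), and the target firmware in the low 32 bits. A hedged decode sketch with descriptive field names (not taken from the source):

/* Field names are descriptive only; the layout mirrors the shifts above. */
struct ExosphereApiVersionFields {
    u8  release_major;    /* bits 63..56 */
    u8  release_minor;    /* bits 55..48 */
    u8  release_micro;    /* bits 47..40 */
    u8  field_32;         /* bits 39..32 (the literal 13 above) */
    u32 target_firmware;  /* bits 31..0  */
};

constexpr ExosphereApiVersionFields DecodeExosphereApiVersion(u64 packed) {
    return {
        static_cast<u8>(packed >> 56),
        static_cast<u8>(packed >> 48),
        static_cast<u8>(packed >> 40),
        static_cast<u8>(packed >> 32),
        static_cast<u32>(packed),
    };
}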

View File

@ -0,0 +1,68 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere.hpp>
namespace ams::kern::board::qemu::virt::smc {
enum class ConfigItem : u32 {
/* Standard config items. */
DisableProgramVerification = 1,
DramId = 2,
SecurityEngineIrqNumber = 3,
Version = 4,
HardwareType = 5,
IsRetail = 6,
IsRecoveryBoot = 7,
DeviceId = 8,
BootReason = 9,
MemoryMode = 10,
IsDebugMode = 11,
KernelConfiguration = 12,
IsChargerHiZModeEnabled = 13,
IsQuest = 14,
RegulatorType = 15,
DeviceUniqueKeyGeneration = 16,
Package2Hash = 17,
/* Extension config items for exosphere. */
ExosphereApiVersion = 65000,
ExosphereNeedsReboot = 65001,
ExosphereNeedsShutdown = 65002,
ExosphereGitCommitHash = 65003,
ExosphereHasRcmBugPatch = 65004,
ExosphereBlankProdInfo = 65005,
ExosphereAllowCalWrites = 65006,
ExosphereEmummcType = 65007,
ExospherePayloadAddress = 65008,
ExosphereLogConfiguration = 65009,
ExosphereForceEnableUsb30 = 65010,
ExosphereSupportedHosVersion = 65011,
};
enum class SmcResult {
Success = 0,
NotImplemented = 1,
InvalidArgument = 2,
InProgress = 3,
NoAsyncOperation = 4,
InvalidAsyncOperation = 5,
NotPermitted = 6,
};
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
}

View File

@ -32,6 +32,9 @@ namespace ams::kern {
return;
}
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
KDebugLogImpl::PutStringBySemihosting(str);
#else
while (*str) {
/* Get a character. */
const char c = *(str++);
@ -44,6 +47,7 @@ namespace ams::kern {
}
KDebugLogImpl::Flush();
#endif
}
#if defined(MESOSPHERE_ENABLE_DEBUG_PRINT)
@ -54,6 +58,11 @@ namespace ams::kern {
return ResultSuccess();
}
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
/* TODO: should we do this properly? */
KDebugLogImpl::PutStringBySemihosting(user_str.GetUnsafePointer());
MESOSPHERE_UNUSED(len);
#else
for (size_t i = 0; i < len; ++i) {
/* Get a character. */
char c;
@ -67,6 +76,7 @@ namespace ams::kern {
}
KDebugLogImpl::Flush();
#endif
return ResultSuccess();
}

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/* ams::kern::KDebugLogImpl::PutStringBySemihosting(const char *str) */
.section .text._ZN3ams4kern13KDebugLogImpl22PutStringBySemihostingEPKc, "ax", %progbits
.global _ZN3ams4kern13KDebugLogImpl22PutStringBySemihostingEPKc
.type _ZN3ams4kern13KDebugLogImpl22PutStringBySemihostingEPKc, %function
.balign 0x10
_ZN3ams4kern13KDebugLogImpl22PutStringBySemihostingEPKc:
mov x1, x0
mov x0, #0x4
hlt #0xF000
ret
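
The stub above is the standard AArch64 semihosting sequence: operation number 0x04 (SYS_WRITE0) in x0, a pointer to a NUL-terminated string in x1, and HLT #0xF000 as the trap. A hedged C++ equivalent, usable only on an AArch64 target with semihosting enabled (for example QEMU started with -semihosting), might look like:

    #include <cstdint>

    /* Sketch of the same call from C++; behaves like the assembly stub above.    */
    /* Without a semihosting-aware host (QEMU/debugger), HLT raises an exception. */
    static inline void SemihostingWrite0(const char *str) {
        register uint64_t op  __asm__("x0") = 0x04;                            /* SYS_WRITE0 */
        register uint64_t arg __asm__("x1") = reinterpret_cast<uint64_t>(str); /* string ptr */
        __asm__ __volatile__("hlt #0xF000" : "+r"(op) : "r"(arg) : "memory");
    }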

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_debug_log_impl.hpp"
namespace ams::kern {
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
bool KDebugLogImpl::Initialize() {
return true;
}
void KDebugLogImpl::PutChar(char c) {
/* TODO */
AMS_UNUSED(c);
}
void KDebugLogImpl::Flush() {
/* ... */
}
void KDebugLogImpl::Save() {
/* ... */
}
void KDebugLogImpl::Restore() {
/* ... */
}
#else
#error "Unknown Debug device!"
#endif
}

View File

@ -21,6 +21,7 @@ namespace ams::kern {
class KDebugLogImpl {
public:
static NOINLINE bool Initialize();
static NOINLINE void PutStringBySemihosting(const char *s);
static NOINLINE void PutChar(char c);
static NOINLINE void Flush();

View File

@ -117,11 +117,15 @@ namespace ams::kern {
}
Result KCapabilities::MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table) {
/* Get/validate address/size */
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
const u64 phys_addr = static_cast<u64>(cap.Get<MapRange::Address>() | (size_cap.Get<MapRangeSize::AddressHigh>() << MapRange::Address::Count)) * PageSize;
#else
const u64 phys_addr = static_cast<u64>(cap.Get<MapRange::Address>()) * PageSize;
/* Validate reserved bits are unused. */
R_UNLESS(size_cap.Get<MapRangeSize::Reserved>() == 0, svc::ResultOutOfRange());
/* Get/validate address/size */
const u64 phys_addr = cap.Get<MapRange::Address>() * PageSize;
#endif
const size_t num_pages = size_cap.Get<MapRangeSize::Pages>();
const size_t size = num_pages * PageSize;
R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress());

View File

@ -250,7 +250,7 @@ namespace ams::kern {
{
const u32 has_waiter_flag = 1;
WriteToUser(key, std::addressof(has_waiter_flag));
cpu::DataMemoryBarrier();
cpu::DataMemoryBarrierInnerShareable();
}
/* Write the value to userspace. */

View File

@ -180,9 +180,11 @@ namespace ams::kern {
}
/* Flush caches. */
/* NOTE: official kernel does an entire cache flush by set/way here, which is incorrect as other cores are online. */
/* We will simply flush by virtual address, since that's what ARM says is correct to do. */
MESOSPHERE_R_ABORT_UNLESS(cpu::FlushDataCache(GetVoidPointer(address), params.code_num_pages * PageSize));
/* NOTE: This seems incorrect according to arm spec, which says not to flush via set/way after boot. */
/* However, Nintendo flushes the entire cache here and not doing so has caused reports of abort with ESR_EL1 */
/* as 0x02000000 (unknown abort) to occur. */
MESOSPHERE_UNUSED(params);
cpu::FlushEntireDataCache();
cpu::InvalidateEntireInstructionCache();
return ResultSuccess();

View File

@ -19,7 +19,6 @@ namespace ams::kern {
namespace {
constexpr uintptr_t DramPhysicalAddress = 0x80000000;
constexpr size_t ReservedEarlyDramSize = 0x60000;
constexpr size_t CarveoutAlignment = 0x20000;
@ -100,7 +99,7 @@ namespace ams::kern {
void SetupDramPhysicalMemoryRegions() {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress);
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
/* Insert blocks into the tree. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
@ -173,16 +172,21 @@ namespace ams::kern {
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
/* Insert the pool management region. */
/* Determine final total overhead size. */
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
const uintptr_t pool_management_start = unsafe_system_pool_start - total_overhead_size;
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
/* Insert the system pool. */
const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = total_overhead_size;
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
/* Insert the system pool. */
const uintptr_t system_pool_size = pool_management_start - pool_partitions_start;
InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
} else {
/* On < 5.0.0, setup a legacy 2-pool layout for backwards compatibility. */
@ -249,14 +253,18 @@ namespace ams::kern {
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
}
/* Insert the secure pool. */
InsertPoolPartitionRegionIntoBothTrees(pool_partitions_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
/* Validate the true overhead size. */
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= approximate_total_overhead_size);
const uintptr_t pool_management_start = pool_partitions_start + secure_pool_size;
const size_t pool_management_size = unsafe_memory_start - pool_management_start;
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the UserPool regions are contiguous. */
/* Insert the secure pool. */
const uintptr_t secure_pool_start = unsafe_memory_start - secure_pool_size;
InsertPoolPartitionRegionIntoBothTrees(secure_pool_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = secure_pool_start - pool_management_start;
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= pool_management_size);
u32 pool_management_attr = 0;

View File

@ -0,0 +1,144 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr size_t ReservedEarlyDramSize = 0x00080000;
template<typename... T> requires (std::same_as<T, KMemoryRegionAttr> && ...)
constexpr ALWAYS_INLINE KMemoryRegionType GetMemoryRegionType(KMemoryRegionType base, T... attr) {
return util::FromUnderlying<KMemoryRegionType>(util::ToUnderlying(base) | (util::ToUnderlying<T>(attr) | ...));
}
void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
const u32 attr = cur_attr++;
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
const KMemoryRegion *phys = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(phys_type, attr);
MESOSPHERE_INIT_ABORT_UNLESS(phys != nullptr);
MESOSPHERE_INIT_ABORT_UNLESS(phys->GetEndAddress() != 0);
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size, virt_type, attr));
}
}
namespace init {
void SetupDevicePhysicalMemoryRegions() {
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x08000000, 0x10000, GetMemoryRegionType(KMemoryRegionType_InterruptDistributor, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x08010000, 0x10000, GetMemoryRegionType(KMemoryRegionType_InterruptCpuInterface, KMemoryRegionAttr_ShouldKernelMap)));
}
void SetupDramPhysicalMemoryRegions() {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
/* Insert blocks into the tree. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
/* Insert the KTrace block at the end of Dram, if KTrace is enabled. */
static_assert(!IsKTraceEnabled || KTraceBufferSize > 0);
if constexpr (IsKTraceEnabled) {
const KPhysicalAddress ktrace_buffer_phys_addr = physical_memory_base_address + intended_memory_size - KTraceBufferSize;
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(ktrace_buffer_phys_addr), KTraceBufferSize, KMemoryRegionType_KernelTraceBuffer));
}
}
void SetupPoolPartitionMemoryRegions() {
/* Start by identifying the extents of the DRAM memory region. */
const auto dram_extents = KMemoryLayout::GetMainMemoryPhysicalExtents();
MESOSPHERE_INIT_ABORT_UNLESS(dram_extents.GetEndAddress() != 0);
/* Determine the end of the pool region. */
const uintptr_t pool_end = dram_extents.GetEndAddress() - KTraceBufferSize;
/* Find the start of the kernel DRAM region. */
const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
/* Find the start of the pool partitions region. */
const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
const uintptr_t pool_partitions_start = pool_partitions_region->GetAddress();
/* Setup the pool partition layouts. */
/* Get Application and Applet pool sizes. */
const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
/* Decide on starting addresses for our pools. */
const uintptr_t application_pool_start = pool_end - application_pool_size;
const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
const uintptr_t unsafe_system_pool_start = util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, PageSize);
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
/* We want to arrange application pool depending on where the middle of dram is. */
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
u32 cur_pool_attr = 0;
size_t total_overhead_size = 0;
/* Insert the application pool. */
if (application_pool_size > 0) {
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
} else {
const size_t first_application_pool_size = dram_midpoint - application_pool_start;
const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
}
}
/* Insert the applet pool. */
if (applet_pool_size > 0) {
InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
}
/* Insert the nonsecure system pool. */
if (unsafe_system_pool_size > 0) {
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
}
/* Determine final total overhead size. */
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
/* Insert the system pool. */
const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = total_overhead_size;
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
}
}
}
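
A minimal standalone sketch of the pool arithmetic above, showing why the resulting layout runs [Management, System, SystemNonSecure, Applet, Application] from low to high addresses. The overhead formula and every size below are made up purely for illustration; the real values come from KSystemControl and KMemoryManager::CalculateManagementOverheadSize.

    #include <cstdint>
    #include <cstdio>

    namespace {

        constexpr uint64_t PageSize = 0x1000;

        /* Hypothetical stand-in for KMemoryManager::CalculateManagementOverheadSize: */
        /* pretend each page needs 8 bytes of bookkeeping, rounded up to a page.      */
        constexpr uint64_t Overhead(uint64_t region_size) {
            const uint64_t pages = region_size / PageSize;
            return ((pages * 8) + PageSize - 1) / PageSize * PageSize;
        }

        constexpr uint64_t AlignDown(uint64_t v, uint64_t a) { return v - (v % a); }

    }

    int main() {
        /* Toy inputs, not real board values. */
        const uint64_t pool_partitions_start = 0x81000000;
        const uint64_t pool_end              = 0x100000000;
        const uint64_t application_pool_size = 0x60000000;
        const uint64_t applet_pool_size      = 0x10000000;
        const uint64_t unsafe_min_size       = 0x02000000;

        /* Same top-down placement as SetupPoolPartitionMemoryRegions above. */
        const uint64_t application_pool_start   = pool_end - application_pool_size;
        const uint64_t applet_pool_start        = application_pool_start - applet_pool_size;
        const uint64_t unsafe_system_pool_start = AlignDown(applet_pool_start - unsafe_min_size, PageSize);

        uint64_t total_overhead = Overhead(application_pool_size)
                                + Overhead(applet_pool_size)
                                + Overhead(applet_pool_start - unsafe_system_pool_start);
        total_overhead += Overhead((unsafe_system_pool_start - pool_partitions_start) - total_overhead);

        /* Management sits first, then System; the four user-visible pools end up contiguous. */
        const uint64_t system_pool_start = pool_partitions_start + total_overhead;
        std::printf("management: 0x%llx (+0x%llx)\n", (unsigned long long)pool_partitions_start, (unsigned long long)total_overhead);
        std::printf("system:     0x%llx .. 0x%llx\n", (unsigned long long)system_pool_start,     (unsigned long long)unsafe_system_pool_start);
        return 0;
    }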

View File

@ -238,7 +238,7 @@ namespace ams::kern {
}
/* Generate random entropy. */
KSystemControl::GenerateRandomBytes(m_entropy, sizeof(m_entropy));
KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy));
/* Clear remaining fields. */
m_num_running_threads = 0;

View File

@ -0,0 +1,295 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
#endif
namespace ams::kern {
/* Initialization. */
size_t KSystemControlBase::Init::GetRealMemorySize() {
return ams::kern::MainMemorySize;
}
size_t KSystemControlBase::Init::GetIntendedMemorySize() {
return ams::kern::MainMemorySize;
}
KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 < real_dram_size) {
return base_address;
} else {
return base_address + ((real_dram_size - intended_dram_size) / 2);
}
}
void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
*out = {
.address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - KTraceBufferSize - InitialProcessBinarySizeMax,
._08 = 0,
};
}
bool KSystemControlBase::Init::ShouldIncreaseThreadResourceLimit() {
return true;
}
size_t KSystemControlBase::Init::GetApplicationPoolSize() {
return 0;
}
size_t KSystemControlBase::Init::GetAppletPoolSize() {
return 0;
}
size_t KSystemControlBase::Init::GetMinimumNonSecureSystemPoolSize() {
return 0;
}
u8 KSystemControlBase::Init::GetDebugLogUartPort() {
return 0;
}
void KSystemControlBase::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
#if defined(ATMOSPHERE_ARCH_ARM64)
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
#else
AMS_INFINITE_LOOP();
#endif
}
/* Randomness for Initialization. */
void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) {
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
const u64 seed = KHardwareTimer::GetTick();
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
for (size_t i = 0; i < count; ++i) {
dst[i] = s_random_generator.GenerateRandomU64();
}
}
u64 KSystemControlBase::Init::GenerateRandomRange(u64 min, u64 max) {
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
const u64 seed = KHardwareTimer::GetTick();
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
}
/* System Initialization. */
void KSystemControlBase::InitializePhase1(bool skip_target_system) {
/* Initialize the rng, if we somehow haven't already. */
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
const u64 seed = KHardwareTimer::GetTick();
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
/* Configure KTargetSystem, if we haven't already by an implementation SystemControl. */
if (!skip_target_system) {
/* Set IsDebugMode. */
{
KTargetSystem::SetIsDebugMode(true);
/* If debug mode, we want to initialize uart logging. */
KTargetSystem::EnableDebugLogging(true);
KDebugLog::Initialize();
}
/* Set Kernel Configuration. */
{
KTargetSystem::EnableDebugMemoryFill(false);
KTargetSystem::EnableUserExceptionHandlers(true);
KTargetSystem::EnableDynamicResourceLimits(true);
KTargetSystem::EnableUserPmuAccess(false);
}
/* Set Kernel Debugging. */
{
/* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
/* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
KTargetSystem::EnableKernelDebugging(true);
}
}
/* System ResourceLimit initialization. */
{
/* Construct the resource limit object. */
KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
KAutoObject::Create<KResourceLimit>(std::addressof(sys_res_limit));
sys_res_limit.Initialize();
/* Set the initial limits. */
const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
const auto &slab_counts = init::GetSlabResourceCounts();
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
/* Reserve system memory. */
MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
}
}
void KSystemControlBase::InitializePhase2() {
/* Initialize KTrace. */
if constexpr (IsKTraceEnabled) {
const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
}
}
u32 KSystemControlBase::GetCreateProcessMemoryPool() {
return KMemoryManager::Pool_System;
}
/* Privileged Access. */
void KSystemControlBase::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
/* TODO */
MESOSPHERE_UNUSED(out, address, mask, value);
MESOSPHERE_UNIMPLEMENTED();
}
Result KSystemControlBase::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
MESOSPHERE_UNUSED(out, address, mask, value);
return svc::ResultNotImplemented();
}
/* Randomness. */
void KSystemControlBase::GenerateRandom(u64 *dst, size_t count) {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
for (size_t i = 0; i < count; ++i) {
dst[i] = s_random_generator.GenerateRandomU64();
}
}
u64 KSystemControlBase::GenerateRandomRange(u64 min, u64 max) {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
return KSystemControlBase::GenerateUniformRange(min, max, [] ALWAYS_INLINE_LAMBDA () -> u64 { return s_random_generator.GenerateRandomU64(); });
}
u64 KSystemControlBase::GenerateRandomU64() {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
return s_random_generator.GenerateRandomU64();
}
void KSystemControlBase::SleepSystem() {
MESOSPHERE_LOG("SleepSystem() was called\n");
}
void KSystemControlBase::StopSystem(void *) {
MESOSPHERE_LOG("KSystemControlBase::StopSystem\n");
AMS_INFINITE_LOOP();
}
/* User access. */
#if defined(ATMOSPHERE_ARCH_ARM64)
void KSystemControlBase::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
/* Get the function id for the current call. */
u64 function_id = args->r[0];
/* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
auto &page_table = GetCurrentProcess().GetPageTable();
auto *bim = page_table.GetBlockInfoManager();
constexpr size_t MaxMappedRegisters = 7;
std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
for (size_t i = 0; i < MaxMappedRegisters; i++) {
const size_t reg_id = i + 1;
if (function_id & (1ul << (8 + reg_id))) {
/* Create and open a new page group for the address. */
KVirtualAddress virt_addr = args->r[reg_id];
if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
/* Translate the virtual address to a physical address. */
const auto it = page_groups[i].begin();
MESOSPHERE_ASSERT(it != page_groups[i].end());
MESOSPHERE_ASSERT(it->GetNumPages() == 1);
args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
} else {
/* If we couldn't map, we should clear the address. */
args->r[reg_id] = 0;
}
}
}
/* Invoke the secure monitor. */
KSystemControl::CallSecureMonitorFromUserImpl(args);
/* Make sure that we close any pages that we opened. */
for (size_t i = 0; i < MaxMappedRegisters; i++) {
page_groups[i].Close();
}
}
void KSystemControlBase::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* By default, we don't actually support secure monitor, so just set args to a failure code. */
args->r[0] = 1;
}
#endif
/* Secure Memory. */
size_t KSystemControlBase::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
MESOSPHERE_UNUSED(pool);
return size;
}
Result KSystemControlBase::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
/* Ensure the size is aligned. */
constexpr size_t Alignment = PageSize;
R_UNLESS(util::IsAligned(size, Alignment), svc::ResultInvalidSize());
/* Allocate the memory. */
const size_t num_pages = size / PageSize;
const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, Alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
*out = KPageTable::GetHeapVirtualAddress(paddr);
return ResultSuccess();
}
void KSystemControlBase::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
/* Ensure the size is aligned. */
constexpr size_t Alignment = PageSize;
MESOSPHERE_UNUSED(pool);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), Alignment));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, Alignment));
/* Close the secure region's pages. */
Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
}
}
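
In CallSecureMonitorFromUser above, bit (8 + n) of the function id marks argument register r[n] as a user pointer to be translated to a physical address before the call. A hypothetical caller-side helper (not part of the sources) makes the bit arithmetic explicit:

    #include <cstdint>

    /* Hypothetical helper: set the bit that tells CallSecureMonitorFromUser above */
    /* to translate argument register r[reg]; r[1] corresponds to bit 9, and so on. */
    constexpr uint64_t MarkRegisterAsPointer(uint64_t function_id, int reg) {
        return function_id | (uint64_t(1) << (8 + reg));
    }

    /* 0xC3000000 is a placeholder function id, used only to show the bit arithmetic. */
    static_assert(MarkRegisterAsPointer(0xC3000000, 1) == 0xC3000200);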

View File

@ -139,14 +139,20 @@ namespace ams::kern {
PrintMemoryRegion(" InitPageTable", KMemoryLayout::GetKernelInitPageTableRegionPhysicalExtents());
PrintMemoryRegion(" MemoryPoolRegion", KMemoryLayout::GetKernelPoolPartitionRegionPhysicalExtents());
if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
PrintMemoryRegion(" System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
PrintMemoryRegion(" SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
PrintMemoryRegion(" Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
PrintMemoryRegion(" Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
PrintMemoryRegion(" System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
if (KMemoryLayout::HasKernelSystemNonSecurePoolRegion()) {
PrintMemoryRegion(" SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
}
if (KMemoryLayout::HasKernelAppletPoolRegion()) {
PrintMemoryRegion(" Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
}
if (KMemoryLayout::HasKernelApplicationPoolRegion()) {
PrintMemoryRegion(" Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
}
} else {
PrintMemoryRegion(" Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
PrintMemoryRegion(" Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Unsafe", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
}
if constexpr (IsKTraceEnabled) {

View File

@ -36,7 +36,7 @@ namespace ams::kern::svc {
size_t remaining = size;
while (remaining > 0) {
/* Get a contiguous range to operate on. */
KPageTableBase::MemoryRange contig_range = {};
KPageTableBase::MemoryRange contig_range = { .address = Null<KPhysicalAddress>, .size = 0 };
R_TRY(page_table.OpenMemoryRangeForProcessCacheOperation(std::addressof(contig_range), cur_address, aligned_end - cur_address));
/* Close the range when we're done operating on it. */

View File

@ -123,6 +123,7 @@ $(OFILES) : $(GCH_FILES)
$(OFILES_SRC) : $(HFILES_BIN)
ams_environment_weak.o: CXXFLAGS += -fno-lto
hos_version_api_weak_for_unit_test.o: CXXFLAGS += -fno-lto
pm_info_api_weak.o: CXXFLAGS += -fno-lto
hos_stratosphere_api.o: CXXFLAGS += -fno-lto

View File

@ -26,6 +26,11 @@
return ::svcSetHeapSize(reinterpret_cast<void **>(out_address), size);
}
ALWAYS_INLINE Result SetHeapSize(uintptr_t *out_address, ::ams::svc::Size size) {
static_assert(sizeof(::ams::svc::Address) == sizeof(uintptr_t));
return ::svcSetHeapSize(reinterpret_cast<void **>(out_address), size);
}
ALWAYS_INLINE Result SetMemoryPermission(::ams::svc::Address address, ::ams::svc::Size size, ::ams::svc::MemoryPermission perm) {
return ::svcSetMemoryPermission(reinterpret_cast<void *>(static_cast<uintptr_t>(address)), size, static_cast<u32>(perm));
}

View File

@ -48,6 +48,7 @@ namespace ams::hos {
}
bool IsUnitTestProgramForSetVersion();
void InitializeVersionInternal(bool allow_approximate);
void InitializeForStratosphere() {
@ -58,7 +59,7 @@ namespace ams::hos {
hos::InitializeVersionInternal(CanAllowTemporaryApproximateVersion());
/* Check that we're running under mesosphere. */
AMS_ABORT_UNLESS(svc::IsKernelMesosphere());
AMS_ABORT_UNLESS(IsUnitTestProgramForSetVersion() || svc::IsKernelMesosphere());
}
}

View File

@ -61,51 +61,61 @@ namespace ams::hos {
}
bool IsUnitTestProgramForSetVersion();
void InitializeVersionInternal(bool allow_approximate) {
/* Get the current (and previous approximation of) target firmware. */
hos::Version prev, current;
bool has_prev = false;
{
/* Acquire exclusive access to set hos version. */
std::scoped_lock lk(g_hos_init_lock);
hos::Version current = hos::Version_Current;
/* Save the previous value of g_hos_version. */
prev = g_hos_version;
has_prev = g_set_hos_version;
/* Set hos version = exosphere api version target firmware. */
g_hos_version = static_cast<hos::Version>(GetExosphereApiInfo(allow_approximate).GetTargetFirmware());
/* Save the current value of g_hos_version. */
current = g_hos_version;
/* Note that we've set a previous hos version. */
/* If we're unit testing, just set the version and move on. */
if (IsUnitTestProgramForSetVersion()) {
g_hos_version = hos::Version_Current;
g_set_hos_version = true;
}
} else {
/* Get the current (and previous approximation of) target firmware. */
hos::Version prev;
bool has_prev = false;
{
/* Acquire exclusive access to set hos version. */
std::scoped_lock lk(g_hos_init_lock);
/* Ensure that this is a hos version we can sanely *try* to run. */
/* To be friendly, we will only require that we recognize the major and minor versions. */
/* We can consider only recognizing major in the future, but micro seems safe to ignore as */
/* there are no breaking IPC changes in minor updates. */
{
constexpr u32 MaxMajor = (static_cast<u32>(hos::Version_Max) >> 24) & 0xFF;
constexpr u32 MaxMinor = (static_cast<u32>(hos::Version_Max) >> 16) & 0xFF;
/* Save the previous value of g_hos_version. */
prev = g_hos_version;
has_prev = g_set_hos_version;
const u32 major = (static_cast<u32>(current) >> 24) & 0xFF;
const u32 minor = (static_cast<u32>(current) >> 16) & 0xFF;
/* Set hos version = exosphere api version target firmware. */
g_hos_version = static_cast<hos::Version>(GetExosphereApiInfo(allow_approximate).GetTargetFirmware());
const bool is_safely_tryable_version = (current <= hos::Version_Max) || (major == MaxMajor && minor <= MaxMinor);
AMS_ABORT_UNLESS(is_safely_tryable_version);
}
/* Save the current value of g_hos_version. */
current = g_hos_version;
/* Ensure that this is a hos version compatible with previous approximations. */
if (has_prev) {
AMS_ABORT_UNLESS(current >= prev);
/* Note that we've set a previous hos version. */
g_set_hos_version = true;
}
const u32 current_major = (static_cast<u32>(current) >> 24) & 0xFF;
const u32 prev_major = (static_cast<u32>(prev) >> 24) & 0xFF;
/* Ensure that this is a hos version we can sanely *try* to run. */
/* To be friendly, we will only require that we recognize the major and minor versions. */
/* We can consider only recognizing major in the future, but micro seems safe to ignore as */
/* there are no breaking IPC changes in minor updates. */
{
constexpr u32 MaxMajor = (static_cast<u32>(hos::Version_Max) >> 24) & 0xFF;
constexpr u32 MaxMinor = (static_cast<u32>(hos::Version_Max) >> 16) & 0xFF;
AMS_ABORT_UNLESS(current_major == prev_major);
const u32 major = (static_cast<u32>(current) >> 24) & 0xFF;
const u32 minor = (static_cast<u32>(current) >> 16) & 0xFF;
const bool is_safely_tryable_version = (current <= hos::Version_Max) || (major == MaxMajor && minor <= MaxMinor);
AMS_ABORT_UNLESS(is_safely_tryable_version);
}
/* Ensure that this is a hos version compatible with previous approximations. */
if (has_prev) {
AMS_ABORT_UNLESS(current >= prev);
const u32 current_major = (static_cast<u32>(current) >> 24) & 0xFF;
const u32 prev_major = (static_cast<u32>(prev) >> 24) & 0xFF;
AMS_ABORT_UNLESS(current_major == prev_major);
}
}
/* Set the version for libnx. */

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
namespace ams::hos {
WEAK_SYMBOL bool IsUnitTestProgramForSetVersion() {
return false;
}
}

View File

@ -38,6 +38,8 @@ namespace ams {
namespace init {
void InitializeSystemModuleBeforeConstructors();
void InitializeSystemModule();
void FinalizeSystemModule();
@ -66,6 +68,9 @@ extern "C" void __libnx_initheap(void) {
extern "C" void __appInit(void) {
/* The very first thing all stratosphere code must do is initialize the os library. */
::ams::hos::InitializeForStratosphere();
/* Perform pre-C++ constructor init. */
::ams::init::InitializeSystemModuleBeforeConstructors();
}
extern "C" void __appExit(void) {

View File

@ -17,6 +17,10 @@
namespace ams::init {
WEAK_SYMBOL void InitializeSystemModuleBeforeConstructors() {
/* This should only be used in exceptional circumstances. */
}
WEAK_SYMBOL void InitializeSystemModule() {
/* TODO: What should we do here, if anything? */
/* Nintendo does nndiagStartup(); nn::diag::InitializeSystemProcessAbortObserver(); */

View File

@ -80,3 +80,26 @@ namespace ams::impl {
#define AMS_UNUSED(...) ::ams::impl::UnusedImpl(__VA_ARGS__)
#define AMS_INFINITE_LOOP() do { __asm__ __volatile__("" ::: "memory"); } while (1)
#define AMS__NARG__(...) AMS__NARG_I_(__VA_ARGS__,AMS__RSEQ_N())
#define AMS__NARG_I_(...) AMS__ARG_N(__VA_ARGS__)
#define AMS__ARG_N( \
_1, _2, _3, _4, _5, _6, _7, _8, _9,_10, \
_11,_12,_13,_14,_15,_16,_17,_18,_19,_20, \
_21,_22,_23,_24,_25,_26,_27,_28,_29,_30, \
_31,_32,_33,_34,_35,_36,_37,_38,_39,_40, \
_41,_42,_43,_44,_45,_46,_47,_48,_49,_50, \
_51,_52,_53,_54,_55,_56,_57,_58,_59,_60, \
_61,_62,_63,N,...) N
#define AMS__RSEQ_N() \
63,62,61,60, \
59,58,57,56,55,54,53,52,51,50, \
49,48,47,46,45,44,43,42,41,40, \
39,38,37,36,35,34,33,32,31,30, \
29,28,27,26,25,24,23,22,21,20, \
19,18,17,16,15,14,13,12,11,10, \
9,8,7,6,5,4,3,2,1,0
#define AMS__VMACRO_(name, n) name##_##n
#define AMS__VMACRO(name, n) AMS__VMACRO_(name, n)
#define AMS_VMACRO(func, ...) AMS__VMACRO(func, AMS__NARG__(__VA_ARGS__)) (__VA_ARGS__)
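
AMS__NARG__ above is the usual preprocessor argument-counting trick, and AMS_VMACRO uses it to dispatch to name_N based on how many arguments were passed. A self-contained miniature of the same mechanism (limited to three arguments; all names here are illustrative):

    #define MINI_NARG(...)                  MINI_NARG_I(__VA_ARGS__, 3, 2, 1)
    #define MINI_NARG_I(_1, _2, _3, N, ...) N
    #define MINI_VMACRO_PASTE(name, n)      name##_##n
    #define MINI_VMACRO_EVAL(name, n)       MINI_VMACRO_PASTE(name, n)
    #define MINI_VMACRO(name, ...)          MINI_VMACRO_EVAL(name, MINI_NARG(__VA_ARGS__))(__VA_ARGS__)

    #define SUM_1(a)       (a)
    #define SUM_2(a, b)    ((a) + (b))
    #define SUM_3(a, b, c) ((a) + (b) + (c))

    /* MINI_VMACRO(SUM, ...) expands to SUM_2 or SUM_3 depending on the argument count. */
    static_assert(MINI_VMACRO(SUM, 1, 2)    == 3);
    static_assert(MINI_VMACRO(SUM, 1, 2, 3) == 6);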

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours/svc/svc_types_common.hpp>
namespace ams::svc::board::qemu::virt {
constexpr inline const s64 TicksPerSecond = 19'200'000;
}
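
With the 19.2 MHz counter frequency declared above, tick/time conversions are straightforward; a small sketch follows (the helper name is illustrative, not an existing API in these headers):

    #include <cstdint>

    constexpr int64_t TicksPerSecond = 19'200'000;

    /* Illustrative conversion using the counter frequency declared above. */
    constexpr int64_t ConvertTicksToNanoSeconds(int64_t ticks) {
        return (ticks * 1'000'000'000) / TicksPerSecond;
    }

    static_assert(ConvertTicksToNanoSeconds(19'200'000) == 1'000'000'000); /* 1 second */
    static_assert(ConvertTicksToNanoSeconds(19'200)     == 1'000'000);     /* 1 ms      */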

View File

@ -24,8 +24,15 @@
using namespace ams::svc::board::nintendo::nx;
}
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#include <vapours/svc/board/qemu/virt/svc_hardware_constants.hpp>
namespace ams::svc {
using namespace ams::svc::board::qemu::virt;
}
#else
#error "Unknown board for svc::DeviceName"
#error "Unknown board for svc Hardware Constants"
#endif

View File

@ -58,7 +58,7 @@ namespace ams::svc {
/* This is the highest SVC version supported by Atmosphere, to be updated on new kernel releases. */
/* NOTE: Official kernel versions have SVC major = SDK major + 4, SVC minor = SDK minor. */
constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(13);
constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion( 3);
constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion( 4);
constexpr inline u32 SupportedKernelVersion = EncodeKernelVersion(SupportedKernelMajorVersion, SupportedKernelMinorVersion);

View File

@ -26,4 +26,264 @@ namespace ams::util {
struct ConstantInitializeTag final {};
constexpr inline const ConstantInitializeTag ConstantInitialize{};
namespace impl {
constexpr int ToIntegerForIsConstexprConstructible(...) { return {}; }
template<typename T, auto...Lambdas> requires (std::is_constructible<T, decltype(Lambdas())...>::value)
using ToIntegralConstantForIsConstexprConstructible = std::integral_constant<int, ToIntegerForIsConstexprConstructible(T(Lambdas()...))>;
template<typename T, auto...Lambdas, int = ToIntegralConstantForIsConstexprConstructible<T, Lambdas...>::value>
std::true_type IsConstexprConstructibleImpl(int);
template<typename T, auto...Lambdas>
std::false_type IsConstexprConstructibleImpl(long);
template<typename T>
consteval inline auto ConvertToLambdaForIsConstexprConstructible() { return [] { return T{}; }; }
template<auto V>
consteval inline auto ConvertToLambdaForIsConstexprConstructible() { return [] { return V; }; }
namespace ambiguous_parse {
struct AmbiguousParseHelperForIsConstexprConstructible {
constexpr inline AmbiguousParseHelperForIsConstexprConstructible operator-() { return *this; }
template<typename T>
constexpr inline operator T() {
return T{};
}
};
constexpr inline auto operator -(auto v, AmbiguousParseHelperForIsConstexprConstructible) { return v; }
}
#define AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(TYPE_OR_VALUE) [] { ::ams::util::impl::ambiguous_parse::AmbiguousParseHelperForIsConstexprConstructible p; auto v = (TYPE_OR_VALUE)-p; return v; }
}
template<typename T, typename...ArgTypes>
using is_constexpr_constructible = decltype(impl::IsConstexprConstructibleImpl<T, impl::ConvertToLambdaForIsConstexprConstructible<ArgTypes>()...>(0));
template<typename T, auto...Args>
using is_constexpr_constructible_by_values = decltype(impl::IsConstexprConstructibleImpl<T, impl::ConvertToLambdaForIsConstexprConstructible<Args>()...>(0));
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_1(_1) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_2(_1, _2) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_3(_1, _2, _3) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_4(_1, _2, _3, _4) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_5(_1, _2, _3, _4, _5) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_6(_1, _2, _3, _4, _5, _6) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_7(_1, _2, _3, _4, _5, _6, _7) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_8(_1, _2, _3, _4, _5, _6, _7, _8) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_9(_1, _2, _3, _4, _5, _6, _7, _8, _9) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_10(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_11(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_11) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_12(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_11), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_12) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_13(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_11), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_12), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_13) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_14(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_11), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_12), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_13), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_14) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_15(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_11), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_12), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_13), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_14), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_15) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE_16(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _16) \
(decltype(::ams::util::impl::IsConstexprConstructibleImpl<_1, \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_2), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_3), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_4), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_5), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_6), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_7), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_8), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_9), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_10), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_11), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_12), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_13), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_14), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_15), \
AMS_UTIL_IMPL_CONVERT_TV_TO_LAMBDA(_16) \
>(0))::value)
#define AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(...) AMS_VMACRO(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE, __VA_ARGS__)
#if 0
namespace test {
struct S {
private:
int m_v;
public:
S() { }
constexpr S(int v) : m_v() { }
constexpr S(int v, double z) : m_v(v) { }
};
consteval inline int test_constexpr_int() { return 0; }
inline int test_not_constexpr_int() { return 0; }
static_assert(!AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, int));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, 0));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, test_constexpr_int()));
static_assert(!AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, test_not_constexpr_int()));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, int, double));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, int, 0.0));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, 0, double));
static_assert(AMS_UTIL_IS_CONSTEXPR_CONSTRUCTIBLE(S, 0, 0.0));
}
#endif
}

View File

@ -8,29 +8,47 @@ ATMOSPHERE_BUILD_CONFIGS += $(strip $1)
$(strip $1): mesosphere$(strip $2).bin
mesosphere$(strip $2).bin: kernel/kernel$(strip $2).bin kernel_ldr/kernel_ldr$(strip $2).bin
@python build_mesosphere.py kernel_ldr/kernel_ldr$(strip $2).bin kernel/kernel$(strip $2).bin mesosphere$(strip $2).bin
@python build_mesosphere.py kernel_ldr/kernel_ldr$(strip $2).bin kernel/kernel$(strip $2).bin mesosphere$(strip $2).bin $(4)
@echo "Built mesosphere$(strip $2).bin..."
kernel/kernel$(strip $2).bin: check_libmeso$(strip $1)
@$$(MAKE) -C kernel $(strip $1)
@$$(MAKE) -C kernel $(strip $1) $(3)
kernel_ldr/kernel_ldr$(strip $2).bin: check_libmeso$(strip $1)
@$$(MAKE) -C kernel_ldr $(strip $1)
@$$(MAKE) -C kernel_ldr $(strip $1) $(3)
check_libmeso$(strip $1):
@$$(MAKE) -C ../libraries/libmesosphere $(strip $1)
@$$(MAKE) -C ../libraries/libmesosphere $(strip $1) $(3)
clean-$(strip $1):
@$$(MAKE) -C ../libraries/libmesosphere clean-$(strip $1)
@$$(MAKE) -C kernel clean-$(strip $1)
@$$(MAKE) -C kernel_ldr clean-$(strip $1)
@$$(MAKE) -C ../libraries/libmesosphere clean-$(strip $1) $(3)
@$$(MAKE) -C kernel clean-$(strip $1) $(3)
@$$(MAKE) -C kernel_ldr clean-$(strip $1) $(3)
@rm -f mesosphere$(strip $2).bin
endef
$(eval $(call ATMOSPHERE_ADD_TARGET, release, ,))
$(eval $(call ATMOSPHERE_ADD_TARGET, debug, _debug,))
$(eval $(call ATMOSPHERE_ADD_TARGET, audit, _audit,))
$(eval $(call ATMOSPHERE_ADD_TARGET, release, ,,))
$(eval $(call ATMOSPHERE_ADD_TARGET, debug, _debug,,))
$(eval $(call ATMOSPHERE_ADD_TARGET, audit, _audit,,))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_release, _qemu_virt, \
ATMOSPHERE_BOARD="qemu-virt" \
ATMOSPHERE_CPU="arm-cortex-a57" \
, ../tests/TestSvc/TestSvc.kip \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_debug, _qemu_virt_debug, \
ATMOSPHERE_BOARD="qemu-virt" \
ATMOSPHERE_CPU="arm-cortex-a57" \
, ../tests/TestSvc/TestSvc.kip \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_audit, _qemu_virt_audit, \
ATMOSPHERE_BOARD="qemu-virt" \
ATMOSPHERE_CPU="arm-cortex-a57" \
, ../tests/TestSvc/TestSvc.kip \
))
clean:
@$(MAKE) -C ../libraries/libmesosphere clean

View File

@ -10,8 +10,8 @@ def align_up(val, algn):
return val - (val % algn)
def main(argc, argv):
if argc != 4:
print('Usage: %s kernel_ldr.bin kernel.bin output.bin' % argv[0])
if argc < 4:
print('Usage: %s kernel_ldr.bin kernel.bin output.bin [initial_process.kip ...]' % argv[0])
return 1
with open(argv[1], 'rb') as f:
kernel_ldr = f.read()
@ -30,16 +30,25 @@ def main(argc, argv):
kernel += b'\x00' * (kernel_end - len(kernel))
assert (kernel_end == len(kernel))
embedded_ini = b''
try:
with open('ini.bin', 'rb') as f:
embedded_ini = f.read()
except:
pass
embedded_kips = b''
num_kips = 0
for kip_file in argv[4:]:
try:
with open(kip_file, 'rb') as f:
data = f.read()
if data.startswith(b'KIP1'):
embedded_kips += data
num_kips += 1
except:
pass
if num_kips > 0:
embedded_ini_header = pk('<4sIII', b'INI1', len(embedded_kips) + 0x10, num_kips, 0)
else:
embedded_ini_header = b''
embedded_ini_offset = align_up(kernel_end, 0x1000)
embedded_ini_end = embedded_ini_offset + len(embedded_ini) # TODO: Create and embed an INI, eventually.
embedded_ini_end = embedded_ini_offset + len(embedded_ini_header) + len(embedded_kips)
kernel_ldr_offset = align_up(embedded_ini_end, 0x1000) + (0x1000 if len(embedded_ini) == 0 else 0)
kernel_ldr_offset = align_up(embedded_ini_end, 0x1000) + (0x1000 if len(embedded_ini_header) == 0 else 0)
kernel_ldr_end = kernel_ldr_offset + len(kernel_ldr)
mesosphere_end = align_up(kernel_ldr_end, 0x1000)
@ -48,7 +57,8 @@ def main(argc, argv):
f.write(pk('<QQI', embedded_ini_offset, kernel_ldr_offset, atmosphere_target_firmware(13, 0, 0)))
f.write(kernel[kernel_metadata_offset + 0x18:])
f.seek(embedded_ini_offset)
f.write(embedded_ini)
f.write(embedded_ini_header)
f.write(embedded_kips)
f.seek(embedded_ini_end)
f.seek(kernel_ldr_offset)
f.write(kernel_ldr)
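
The pk('<4sIII', ...) call above writes a 16-byte INI1 header in front of the concatenated KIP1 images. For clarity, an equivalent C++ view of that layout (field names are descriptive, not taken from a header in this repository):

    #include <cstdint>

    /* Layout written by pk('<4sIII', b'INI1', len(embedded_kips) + 0x10, num_kips, 0). */
    struct Ini1Header {
        char     magic[4];   /* "INI1"                                  */
        uint32_t size;       /* sizeof(Ini1Header) + total size of KIPs */
        uint32_t num_kips;   /* number of embedded KIP1 processes       */
        uint32_t reserved;   /* written as 0                            */
    };
    static_assert(sizeof(Ini1Header) == 0x10, "INI1 header is 16 bytes");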

View File

@ -85,6 +85,18 @@ $(eval $(call ATMOSPHERE_ADD_TARGET, audit, _audit, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_AUDITING" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_release, _qemu_virt, \
ATMOSPHERE_BUILD_SETTINGS="" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_debug, _qemu_virt_debug, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_DEBUGGING" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_audit, _qemu_virt_audit, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_AUDITING" \
))
$(ATMOSPHERE_BUILD_DIR)/%:
@[ -d $@ ] || mkdir -p $@

View File

@ -126,7 +126,8 @@ _ZN3ams4kern10KScheduler12ScheduleImplEv:
cmp x7, x18
b.ne 1f
/* If they're the same, then we can just return as there's nothing to do. */
/* If they're the same, then we can just issue a memory barrier and return. */
dmb ish
ret
0: /* The interrupt task thread is runnable. */

View File

@ -85,6 +85,18 @@ $(eval $(call ATMOSPHERE_ADD_TARGET, audit, _audit, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_AUDITING" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_release, _qemu_virt, \
ATMOSPHERE_BUILD_SETTINGS="" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_debug, _qemu_virt_debug, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_DEBUGGING" \
))
$(eval $(call ATMOSPHERE_ADD_TARGET, qemu_virt_audit, _qemu_virt_audit, \
ATMOSPHERE_BUILD_SETTINGS="-DMESOSPHERE_BUILD_FOR_AUDITING" \
))
$(ATMOSPHERE_BUILD_DIR)/%:
@[ -d $@ ] || mkdir -p $@

View File

@ -0,0 +1,23 @@
Boost Software License - Version 1.0 - August 17th, 2003
Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:
The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

116 tests/TestSvc/Makefile Normal file
View File

@ -0,0 +1,116 @@
#---------------------------------------------------------------------------------
# pull in common stratosphere sysmodule configuration
#---------------------------------------------------------------------------------
include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../../libraries/config/templates/stratosphere.mk
#---------------------------------------------------------------------------------
# no real need to edit anything past this point unless you need to add additional
# rules for different file extensions
#---------------------------------------------------------------------------------
ifneq ($(BUILD),$(notdir $(CURDIR)))
#---------------------------------------------------------------------------------
export OUTPUT := $(CURDIR)/$(TARGET)
export TOPDIR := $(CURDIR)
export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \
$(foreach dir,$(DATA),$(CURDIR)/$(dir))
export DEPSDIR := $(CURDIR)/$(BUILD)
CFILES := $(call FIND_SOURCE_FILES,$(SOURCES),c)
CPPFILES := $(call FIND_SOURCE_FILES,$(SOURCES),cpp)
SFILES := $(call FIND_SOURCE_FILES,$(SOURCES),s)
BINFILES := $(foreach dir,$(DATA),$(notdir $(wildcard $(dir)/*.*)))
#---------------------------------------------------------------------------------
# use CXX for linking C++ projects, CC for standard C
#---------------------------------------------------------------------------------
ifeq ($(strip $(CPPFILES)),)
#---------------------------------------------------------------------------------
export LD := $(CC)
#---------------------------------------------------------------------------------
else
#---------------------------------------------------------------------------------
export LD := $(CXX)
#---------------------------------------------------------------------------------
endif
#---------------------------------------------------------------------------------
export OFILES := $(addsuffix .o,$(BINFILES)) \
$(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o)
export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \
$(foreach dir,$(LIBDIRS),-I$(dir)/include) \
-I$(CURDIR)/$(BUILD)
export LIBPATHS := $(foreach dir,$(LIBDIRS),-L$(dir)/lib)
export BUILD_EXEFS_SRC := $(TOPDIR)/$(EXEFS_SRC)
ifeq ($(strip $(CONFIG_JSON)),)
jsons := $(wildcard *.json)
ifneq (,$(findstring $(TARGET).json,$(jsons)))
export APP_JSON := $(TOPDIR)/$(TARGET).json
else
ifneq (,$(findstring config.json,$(jsons)))
export APP_JSON := $(TOPDIR)/config.json
endif
endif
else
export APP_JSON := $(TOPDIR)/$(CONFIG_JSON)
endif
.PHONY: $(BUILD) clean all
#---------------------------------------------------------------------------------
all: $(BUILD)
$(BUILD):
@[ -d $@ ] || mkdir -p $@
@$(MAKE) --no-print-directory -C $(BUILD) -f $(CURDIR)/Makefile
#---------------------------------------------------------------------------------
clean:
@echo clean ...
@rm -fr $(BUILD) $(TARGET).kip $(TARGET).elf
#---------------------------------------------------------------------------------
else
.PHONY: all
DEPENDS := $(OFILES:.o=.d)
#---------------------------------------------------------------------------------
# main targets
#---------------------------------------------------------------------------------
all : $(OUTPUT).kip $(OUTPUT).nsp
$(OUTPUT).nsp : $(OUTPUT).nso $(OUTPUT).npdm
$(OUTPUT).nso : $(OUTPUT).elf
$(OUTPUT).kip : $(OUTPUT).elf
$(OUTPUT).elf : $(OFILES)
$(OUTPUT).npdm : $(OUTPUT).npdm.json
@echo built ... $< $@
@npdmtool $< $@
@echo built ... $(notdir $@)
#---------------------------------------------------------------------------------
# you need a rule like this for each extension you use as binary data
#---------------------------------------------------------------------------------
%.bin.o : %.bin
#---------------------------------------------------------------------------------
@echo $(notdir $<)
@$(bin2o)
-include $(DEPENDS)
#---------------------------------------------------------------------------------------
endif
#---------------------------------------------------------------------------------------

tests/TestSvc/TestSvc.json (new file, 128 lines)
View File

@ -0,0 +1,128 @@
{
"name": "TestSvc",
"title_id": "0x5555555555555555",
"main_thread_stack_size": "0x8000",
"main_thread_priority": 28,
"default_cpu_id": 3,
"process_category": 1,
"use_secure_memory": true,
"immortal": true,
"kernel_capabilities": [
{
"type": "handle_table_size",
"value": 0
},
{
"type": "syscalls",
"value": {
"svcUnknown00": "0x00",
"svcSetHeapSize": "0x01",
"svcSetMemoryPermission": "0x02",
"svcSetMemoryAttribute": "0x03",
"svcMapMemory": "0x04",
"svcUnmapMemory": "0x05",
"svcQueryMemory": "0x06",
"svcExitProcess": "0x07",
"svcCreateThread": "0x08",
"svcStartThread": "0x09",
"svcExitThread": "0x0A",
"svcSleepThread": "0x0B",
"svcGetThreadPriority": "0x0C",
"svcSetThreadPriority": "0x0D",
"svcGetThreadCoreMask": "0x0E",
"svcSetThreadCoreMask": "0x0F",
"svcGetCurrentProcessorNumber": "0x10",
"svcSignalEvent": "0x11",
"svcClearEvent": "0x12",
"svcMapSharedMemory": "0x13",
"svcUnmapSharedMemory": "0x14",
"svcCreateTransferMemory": "0x15",
"svcCloseHandle": "0x16",
"svcResetSignal": "0x17",
"svcWaitSynchronization": "0x18",
"svcCancelSynchronization": "0x19",
"svcArbitrateLock": "0x1A",
"svcArbitrateUnlock": "0x1B",
"svcWaitProcessWideKeyAtomic": "0x1C",
"svcSignalProcessWideKey": "0x1D",
"svcGetSystemTick": "0x1E",
"svcConnectToNamedPort": "0x1F",
"svcSendSyncRequestLight": "0x20",
"svcSendSyncRequest": "0x21",
"svcSendSyncRequestWithUserBuffer": "0x22",
"svcSendAsyncRequestWithUserBuffer": "0x23",
"svcGetProcessId": "0x24",
"svcGetThreadId": "0x25",
"svcBreak": "0x26",
"svcOutputDebugString": "0x27",
"svcReturnFromException": "0x28",
"svcGetInfo": "0x29",
"svcFlushEntireDataCache": "0x2A",
"svcFlushDataCache": "0x2B",
"svcMapPhysicalMemory": "0x2C",
"svcUnmapPhysicalMemory": "0x2D",
"svcGetDebugFutureThreadInfo": "0x2E",
"svcGetLastThreadInfo": "0x2F",
"svcGetResourceLimitLimitValue": "0x30",
"svcGetResourceLimitCurrentValue": "0x31",
"svcSetThreadActivity": "0x32",
"svcGetThreadContext3": "0x33",
"svcWaitForAddress": "0x34",
"svcSignalToAddress": "0x35",
"svcSynchronizePreemptionState": "0x36",
"svcGetResourceLimitPeakValue": "0x37",
"svcUnknown38": "0x38",
"svcUnknown39": "0x39",
"svcUnknown3a": "0x3A",
"svcUnknown3b": "0x3B",
"svcKernelDebug": "0x3C",
"svcChangeKernelTraceState": "0x3D",
"svcUnknown3e": "0x3E",
"svcUnknown3f": "0x3F",
"svcCreateSession": "0x40",
"svcAcceptSession": "0x41",
"svcReplyAndReceiveLight": "0x42",
"svcReplyAndReceive": "0x43",
"svcReplyAndReceiveWithUserBuffer": "0x44",
"svcCreateEvent": "0x45",
"svcUnknown46": "0x46",
"svcUnknown47": "0x47",
"svcMapPhysicalMemoryUnsafe": "0x48",
"svcUnmapPhysicalMemoryUnsafe": "0x49",
"svcSetUnsafeLimit": "0x4A",
"svcCreateCodeMemory": "0x4B",
"svcControlCodeMemory": "0x4C",
"svcSleepSystem": "0x4D",
"svcReadWriteRegister": "0x4E",
"svcSetProcessActivity": "0x4F",
"svcCreateSharedMemory": "0x50",
"svcMapTransferMemory": "0x51",
"svcUnmapTransferMemory": "0x52",
"svcQueryIoMapping": "0x55",
"svcDebugActiveProcess": "0x60",
"svcBreakDebugProcess": "0x61",
"svcTerminateDebugProcess": "0x62",
"svcGetDebugEvent": "0x63",
"svcContinueDebugEvent": "0x64",
"svcGetProcessList": "0x65",
"svcGetThreadList": "0x66",
"svcGetDebugThreadContext": "0x67",
"svcSetDebugThreadContext": "0x68",
"svcQueryDebugProcessMemory": "0x69",
"svcReadDebugProcessMemory": "0x6A",
"svcWriteDebugProcessMemory": "0x6B",
"svcSetHardwareBreakPoint": "0x6C",
"svcGetDebugThreadParam": "0x6D",
"svcGetSystemInfo": "0x6F",
"svcConnectToPort": "0x72",
"svcSetProcessMemoryPermission": "0x73",
"svcMapProcessMemory": "0x74",
"svcUnmapProcessMemory": "0x75",
"svcQueryProcessMemory": "0x76",
"svcMapProcessCodeMemory": "0x77",
"svcUnmapProcessCodeMemory": "0x78",
"svcCallSecureMonitor": "0x7F"
}
}
]
}

View File

@ -0,0 +1,147 @@
{
"name": "TestSvc",
"title_id": "0x5555555555555555",
"title_id_range_min": "0x5555555555555555",
"title_id_range_max": "0x5555555555555555",
"main_thread_stack_size": "0x8000",
"main_thread_priority": 28,
"default_cpu_id": 3,
"process_category": 0,
"is_retail": true,
"pool_partition": 2,
"is_64_bit": true,
"address_space_type": 3,
"disable_device_address_space_merge": true,
"filesystem_access": {
"permissions": "0xFFFFFFFFFFFFFFFF"
},
"service_access": ["*"],
"service_host": ["*"],
"kernel_capabilities": [
{
"type": "kernel_flags",
"value": {
"highest_thread_priority": 63,
"lowest_thread_priority": 16,
"lowest_cpu_id": 0,
"highest_cpu_id": 3
}
},
{
"type": "handle_table_size",
"value": 0
},
{
"type": "syscalls",
"value": {
"svcUnknown00": "0x00",
"svcSetHeapSize": "0x01",
"svcSetMemoryPermission": "0x02",
"svcSetMemoryAttribute": "0x03",
"svcMapMemory": "0x04",
"svcUnmapMemory": "0x05",
"svcQueryMemory": "0x06",
"svcExitProcess": "0x07",
"svcCreateThread": "0x08",
"svcStartThread": "0x09",
"svcExitThread": "0x0A",
"svcSleepThread": "0x0B",
"svcGetThreadPriority": "0x0C",
"svcSetThreadPriority": "0x0D",
"svcGetThreadCoreMask": "0x0E",
"svcSetThreadCoreMask": "0x0F",
"svcGetCurrentProcessorNumber": "0x10",
"svcSignalEvent": "0x11",
"svcClearEvent": "0x12",
"svcMapSharedMemory": "0x13",
"svcUnmapSharedMemory": "0x14",
"svcCreateTransferMemory": "0x15",
"svcCloseHandle": "0x16",
"svcResetSignal": "0x17",
"svcWaitSynchronization": "0x18",
"svcCancelSynchronization": "0x19",
"svcArbitrateLock": "0x1A",
"svcArbitrateUnlock": "0x1B",
"svcWaitProcessWideKeyAtomic": "0x1C",
"svcSignalProcessWideKey": "0x1D",
"svcGetSystemTick": "0x1E",
"svcConnectToNamedPort": "0x1F",
"svcSendSyncRequestLight": "0x20",
"svcSendSyncRequest": "0x21",
"svcSendSyncRequestWithUserBuffer": "0x22",
"svcSendAsyncRequestWithUserBuffer": "0x23",
"svcGetProcessId": "0x24",
"svcGetThreadId": "0x25",
"svcBreak": "0x26",
"svcOutputDebugString": "0x27",
"svcReturnFromException": "0x28",
"svcGetInfo": "0x29",
"svcFlushEntireDataCache": "0x2A",
"svcFlushDataCache": "0x2B",
"svcMapPhysicalMemory": "0x2C",
"svcUnmapPhysicalMemory": "0x2D",
"svcGetDebugFutureThreadInfo": "0x2E",
"svcGetLastThreadInfo": "0x2F",
"svcGetResourceLimitLimitValue": "0x30",
"svcGetResourceLimitCurrentValue": "0x31",
"svcSetThreadActivity": "0x32",
"svcGetThreadContext3": "0x33",
"svcWaitForAddress": "0x34",
"svcSignalToAddress": "0x35",
"svcSynchronizePreemptionState": "0x36",
"svcGetResourceLimitPeakValue": "0x37",
"svcUnknown38": "0x38",
"svcUnknown39": "0x39",
"svcUnknown3a": "0x3A",
"svcUnknown3b": "0x3B",
"svcKernelDebug": "0x3C",
"svcChangeKernelTraceState": "0x3D",
"svcUnknown3e": "0x3E",
"svcUnknown3f": "0x3F",
"svcCreateSession": "0x40",
"svcAcceptSession": "0x41",
"svcReplyAndReceiveLight": "0x42",
"svcReplyAndReceive": "0x43",
"svcReplyAndReceiveWithUserBuffer": "0x44",
"svcCreateEvent": "0x45",
"svcUnknown46": "0x46",
"svcUnknown47": "0x47",
"svcMapPhysicalMemoryUnsafe": "0x48",
"svcUnmapPhysicalMemoryUnsafe": "0x49",
"svcSetUnsafeLimit": "0x4A",
"svcCreateCodeMemory": "0x4B",
"svcControlCodeMemory": "0x4C",
"svcSleepSystem": "0x4D",
"svcReadWriteRegister": "0x4E",
"svcSetProcessActivity": "0x4F",
"svcCreateSharedMemory": "0x50",
"svcMapTransferMemory": "0x51",
"svcUnmapTransferMemory": "0x52",
"svcQueryIoMapping": "0x55",
"svcDebugActiveProcess": "0x60",
"svcBreakDebugProcess": "0x61",
"svcTerminateDebugProcess": "0x62",
"svcGetDebugEvent": "0x63",
"svcContinueDebugEvent": "0x64",
"svcGetProcessList": "0x65",
"svcGetThreadList": "0x66",
"svcGetDebugThreadContext": "0x67",
"svcSetDebugThreadContext": "0x68",
"svcQueryDebugProcessMemory": "0x69",
"svcReadDebugProcessMemory": "0x6A",
"svcWriteDebugProcessMemory": "0x6B",
"svcSetHardwareBreakPoint": "0x6C",
"svcGetDebugThreadParam": "0x6D",
"svcGetSystemInfo": "0x6F",
"svcConnectToPort": "0x72",
"svcSetProcessMemoryPermission": "0x73",
"svcMapProcessMemory": "0x74",
"svcUnmapProcessMemory": "0x75",
"svcQueryProcessMemory": "0x76",
"svcMapProcessCodeMemory": "0x77",
"svcUnmapProcessCodeMemory": "0x78",
"svcCallSecureMonitor": "0x7F"
}
}
]
}
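As a quick sanity check on the descriptor above: the kernel_flags capability permits thread priorities in the numeric range 16-63 and cores 0-3, and the chosen main_thread_priority (28) and default_cpu_id (3) fall inside those ranges. An illustrative compile-time restatement (identifiers below are ad hoc, copied from the JSON values, not part of any API):

constexpr int LowestThreadPriorityFlag  = 16; /* "lowest_thread_priority"  */
constexpr int HighestThreadPriorityFlag = 63; /* "highest_thread_priority" */
constexpr int MainThreadPriority        = 28; /* "main_thread_priority"    */
constexpr int LowestCpuId               = 0;  /* "lowest_cpu_id"           */
constexpr int HighestCpuId              = 3;  /* "highest_cpu_id"          */
constexpr int DefaultCpuId              = 3;  /* "default_cpu_id"          */

static_assert(LowestThreadPriorityFlag <= MainThreadPriority && MainThreadPriority <= HighestThreadPriorityFlag);
static_assert(LowestCpuId <= DefaultCpuId && DefaultCpuId <= HighestCpuId);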

tests/TestSvc/source/catch.hpp (new file, 17959 lines)

File diff suppressed because it is too large.

View File

@ -0,0 +1,102 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#define CATCH_CONFIG_RUNNER
#include "util_catch.hpp"
namespace ams {
namespace {
constexpr size_t MallocBufferSize = 16_MB;
alignas(os::MemoryPageSize) constinit u8 g_malloc_buffer[MallocBufferSize];
}
namespace hos {
bool IsUnitTestProgramForSetVersion() { return true; }
}
namespace init {
void InitializeSystemModuleBeforeConstructors() {
/* Catch has global-ctors which allocate, so we need to do this earlier than normal. */
init::InitializeAllocator(g_malloc_buffer, sizeof(g_malloc_buffer));
}
void InitializeSystemModule() { /* ... */ }
void FinalizeSystemModule() { /* ... */ }
void Startup() { /* ... */ }
}
void NORETURN Exit(int rc) {
AMS_UNUSED(rc);
AMS_ABORT("Exit called by immortal process");
}
void Main() {
/* Ensure our thread priority and core mask is correct. */
{
auto * const cur_thread = os::GetCurrentThread();
os::SetThreadCoreMask(cur_thread, 3, (1ul << 3));
os::ChangeThreadPriority(cur_thread, 0);
}
/* Run tests. */
Catch::Session().run(os::GetHostArgc(), os::GetHostArgv());
AMS_INFINITE_LOOP();
/* This can never be reached. */
AMS_ASSUME(false);
}
}
namespace Catch {
namespace {
class OutputDebugStringStream : public std::stringbuf {
public:
OutputDebugStringStream() = default;
~OutputDebugStringStream() { pubsync(); }
int sync() override {
const auto message = str();
return R_SUCCEEDED(ams::svc::OutputDebugString(message.c_str(), message.length())) ? 0 : -1;
}
};
}
std::ostream& cout() {
static std::ostream ret(new OutputDebugStringStream);
return ret;
}
std::ostream& clog() {
return cout();
}
std::ostream& cerr() {
return clog();
}
}
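The OutputDebugStringStream above redirects all Catch output to svc::OutputDebugString by overriding std::stringbuf::sync(), which runs whenever the stream is flushed. A standalone, host-side sketch of the same pattern (hypothetical names; it forwards to stdout instead of the debug SVC, and clears the buffer after each flush):

#include <cstdio>
#include <ostream>
#include <sstream>
#include <string>

class SinkStringBuf : public std::stringbuf {
    public:
        ~SinkStringBuf() override { this->pubsync(); }

        int sync() override {
            const std::string message = this->str();
            this->str("");  /* drop what we just forwarded */
            return std::fputs(message.c_str(), stdout) >= 0 ? 0 : -1;
        }
};

int main() {
    std::ostream out(new SinkStringBuf);
    out << "redirected through sync()" << std::endl; /* std::endl flushes, triggering sync() */
    return 0;
}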

View File

@ -0,0 +1,91 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "util_common.hpp"
#include "util_scoped_heap.hpp"
namespace ams::test {
namespace {
constinit volatile bool g_spinloop;
void TestPreemptionPriorityThreadFunction(volatile bool *executed) {
/* While we should spin, note that we're executing. */
while (g_spinloop) {
__asm__ __volatile__("" ::: "memory");
*executed = true;
__asm__ __volatile__("" ::: "memory");
}
/* Exit the thread. */
svc::ExitThread();
}
}
CATCH_TEST_CASE( "The scheduler is preemptive at the preemptive priority and cooperative for all other priorities" ) {
/* Create heap. */
ScopedHeap heap(3 * os::MemoryPageSize);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(heap.GetAddress() + os::MemoryPageSize, os::MemoryPageSize, svc::MemoryPermission_None)));
ON_SCOPE_EXIT {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(heap.GetAddress() + os::MemoryPageSize, os::MemoryPageSize, svc::MemoryPermission_ReadWrite)));
};
const uintptr_t sp_0 = heap.GetAddress() + 1 * os::MemoryPageSize;
const uintptr_t sp_1 = heap.GetAddress() + 3 * os::MemoryPageSize;
for (s32 core = 0; core < NumCores; ++core) {
for (s32 priority = HighestTestPriority; priority <= LowestTestPriority; ++priority) {
svc::Handle thread_handles[2];
volatile bool thread_executed[2] = { false, false };
/* Start spinlooping. */
g_spinloop = true;
/* Create threads. */
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateThread(thread_handles + 0, reinterpret_cast<uintptr_t>(&TestPreemptionPriorityThreadFunction), reinterpret_cast<uintptr_t>(thread_executed + 0), sp_0, priority, core)));
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateThread(thread_handles + 1, reinterpret_cast<uintptr_t>(&TestPreemptionPriorityThreadFunction), reinterpret_cast<uintptr_t>(thread_executed + 1), sp_1, priority, core)));
/* Start threads. */
CATCH_REQUIRE(R_SUCCEEDED(svc::StartThread(thread_handles[0])));
CATCH_REQUIRE(R_SUCCEEDED(svc::StartThread(thread_handles[1])));
/* Wait long enough that we can be confident the threads have been balanced. */
svc::SleepThread(PreemptionTimeSpan.GetNanoSeconds() * 10);
/* Check that we're in a coherent state. */
if (IsPreemptionPriority(core, priority)) {
CATCH_REQUIRE(thread_executed[0] & thread_executed[1]);
} else {
CATCH_REQUIRE(thread_executed[0] ^ thread_executed[1]);
}
/* Stop spinlooping. */
g_spinloop = false;
/* Wait for threads to exit. */
s32 dummy;
CATCH_REQUIRE(R_SUCCEEDED(svc::WaitSynchronization(std::addressof(dummy), thread_handles + 0, 1, -1)));
CATCH_REQUIRE(R_SUCCEEDED(svc::WaitSynchronization(std::addressof(dummy), thread_handles + 1, 1, -1)));
/* Close thread handles. */
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(thread_handles[0])));
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(thread_handles[1])));
}
}
}
}
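The final CATCH_REQUIREs above encode the expected behavior: at the preemption priority both spinning threads must have made progress within ten preemption intervals, while at every other (cooperative) priority exactly one of the two can ever run. An isolated restatement of that invariant (illustrative helper, not part of the test):

constexpr bool SchedulingInvariantHolds(bool preemptive, bool ran0, bool ran1) {
    return preemptive ? (ran0 && ran1)  /* preemption tick shares the core */
                      : (ran0 != ran1); /* cooperative: only one thread spins forever */
}

static_assert( SchedulingInvariantHolds(true,  true,  true));
static_assert( SchedulingInvariantHolds(false, true,  false));
static_assert( SchedulingInvariantHolds(false, false, true));
static_assert(!SchedulingInvariantHolds(false, true,  true));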

View File

@ -0,0 +1,133 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "util_common.hpp"
#include "util_check_memory.hpp"
namespace ams::test {
namespace {
size_t GetPhysicalMemorySizeMax() {
u64 v;
R_ABORT_UNLESS(svc::GetInfo(std::addressof(v), svc::InfoType_ResourceLimit, svc::InvalidHandle, 0));
const svc::Handle resource_limit = v;
ON_SCOPE_EXIT { svc::CloseHandle(resource_limit); };
s64 size;
R_ABORT_UNLESS(svc::GetResourceLimitLimitValue(std::addressof(size), resource_limit, svc::LimitableResource_PhysicalMemoryMax));
return static_cast<size_t>(size);
}
size_t GetPhysicalMemorySizeAvailable() {
u64 v;
R_ABORT_UNLESS(svc::GetInfo(std::addressof(v), svc::InfoType_ResourceLimit, svc::InvalidHandle, 0));
const svc::Handle resource_limit = v;
ON_SCOPE_EXIT { svc::CloseHandle(resource_limit); };
s64 total;
R_ABORT_UNLESS(svc::GetResourceLimitLimitValue(std::addressof(total), resource_limit, svc::LimitableResource_PhysicalMemoryMax));
s64 current;
R_ABORT_UNLESS(svc::GetResourceLimitCurrentValue(std::addressof(current), resource_limit, svc::LimitableResource_PhysicalMemoryMax));
return static_cast<size_t>(total - current);
}
}
CATCH_TEST_CASE("svc::SetHeapSize") {
svc::MemoryInfo mem_info;
svc::PageInfo page_info;
uintptr_t dummy;
/* Reset the heap. */
uintptr_t addr;
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), 0)));
/* Ensure that we don't leak memory. */
const size_t initial_memory = GetPhysicalMemorySizeAvailable();
ON_SCOPE_EXIT { CATCH_REQUIRE(initial_memory == GetPhysicalMemorySizeAvailable()); };
CATCH_SECTION("Unaligned and too big sizes fail") {
for (size_t i = 1; i < svc::HeapSizeAlignment; i = util::AlignUp(i + 1, os::MemoryPageSize)){
CATCH_REQUIRE(svc::ResultInvalidSize::Includes(svc::SetHeapSize(std::addressof(dummy), i)));
}
CATCH_REQUIRE(svc::ResultInvalidSize::Includes(svc::SetHeapSize(std::addressof(dummy), 64_GB)));
}
CATCH_SECTION("Larger size than address space fails") {
CATCH_REQUIRE(svc::ResultOutOfMemory::Includes(svc::SetHeapSize(std::addressof(dummy), util::AlignUp(svc::AddressMemoryRegionHeap39Size + 1, svc::HeapSizeAlignment))));
}
CATCH_SECTION("Bounded by resource limit") {
CATCH_REQUIRE(svc::ResultLimitReached::Includes(svc::SetHeapSize(std::addressof(dummy), util::AlignUp(GetPhysicalMemorySizeMax() + 1, svc::HeapSizeAlignment))));
CATCH_REQUIRE(svc::ResultLimitReached::Includes(svc::SetHeapSize(std::addressof(dummy), util::AlignUp(GetPhysicalMemorySizeAvailable() + 1, svc::HeapSizeAlignment))));
}
CATCH_SECTION("SetHeapSize gives heap memory") {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), svc::HeapSizeAlignment)));
TestMemory(addr, svc::HeapSizeAlignment, svc::MemoryState_Normal, svc::MemoryPermission_ReadWrite, 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), 0)));
}
CATCH_SECTION("SetHeapSize cannot remove read-only heap") {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), svc::HeapSizeAlignment)));
CATCH_REQUIRE(R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), addr)));
TestMemory(addr, svc::HeapSizeAlignment, svc::MemoryState_Normal, svc::MemoryPermission_ReadWrite, 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(addr, svc::HeapSizeAlignment, svc::MemoryPermission_Read)));
TestMemory(addr, svc::HeapSizeAlignment, svc::MemoryState_Normal, svc::MemoryPermission_Read, 0);
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetHeapSize(std::addressof(dummy), 0)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(addr, svc::HeapSizeAlignment, svc::MemoryPermission_ReadWrite)));
TestMemory(addr, svc::HeapSizeAlignment, svc::MemoryState_Normal, svc::MemoryPermission_ReadWrite, 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), 0)));
}
CATCH_SECTION("Heap memory does not survive unmap/re-map") {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), 2 * svc::HeapSizeAlignment)));
u8 * const heap = reinterpret_cast<u8 *>(addr);
std::memset(heap, 0xAA, svc::HeapSizeAlignment);
std::memset(heap + svc::HeapSizeAlignment, 0xBB, svc::HeapSizeAlignment);
CATCH_REQUIRE(heap[svc::HeapSizeAlignment] == 0xBB);
CATCH_REQUIRE(std::memcmp(heap + svc::HeapSizeAlignment, heap + svc::HeapSizeAlignment + 1, svc::HeapSizeAlignment - 1) == 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), svc::HeapSizeAlignment)));
CATCH_REQUIRE(heap[0] == 0xAA);
CATCH_REQUIRE(std::memcmp(heap, heap + 1, svc::HeapSizeAlignment - 1) == 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), 2 * svc::HeapSizeAlignment)));
CATCH_REQUIRE(heap[svc::HeapSizeAlignment] == 0x00);
CATCH_REQUIRE(std::memcmp(heap + svc::HeapSizeAlignment, heap + svc::HeapSizeAlignment + 1, svc::HeapSizeAlignment - 1) == 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetHeapSize(std::addressof(addr), 0)));
}
}
}

View File

@ -0,0 +1,156 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "util_common.hpp"
#include "util_check_memory.hpp"
#include "util_scoped_heap.hpp"
namespace ams::test {
namespace {
bool CanSetMemoryPermission(u8 state) {
return state == svc::MemoryState_CodeData || state == svc::MemoryState_AliasCodeData || state == svc::MemoryState_Normal;
}
}
alignas(os::MemoryPageSize) constinit u8 g_memory_permission_buffer[2 * os::MemoryPageSize];
CATCH_TEST_CASE("svc::SetMemoryPermission invalid arguments") {
const uintptr_t buffer = reinterpret_cast<uintptr_t>(g_memory_permission_buffer);
for (size_t i = 1; i < os::MemoryPageSize; ++i) {
CATCH_REQUIRE(svc::ResultInvalidAddress::Includes(svc::SetMemoryPermission(buffer + i, os::MemoryPageSize, svc::MemoryPermission_Read)));
CATCH_REQUIRE(svc::ResultInvalidSize::Includes(svc::SetMemoryPermission(buffer, os::MemoryPageSize + i, svc::MemoryPermission_Read)));
}
CATCH_REQUIRE(svc::ResultInvalidSize::Includes(svc::SetMemoryPermission(buffer, 0, svc::MemoryPermission_Read)));
{
const u64 vmem_end = util::AlignDown(std::numeric_limits<u64>::max(), os::MemoryPageSize);
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetMemoryPermission(vmem_end, 2 * os::MemoryPageSize, svc::MemoryPermission_Read)));
}
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetMemoryPermission(svc::AddressMap39End, os::MemoryPageSize, svc::MemoryPermission_Read)));
for (size_t i = 0; i < 0x100; ++i) {
const auto perm = static_cast<svc::MemoryPermission>(i);
if (perm == svc::MemoryPermission_None || perm == svc::MemoryPermission_Read || perm == svc::MemoryPermission_ReadWrite) {
continue;
}
CATCH_REQUIRE(svc::ResultInvalidNewMemoryPermission::Includes(svc::SetMemoryPermission(buffer, os::MemoryPageSize, perm)));
}
CATCH_REQUIRE(svc::ResultInvalidNewMemoryPermission::Includes(svc::SetMemoryPermission(buffer, os::MemoryPageSize, svc::MemoryPermission_ReadExecute)));
CATCH_REQUIRE(svc::ResultInvalidNewMemoryPermission::Includes(svc::SetMemoryPermission(buffer, os::MemoryPageSize, svc::MemoryPermission_Write)));
CATCH_REQUIRE(svc::ResultInvalidNewMemoryPermission::Includes(svc::SetMemoryPermission(buffer, os::MemoryPageSize, svc::MemoryPermission_DontCare)));
}
CATCH_TEST_CASE("svc::SetMemoryPermission works on specific states") {
/* Check that we have CodeData. */
const uintptr_t bss_buffer = reinterpret_cast<uintptr_t>(g_memory_permission_buffer);
TestMemory(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryState_CodeData, svc::MemoryPermission_ReadWrite, 0);
/* Create a heap. */
ScopedHeap scoped_heap(2 * svc::HeapSizeAlignment);
TestMemory(scoped_heap.GetAddress(), scoped_heap.GetSize(), svc::MemoryState_Normal, svc::MemoryPermission_ReadWrite, 0);
/* TODO: Ensure we have alias code data? */
uintptr_t addr = 0;
while (true) {
/* Get current mapping. */
svc::MemoryInfo mem_info;
svc::PageInfo page_info;
CATCH_REQUIRE(R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), addr)));
/* Try to set permission. */
if (CanSetMemoryPermission(mem_info.state) && mem_info.attribute == 0) {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(mem_info.base_address, mem_info.size, svc::MemoryPermission_ReadWrite)));
TestMemory(mem_info.base_address, mem_info.size, mem_info.state, svc::MemoryPermission_ReadWrite, mem_info.attribute);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(mem_info.base_address, mem_info.size, mem_info.permission)));
} else {
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetMemoryPermission(mem_info.base_address, mem_info.size, svc::MemoryPermission_Read)));
}
const uintptr_t next_address = mem_info.base_address + mem_info.size;
if (next_address <= addr) {
break;
}
addr = next_address;
}
}
CATCH_TEST_CASE("svc::SetMemoryPermission allows for free movement between RW-, R--, ---") {
/* Define helper. */
auto test_set_memory_permission = [](uintptr_t address, size_t size){
/* Get the permission. */
svc::MemoryInfo mem_info;
svc::PageInfo page_info;
CATCH_REQUIRE(R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), address)));
const svc::MemoryPermission legal_states[] = { svc::MemoryPermission_None, svc::MemoryPermission_Read, svc::MemoryPermission_ReadWrite };
for (const auto src_state : legal_states) {
for (const auto dst_state : legal_states) {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(address, size, svc::MemoryPermission_None)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(address, size, src_state)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(address, size, dst_state)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(address, size, svc::MemoryPermission_None)));
}
}
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(address, size, mem_info.permission)));
};
/* Test that we can freely move about .bss buffers. */
test_set_memory_permission(reinterpret_cast<uintptr_t>(g_memory_permission_buffer), sizeof(g_memory_permission_buffer));
/* Create a heap. */
ScopedHeap scoped_heap(svc::HeapSizeAlignment);
TestMemory(scoped_heap.GetAddress(), scoped_heap.GetSize(), svc::MemoryState_Normal, svc::MemoryPermission_ReadWrite, 0);
/* Test that we can freely move about heap. */
test_set_memory_permission(scoped_heap.GetAddress(), scoped_heap.GetSize());
/* TODO: AliasCodeData */
}
CATCH_TEST_CASE("svc::SetMemoryPermission fails when the memory has non-zero attribute") {
const uintptr_t bss_buffer = reinterpret_cast<uintptr_t>(g_memory_permission_buffer);
TestMemory(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryState_CodeData, svc::MemoryPermission_ReadWrite, 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_None)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_Read)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_ReadWrite)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryAttribute(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryAttribute_Uncached, svc::MemoryAttribute_Uncached)));
TestMemory(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryState_CodeData, svc::MemoryPermission_ReadWrite, svc::MemoryAttribute_Uncached);
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_None)));
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_Read)));
CATCH_REQUIRE(svc::ResultInvalidCurrentMemory::Includes(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_ReadWrite)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryAttribute(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryAttribute_Uncached, 0)));
TestMemory(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryState_CodeData, svc::MemoryPermission_ReadWrite, 0);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_None)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_Read)));
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(bss_buffer, sizeof(g_memory_permission_buffer), svc::MemoryPermission_ReadWrite)));
}
}

View File

@ -0,0 +1,253 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include "util_common.hpp"
#include "util_scoped_heap.hpp"
namespace ams::test {
namespace {
constinit svc::Handle g_read_handles[3] = { svc::InvalidHandle, svc::InvalidHandle, svc::InvalidHandle };
constinit svc::Handle g_write_handles[3] = { svc::InvalidHandle, svc::InvalidHandle, svc::InvalidHandle };
constinit s64 g_thread_wait_ns;
constinit bool g_should_switch_threads;
constinit bool g_switched_threads;
constinit bool g_correct_switch_threads;
void WaitSynchronization(svc::Handle handle) {
s32 dummy;
R_ABORT_UNLESS(svc::WaitSynchronization(std::addressof(dummy), std::addressof(handle), 1, -1));
}
void TestYieldHigherOrSamePriorityThread() {
/* Wait to run. */
WaitSynchronization(g_read_handles[0]);
/* Reset our event. */
R_ABORT_UNLESS(svc::ClearEvent(g_read_handles[0]));
/* Signal the other thread's event. */
R_ABORT_UNLESS(svc::SignalEvent(g_write_handles[1]));
/* Wait, potentially yielding to the lower/same priority thread. */
g_switched_threads = false;
svc::SleepThread(g_thread_wait_ns);
/* Check whether we switched correctly. */
g_correct_switch_threads = g_should_switch_threads == g_switched_threads;
/* Exit. */
svc::ExitThread();
}
void TestYieldLowerOrSamePriorityThread() {
/* Signal the higher/same priority thread to run. */
R_ABORT_UNLESS(svc::SignalEvent(g_write_handles[0]));
/* Wait to run. */
WaitSynchronization(g_read_handles[1]);
/* Reset our event. */
R_ABORT_UNLESS(svc::ClearEvent(g_read_handles[1]));
/* We've switched to the lower/same priority thread. */
g_switched_threads = true;
/* Wait to be instructed to exit. */
WaitSynchronization(g_read_handles[2]);
/* Reset the exit signal. */
R_ABORT_UNLESS(svc::ClearEvent(g_read_handles[2]));
/* Exit. */
svc::ExitThread();
}
void TestYieldSamePriority(uintptr_t sp_higher, uintptr_t sp_lower) {
/* Test each core. */
for (s32 core = 0; core < NumCores; ++core) {
for (s32 priority = HighestTestPriority; priority <= LowestTestPriority && !IsPreemptionPriority(core, priority); ++priority) {
svc::Handle thread_handles[2];
/* Create threads. */
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateThread(thread_handles + 0, reinterpret_cast<uintptr_t>(&TestYieldHigherOrSamePriorityThread), 0, sp_higher, priority, core)));
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateThread(thread_handles + 1, reinterpret_cast<uintptr_t>(&TestYieldLowerOrSamePriorityThread), 0, sp_lower, priority, core)));
/* Start threads. */
CATCH_REQUIRE(R_SUCCEEDED(svc::StartThread(thread_handles[1])));
CATCH_REQUIRE(R_SUCCEEDED(svc::StartThread(thread_handles[0])));
/* Wait for higher priority thread. */
WaitSynchronization(thread_handles[0]);
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(thread_handles[0])));
/* Signal the lower priority thread to exit. */
CATCH_REQUIRE(R_SUCCEEDED(svc::SignalEvent(g_write_handles[2])));
/* Wait for the lower priority thread. */
WaitSynchronization(thread_handles[1]);
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(thread_handles[1])));
/* Check that the switch was correct. */
CATCH_REQUIRE(g_correct_switch_threads);
}
}
}
void TestYieldDifferentPriority(uintptr_t sp_higher, uintptr_t sp_lower) {
/* Test each core. */
for (s32 core = 0; core < NumCores; ++core) {
for (s32 priority = HighestTestPriority; priority < LowestTestPriority && !IsPreemptionPriority(core, priority); ++priority) {
svc::Handle thread_handles[2];
/* Create threads. */
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateThread(thread_handles + 0, reinterpret_cast<uintptr_t>(&TestYieldHigherOrSamePriorityThread), 0, sp_higher, priority, core)));
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateThread(thread_handles + 1, reinterpret_cast<uintptr_t>(&TestYieldLowerOrSamePriorityThread), 0, sp_lower, priority + 1, core)));
/* Start threads. */
CATCH_REQUIRE(R_SUCCEEDED(svc::StartThread(thread_handles[1])));
CATCH_REQUIRE(R_SUCCEEDED(svc::StartThread(thread_handles[0])));
/* Wait for higher priority thread. */
WaitSynchronization(thread_handles[0]);
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(thread_handles[0])));
/* Signal the lower priority thread to exit. */
CATCH_REQUIRE(R_SUCCEEDED(svc::SignalEvent(g_write_handles[2])));
/* Wait for the lower priority thread. */
WaitSynchronization(thread_handles[1]);
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(thread_handles[1])));
/* Check that the switch was correct. */
CATCH_REQUIRE(g_correct_switch_threads);
}
}
}
}
CATCH_TEST_CASE( "svc::SleepThread: Thread sleeps for time specified" ) {
for (s64 ns = 1; ns < TimeSpan::FromSeconds(1).GetNanoSeconds(); ns *= 2) {
const auto start = os::GetSystemTickOrdered();
svc::SleepThread(ns);
const auto end = os::GetSystemTickOrdered();
const s64 taken_ns = (end - start).ToTimeSpan().GetNanoSeconds();
CATCH_REQUIRE( taken_ns >= ns );
}
}
CATCH_TEST_CASE( "svc::SleepThread: Yield is behaviorally correct" ) {
/* Create events. */
for (size_t i = 0; i < util::size(g_write_handles); ++i) {
g_read_handles[i] = svc::InvalidHandle;
g_write_handles[i] = svc::InvalidHandle;
CATCH_REQUIRE(R_SUCCEEDED(svc::CreateEvent(g_write_handles + i, g_read_handles + i)));
}
ON_SCOPE_EXIT {
for (size_t i = 0; i < util::size(g_write_handles); ++i) {
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(g_read_handles[i])));
CATCH_REQUIRE(R_SUCCEEDED(svc::CloseHandle(g_write_handles[i])));
g_read_handles[i] = svc::InvalidHandle;
g_write_handles[i] = svc::InvalidHandle;
}
};
/* Create heap. */
ScopedHeap heap(3 * os::MemoryPageSize);
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(heap.GetAddress() + os::MemoryPageSize, os::MemoryPageSize, svc::MemoryPermission_None)));
ON_SCOPE_EXIT {
CATCH_REQUIRE(R_SUCCEEDED(svc::SetMemoryPermission(heap.GetAddress() + os::MemoryPageSize, os::MemoryPageSize, svc::MemoryPermission_ReadWrite)));
};
const uintptr_t sp_higher = heap.GetAddress() + 1 * os::MemoryPageSize;
const uintptr_t sp_lower = heap.GetAddress() + 3 * os::MemoryPageSize;
CATCH_SECTION("svc::SleepThread: Yields do not switch to a thread of lower priority.") {
/* Test yield without migration. */
{
/* Configure for yield test. */
g_should_switch_threads = false;
g_thread_wait_ns = static_cast<s64>(svc::YieldType_WithoutCoreMigration);
TestYieldDifferentPriority(sp_higher, sp_lower);
}
/* Test yield with migration. */
{
/* Configure for yield test. */
g_should_switch_threads = false;
g_thread_wait_ns = static_cast<s64>(svc::YieldType_WithCoreMigration);
TestYieldDifferentPriority(sp_higher, sp_lower);
}
}
CATCH_SECTION("svc::SleepThread: ToAnyThread switches to a thread of same or lower priority.") {
/* Test to same priority. */
{
/* Configure for yield test. */
g_should_switch_threads = true;
g_thread_wait_ns = static_cast<s64>(svc::YieldType_ToAnyThread);
TestYieldSamePriority(sp_higher, sp_lower);
}
/* Test to lower priority. */
{
/* Configure for yield test. */
g_should_switch_threads = true;
g_thread_wait_ns = static_cast<s64>(svc::YieldType_ToAnyThread);
TestYieldDifferentPriority(sp_higher, sp_lower);
}
}
CATCH_SECTION("svc::SleepThread: Yield switches to another thread of same priority.") {
/* Test yield without migration. */
{
/* Configure for yield test. */
g_should_switch_threads = true;
g_thread_wait_ns = static_cast<s64>(svc::YieldType_WithoutCoreMigration);
TestYieldSamePriority(sp_higher, sp_lower);
}
/* Test yield with migration. */
{
/* Configure for yield test. */
g_should_switch_threads = true;
g_thread_wait_ns = static_cast<s64>(svc::YieldType_WithCoreMigration);
TestYieldSamePriority(sp_higher, sp_lower);
}
}
CATCH_SECTION("svc::SleepThread: Yield with bogus timeout does not switch to another thread same priority") {
/* Configure for yield test. */
g_should_switch_threads = false;
g_thread_wait_ns = INT64_C(-5);
TestYieldSamePriority(sp_higher, sp_lower);
}
}
}

View File

@ -0,0 +1,23 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <stratosphere.hpp>
#define CATCH_CONFIG_NOSTDOUT
#define CATCH_CONFIG_PREFIX_ALL
#define CATCH_CONFIG_DISABLE_EXCEPTIONS
#define CATCH_CONFIG_NO_POSIX_SIGNALS
#include "catch.hpp"

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "util_catch.hpp"
namespace ams::test {
inline void TestMemory(uintptr_t address, svc::MemoryState state, svc::MemoryPermission perm, u32 attr) {
svc::MemoryInfo mem_info;
svc::PageInfo page_info;
CATCH_REQUIRE(R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), address)));
CATCH_REQUIRE(mem_info.base_address <= address);
CATCH_REQUIRE(address < (mem_info.base_address + mem_info.size));
CATCH_REQUIRE(mem_info.state == state);
CATCH_REQUIRE(mem_info.permission == perm);
CATCH_REQUIRE(mem_info.attribute == attr);
}
inline void TestMemory(uintptr_t address, size_t size, svc::MemoryState state, svc::MemoryPermission perm, u32 attr) {
svc::MemoryInfo mem_info;
svc::PageInfo page_info;
CATCH_REQUIRE(R_SUCCEEDED(svc::QueryMemory(std::addressof(mem_info), std::addressof(page_info), address)));
CATCH_REQUIRE(mem_info.base_address <= address);
CATCH_REQUIRE(mem_info.base_address < (address + size));
CATCH_REQUIRE((address + size) <= (mem_info.base_address + mem_info.size));
CATCH_REQUIRE(mem_info.state == state);
CATCH_REQUIRE(mem_info.permission == perm);
CATCH_REQUIRE(mem_info.attribute == attr);
}
}

View File

@ -0,0 +1,35 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "util_catch.hpp"
namespace ams::test {
static constexpr s32 NumCores = 4;
static constexpr s32 DpcManagerNormalThreadPriority = 59;
static constexpr s32 DpcManagerPreemptionThreadPriority = 63;
static constexpr s32 HighestTestPriority = 32;
static constexpr s32 LowestTestPriority = svc::LowestThreadPriority;
static_assert(HighestTestPriority < LowestTestPriority);
static constexpr TimeSpan PreemptionTimeSpan = TimeSpan::FromMilliSeconds(10);
constexpr inline bool IsPreemptionPriority(s32 core, s32 priority) {
return priority == ((core == (NumCores - 1)) ? DpcManagerPreemptionThreadPriority : DpcManagerNormalThreadPriority);
}
}
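A few illustrative compile-time checks of IsPreemptionPriority against the constants above (added here for clarity; they are not part of the header): the preemption priority is 63 on the last core and 59 on the others.

namespace ams::test {

    static_assert( IsPreemptionPriority(NumCores - 1, DpcManagerPreemptionThreadPriority)); /* core 3, priority 63 */
    static_assert( IsPreemptionPriority(0, DpcManagerNormalThreadPriority));                /* cores 0-2, priority 59 */
    static_assert(!IsPreemptionPriority(0, DpcManagerPreemptionThreadPriority));
    static_assert(!IsPreemptionPriority(NumCores - 1, DpcManagerNormalThreadPriority));

}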

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "util_catch.hpp"
namespace ams::test {
class ScopedHeap {
NON_COPYABLE(ScopedHeap);
NON_MOVEABLE(ScopedHeap);
private:
uintptr_t m_address;
size_t m_size;
public:
explicit ScopedHeap(size_t size) {
this->SetHeapSize(size);
}
~ScopedHeap() {
const auto result = svc::SetHeapSize(std::addressof(m_address), 0);
CATCH_REQUIRE(R_SUCCEEDED(result));
}
void SetHeapSize(size_t size) {
m_size = util::AlignUp(size, svc::HeapSizeAlignment);
const auto result = svc::SetHeapSize(std::addressof(m_address), m_size);
CATCH_REQUIRE(R_SUCCEEDED(result));
}
uintptr_t GetAddress() const { return m_address; }
size_t GetSize() const { return m_size; }
};
}
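A minimal usage sketch for ScopedHeap (hypothetical test body, added for illustration): the constructor rounds the requested size up to svc::HeapSizeAlignment and maps the heap, and the destructor releases it by setting the heap size back to zero when the object leaves scope.

CATCH_TEST_CASE( "ScopedHeap usage sketch (illustrative)" ) {
    ams::test::ScopedHeap heap(1);
    CATCH_REQUIRE(heap.GetSize() == ams::svc::HeapSizeAlignment); /* 1 byte rounds up to one alignment unit */
    CATCH_REQUIRE(heap.GetAddress() != 0);
    /* The heap is released via svc::SetHeapSize(..., 0) in ~ScopedHeap. */
}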