From 6ee305464a430ebb1dd5604301285ce4f0549753 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 17 Jan 2020 22:02:45 -0800 Subject: [PATCH 01/97] mesosphere: Implement SVC table auto-generation --- .../libmesosphere/include/mesosphere.hpp | 3 + .../include/mesosphere/kern_svc.hpp | 19 + .../svc/kern_svc_k_user_pointer.hpp | 32 ++ .../mesosphere/svc/kern_svc_prototypes.hpp | 36 ++ .../mesosphere/svc/kern_svc_tables.hpp | 29 + .../source/svc/kern_svc_tables.cpp | 67 +++ .../svc_codegen_impl_code_generator.hpp | 276 +++++++++ .../svc/codegen/svc_codegen_impl_common.hpp | 193 +++++++ .../svc_codegen_impl_kernel_svc_wrapper.hpp | 540 ++++++++++++++++++ .../svc/codegen/svc_codegen_impl_layout.hpp | 354 ++++++++++++ .../svc_codegen_impl_layout_conversion.hpp | 491 ++++++++++++++++ .../codegen/svc_codegen_impl_meta_code.hpp | 236 ++++++++ .../codegen/svc_codegen_impl_parameter.hpp | 192 +++++++ .../svc_codegen_kernel_svc_wrapper.hpp | 51 ++ .../include/vapours/svc/svc_codegen.hpp | 23 + .../include/vapours/svc/svc_definitions.hpp | 52 +- .../include/vapours/svc/svc_types_common.hpp | 24 + 17 files changed, 2614 insertions(+), 4 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_svc.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp create mode 100644 libraries/libmesosphere/source/svc/kern_svc_tables.cpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp create mode 100644 
libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp create mode 100644 libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp create mode 100644 libraries/libvapours/include/vapours/svc/svc_codegen.hpp diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index f155cebc1..135b6b5a6 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -36,3 +36,6 @@ /* Core functionality. */ #include "mesosphere/kern_select_interrupts.hpp" #include "mesosphere/kern_select_k_system_control.hpp" + +/* Supervisor Calls. */ +#include "mesosphere/kern_svc.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp new file mode 100644 index 000000000..6eaa75f9a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc/kern_svc_k_user_pointer.hpp" +#include "svc/kern_svc_prototypes.hpp" +#include "svc/kern_svc_tables.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp new file mode 100644 index 000000000..368ff39dd --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern::svc { + + /* TODO: Actually implement this type. */ + template + struct KUserPointer : impl::KUserPointerTag { + public: + static_assert(std::is_pointer::value); + static constexpr bool IsInput = std::is_const::type>::value; + private: + T pointer; + }; + + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp new file mode 100644 index 000000000..3412373ba --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include "kern_svc_k_user_pointer.hpp" + +namespace ams::kern::svc { + + #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64(ID, RETURN_TYPE, NAME, ...) \ + RETURN_TYPE NAME##64(__VA_ARGS__); + #define AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32(ID, RETURN_TYPE, NAME, ...) \ + RETURN_TYPE NAME##64From32(__VA_ARGS__); + + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64, lp64) + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32, ilp32) + + /* TODO: Support _32 ABI */ + + #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64 + #undef AMS_KERN_SVC_DECLARE_PROTOTYPE_64_FROM_32 + + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp new file mode 100644 index 000000000..42b4a62bd --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::kern::svc { + + static constexpr size_t NumSupervisorCalls = 0x80; + using SvcTableEntry = void (*)(); + + /* TODO: 32-bit ABI */ + + extern const std::array SvcTable64From32; + extern const std::array SvcTable64; + +} diff --git a/libraries/libmesosphere/source/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/svc/kern_svc_tables.cpp new file mode 100644 index 000000000..a2fd6a28d --- /dev/null +++ b/libraries/libmesosphere/source/svc/kern_svc_tables.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +namespace ams::kern::svc { + + namespace { + + #define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \ + class NAME { \ + private: \ + using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \ + public: \ + static NOINLINE void Call64() { return Impl::Call64(); } \ + static NOINLINE void Call64From32() { return Impl::Call64From32(); } \ + }; + + + + /* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. 
*/ + #pragma GCC push_options + #pragma GCC optimize ("omit-frame-pointer") + + AMS_SVC_FOREACH_KERN_DEFINITION(DECLARE_SVC_STRUCT, _) + + #pragma GCC pop_options + + } + + /* TODO: 32-bit ABI */ + const std::array SvcTable64From32 = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + table[ID] = NAME::Call64From32; + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + return table; + }(); + + const std::array SvcTable64 = [] { + std::array table = {}; + + #define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \ + table[ID] = NAME::Call64; + AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _) + #undef AMS_KERN_SVC_SET_TABLE_ENTRY + + return table; + }(); + +} diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp new file mode 100644 index 000000000..6a13d93e0 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + #define SVC_CODEGEN_FOR_I_FROM_0_TO_64(HANDLER, ...) 
\ + HANDLER( 0, ## __VA_ARGS__); HANDLER( 1, ## __VA_ARGS__); HANDLER( 2, ## __VA_ARGS__); HANDLER( 3, ## __VA_ARGS__); \ + HANDLER( 4, ## __VA_ARGS__); HANDLER( 5, ## __VA_ARGS__); HANDLER( 6, ## __VA_ARGS__); HANDLER( 7, ## __VA_ARGS__); \ + HANDLER( 8, ## __VA_ARGS__); HANDLER( 9, ## __VA_ARGS__); HANDLER(10, ## __VA_ARGS__); HANDLER(11, ## __VA_ARGS__); \ + HANDLER(12, ## __VA_ARGS__); HANDLER(13, ## __VA_ARGS__); HANDLER(14, ## __VA_ARGS__); HANDLER(15, ## __VA_ARGS__); \ + HANDLER(16, ## __VA_ARGS__); HANDLER(17, ## __VA_ARGS__); HANDLER(18, ## __VA_ARGS__); HANDLER(19, ## __VA_ARGS__); \ + HANDLER(20, ## __VA_ARGS__); HANDLER(21, ## __VA_ARGS__); HANDLER(22, ## __VA_ARGS__); HANDLER(23, ## __VA_ARGS__); \ + HANDLER(24, ## __VA_ARGS__); HANDLER(25, ## __VA_ARGS__); HANDLER(26, ## __VA_ARGS__); HANDLER(27, ## __VA_ARGS__); \ + HANDLER(28, ## __VA_ARGS__); HANDLER(29, ## __VA_ARGS__); HANDLER(30, ## __VA_ARGS__); HANDLER(31, ## __VA_ARGS__); \ + HANDLER(32, ## __VA_ARGS__); HANDLER(33, ## __VA_ARGS__); HANDLER(34, ## __VA_ARGS__); HANDLER(35, ## __VA_ARGS__); \ + HANDLER(36, ## __VA_ARGS__); HANDLER(37, ## __VA_ARGS__); HANDLER(38, ## __VA_ARGS__); HANDLER(39, ## __VA_ARGS__); \ + HANDLER(40, ## __VA_ARGS__); HANDLER(41, ## __VA_ARGS__); HANDLER(42, ## __VA_ARGS__); HANDLER(43, ## __VA_ARGS__); \ + HANDLER(44, ## __VA_ARGS__); HANDLER(45, ## __VA_ARGS__); HANDLER(46, ## __VA_ARGS__); HANDLER(47, ## __VA_ARGS__); \ + HANDLER(48, ## __VA_ARGS__); HANDLER(49, ## __VA_ARGS__); HANDLER(50, ## __VA_ARGS__); HANDLER(51, ## __VA_ARGS__); \ + HANDLER(52, ## __VA_ARGS__); HANDLER(53, ## __VA_ARGS__); HANDLER(54, ## __VA_ARGS__); HANDLER(55, ## __VA_ARGS__); \ + HANDLER(56, ## __VA_ARGS__); HANDLER(57, ## __VA_ARGS__); HANDLER(58, ## __VA_ARGS__); HANDLER(59, ## __VA_ARGS__); \ + HANDLER(60, ## __VA_ARGS__); HANDLER(61, ## __VA_ARGS__); HANDLER(62, ## __VA_ARGS__); HANDLER(63, ## __VA_ARGS__); + + + class Aarch64CodeGenerator { + private: + struct RegisterPair { + size_t 
First; + size_t Second; + }; + + template + struct RegisterPairHelper; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 1 + RegisterPairHelper::PairCount; + static constexpr std::array Pairs = [] { + std::array pairs = {}; + pairs[0] = RegisterPair{First, Second}; + if constexpr (RegisterPairHelper::PairCount) { + for (size_t i = 0; i < RegisterPairHelper::PairCount; i++) { + pairs[1+i] = RegisterPairHelper::Pairs[i]; + } + } + return pairs; + }(); + }; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 1; + static constexpr std::array Pairs = { RegisterPair{First, Second} }; + }; + + template + struct RegisterPairHelper { + static constexpr size_t PairCount = 0; + static constexpr std::array Pairs = {}; + }; + + template + static ALWAYS_INLINE void ClearRegister() { + __asm__ __volatile__("mov x%c[r], xzr" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegister() { + __asm__ __volatile__("str x%c[r], [sp, -16]!" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void RestoreRegister() { + __asm__ __volatile__("ldr x%c[r], [sp], 16" :: [r]"i"(Reg) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegisterPair() { + __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, -16]!" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory"); + } + + template + static ALWAYS_INLINE void RestoreRegisterPair() { + __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp], 16" :: [r0]"i"(Reg0), [r1]"i"(Reg1) : "memory"); + } + + template + static ALWAYS_INLINE void SaveRegistersImpl() { + #define SVC_CODEGEN_HANDLER(n) \ + do { if constexpr ((63 - n) < Pairs.size()) { SaveRegisterPair(); } } while (0) + + if constexpr (sizeof...(Rest) % 2 == 1) { + /* Even number of registers. 
*/ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else if constexpr (sizeof...(Rest) > 0) { + /* Odd number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + + SaveRegister(); + } else { + /* Only one register. */ + SaveRegister(); + } + + #undef SVC_CODEGEN_HANDLER + } + + template + static ALWAYS_INLINE void RestoreRegistersImpl() { + #define SVC_CODEGEN_HANDLER(n) \ + do { if constexpr (n < Pairs.size()) { RestoreRegisterPair(); } } while (0) + + if constexpr (sizeof...(Rest) % 2 == 1) { + /* Even number of registers. */ + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else if constexpr (sizeof...(Rest) > 0) { + /* Odd number of registers. */ + RestoreRegister(); + + constexpr auto Pairs = RegisterPairHelper::Pairs; + static_assert(Pairs.size() <= 8); + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + } else { + /* Only one register. 
*/ + RestoreRegister(); + } + + #undef SVC_CODEGEN_HANDLER + } + + public: + template + static ALWAYS_INLINE void SaveRegisters() { + if constexpr (sizeof...(Registers) > 0) { + SaveRegistersImpl(); + } + } + + template + static ALWAYS_INLINE void RestoreRegisters() { + if constexpr (sizeof...(Registers) > 0) { + RestoreRegistersImpl(); + } + } + + template + static ALWAYS_INLINE void ClearRegisters() { + static_assert(sizeof...(Registers) <= 8); + (ClearRegister(), ...); + } + + template + static ALWAYS_INLINE void AllocateStackSpace() { + if constexpr (Size > 0) { + __asm__ __volatile__("sub sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory"); + } + } + + template + static ALWAYS_INLINE void FreeStackSpace() { + if constexpr (Size > 0) { + __asm__ __volatile__("add sp, sp, %c[size]" :: [size]"i"(util::AlignUp(Size, 16)) : "memory"); + } + } + + template + static ALWAYS_INLINE void MoveRegister() { + __asm__ __volatile__("mov x%c[dst], x%c[src]" :: [dst]"i"(Dst), [src]"i"(Src) : "memory"); + } + + template + static ALWAYS_INLINE void LoadFromStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("ldr w%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("ldr x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void LoadPairFromStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("ldp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("ldp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void StoreToStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("str w%c[r], [sp, %c[offset]]" :: 
[r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("str x%c[r], [sp, %c[offset]]" :: [r]"i"(Reg), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void StorePairToStack() { + if constexpr (Size == 4) { + __asm__ __volatile__("stp w%c[r0], w%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else if constexpr (Size == 8) { + __asm__ __volatile__("stp x%c[r0], x%c[r1], [sp, %c[offset]]" :: [r0]"i"(Reg0), [r1]"i"(Reg1), [offset]"i"(Offset) : "memory"); + } else { + static_assert(Size != Size); + } + } + + template + static ALWAYS_INLINE void Pack() { + __asm__ __volatile__("orr x%c[dst], x%c[low], x%c[high], lsl #32" :: [dst]"i"(Dst), [low]"i"(Low), [high]"i"(High) : "memory"); + } + + template + static ALWAYS_INLINE void Unpack() { + if constexpr (Src != Low) { + MoveRegister(); + } + + __asm__ __volatile__("lsr x%c[high], x%c[src], #32" :: [high]"i"(High), [src]"i"(Src) : "memory"); + } + + template + static ALWAYS_INLINE void LoadStackAddress() { + if constexpr (Offset > 0) { + __asm__ __volatile__("add x%c[dst], sp, %c[offset]" :: [dst]"i"(Dst), [offset]"i"(Offset) : "memory"); + } else if constexpr (Offset == 0) { + __asm__ __volatile__("mov x%c[dst], sp" :: [dst]"i"(Dst) : "memory"); + } + } + }; + + class Aarch32CodeGenerator { + /* TODO */ + }; + + template + static ALWAYS_INLINE void GenerateCodeForMetaCode(MetaCodeHolder) { + constexpr auto MetaCode = UNWRAP_TEMPLATE_CONSTANT(MetaCodeHolder); + constexpr size_t NumOperations = MetaCode.GetNumOperations(); + static_assert(NumOperations <= 64); + #define SVC_CODEGEN_HANDLER(n) do { if constexpr (n < NumOperations) { constexpr auto Operation = MetaCode.GetOperation(n); GenerateCodeForOperation(WRAP_TEMPLATE_CONSTANT(Operation)); } } while (0) + SVC_CODEGEN_FOR_I_FROM_0_TO_64(SVC_CODEGEN_HANDLER) + #undef SVC_CODEGEN_HANDLER + } + + #undef 
SVC_CODEGEN_FOR_I_FROM_0_TO_64 + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp new file mode 100644 index 000000000..c87b4e7c3 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::svc::codegen::impl { + + template + constexpr inline bool IsIntegral = std::is_integral::value; + + template<> + constexpr inline bool IsIntegral<::ams::svc::Address> = true; + + template<> + constexpr inline bool IsIntegral<::ams::svc::Size> = true; + + template + constexpr inline bool IsKUserPointer = std::is_base_of::value; + + template + constexpr inline bool IsIntegralOrUserPointer = IsIntegral || IsUserPointer || IsKUserPointer; + + template + constexpr std::index_sequence IndexSequenceCat(std::index_sequence, std::index_sequence) { + return std::index_sequence{}; + } + + template + constexpr inline std::array ConvertToArray(std::index_sequence) { + return std::array{ Is... 
}; + } + + template + class FunctionTraits { + private: + template + static R GetReturnTypeImpl(R(*)(A...)); + + template + static std::tuple GetArgsImpl(R(*)(A...)); + public: + using ReturnType = decltype(GetReturnTypeImpl(Function)); + using ArgsType = decltype(GetArgsImpl(Function)); + }; + + enum class CodeGenerationKind { + SvcInvocationToKernelProcedure, + PrepareForKernelProcedureToSvcInvocation, + KernelProcedureToSvcInvocation, + Invalid, + }; + + enum class ArgumentType { + In, + Out, + InUserPointer, + OutUserPointer, + Invalid, + }; + + template + constexpr inline ArgumentType GetArgumentType = [] { + static_assert(!std::is_reference::value, "SVC ABI: Reference types not allowed."); + static_assert(sizeof(T) <= sizeof(uint64_t), "SVC ABI: Type too large"); + if constexpr (std::is_pointer::value) { + static_assert(!std::is_const::type>::value, "SVC ABI: Output (T*) must not be const"); + return ArgumentType::Out; + } else if constexpr (IsUserPointer || IsKUserPointer) { + if constexpr (T::IsInput) { + return ArgumentType::InUserPointer; + } else { + return ArgumentType::OutUserPointer; + } + } else { + return ArgumentType::In; + } + }(); + + template + struct AbiType { + static constexpr size_t RegisterSize = RS; + static constexpr size_t RegisterCount = RC; + static constexpr size_t ArgumentRegisterCount = ARC; + static constexpr size_t PointerSize = PC; + + template + static constexpr size_t GetSize() { + if constexpr (std::is_same::value || std::is_same::value || IsUserPointer || IsKUserPointer) { + return PointerSize; + } else if constexpr(std::is_pointer::value) { + /* Out parameter. 
*/ + return GetSize::type>(); + } else if constexpr (std::is_same::value) { + return 0; + } else { + return sizeof(T); + } + } + + template + static constexpr inline size_t Size = GetSize(); + }; + + using Aarch64Lp64Abi = AbiType<8, 8, 8, 8>; + using Aarch64Ilp32Abi = AbiType<8, 8, 8, 4>; + using Aarch32Ilp32Abi = AbiType<4, 4, 4, 4>; + + using Aarch64SvcInvokeAbi = AbiType<8, 8, 8, 8>; + using Aarch32SvcInvokeAbi = AbiType<4, 8, 4, 4>; + + struct Abi { + size_t register_size; + size_t register_count; + size_t pointer_size; + + template + static constexpr Abi Convert() { return { AbiType::RegisterSize, AbiType::RegisterCount, AbiType::PointerSize }; } + }; + + template + constexpr inline bool IsPassedByPointer = [] { + if (GetArgumentType != ArgumentType::In) { + return true; + } + + return (!IsIntegral && AbiType::template Size > AbiType::RegisterSize); + }(); + + template + class RegisterAllocator { + private: + std::array map; + public: + constexpr explicit RegisterAllocator() : map() { /* ... 
*/ } + + constexpr bool IsAllocated(size_t i) const { return this->map[i]; } + constexpr bool IsFree(size_t i) const { return !this->IsAllocated(i); } + + constexpr void Allocate(size_t i) { + if (this->IsAllocated(i)) { + std::abort(); + } + + this->map[i] = true; + } + + constexpr bool TryAllocate(size_t i) { + if (this->IsAllocated(i)) { + return false; + } + + this->map[i] = true; + return true; + } + + constexpr size_t AllocateFirstFree() { + for (size_t i = 0; i < N; i++) { + if (!this->IsAllocated(i)) { + this->map[i] = true; + return i; + } + } + + std::abort(); + } + + constexpr void Free(size_t i) { + if (!this->IsAllocated(i)) { + std::abort(); + } + + this->map[i] = false; + } + + constexpr size_t GetRegisterCount() const { + return N; + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp new file mode 100644 index 000000000..3fffe60fa --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp @@ -0,0 +1,540 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" +#include "svc_codegen_impl_layout.hpp" +#include "svc_codegen_impl_meta_code.hpp" +#include "svc_codegen_impl_layout_conversion.hpp" +#include "svc_codegen_impl_code_generator.hpp" + +namespace ams::svc::codegen::impl { + + template + class KernelSvcWrapperHelperImpl; + + template + class KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, ReturnType, std::tuple> { + private: + static constexpr bool TryToPerformCoalescingOptimizations = true; + + template + static constexpr void CoalesceOperations(MetaCodeGenerator &out_mcg, const std::array stack_modified, size_t stack_top) { + enum class State { WaitingForRegister, ParsingRegister, ParsedRegister, EmittingCode }; + State cur_state = State::WaitingForRegister; + size_t num_regs = 0; + size_t registers[2] = { InvalidRegisterId, InvalidRegisterId }; + size_t widths[2] = {}; + size_t index = 0; + size_t store_base = 0; + while (index < stack_top) { + if (cur_state == State::WaitingForRegister) { + while (stack_modified[index] == InvalidRegisterId && index < stack_top) { + index++; + } + cur_state = State::ParsingRegister; + } else if (cur_state == State::ParsingRegister) { + const size_t start_index = index; + if (num_regs == 0) { + store_base = start_index; + } + const size_t reg = stack_modified[index]; + registers[num_regs] = reg; + while (index < stack_top && index < start_index + KernelAbiType::RegisterSize && stack_modified[index] == reg) { + widths[num_regs]++; + index++; + } + num_regs++; + cur_state = State::ParsedRegister; + } else if (cur_state == State::ParsedRegister) { + if (num_regs == 2 || stack_modified[index] == InvalidRegisterId) { + cur_state = State::EmittingCode; + } else { + cur_state = State::ParsingRegister; + } + } else if (cur_state == State::EmittingCode) { + /* Emit an operation! 
*/ + MetaCode::Operation st_op = {}; + + if (num_regs == 2) { + if (registers[0] == registers[1]) { + std::abort(); + } + if (widths[0] == widths[1]) { + st_op.kind = PairKind; + st_op.num_parameters = 4; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = registers[1]; + st_op.parameters[2] = store_base; + st_op.parameters[3] = widths[0]; + } else { + std::abort(); + } + } else if (num_regs == 1) { + st_op.kind = SingleKind; + st_op.num_parameters = 3; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = store_base; + st_op.parameters[2] = widths[0]; + } else { + std::abort(); + } + + out_mcg.AddOperationDirectly(st_op); + + /* Go back to beginning of parse. */ + for (size_t i = 0; i < num_regs; i++) { + registers[i] = InvalidRegisterId; + widths[i] = 0; + } + num_regs = 0; + cur_state = State::WaitingForRegister; + } else { + std::abort(); + } + } + + if (cur_state == State::ParsedRegister) { + /* Emit an operation! */ + if (num_regs == 2 && widths[0] == widths[1]) { + MetaCode::Operation st_op = {}; + st_op.kind = PairKind; + st_op.num_parameters = 4; + st_op.parameters[0] = registers[0]; + st_op.parameters[1] = registers[1]; + st_op.parameters[2] = store_base; + st_op.parameters[3] = widths[0]; + out_mcg.AddOperationDirectly(st_op); + } else { + for (size_t i = 0; i < num_regs; i++) { + MetaCode::Operation st_op = {}; + st_op.kind = SingleKind; + st_op.num_parameters = 3; + st_op.parameters[0] = registers[i]; + st_op.parameters[1] = store_base; + st_op.parameters[2] = widths[i]; + + store_base += widths[i]; + out_mcg.AddOperationDirectly(st_op); + } + } + } + } + + /* Basic optimization of store coalescing. */ + template + static constexpr bool TryPrepareForKernelProcedureToSvcInvocationCoalescing(std::tuple, MetaCodeGenerator &out_mcg, RegisterAllocator &out_allocator) { + /* For debugging, allow ourselves to disable these optimizations. 
*/ + if constexpr (!TryToPerformCoalescingOptimizations) { + return false; + } + + /* Generate expected code. */ + MetaCodeGenerator mcg; + RegisterAllocator allocator = out_allocator; + (Conversion::template GenerateCode(mcg, allocator), ...); + MetaCode mc = mcg.GetMetaCode(); + + /* This is a naive optimization pass. */ + /* We want to reorder code of the form: */ + /* - Store to Stack sequence 0... */ + /* - Load Stack Address 0 */ + /* - Store to Stack 1... */ + /* - Load Stack Address 1 */ + /* Into the form: */ + /* - Store to stack Sequence 0 + 1... */ + /* - Load Stack Address 0 + 1... */ + /* But only if they are semantically equivalent. */ + + /* We'll do a simple, naive pass to check if any registers are stored to stack that are modified. */ + /* This shouldn't happen in any cases we care about, so we can probably get away with it. */ + /* TODO: Eventually this should be e.g. operation.ModifiesRegister() / operation.CanReorderBefore() */ + /* However, this will be more work, and if it's not necessary it can be put off until it is. */ + constexpr size_t MaxStackIndex = 0x100; + constexpr size_t InvalidRegisterId = N; + bool register_modified[N] = {}; + std::array stack_address_loaded = {}; + for (size_t i = 0; i < N; i++) { stack_address_loaded[i] = MaxStackIndex; } + std::array stack_modified = {}; + for (size_t i = 0; i < MaxStackIndex; i++) { stack_modified[i] = InvalidRegisterId; } + size_t stack_top = 0; + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind == MetaCode::OperationKind::StoreToStack) { + if (register_modified[mco.parameters[0]]) { + return false; + } + const size_t offset = mco.parameters[1]; + const size_t width = mco.parameters[2] == 0 ? 
KernelAbiType::RegisterSize : mco.parameters[2]; + for (size_t j = 0; j < width; j++) { + const size_t index = offset + j; + if (index >= MaxStackIndex) { + std::abort(); + } + if (stack_modified[index] != InvalidRegisterId) { + return false; + } + stack_modified[index] = mco.parameters[0]; + stack_top = std::max(index + 1, stack_top); + } + } else if (mco.kind == MetaCode::OperationKind::LoadStackAddress) { + if (stack_address_loaded[mco.parameters[0]] != MaxStackIndex) { + return false; + } + if (register_modified[mco.parameters[0]]) { + return false; + } + if (mco.parameters[1] >= MaxStackIndex) { + std::abort(); + } + stack_address_loaded[mco.parameters[0]] = mco.parameters[1]; + register_modified[mco.parameters[0]] = true; + } else { + /* TODO: Better operation reasoning process. */ + return false; + } + } + + /* Looks like we can reorder! */ + /* Okay, let's do this the naive way, too. */ + constexpr auto PairKind = MetaCode::OperationKind::StorePairToStack; + constexpr auto SingleKind = MetaCode::OperationKind::StoreToStack; + CoalesceOperations(out_mcg, stack_modified, stack_top); + for (size_t i = 0; i < N; i++) { + if (stack_address_loaded[i] != MaxStackIndex) { + MetaCode::Operation load_op = {}; + load_op.kind = MetaCode::OperationKind::LoadStackAddress; + load_op.num_parameters = 2; + load_op.parameters[0] = i; + load_op.parameters[1] = stack_address_loaded[i]; + out_mcg.AddOperationDirectly(load_op); + } + } + + /* Ensure the out allocator state is correct. */ + out_allocator = allocator; + + return true; + } + + /* Basic optimization of load coalescing. */ + template + static constexpr bool TryKernelProcedureToSvcInvocationCoalescing(std::tuple, MetaCodeGenerator &out_mcg, RegisterAllocator &out_allocator) { + /* For debugging, allow ourselves to disable these optimizations. */ + if constexpr (!TryToPerformCoalescingOptimizations) { + return false; + } + + /* Generate expected code. 
/* We'll do a simple, naive pass to check if any registers are used after being loaded from stack. */
== 0 ? KernelAbiType::RegisterSize : mco.parameters[2]; + for (size_t j = 0; j < width; j++) { + const size_t index = offset + j; + if (index >= MaxStackIndex) { + std::abort(); + } + if (stack_modified[index] != InvalidRegisterId) { + return false; + } + stack_modified[index] = mco.parameters[0]; + stack_top = std::max(index + 1, stack_top); + } + } else { + /* TODO: Better operation reasoning process. */ + return false; + } + } + + /* Any operations that don't load from stack, we can just re-add. */ + for (size_t i = 0; i < mc.GetNumOperations(); i++) { + const auto mco = mc.GetOperation(i); + if (mco.kind != MetaCode::OperationKind::LoadFromStack) { + out_mcg.AddOperationDirectly(mco); + } + } + constexpr auto PairKind = MetaCode::OperationKind::LoadPairFromStack; + constexpr auto SingleKind = MetaCode::OperationKind::LoadFromStack; + CoalesceOperations(out_mcg, stack_modified, stack_top); + + /* Ensure the out allocator state is correct. */ + out_allocator = allocator; + + return true; + } + + template + struct TypeIndexFilter { + template + static constexpr auto GetFilteredTupleImpl(UseArrayHolder, std::tuple, std::index_sequence) { + constexpr auto UseArray = UNWRAP_TEMPLATE_CONSTANT(UseArrayHolder); + static_assert(sizeof...(TailType) == sizeof...(TailIndex)); + static_assert(HeadIndex <= UseArray.size()); + + if constexpr (sizeof...(TailType) == 0) { + if constexpr (!UseArray[HeadIndex]) { + return std::tuple{}; + } else { + return std::tuple<>{}; + } + } else { + auto tail_tuple = GetFilteredTupleImpl(UseArrayHolder{}, std::tuple{}, std::index_sequence{}); + if constexpr (!UseArray[HeadIndex]) { + return std::tuple_cat(std::tuple{}, tail_tuple); + } else { + return std::tuple_cat(std::tuple<>{}, tail_tuple); + } + } + } + + template + static constexpr auto GetFilteredTuple(UseArrayHolder) { + return GetFilteredTupleImpl(UseArrayHolder{}, std::tuple{}, std::make_index_sequence()); + } + }; + + template + static constexpr auto 
GetModifiedOperations(AllocatorHolder, std::tuple ops) { + constexpr size_t ModifyRegister = [] { + auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder); + return allocator.AllocateFirstFree(); + }(); + + using ModifiedFirstOperation = typename FirstOperation::template ModifiedType; + using NewMoveOperation = typename LayoutConversionBase::template OperationMove; + return std::tuple{}; + } + + template + static constexpr auto GenerateBeforeOperations(MetaCodeGenerator &mcg, AllocatorHolder, std::tuple ops) -> RegisterAllocator { + constexpr size_t NumOperations = 1 + sizeof...(OtherOperations); + using OperationsTuple = decltype(ops); + using FilterHelper = TypeIndexFilter; + + constexpr auto ProcessOperation = [](MetaCodeGenerator &pr_mcg, auto &allocator, Operation) { + if (Conversion::template CanGenerateCode(allocator)) { + Conversion::template GenerateCode(pr_mcg, allocator); + return true; + } + return false; + }; + + constexpr auto ProcessResults = [ProcessOperation](std::tuple) { + auto allocator = UNWRAP_TEMPLATE_CONSTANT(AllocatorHolder); + MetaCodeGenerator pr_mcg; + auto use_array = std::array{ ProcessOperation(pr_mcg, allocator, Operations{})... 
}; + return std::make_tuple(use_array, allocator, pr_mcg); + }(OperationsTuple{}); + + constexpr auto CanGenerate = std::get<0>(ProcessResults); + constexpr auto AfterAllocator = std::get<1>(ProcessResults); + constexpr auto GeneratedCode = std::get<2>(ProcessResults).GetMetaCode(); + + for (size_t i = 0; i < GeneratedCode.GetNumOperations(); i++) { + mcg.AddOperationDirectly(GeneratedCode.GetOperation(i)); + } + + constexpr auto FilteredOperations = FilterHelper::template GetFilteredTuple(WRAP_TEMPLATE_CONSTANT(CanGenerate)); + static_assert(std::tuple_size::value <= NumOperations); + if constexpr (std::tuple_size::value > 0) { + if constexpr (std::tuple_size::value != NumOperations) { + return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations); + } else { + /* No progress was made, so we need to make a change. */ + constexpr auto ModifiedOperations = GetModifiedOperations(WRAP_TEMPLATE_CONSTANT(AfterAllocator), FilteredOperations); + return GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(AfterAllocator), ModifiedOperations); + } + } else { + return AfterAllocator; + } + } + + static constexpr MetaCode GenerateOriginalBeforeMetaCode() { + MetaCodeGenerator mcg; + RegisterAllocator allocator; + static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount); + + /* Reserve registers used by the input layout. */ + constexpr auto InitialAllocator = [] { + RegisterAllocator initial_allocator; + for (size_t i = 0; i < SvcAbiType::RegisterCount; i++) { + if (Conversion::LayoutForSvc.GetInputLayout().UsesRegister(i)) { + initial_allocator.Allocate(i); + } + } + return initial_allocator; + }(); + + /* Save every register that needs to be preserved to the stack. */ + if constexpr (Conversion::NumPreserveRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template SaveRegisters(); + }(typename Conversion::PreserveRegisters{}); + } + + /* Allocate space on the stack for parameters that need it. 
*/ + if constexpr (UsedStackSpace > 0) { + mcg.template AllocateStackSpace(); + } + + /* Generate code for before operations. */ + if constexpr (Conversion::NumBeforeOperations > 0) { + allocator = GenerateBeforeOperations(mcg, WRAP_TEMPLATE_CONSTANT(InitialAllocator), typename Conversion::BeforeOperations{}); + } else { + allocator = InitialAllocator; + } + + /* Generate code for after operations. */ + if constexpr (Conversion::NumAfterOperations > 0) { + if (!TryPrepareForKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) { + /* We're not eligible for the straightforward optimization. */ + [&mcg, &allocator](std::index_sequence) { + (Conversion::template GenerateCode::type, CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation>(mcg, allocator), ...); + }(std::make_index_sequence()); + } + } + + return mcg.GetMetaCode(); + } + public: + using SvcAbiType = _SvcAbiType; + using UserAbiType = _UserAbiType; + using KernelAbiType = _KernelAbiType; + + using Conversion = LayoutConversion; + + static constexpr size_t UsedStackSpace = Conversion::NonAbiUsedStackIndices * KernelAbiType::RegisterSize; + + static constexpr MetaCode OriginalBeforeMetaCode = [] { + return GenerateOriginalBeforeMetaCode(); + }(); + + static constexpr MetaCode OriginalAfterMetaCode = [] { + MetaCodeGenerator mcg; + RegisterAllocator allocator; + static_assert(SvcAbiType::RegisterCount == KernelAbiType::RegisterCount); + + /* Generate code for after operations. */ + if constexpr (Conversion::NumAfterOperations > 0) { + if (!TryKernelProcedureToSvcInvocationCoalescing(typename Conversion::AfterOperations{}, mcg, allocator)) { + [&mcg, &allocator](std::index_sequence) { + (Conversion::template GenerateCode::type, CodeGenerationKind::KernelProcedureToSvcInvocation>(mcg, allocator), ...); + }(std::make_index_sequence()); + } + } + + /* Allocate space on the stack for parameters that need it. 
*/ + if constexpr (UsedStackSpace > 0) { + mcg.template FreeStackSpace(); + } + + if constexpr (Conversion::NumClearRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template ClearRegisters(); + }(typename Conversion::ClearRegisters{}); + } + + /* Restore registers we previously saved to the stack. */ + if constexpr (Conversion::NumPreserveRegisters > 0) { + [&mcg](std::index_sequence) { + mcg.template RestoreRegisters(); + }(typename Conversion::PreserveRegisters{}); + } + + return mcg.GetMetaCode(); + }(); + + /* TODO: Implement meta code optimization via separate layer. */ + /* Right now some basic optimizations are just implemented by the above generators. */ + static constexpr MetaCode OptimizedBeforeMetaCode = OriginalBeforeMetaCode; + static constexpr MetaCode OptimizedAfterMetaCode = OriginalAfterMetaCode; + }; + + template + class KernelSvcWrapperHelper { + private: + using Traits = FunctionTraits; + public: + using Impl = KernelSvcWrapperHelperImpl<_SvcAbiType, _UserAbiType, _KernelAbiType, typename Traits::ReturnType, typename Traits::ArgsType>; + + static constexpr bool IsAarch64Kernel = std::is_same<_KernelAbiType, Aarch64Lp64Abi>::value; + static constexpr bool IsAarch32Kernel = std::is_same<_KernelAbiType, Aarch32Ilp32Abi>::value; + static_assert(IsAarch64Kernel || IsAarch32Kernel); + + using CodeGenerator = typename std::conditional::type; + + static constexpr auto BeforeMetaCode = Impl::OptimizedBeforeMetaCode; + static constexpr auto AfterMetaCode = Impl::OptimizedAfterMetaCode; + + +/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */ +#pragma GCC push_options +#pragma GCC optimize ("omit-frame-pointer") + + static ALWAYS_INLINE void WrapSvcFunction() { + /* Generate appropriate assembly. 
*/ + GenerateCodeForMetaCode(WRAP_TEMPLATE_CONSTANT(BeforeMetaCode)); + ON_SCOPE_EXIT { GenerateCodeForMetaCode(WRAP_TEMPLATE_CONSTANT(AfterMetaCode)); }; + + return reinterpret_cast(Function)(); + } + +#pragma GCC pop_options + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp new file mode 100644 index 000000000..132b13ae1 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" + +namespace ams::svc::codegen::impl { + + class ParameterLayout { + public: + static constexpr size_t MaxParameters = 8; + private: + static constexpr size_t InvalidIndex = std::numeric_limits::max(); + private: + /* ABI parameters. */ + Abi abi; + + /* Parameter storage. */ + size_t num_parameters; + Parameter parameters[MaxParameters]; + public: + constexpr explicit ParameterLayout(Abi a) + : abi(a), num_parameters(0), parameters() + { /* ... 
*/ } + + constexpr void AddSingle(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t idx) { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + this->parameters[i].AddLocation(Location(s, idx)); + return; + } + } + this->parameters[this->num_parameters++] = Parameter(id, type, ts, ps, p, Location(s, idx)); + } + + constexpr size_t Add(Parameter::Identifier id, ArgumentType type, size_t ts, size_t ps, bool p, Storage s, size_t i) { + size_t required_registers = 0; + + while (required_registers * this->abi.register_size < ps) { + this->AddSingle(id, type, ts, ps, p, s, i++); + required_registers++; + } + + return required_registers; + } + + constexpr bool UsesLocation(Location l) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].UsesLocation(l)) { + return true; + } + } + return false; + } + + constexpr bool UsesRegister(size_t i) const { + return this->UsesLocation(Location(Storage::Register, i)); + } + + constexpr bool IsRegisterFree(size_t i) const { + return !(this->UsesRegister(i)); + } + + constexpr size_t GetNumParameters() const { + return this->num_parameters; + } + + constexpr Parameter GetParameter(size_t i) const { + return this->parameters[i]; + } + + constexpr bool HasParameter(Parameter::Identifier id) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + return true; + } + } + return false; + } + + constexpr Parameter GetParameter(Parameter::Identifier id) const { + for (size_t i = 0; i < this->num_parameters; i++) { + if (this->parameters[i].Is(id)) { + return this->parameters[i]; + } + } + std::abort(); + } + }; + + class ProcedureLayout { + private: + Abi abi; + ParameterLayout input; + ParameterLayout output; + private: + template + constexpr void ProcessArgument(size_t i, size_t &NGRN, size_t &NSAA) { + /* We currently don't implement support for floating point types. 
*/ + static_assert(!std::is_floating_point::value); + static_assert(!std::is_same::value); + + constexpr size_t ArgumentTypeSize = AbiType::template Size; + constexpr bool PassedByPointer = IsPassedByPointer; + constexpr size_t ArgumentPassSize = PassedByPointer ? AbiType::PointerSize : ArgumentTypeSize; + + /* TODO: Is there ever a case where this is not the correct alignment? */ + constexpr size_t ArgumentAlignment = ArgumentPassSize; + + /* Ensure NGRN is aligned. */ + if constexpr (ArgumentAlignment > AbiType::RegisterSize) { + NGRN += (NGRN & 1); + } + + /* TODO: We don't support splitting arguments between registers and stack, but AAPCS32 does. */ + /* Is this a problem? Nintendo seems to not ever do this. */ + + auto id = Parameter::Identifier("FunctionParameter", i); + + /* Allocate integral types specially per aapcs. */ + constexpr ArgumentType Type = GetArgumentType; + const size_t registers_available = AbiType::RegisterCount - NGRN; + if constexpr (!PassedByPointer && IsIntegralOrUserPointer && ArgumentTypeSize > AbiType::RegisterSize) { + if (registers_available >= 2) { + this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN); + NGRN += 2; + } else { + /* Argument went on stack, so stop allocating arguments in registers. */ + NGRN = AbiType::RegisterCount; + + NSAA += (NSAA & 1); + this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA); + NSAA += 2; + } + } else { + if (ArgumentPassSize <= AbiType::RegisterSize * registers_available) { + NGRN += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Register, NGRN); + } else { + /* Argument went on stack, so stop allocating arguments in registers. */ + NGRN = AbiType::RegisterCount; + + /* TODO: Stack pointer alignment is only ensured for aapcs64. */ + /* What should we do here? 
*/ + + NSAA += this->input.Add(id, Type, ArgumentTypeSize, ArgumentPassSize, PassedByPointer, Storage::Stack, NSAA); + } + } + } + public: + constexpr explicit ProcedureLayout(Abi a) : abi(a), input(a), output(a) { /* ... */ } + + template + static constexpr ProcedureLayout Create() { + ProcedureLayout layout(Abi::Convert()); + + /* 1. The Next General-purpose Register Number (NGRN) is set to zero. */ + [[maybe_unused]] size_t NGRN = 0; + + /* 2. The next stacked argument address (NSAA) is set to the current stack-pointer value (SP). */ + [[maybe_unused]] size_t NSAA = 0; /* Should be considered an offset from stack pointer. */ + + /* 3. Handle the return type. */ + /* TODO: It's unclear how to handle the non-integral and too-large case. */ + if constexpr (!std::is_same::value) { + constexpr size_t ReturnTypeSize = AbiType::template Size; + layout.output.Add(Parameter::Identifier("ReturnType"), ArgumentType::Invalid, ReturnTypeSize, ReturnTypeSize, false, Storage::Register, 0); + static_assert(IsIntegral || ReturnTypeSize <= AbiType::RegisterSize); + } + + /* Process all arguments, in order. */ + size_t i = 0; + (layout.ProcessArgument(i++, NGRN, NSAA), ...); + + return layout; + } + + constexpr ParameterLayout GetInputLayout() const { + return this->input; + } + + constexpr ParameterLayout GetOutputLayout() const { + return this->output; + } + + constexpr Parameter GetParameter(Parameter::Identifier id) const { + if (this->input.HasParameter(id)) { + return this->input.GetParameter(id); + } else { + return this->output.GetParameter(id); + } + } + }; + + class SvcInvocationLayout { + private: + Abi abi; + ParameterLayout input; + ParameterLayout output; + private: + template + constexpr void ForEachInputArgument(ParameterLayout param_layout, F f) { + /* We want to iterate over the parameters in sorted order. 
*/ + std::array map = {}; + const size_t num_parameters = param_layout.GetNumParameters(); + for (size_t i = 0; i < num_parameters; i++) { + map[i] = i; + } + for (size_t i = 1; i < num_parameters; i++) { + for (size_t j = i; j > 0 && param_layout.GetParameter(map[j-1]).GetLocation(0) > param_layout.GetParameter(map[j]).GetLocation(0); j--) { + /* std::swap is not constexpr until c++20 :( */ + /* TODO: std::swap(map[j], map[j-1]); */ + const size_t tmp = map[j]; + map[j] = map[j-1]; + map[j-1] = tmp; + } + } + + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::In && !Parameter.IsPassedByPointer()) { + f(Parameter); + } + } + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::InUserPointer) { + f(Parameter); + } + } + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(map[i]); + if (Parameter.GetArgumentType() == ArgumentType::OutUserPointer) { + f(Parameter); + } + } + } + + template + constexpr void ForEachInputPointerArgument(ParameterLayout param_layout, F f) { + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(i); + if (Parameter.GetArgumentType() == ArgumentType::In && Parameter.IsPassedByPointer()) { + f(Parameter); + } + } + } + + template + constexpr void ForEachOutputArgument(ParameterLayout param_layout, F f) { + for (size_t i = 0; i < param_layout.GetNumParameters(); i++) { + const auto Parameter = param_layout.GetParameter(i); + if (Parameter.GetArgumentType() == ArgumentType::Out) { + f(Parameter); + } + } + } + + template + static constexpr void AddRegisterParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + for (size_t i = 0; i < 
param.GetNumLocations(); i++) { + const auto location = param.GetLocation(i); + if (location.GetStorage() == Storage::Register) { + reg_allocator.Allocate(location.GetIndex()); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, location.GetIndex()); + } + } + } + + template + static constexpr void AddStackParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + for (size_t i = 0; i < param.GetNumLocations(); i++) { + const auto location = param.GetLocation(i); + if (location.GetStorage() == Storage::Stack) { + const size_t free_reg = reg_allocator.AllocateFirstFree(); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), param.GetTypeSize(), param.GetPassedSize(), param.IsPassedByPointer(), Storage::Register, free_reg); + } + } + } + + template + static constexpr void AddIndirectParameter(ParameterLayout &dst_layout, RegisterAllocator ®_allocator, Parameter param) { + const size_t type_size = param.GetTypeSize(); + for (size_t sz = 0; sz < type_size; sz += AbiType::RegisterSize) { + const size_t free_reg = reg_allocator.AllocateFirstFree(); + dst_layout.AddSingle(param.GetIdentifier(), param.GetArgumentType(), type_size, type_size, false, Storage::Register, free_reg); + } + } + public: + constexpr explicit SvcInvocationLayout(Abi a) : abi(a), input(a), output(a) { /* ... 
*/ } + + template + static constexpr SvcInvocationLayout Create(ProcedureLayout procedure_layout) { + SvcInvocationLayout layout(Abi::Convert()); + RegisterAllocator input_register_allocator, output_register_allocator; + + /* Input first wants to map in register -> register */ + layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddRegisterParameter(layout.input, input_register_allocator, parameter); + }); + + /* And then input wants to map in stack -> stack */ + layout.ForEachInputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddStackParameter(layout.input, input_register_allocator, parameter); + }); + + /* And then input wants to map in indirects -> register */ + layout.ForEachInputPointerArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddIndirectParameter(layout.input, input_register_allocator, parameter); + }); + + /* Handle the return type. */ + if (procedure_layout.GetOutputLayout().GetNumParameters() > 0) { + if (procedure_layout.GetOutputLayout().GetNumParameters() != 1) { + std::abort(); + } + const auto return_param = procedure_layout.GetOutputLayout().GetParameter(0); + if (return_param.GetIdentifier() != Parameter::Identifier("ReturnType")) { + std::abort(); + } + AddRegisterParameter(layout.output, output_register_allocator, return_param); + } + + /* Handle other outputs. 
*/ + layout.ForEachOutputArgument(procedure_layout.GetInputLayout(), [&](Parameter parameter) { + AddIndirectParameter(layout.output, output_register_allocator, parameter); + }); + + return layout; + } + + constexpr ParameterLayout GetInputLayout() const { + return this->input; + } + + constexpr ParameterLayout GetOutputLayout() const { + return this->output; + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp new file mode 100644 index 000000000..2e3d95775 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp @@ -0,0 +1,491 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" +#include "svc_codegen_impl_parameter.hpp" +#include "svc_codegen_impl_layout.hpp" +#include "svc_codegen_impl_meta_code.hpp" + +namespace ams::svc::codegen::impl { + + class LayoutConversionBase { + public: + enum class OperationKind { + Move, + LoadAndStore, + PackAndUnpack, + Scatter, + Invalid, + }; + + class OperationMoveImpl; + class OperationLoadAndStoreImpl; + class OperationPackAndUnpackImpl; + class OperationScatterImpl; + + class OperationBase{}; + + template + class Operation : public OperationBase { + public: + static constexpr OperationKind Kind = _Kind; + static constexpr size_t RegisterSize = RS; + static constexpr size_t PassedSize = PS; + static constexpr size_t StackOffset = SO; + static constexpr size_t ProcedureIndex = PIdx; + + static constexpr size_t NumSvcIndices = sizeof...(SIdx); + static constexpr std::array SvcIndices = { SIdx... }; + static constexpr std::index_sequence SvcIndexSequence = {}; + + template + static constexpr size_t SvcIndex = SvcIndices[I]; + + template + static void ForEachSvcIndex(F f) { + (f(SIdx), ...); + } + + using ImplType = typename std::conditional::type>::type>::type>::type; + + template + using ModifiedType = Operation; + }; + + template + using OperationMove = Operation; + + template + using OperationLoadAndStore = Operation; + + template + using OperationPackAndUnpack = Operation; + + template + using OperationScatter = Operation; + + class OperationMoveImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::Move); + allocator.Free(Operation::template SvcIndex<0>); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::Move); + 
allocator.Free(Operation::template SvcIndex<0>); + allocator.Allocate(Operation::ProcedureIndex); + mcg.template MoveRegister>(); + } + }; + + class OperationLoadAndStoreImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::LoadAndStore); + allocator.Free(Operation::template SvcIndex<0>); + return true; + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::LoadAndStore); + allocator.Free(Operation::template SvcIndex<0>); + constexpr size_t StackOffset = Operation::ProcedureIndex * Operation::RegisterSize; + mcg.template StoreToStack, StackOffset>(); + } + }; + + class OperationPackAndUnpackImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Free(Operation::template SvcIndex<1>); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + allocator.Free(Operation::template SvcIndex<0>); + allocator.Free(Operation::template SvcIndex<1>); + allocator.Allocate(Operation::ProcedureIndex); + mcg.template Pack, Operation::template SvcIndex<1>>(); + } + + template + static constexpr void GenerateCodeForPrepareForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + /* ... 
*/ + } + + template + static constexpr void GenerateCodeForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::PackAndUnpack); + mcg.template Unpack, Operation::template SvcIndex<1>, Operation::ProcedureIndex>(); + } + }; + + class OperationScatterImpl { + public: + template + static constexpr bool CanGenerateCodeForSvcInvocationToKernelProcedure(RegisterAllocator allocator) { + static_assert(Operation::Kind == OperationKind::Scatter); + [&allocator](std::index_sequence) { + (allocator.Free(SvcIndex), ...); + }(Operation::SvcIndexSequence); + return allocator.TryAllocate(Operation::ProcedureIndex); + } + + template + static constexpr void GenerateCodeForSvcInvocationToKernelProcedure(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + static_assert(Operation::Kind == OperationKind::Scatter); + [&allocator](std::index_sequence) { + (allocator.Free(SvcIndex), ...); + }(Operation::SvcIndexSequence); + allocator.Allocate(Operation::ProcedureIndex); + + [&mcg](std::index_sequence) { + (mcg.template StoreToStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + + mcg.template LoadStackAddress(); + } + + template + static constexpr void GenerateCodeForPrepareForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::Scatter); + + [&mcg](std::index_sequence) { + (mcg.template StoreToStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + }(std::make_index_sequence()); + + mcg.template LoadStackAddress(); + } + + template + static constexpr void GenerateCodeForKernelProcedureToSvcInvocation(MetaCodeGenerator &mcg) { + static_assert(Operation::Kind == OperationKind::Scatter); + + [&mcg](std::index_sequence) { + (mcg.template LoadFromStack, Operation::StackOffset + Operation::RegisterSize * (Is), Operation::RegisterSize>(), ...); + 
}(std::make_index_sequence()); + } + }; + }; + + template + class LayoutConversion { + public: + using SvcAbiType = _SvcAbiType; + using UserAbiType = _UserAbiType; + using KernelAbiType = _KernelAbiType; + + static constexpr auto LayoutForUser = ProcedureLayout::Create(); + static constexpr auto LayoutForSvc = SvcInvocationLayout::Create(LayoutForUser); + static constexpr auto LayoutForKernel = ProcedureLayout::Create(); + private: + template + static constexpr size_t DetermineUsedStackIndices() { + [[maybe_unused]] constexpr auto Procedure = LayoutForKernel; + [[maybe_unused]] constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + + if constexpr (ParameterIndex >= Svc.GetNumParameters()) { + /* Base case: we're done. */ + return Used; + } else { + /* We're processing more parameters. */ + constexpr Parameter SvcParam = Svc.GetParameter(ParameterIndex); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + if constexpr (SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()) { + /* We're not scattering, so stack won't be used. */ + return DetermineUsedStackIndices(); + } else { + /* We're scattering, and so we're using stack. 
*/ + static_assert(ProcedureParam.GetNumLocations() == 1); + + constexpr size_t IndicesPerRegister = KernelAbiType::RegisterSize / SvcAbiType::RegisterSize; + static_assert(IndicesPerRegister > 0); + + constexpr size_t RequiredCount = util::AlignUp(SvcParam.GetNumLocations(), IndicesPerRegister) / IndicesPerRegister; + + return DetermineUsedStackIndices(); + } + } + } + + static constexpr size_t AbiUsedStackIndices = [] { + constexpr auto KernLayout = LayoutForKernel.GetInputLayout(); + + size_t used = 0; + for (size_t i = 0; i < KernLayout.GetNumParameters(); i++) { + const auto Param = KernLayout.GetParameter(i); + for (size_t j = 0; j < Param.GetNumLocations(); j++) { + const auto Loc = Param.GetLocation(j); + if (Loc.GetStorage() == Storage::Stack) { + used = std::max(used, Loc.GetIndex() + 1); + } + } + } + + return used; + }(); + + static constexpr size_t BeforeUsedStackIndices = DetermineUsedStackIndices(); + static constexpr size_t AfterUsedStackIndices = DetermineUsedStackIndices(); + + template + static constexpr auto ZipMoveOperations() { + constexpr auto Procedure = LayoutForKernel; + constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + + static_assert(ParameterIndex < Svc.GetNumParameters()); + + constexpr Parameter SvcParam = Svc.GetParameter(ParameterIndex); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + static_assert(SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()); + static_assert(SvcParam.GetNumLocations() == ProcedureParam.GetNumLocations()); + + if constexpr (LocationIndex >= SvcParam.GetNumLocations()) { + /* Base case: we're done. */ + return std::tuple<>{}; + } else { + constexpr Location SvcLoc = SvcParam.GetLocation(LocationIndex); + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(LocationIndex); + + if constexpr (SvcLoc == ProcedureLoc) { + /* No need to emit an operation if we're not changing where we are. 
*/ + return ZipMoveOperations(); + } else { + /* Svc location needs to be in a register. */ + static_assert(SvcLoc.GetStorage() == Storage::Register); + + constexpr size_t Size = KernelAbiType::RegisterSize; + + if constexpr (ProcedureLoc.GetStorage() == Storage::Register) { + using OperationType = LayoutConversionBase::OperationMove; + constexpr auto cur_op = std::make_tuple(OperationType{}); + return std::tuple_cat(cur_op, ZipMoveOperations()); + } else { + using OperationType = LayoutConversionBase::OperationLoadAndStore; + constexpr auto cur_op = std::make_tuple(OperationType{}); + return std::tuple_cat(cur_op, ZipMoveOperations()); + } + } + } + } + + template + static constexpr auto DetermineConversionOperations() { + [[maybe_unused]] constexpr auto Procedure = LayoutForKernel; + [[maybe_unused]] constexpr ParameterLayout Svc = Input ? LayoutForSvc.GetInputLayout() : LayoutForSvc.GetOutputLayout(); + [[maybe_unused]] constexpr std::array ParameterMap = [](SvcHolder){ + /* We want to iterate over the parameters in sorted order. */ + constexpr ParameterLayout CapturedSvc = UNWRAP_TEMPLATE_CONSTANT(SvcHolder); + std::array map{}; + const size_t num_parameters = CapturedSvc.GetNumParameters(); + for (size_t i = 0; i < num_parameters; i++) { + map[i] = i; + } + for (size_t i = 1; i < num_parameters; i++) { + for (size_t j = i; j > 0 && CapturedSvc.GetParameter(map[j-1]).GetLocation(0) > CapturedSvc.GetParameter(map[j]).GetLocation(0); j--) { + /* std::swap is not constexpr until c++20 :( */ + /* TODO: std::swap(map[j], map[j-1]); */ + const size_t tmp = map[j]; + map[j] = map[j-1]; + map[j-1] = tmp; + } + } + return map; + }(WRAP_TEMPLATE_CONSTANT(Svc)); + + if constexpr (ParameterIndex >= Svc.GetNumParameters()) { + /* Base case: we're done. 
*/ + if constexpr (Input) { + static_assert(StackIndex == BeforeUsedStackIndices + AbiUsedStackIndices); + } else { + static_assert(StackIndex == AfterUsedStackIndices + BeforeUsedStackIndices + AbiUsedStackIndices); + } + return std::tuple<>{}; + } else { + /* We're processing more parameters. */ + constexpr Parameter SvcParam = Svc.GetParameter(ParameterMap[ParameterIndex]); + constexpr Parameter ProcedureParam = Procedure.GetParameter(SvcParam.GetIdentifier()); + + if constexpr (SvcParam.IsPassedByPointer() == ProcedureParam.IsPassedByPointer()) { + if constexpr (SvcParam.GetNumLocations() == ProcedureParam.GetNumLocations()) { + /* Normal moves and loads/stores. */ + return std::tuple_cat(ZipMoveOperations(), DetermineConversionOperations()); + } else { + /* We're packing. */ + /* Make sure we're handling the 2 -> 1 case. */ + static_assert(SvcParam.GetNumLocations() == 2); + static_assert(ProcedureParam.GetNumLocations() == 1); + + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(0); + constexpr Location SvcLoc0 = SvcParam.GetLocation(0); + constexpr Location SvcLoc1 = SvcParam.GetLocation(1); + static_assert(ProcedureLoc.GetStorage() == Storage::Register); + static_assert(SvcLoc0.GetStorage() == Storage::Register); + static_assert(SvcLoc1.GetStorage() == Storage::Register); + + constexpr size_t Size = KernelAbiType::RegisterSize; + + using OperationType = LayoutConversionBase::OperationPackAndUnpack; + + constexpr auto cur_op = std::make_tuple(OperationType{}); + + return std::tuple_cat(cur_op, DetermineConversionOperations()); + } + } else { + /* One operation, since we're scattering. 
*/ + static_assert(ProcedureParam.GetNumLocations() == 1); + constexpr Location ProcedureLoc = ProcedureParam.GetLocation(0); + + constexpr size_t IndicesPerRegister = KernelAbiType::RegisterSize / SvcAbiType::RegisterSize; + static_assert(IndicesPerRegister > 0); + + constexpr size_t RequiredCount = util::AlignUp(SvcParam.GetNumLocations(), IndicesPerRegister) / IndicesPerRegister; + + if constexpr (ProcedureLoc.GetStorage() == Storage::Register) { + /* Scattering. In register during kernel call. */ + constexpr size_t RegisterSize = SvcAbiType::RegisterSize; + constexpr size_t PassedSize = ProcedureParam.GetTypeSize(); + + /* TODO: C++20 templated lambdas. For now, use GCC extension syntax. */ + constexpr auto SvcIndexSequence = [](SvcParamWrapper, std::index_sequence) { + constexpr Parameter CapturedSvcParam = UNWRAP_TEMPLATE_CONSTANT(SvcParamWrapper); + return std::index_sequence{}; + }(WRAP_TEMPLATE_CONSTANT(SvcParam), std::make_index_sequence()); + + constexpr auto OperationValue = [](ProcedureLocWrapper, std::index_sequence) { + constexpr Location CapturedProcedureLoc = UNWRAP_TEMPLATE_CONSTANT(ProcedureLocWrapper); + return LayoutConversionBase::OperationScatter{}; + }(WRAP_TEMPLATE_CONSTANT(ProcedureLoc), SvcIndexSequence); + + constexpr auto cur_op = std::make_tuple(OperationValue); + + return std::tuple_cat(cur_op, DetermineConversionOperations()); + } else { + /* TODO: How should on-stack-during-kernel-call be handled? 
*/ + static_assert(ProcedureLoc.GetStorage() == Storage::Register); + } + } + } + } + + static constexpr size_t PreserveRegisterStartIndex = SvcAbiType::ArgumentRegisterCount; + static constexpr size_t PreserveRegisterEndIndex = std::min(KernelAbiType::ArgumentRegisterCount, SvcAbiType::RegisterCount); + static constexpr size_t ClearRegisterStartIndex = 0; + static constexpr size_t ClearRegisterEndIndex = std::min(KernelAbiType::ArgumentRegisterCount, SvcAbiType::RegisterCount); + + template + static constexpr bool ShouldPreserveRegister = (PreserveRegisterStartIndex <= Index && Index < PreserveRegisterEndIndex) && + LayoutForSvc.GetInputLayout().IsRegisterFree(Index) && LayoutForSvc.GetOutputLayout().IsRegisterFree(Index); + + template + static constexpr bool ShouldClearRegister = (ClearRegisterStartIndex <= Index && Index < ClearRegisterEndIndex) && + LayoutForSvc.GetOutputLayout().IsRegisterFree(Index) && !ShouldPreserveRegister; + + template + static constexpr auto DeterminePreserveRegisters() { + static_assert(PreserveRegisterStartIndex <= Index && Index <= PreserveRegisterEndIndex); + + if constexpr (Index >= PreserveRegisterEndIndex) { + /* Base case: we're done. */ + return std::index_sequence<>{}; + } else { + if constexpr (ShouldPreserveRegister) { + /* Preserve this register. */ + return IndexSequenceCat(std::index_sequence{}, DeterminePreserveRegisters()); + } else { + /* We don't need to preserve register, so we can skip onwards. */ + return IndexSequenceCat(std::index_sequence<>{}, DeterminePreserveRegisters()); + } + } + } + + template + static constexpr auto DetermineClearRegisters() { + static_assert(ClearRegisterStartIndex <= Index && Index <= ClearRegisterEndIndex); + + if constexpr (Index >= ClearRegisterEndIndex) { + /* Base case: we're done. */ + return std::index_sequence<>{}; + } else { + if constexpr (ShouldClearRegister) { + /* Clear this register. 
*/ + return IndexSequenceCat(std::index_sequence{}, DetermineClearRegisters()); + } else { + /* We don't need to preserve register, so we can skip onwards. */ + return IndexSequenceCat(std::index_sequence<>{}, DetermineClearRegisters()); + } + } + } + public: + static constexpr size_t NonAbiUsedStackIndices = AfterUsedStackIndices + BeforeUsedStackIndices; + using BeforeOperations = decltype(DetermineConversionOperations()); + using AfterOperations = decltype(DetermineConversionOperations()); + + static constexpr size_t NumBeforeOperations = std::tuple_size::value; + static constexpr size_t NumAfterOperations = std::tuple_size::value; + + using PreserveRegisters = decltype(DeterminePreserveRegisters()); + using ClearRegisters = decltype(DetermineClearRegisters()); + + static constexpr size_t NumPreserveRegisters = PreserveRegisters::size(); + static constexpr size_t NumClearRegisters = ClearRegisters::size(); + + static constexpr auto PreserveRegistersArray = ConvertToArray(PreserveRegisters{}); + static constexpr auto ClearRegistersArray = ConvertToArray(ClearRegisters{}); + public: + template + static constexpr bool CanGenerateCode(RegisterAllocator allocator) { + if constexpr (CodeGenKind == CodeGenerationKind::SvcInvocationToKernelProcedure) { + return Operation::ImplType::template CanGenerateCodeForSvcInvocationToKernelProcedure(allocator); + } else { + static_assert(CodeGenKind != CodeGenKind, "Invalid CodeGenerationKind"); + } + } + + template + static constexpr void GenerateCode(MetaCodeGenerator &mcg, RegisterAllocator &allocator) { + if constexpr (CodeGenKind == CodeGenerationKind::SvcInvocationToKernelProcedure) { + Operation::ImplType::template GenerateCodeForSvcInvocationToKernelProcedure(mcg, allocator); + } else if constexpr (CodeGenKind == CodeGenerationKind::PrepareForKernelProcedureToSvcInvocation) { + Operation::ImplType::template GenerateCodeForPrepareForKernelProcedureToSvcInvocation(mcg); + } else if constexpr (CodeGenKind == 
CodeGenerationKind::KernelProcedureToSvcInvocation) { + Operation::ImplType::template GenerateCodeForKernelProcedureToSvcInvocation(mcg); + } else { + static_assert(CodeGenKind != CodeGenKind, "Invalid CodeGenerationKind"); + } + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp new file mode 100644 index 000000000..682c29237 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + class MetaCode { + public: + static constexpr size_t MaxOperations = 0x40; + + enum class OperationKind { + SaveRegisters, + RestoreRegisters, + ClearRegisters, + AllocateStackSpace, + FreeStackSpace, + MoveRegister, + LoadFromStack, + LoadPairFromStack, + StoreToStack, + StorePairToStack, + Pack, + Unpack, + LoadStackAddress, + }; + + static constexpr const char *GetOperationKindName(OperationKind k) { + #define META_CODE_OPERATION_KIND_ENUM_CASE(s) case OperationKind::s: return #s + switch (k) { + META_CODE_OPERATION_KIND_ENUM_CASE(SaveRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(RestoreRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(ClearRegisters); + META_CODE_OPERATION_KIND_ENUM_CASE(AllocateStackSpace); + META_CODE_OPERATION_KIND_ENUM_CASE(FreeStackSpace); + META_CODE_OPERATION_KIND_ENUM_CASE(MoveRegister); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadFromStack); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadPairFromStack); + META_CODE_OPERATION_KIND_ENUM_CASE(StoreToStack); + META_CODE_OPERATION_KIND_ENUM_CASE(StorePairToStack); + META_CODE_OPERATION_KIND_ENUM_CASE(Pack); + META_CODE_OPERATION_KIND_ENUM_CASE(Unpack); + META_CODE_OPERATION_KIND_ENUM_CASE(LoadStackAddress); + default: + std::abort(); + } + #undef META_CODE_OPERATION_KIND_ENUM_CASE + } + + struct Operation { + OperationKind kind; + size_t num_parameters; + size_t parameters[16]; + }; + + template + static constexpr inline Operation MakeOperation() { + Operation op = {}; + static_assert(sizeof...(Is) <= sizeof(op.parameters) / sizeof(op.parameters[0])); + + op.kind = Kind; + op.num_parameters = sizeof...(Is); + + size_t i = 0; + ((op.parameters[i++] = Is), ...); + + return op; + } + private: + size_t num_operations; + std::array operations; + public: + constexpr explicit MetaCode() : num_operations(0), operations() { /* ... 
*/ } + + constexpr size_t GetNumOperations() const { + return this->num_operations; + } + + constexpr Operation GetOperation(size_t i) const { + return this->operations[i]; + } + + constexpr void AddOperation(Operation op) { + this->operations[this->num_operations++] = op; + } + }; + + template + static constexpr auto GetOperationParameterSequence() { + constexpr auto _Operation = UNWRAP_TEMPLATE_CONSTANT(_OperationHolder); + constexpr size_t NumParameters = _Operation.num_parameters; + + return [](OperationHolder, std::index_sequence) { + constexpr auto Operation = UNWRAP_TEMPLATE_CONSTANT(OperationHolder); + return std::index_sequence{}; + }(_OperationHolder{}, std::make_index_sequence()); + } + + template + static ALWAYS_INLINE void GenerateCodeForOperationImpl(std::index_sequence) { + #define META_CODE_OPERATION_KIND_GENERATE_CODE(KIND) else if constexpr (Kind == MetaCode::OperationKind::KIND) { CodeGenerator::template KIND(); } + if constexpr (false) { /* ... */ } + META_CODE_OPERATION_KIND_GENERATE_CODE(SaveRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(RestoreRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(ClearRegisters) + META_CODE_OPERATION_KIND_GENERATE_CODE(AllocateStackSpace) + META_CODE_OPERATION_KIND_GENERATE_CODE(FreeStackSpace) + META_CODE_OPERATION_KIND_GENERATE_CODE(MoveRegister) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadFromStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadPairFromStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(StoreToStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(StorePairToStack) + META_CODE_OPERATION_KIND_GENERATE_CODE(Pack) + META_CODE_OPERATION_KIND_GENERATE_CODE(Unpack) + META_CODE_OPERATION_KIND_GENERATE_CODE(LoadStackAddress) + else { static_assert(Kind != Kind, "Unknown MetaOperationKind"); } + #undef META_CODE_OPERATION_KIND_GENERATE_CODE + } + + template + static ALWAYS_INLINE void GenerateCodeForOperation(OperationHolder) { + constexpr auto Operation = UNWRAP_TEMPLATE_CONSTANT(OperationHolder); + 
GenerateCodeForOperationImpl(GetOperationParameterSequence()); + } + + class MetaCodeGenerator { + private: + using OperationKind = typename MetaCode::OperationKind; + private: + MetaCode meta_code; + public: + constexpr explicit MetaCodeGenerator() : meta_code() { /* ... */ } + + constexpr MetaCode GetMetaCode() const { + return this->meta_code; + } + + constexpr void AddOperationDirectly(MetaCode::Operation op) { + this->meta_code.AddOperation(op); + } + + template + constexpr void SaveRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void RestoreRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void ClearRegisters() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void AllocateStackSpace() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void FreeStackSpace() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void MoveRegister() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadFromStack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadPairFromStack() { + static_assert(Offset % Size == 0); + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void StoreToStack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void StorePairToStack() { + static_assert(Offset % Size == 0); + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void Pack() { + constexpr auto op = 
MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void Unpack() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + + template + constexpr void LoadStackAddress() { + constexpr auto op = MetaCode::MakeOperation(); + this->meta_code.AddOperation(op); + } + }; + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp new file mode 100644 index 000000000..c97bcb3f1 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include "svc_codegen_impl_common.hpp" + +namespace ams::svc::codegen::impl { + + enum class Storage { + Register, + Stack, + Invalid, + }; + + class Location { + private: + static constexpr size_t InvalidIndex = std::numeric_limits::max(); + private: + Storage storage; + size_t index; + public: + constexpr explicit Location() : storage(Storage::Invalid), index(InvalidIndex) { /* ... */ } + constexpr explicit Location(Storage s, size_t i) : storage(s), index(i) { /* ... 
*/ } + + constexpr size_t GetIndex() const { return this->index; } + constexpr Storage GetStorage() const { return this->storage; } + + constexpr bool IsValid() const { + return this->index != InvalidIndex && this->storage != Storage::Invalid; + } + + constexpr bool operator==(const Location &rhs) const { + return this->index == rhs.index && this->storage == rhs.storage; + } + + constexpr bool operator<(const Location &rhs) const { + if (this->storage < rhs.storage) { + return true; + } else if (this->storage > rhs.storage) { + return false; + } else { + return this->index < rhs.index; + } + } + + constexpr bool operator>(const Location &rhs) const { + if (this->storage > rhs.storage) { + return true; + } else if (this->storage < rhs.storage) { + return false; + } else { + return this->index > rhs.index; + } + } + + constexpr bool operator!=(const Location &rhs) const { + return !(*this == rhs); + } + }; + + class Parameter { + public: + static constexpr size_t MaxLocations = 8; + static constexpr size_t IdentifierLengthMax = 0x40; + class Identifier { + private: + char name[IdentifierLengthMax]; + size_t index; + public: + constexpr explicit Identifier() : name(), index() { /* ... 
*/ } + constexpr explicit Identifier(const char *nm, size_t idx = 0) : name(), index(idx) { + for (size_t i = 0; i < IdentifierLengthMax && nm[i]; i++) { + this->name[i] = nm[i]; + } + } + + constexpr bool operator==(const Identifier &rhs) const { + for (size_t i = 0; i < IdentifierLengthMax; i++) { + if (this->name[i] != rhs.name[i]) { + return false; + } + } + return this->index == rhs.index; + } + + constexpr bool operator!=(const Identifier &rhs) const { + return !(*this == rhs); + } + }; + private: + Identifier identifier; + ArgumentType type; + size_t type_size; + size_t passed_size; + bool passed_by_pointer; + size_t num_locations; + Location locations[MaxLocations]; + public: + constexpr explicit Parameter() + : identifier(), type(ArgumentType::Invalid), type_size(0), passed_size(0), passed_by_pointer(0), num_locations(0), locations() + { /* ... */ } + + constexpr explicit Parameter(Identifier id, ArgumentType t, size_t ts, size_t ps, bool p, Location l) + : identifier(id), type(t), type_size(ts), passed_size(ps), passed_by_pointer(p), num_locations(1), locations() + { + this->locations[0] = l; + } + + constexpr Identifier GetIdentifier() const { + return this->identifier; + } + + constexpr bool Is(Identifier rhs) const { + return this->identifier == rhs; + } + + constexpr ArgumentType GetArgumentType() const { + return this->type; + } + + constexpr size_t GetTypeSize() const { + return this->type_size; + } + + constexpr size_t GetPassedSize() const { + return this->passed_size; + } + + constexpr bool IsPassedByPointer() const { + return this->passed_by_pointer; + } + + constexpr size_t GetNumLocations() const { + return this->num_locations; + } + + constexpr Location GetLocation(size_t i) const { + return this->locations[i]; + } + + constexpr void AddLocation(Location l) { + this->locations[this->num_locations++] = l; + } + + constexpr bool UsesLocation(Location l) const { + for (size_t i = 0; i < this->num_locations; i++) { + if (this->locations[i] == l) 
{ + return true; + } + } + return false; + } + + constexpr bool operator==(const Parameter &rhs) const { + if (!(this->identifier == rhs.identifier && + this->type == rhs.type && + this->type_size == rhs.type_size && + this->passed_size == rhs.passed_size && + this->passed_by_pointer == rhs.passed_by_pointer && + this->num_locations == rhs.num_locations)) + { + return false; + } + + for (size_t i = 0; i < this->num_locations; i++) { + if (!(this->locations[i] == rhs.locations[i])) { + return false; + } + } + + return true; + } + + constexpr bool operator!=(const Parameter &rhs) const { + return !(*this == rhs); + } + }; + + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp new file mode 100644 index 000000000..a992442d3 --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include "svc_codegen_impl_kernel_svc_wrapper.hpp" + +namespace ams::svc::codegen { + +#if defined(ATMOSPHERE_ARCH_ARM64) || defined(ATMOSPHERE_ARCH_ARM) + + template + class KernelSvcWrapper { + private: + /* TODO: using Aarch32 = */ + using Aarch64 = impl::KernelSvcWrapperHelper; + using Aarch64From32 = impl::KernelSvcWrapperHelper; + public: +/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */ +#pragma GCC push_options +#pragma GCC optimize ("omit-frame-pointer") + + static ALWAYS_INLINE void Call64() { + Aarch64::WrapSvcFunction(); + } + + static ALWAYS_INLINE void Call64From32() { + Aarch64From32::WrapSvcFunction(); + } + +#pragma GCC pop_options + }; + +#else + + #error "Unknown architecture for Kernel SVC Code Generation" + +#endif + +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/svc/svc_codegen.hpp b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp new file mode 100644 index 000000000..59e7c1b1a --- /dev/null +++ b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +/* NOTE: This header must not be included by svc.hpp. 
*/ +#include "svc_common.hpp" +#include "svc_types.hpp" +#include "svc_definitions.hpp" + +#include "codegen/svc_codegen_kernel_svc_wrapper.hpp" diff --git a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp index ea0062e83..fa305f536 100644 --- a/libraries/libvapours/include/vapours/svc/svc_definitions.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_definitions.hpp @@ -20,13 +20,13 @@ #define AMS_SVC_KERN_INPUT_HANDLER(TYPE, NAME) TYPE NAME #define AMS_SVC_KERN_OUTPUT_HANDLER(TYPE, NAME) TYPE *NAME -#define AMS_SVC_KERN_INPTR_HANDLER(TYPE, NAME) ::ams::kern::KUserPointer NAME -#define AMS_SVC_KERN_OUTPTR_HANDLER(TYPE, NAME) ::ams::kern::KUserPointer NAME +#define AMS_SVC_KERN_INPTR_HANDLER(TYPE, NAME) ::ams::kern::svc::KUserPointer NAME +#define AMS_SVC_KERN_OUTPTR_HANDLER(TYPE, NAME) ::ams::kern::svc::KUserPointer NAME #define AMS_SVC_USER_INPUT_HANDLER(TYPE, NAME) TYPE NAME #define AMS_SVC_USER_OUTPUT_HANDLER(TYPE, NAME) TYPE *NAME -#define AMS_SVC_USER_INPTR_HANDLER(TYPE, NAME) const TYPE *NAME -#define AMS_SVC_USER_OUTPTR_HANDLER(TYPE, NAME) TYPE *NAME +#define AMS_SVC_USER_INPTR_HANDLER(TYPE, NAME) ::ams::svc::UserPointer NAME +#define AMS_SVC_USER_OUTPTR_HANDLER(TYPE, NAME) ::ams::svc::UserPointer NAME #define AMS_SVC_FOREACH_DEFINITION_IMPL(HANDLER, NAMESPACE, INPUT, OUTPUT, INPTR, OUTPTR) \ HANDLER(0x01, Result, SetHeapSize, OUTPUT(::ams::svc::Address, out_address), INPUT(::ams::svc::Size, size)) \ @@ -181,5 +181,49 @@ namespace ams::svc { } +/* NOTE: Change this to 1 to test the SVC definitions for user-pointer validity. */ +#if 0 +namespace ams::svc::test { + + namespace impl { + + template + struct Validator { + private: + std::array valid; + public: + constexpr Validator(Ts... args) : valid{static_cast(args)...} { /* ... 
*/ } + + constexpr bool IsValid() const { + for (size_t i = 0; i < sizeof...(Ts); i++) { + if (!this->valid[i]) { + return false; + } + } + return true; + } + }; + + } + + + #define AMS_SVC_TEST_EMPTY_HANDLER(TYPE, NAME) true + #define AMS_SVC_TEST_INPTR_HANDLER(TYPE, NAME) (sizeof(::ams::svc::UserPointer) == sizeof(uintptr_t) && std::is_trivially_destructible<::ams::svc::UserPointer>::value) + #define AMS_SVC_TEST_OUTPTR_HANDLER(TYPE, NAME) (sizeof(::ams::svc::UserPointer) == sizeof(uintptr_t) && std::is_trivially_destructible<::ams::svc::UserPointer>::value) + + #define AMS_SVC_TEST_VERIFY_USER_POINTERS(ID, RETURN_TYPE, NAME, ...) \ + static_assert(impl::Validator(__VA_ARGS__).IsValid(), "Invalid User Pointer in svc::" #NAME); + + AMS_SVC_FOREACH_DEFINITION_IMPL(AMS_SVC_TEST_VERIFY_USER_POINTERS, lp64, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_INPTR_HANDLER, AMS_SVC_TEST_OUTPTR_HANDLER); + AMS_SVC_FOREACH_DEFINITION_IMPL(AMS_SVC_TEST_VERIFY_USER_POINTERS, ilp32, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_EMPTY_HANDLER, AMS_SVC_TEST_INPTR_HANDLER, AMS_SVC_TEST_OUTPTR_HANDLER); + + #undef AMS_SVC_TEST_VERIFY_USER_POINTERS + #undef AMS_SVC_TEST_INPTR_HANDLER + #undef AMS_SVC_TEST_OUTPTR_HANDLER + #undef AMS_SVC_TEST_EMPTY_HANDLER + +} #endif +#endif /* ATMOSPHERE_IS_STRATOSPHERE */ + diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 72f5b13df..34dfc3b3d 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -17,6 +17,12 @@ #pragma once #include "svc_common.hpp" +namespace ams::kern::svc::impl { + + struct KUserPointerTag{}; + +} + namespace ams::svc { /* Utility classes required to encode information into the type system for SVC veneers. 
*/ @@ -40,6 +46,24 @@ namespace ams::svc { static_assert(sizeof(Address) == sizeof(uintptr_t)); static_assert(std::is_trivially_destructible
::value); + namespace impl { + + struct UserPointerTag{}; + + } + + template + struct UserPointer : impl::UserPointerTag { + public: + static_assert(std::is_pointer::value); + static constexpr bool IsInput = std::is_const::type>::value; + private: + T pointer; + }; + + template + static constexpr inline bool IsUserPointer = std::is_base_of::value; + using PhysicalAddress = u64; /* Memory types. */ From 3982afdd6b169b5f8a3cba7647e38567d5a03d51 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 23 Jan 2020 18:13:32 -0800 Subject: [PATCH 02/97] meso: skeleton buildsystem for kernel --- libraries/config/templates/mesosphere.mk | 3 +- mesosphere/Makefile | 20 +- mesosphere/kernel/Makefile | 116 ++++++++++ mesosphere/kernel/kernel.ld | 205 ++++++++++++++++++ mesosphere/kernel/kernel.specs | 7 + mesosphere/kernel/source/arch/arm64/start.s | 25 +++ .../source/{ => arch/arm64}/exceptions.s | 0 .../{ => arch/arm64}/kern_init_loader_asm.s | 0 .../source/{ => arch/arm64}/start.s | 0 9 files changed, 368 insertions(+), 8 deletions(-) create mode 100644 mesosphere/kernel/Makefile create mode 100644 mesosphere/kernel/kernel.ld create mode 100644 mesosphere/kernel/kernel.specs create mode 100644 mesosphere/kernel/source/arch/arm64/start.s rename mesosphere/kernel_ldr/source/{ => arch/arm64}/exceptions.s (100%) rename mesosphere/kernel_ldr/source/{ => arch/arm64}/kern_init_loader_asm.s (100%) rename mesosphere/kernel_ldr/source/{ => arch/arm64}/start.s (100%) diff --git a/libraries/config/templates/mesosphere.mk b/libraries/config/templates/mesosphere.mk index 39301c66a..af0503858 100644 --- a/libraries/config/templates/mesosphere.mk +++ b/libraries/config/templates/mesosphere.mk @@ -12,7 +12,7 @@ export CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) export ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) -export LDFLAGS = -specs=$(TOPDIR)/kernel_ldr.specs -nostdlib -nostartfiles -g $(SETTINGS) 
-Wl,-Map,$(notdir $*.map) +export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \ -Wl,--wrap,__cxa_throw \ @@ -25,7 +25,6 @@ export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \ -Wl,--wrap,__cxa_call_terminate \ -Wl,--wrap,__gxx_personality_v0 \ -Wl,--wrap,_Unwind_Resume \ - -Wl,--wrap,_Unwind_Resume \ -Wl,--wrap,_ZSt19__throw_logic_errorPKc \ -Wl,--wrap,_ZSt20__throw_length_errorPKc \ -Wl,--wrap,_ZNSt11logic_errorC2EPKc diff --git a/mesosphere/Makefile b/mesosphere/Makefile index a0f8875d3..31b597a1c 100644 --- a/mesosphere/Makefile +++ b/mesosphere/Makefile @@ -1,12 +1,20 @@ -MODULES := kernel_ldr +TARGETS := kernel kernel_ldr +CLEAN_TARGETS := $(foreach target,$(TARGETS),$(target)-clean) SUBFOLDERS := $(MODULES) -TOPTARGETS := all clean +all: mesosphere.bin -$(TOPTARGETS): $(SUBFOLDERS) +clean: $(CLEAN_TARGETS) + @rm -f mesosphere.bin -$(SUBFOLDERS): - $(MAKE) -C $@ $(MAKECMDGOALS) +mesosphere.bin: $(TARGETS) + @echo "todo: py script" -.PHONY: $(TOPTARGETS) $(SUBFOLDERS) +$(TARGETS): + $(MAKE) -C $@ + +$(CLEAN_TARGETS): + $(MAKE) -C $(@:-clean=) clean + +.PHONY: all clean $(TARGETS) $(CLEAN_TARGETS) diff --git a/mesosphere/kernel/Makefile b/mesosphere/kernel/Makefile new file mode 100644 index 000000000..42e8c77ea --- /dev/null +++ b/mesosphere/kernel/Makefile @@ -0,0 +1,116 @@ +#--------------------------------------------------------------------------------- +# pull in common atmosphere configuration +#--------------------------------------------------------------------------------- +include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../../libraries/config/templates/mesosphere.mk + +#--------------------------------------------------------------------------------- +# no real need to edit anything past this point unless you need to add additional +# rules for different file extensions 
+#--------------------------------------------------------------------------------- +ifneq ($(BUILD),$(notdir $(CURDIR))) +#--------------------------------------------------------------------------------- + +export OUTPUT := $(CURDIR)/$(TARGET) +export TOPDIR := $(CURDIR) +export DEPSDIR := $(CURDIR)/$(BUILD) + +export VPATH := $(foreach dir,$(SOURCES),$(CURDIR)/$(dir)) \ + $(foreach dir,$(DATA),$(CURDIR)/$(dir)) + +CFILES := $(foreach dir,$(SOURCES),$(filter-out $(notdir $(wildcard $(dir)/*.arch.*.c)) $(notdir $(wildcard $(dir)/*.board.*.c)) $(notdir $(wildcard $(dir)/*.os.*.c)), \ + $(notdir $(wildcard $(dir)/*.c)))) +CFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.arch.$(ATMOSPHERE_ARCH_NAME).c))) +CFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.board.$(ATMOSPHERE_BOARD_NAME).c))) +CFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.os.$(ATMOSPHERE_OS_NAME).c))) + +CPPFILES := $(foreach dir,$(SOURCES),$(filter-out $(notdir $(wildcard $(dir)/*.arch.*.cpp)) $(notdir $(wildcard $(dir)/*.board.*.cpp)) $(notdir $(wildcard $(dir)/*.os.*.cpp)), \ + $(notdir $(wildcard $(dir)/*.cpp)))) +CPPFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.arch.$(ATMOSPHERE_ARCH_NAME).cpp))) +CPPFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.board.$(ATMOSPHERE_BOARD_NAME).cpp))) +CPPFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.os.$(ATMOSPHERE_OS_NAME).cpp))) + +SFILES := $(foreach dir,$(SOURCES),$(filter-out $(notdir $(wildcard $(dir)/*.arch.*.s)) $(notdir $(wildcard $(dir)/*.board.*.s)) $(notdir $(wildcard $(dir)/*.os.*.s)), \ + $(notdir $(wildcard $(dir)/*.s)))) +SFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.arch.$(ATMOSPHERE_ARCH_NAME).s))) +SFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.board.$(ATMOSPHERE_BOARD_NAME).s))) +SFILES += $(foreach dir,$(SOURCES),$(notdir $(wildcard $(dir)/*.os.$(ATMOSPHERE_OS_NAME).s))) + 
+#--------------------------------------------------------------------------------- +# use CXX for linking C++ projects, CC for standard C +#--------------------------------------------------------------------------------- +ifeq ($(strip $(CPPFILES)),) +#--------------------------------------------------------------------------------- + export LD := $(CC) +#--------------------------------------------------------------------------------- +else +#--------------------------------------------------------------------------------- + export LD := $(CXX) +#--------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------- + +export OFILES_BIN := $(addsuffix .o,$(BINFILES)) +export OFILES_SRC := $(CPPFILES:.cpp=.o) $(CFILES:.c=.o) $(SFILES:.s=.o) +export OFILES := $(OFILES_BIN) $(OFILES_SRC) +export HFILES_BIN := $(addsuffix .h,$(subst .,_,$(subst -,_,$(BINFILES)))) + +export INCLUDE := $(foreach dir,$(INCLUDES),-I$(CURDIR)/$(dir)) \ + $(foreach dir,$(LIBDIRS),-I$(dir)/include) \ + -I. + +export LIBPATHS := $(foreach dir,$(LIBDIRS),-L$(dir)/lib) + +.PHONY: $(BUILD) clean all + +#--------------------------------------------------------------------------------- +all: $(BUILD) check_libmeso + +$(BUILD): check_libmeso + @[ -d $@ ] || mkdir -p $@ + @$(MAKE) --no-print-directory -C $(BUILD) -f $(CURDIR)/Makefile + +check_libmeso: + @$(MAKE) --no-print-directory -C ../../libraries/libmesosphere + +#--------------------------------------------------------------------------------- +clean: + @echo clean ... 
+ @rm -fr $(BUILD) $(OUTPUT).bin $(OUTPUT).elf + +#--------------------------------------------------------------------------------- +else +.PHONY: all + +DEPENDS := $(OFILES:.o=.d) + +#--------------------------------------------------------------------------------- +# main targets +#--------------------------------------------------------------------------------- +all : $(OUTPUT).bin + +$(OUTPUT).bin : $(OUTPUT).elf + $(OBJCOPY) -S -O binary --set-section-flags .bss=alloc,load,contents $< $@ + @echo built ... $(notdir $@) + +$(OUTPUT).elf : $(OFILES) ../../../libraries/libmesosphere/lib/libmesosphere.a + +%.elf: + @echo linking $(notdir $@) + $(LD) $(LDFLAGS) $(OFILES) $(LIBPATHS) $(LIBS) -o $@ + @$(NM) -CSn $@ > $(notdir $*.lst) + +$(OFILES_SRC) : $(HFILES_BIN) + +#--------------------------------------------------------------------------------- +# you need a rule like this for each extension you use as binary data +#--------------------------------------------------------------------------------- +%.bin.o %_bin.h: %.bin +#--------------------------------------------------------------------------------- + @echo $(notdir $<) + @$(bin2o) + +-include $(DEPENDS) + +#--------------------------------------------------------------------------------------- +endif +#--------------------------------------------------------------------------------------- diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld new file mode 100644 index 000000000..1a49877b5 --- /dev/null +++ b/mesosphere/kernel/kernel.ld @@ -0,0 +1,205 @@ +OUTPUT_ARCH(aarch64) +ENTRY(_start) + +PHDRS +{ + code PT_LOAD FLAGS(5) /* Read | Execute */; + rodata PT_LOAD FLAGS(4) /* Read */; + data PT_LOAD FLAGS(6) /* Read | Write */; + dyn PT_DYNAMIC; +} + +SECTIONS +{ + /* =========== CODE section =========== */ + PROVIDE(__start__ = 0x0); + . = __start__; + __code_start = . ; + + .crt0 : + { + KEEP (*(.crt0)) + . = ALIGN(8); + } :code + + .init : + { + KEEP( *(.init) ) + . 
= ALIGN(8); + } :code + + .plt : + { + *(.plt) + *(.iplt) + . = ALIGN(8); + } :code + + .text : + { + *(.text.unlikely .text.*_unlikely .text.unlikely.*) + *(.text.exit .text.exit.*) + *(.text.startup .text.startup.*) + *(.text.hot .text.hot.*) + *(.text .stub .text.* .gnu.linkonce.t.*) + . = ALIGN(8); + } :code + + .fini : + { + KEEP( *(.fini) ) + . = ALIGN(8); + } :code + + + /* .vectors. */ + . = ALIGN(2K); + __vectors_start__ = . ; + .vectors : + { + KEEP( *(.vectors) ) + . = ALIGN(8); + } :code + + /* =========== RODATA section =========== */ + . = ALIGN(0x1000); + __rodata_start = . ; + + .rodata : + { + *(.rodata .rodata.* .gnu.linkonce.r.*) + . = ALIGN(8); + } :rodata + + .eh_frame_hdr : { __eh_frame_hdr_start = .; *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) __eh_frame_hdr_end = .; } :rodata + .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) } :rodata + .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } :rodata + .gnu_extab : ONLY_IF_RO { *(.gnu_extab*) } : rodata + + .dynamic : { *(.dynamic) } :rodata :dyn + .dynsym : { *(.dynsym) } :rodata + .dynstr : { *(.dynstr) } :rodata + .rela.dyn : { *(.rela.*) } :rodata + .interp : { *(.interp) } :rodata + .hash : { *(.hash) } :rodata + .gnu.hash : { *(.gnu.hash) } :rodata + .gnu.version : { *(.gnu.version) } :rodata + .gnu.version_d : { *(.gnu.version_d) } :rodata + .gnu.version_r : { *(.gnu.version_r) } :rodata + .note.gnu.build-id : { *(.note.gnu.build-id) } :rodata + + /* =========== DATA section =========== */ + . = ALIGN(0x1000); + __data_start = . 
; + + .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) *(.eh_frame.*) } :data + .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } :data + .gnu_extab : ONLY_IF_RW { *(.gnu_extab*) } : data + .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } :data + + .preinit_array ALIGN(8) : + { + PROVIDE (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE (__preinit_array_end = .); + } :data + + .init_array ALIGN(8) : + { + PROVIDE (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array)) + PROVIDE (__init_array_end = .); + } :data + + .fini_array ALIGN(8) : + { + PROVIDE (__fini_array_start = .); + KEEP (*(.fini_array)) + KEEP (*(SORT(.fini_array.*))) + PROVIDE (__fini_array_end = .); + } :data + + .ctors ALIGN(8) : + { + KEEP (*crtbegin.o(.ctors)) /* MUST be first -- GCC requires it */ + KEEP (*(EXCLUDE_FILE (*crtend.o) .ctors)) + KEEP (*(SORT(.ctors.*))) + KEEP (*(.ctors)) + } :data + + .dtors ALIGN(8) : + { + KEEP (*crtbegin.o(.dtors)) + KEEP (*(EXCLUDE_FILE (*crtend.o) .dtors)) + KEEP (*(SORT(.dtors.*))) + KEEP (*(.dtors)) + } :data + + __got_start__ = .; + + .got : { *(.got) *(.igot) } :data + .got.plt : { *(.got.plt) *(.igot.plt) } :data + + __got_end__ = .; + + .data ALIGN(8) : + { + *(.data .data.* .gnu.linkonce.d.*) + SORT(CONSTRUCTORS) + } :data + + __bss_start__ = .; + .bss ALIGN(8) : + { + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + *(COMMON) + . = ALIGN(8); + } : data + __bss_end__ = .; + + . = ALIGN(0x1000); + + __end__ = ABSOLUTE(.); + + /* ================== + ==== Metadata ==== + ================== */ + + /* Discard sections that difficult post-processing */ + /DISCARD/ : { *(.group .comment .note) } + + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + + /* DWARF debug sections. 
+ Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } +} diff --git a/mesosphere/kernel/kernel.specs b/mesosphere/kernel/kernel.specs new file mode 100644 index 000000000..c83d201a5 --- /dev/null +++ b/mesosphere/kernel/kernel.specs @@ -0,0 +1,7 @@ +%rename link old_link + +*link: +%(old_link) -T %:getenv(TOPDIR /kernel.ld) -pie --gc-sections -z text -z nodynamic-undefined-weak -nostdlib -nostartfiles + +*startfile: +crti%O%s crtbegin%O%s diff --git a/mesosphere/kernel/source/arch/arm64/start.s b/mesosphere/kernel/source/arch/arm64/start.s new file mode 100644 index 000000000..c1a234119 --- /dev/null +++ b/mesosphere/kernel/source/arch/arm64/start.s @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +/* For some reason GAS doesn't know about it, even with .cpu cortex-a57 */ +#define cpuactlr_el1 s3_1_c15_c2_0 +#define cpuectlr_el1 s3_1_c15_c2_1 + +.section .crt0.text.start, "ax", %progbits +.global _start +_start: + /* TODO */ + b _start \ No newline at end of file diff --git a/mesosphere/kernel_ldr/source/exceptions.s b/mesosphere/kernel_ldr/source/arch/arm64/exceptions.s similarity index 100% rename from mesosphere/kernel_ldr/source/exceptions.s rename to mesosphere/kernel_ldr/source/arch/arm64/exceptions.s diff --git a/mesosphere/kernel_ldr/source/kern_init_loader_asm.s b/mesosphere/kernel_ldr/source/arch/arm64/kern_init_loader_asm.s similarity index 100% rename from mesosphere/kernel_ldr/source/kern_init_loader_asm.s rename to mesosphere/kernel_ldr/source/arch/arm64/kern_init_loader_asm.s diff --git a/mesosphere/kernel_ldr/source/start.s b/mesosphere/kernel_ldr/source/arch/arm64/start.s similarity index 100% rename from mesosphere/kernel_ldr/source/start.s rename to mesosphere/kernel_ldr/source/arch/arm64/start.s From bce71331284f028efd1bf99ec9dd9e58fe16cdcd Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 23 Jan 2020 19:00:42 -0800 Subject: [PATCH 03/97] meso: add script to build mesosphere.bin --- .../mesosphere/init/kern_init_layout.hpp | 2 +- .../{ => arch/arm64}/svc/kern_svc_tables.cpp | 4 +- mesosphere/Makefile | 3 +- mesosphere/build_mesosphere.py | 45 +++++++++++++++++++ mesosphere/kernel/source/arch/arm64/start.s | 25 ++++++++++- .../kernel_ldr/source/kern_init_loader.cpp | 6 ++- 6 files changed, 79 insertions(+), 6 deletions(-) rename libraries/libmesosphere/source/{ => arch/arm64}/svc/kern_svc_tables.cpp (96%) create mode 100644 mesosphere/build_mesosphere.py diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp index 904defc41..620ef85fe 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp +++ 
b/libraries/libmesosphere/include/mesosphere/init/kern_init_layout.hpp @@ -27,7 +27,7 @@ namespace ams::kern::init { u32 rw_end_offset; u32 bss_offset; u32 bss_end_offset; - u32 ini_end_offset; + u32 ini_load_offset; u32 dynamic_offset; u32 init_array_offset; u32 init_array_end_offset; diff --git a/libraries/libmesosphere/source/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp similarity index 96% rename from libraries/libmesosphere/source/svc/kern_svc_tables.cpp rename to libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp index a2fd6a28d..bc5b264fa 100644 --- a/libraries/libmesosphere/source/svc/kern_svc_tables.cpp +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp @@ -16,6 +16,8 @@ #include #include +/* TODO: Enable compilation of this file when the kernel supports supervisor calls. */ +#if 0 namespace ams::kern::svc { namespace { @@ -41,7 +43,6 @@ namespace ams::kern::svc { } - /* TODO: 32-bit ABI */ const std::array SvcTable64From32 = [] { std::array table = {}; @@ -65,3 +66,4 @@ namespace ams::kern::svc { }(); } +#endif \ No newline at end of file diff --git a/mesosphere/Makefile b/mesosphere/Makefile index 31b597a1c..f511441d1 100644 --- a/mesosphere/Makefile +++ b/mesosphere/Makefile @@ -9,7 +9,8 @@ clean: $(CLEAN_TARGETS) @rm -f mesosphere.bin mesosphere.bin: $(TARGETS) - @echo "todo: py script" + @python build_mesosphere.py + @echo "Built mesosphere.bin..." 
$(TARGETS): $(MAKE) -C $@ diff --git a/mesosphere/build_mesosphere.py b/mesosphere/build_mesosphere.py new file mode 100644 index 000000000..d7e4087da --- /dev/null +++ b/mesosphere/build_mesosphere.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +import sys, os +from struct import pack as pk, unpack as up + +def align_up(val, algn): + val += algn - 1 + return val - (val % algn) + + +def main(argc, argv): + if argc != 1: + print('Usage: %s' % argv[0]) + return 1 + with open('kernel_ldr/kernel_ldr.bin', 'rb') as f: + kernel_ldr = f.read() + with open('kernel/kernel.bin', 'rb') as f: + kernel = f.read() + kernel_metadata_offset = up('= len(kernel)) + + embedded_ini = b'' + embedded_ini_offset = align_up(kernel_end, 0x1000) + 0x1000 + embedded_ini_end = embedded_ini_offset + 0 # TODO: Create and embed an INI, eventually. + + kernel_ldr_offset = align_up(embedded_ini_end, 0x1000) + 0x1000 + kernel_ldr_end = kernel_ldr_offset + len(kernel_ldr) + + with open('mesosphere.bin', 'wb') as f: + f.write(kernel[:kernel_metadata_offset + 8]) + f.write(pk('bss_offset; - const uintptr_t ini_end_offset = layout->ini_end_offset; + const uintptr_t ini_load_offset = layout->ini_load_offset; const uintptr_t dynamic_offset = layout->dynamic_offset; const uintptr_t init_array_offset = layout->init_array_offset; const uintptr_t init_array_end_offset = layout->init_array_end_offset; @@ -277,9 +277,11 @@ namespace ams::kern::init::loader { /* Decide if Kernel should have enlarged resource region. */ const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0); + static_assert(KernelResourceRegionSize > InitialProcessBinarySizeMax); + static_assert(KernelResourceRegionSize + ExtraKernelResourceSize > InitialProcessBinarySizeMax); /* Setup the INI1 header in memory for the kernel. 
*/ - const uintptr_t ini_end_address = base_address + ini_end_offset + resource_region_size; + const uintptr_t ini_end_address = base_address + ini_load_offset + resource_region_size; const uintptr_t ini_load_address = ini_end_address - InitialProcessBinarySizeMax; if (ini_base_address != ini_load_address) { /* The INI is not at the correct address, so we need to relocate it. */ From 24d41ce55e8a3de4afb1f9e6a1610196f50908e6 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 23 Jan 2020 21:12:36 -0800 Subject: [PATCH 04/97] kern: push code through call to kernelldr --- mesosphere/build_mesosphere.py | 10 +- mesosphere/kernel/source/arch/arm64/start.s | 315 +++++++++++++++++++- 2 files changed, 311 insertions(+), 14 deletions(-) diff --git a/mesosphere/build_mesosphere.py b/mesosphere/build_mesosphere.py index d7e4087da..e5fc15d58 100644 --- a/mesosphere/build_mesosphere.py +++ b/mesosphere/build_mesosphere.py @@ -15,10 +15,10 @@ def main(argc, argv): kernel_ldr = f.read() with open('kernel/kernel.bin', 'rb') as f: kernel = f.read() - kernel_metadata_offset = up('= len(kernel)) embedded_ini = b'' @@ -29,9 +29,9 @@ def main(argc, argv): kernel_ldr_end = kernel_ldr_offset + len(kernel_ldr) with open('mesosphere.bin', 'wb') as f: - f.write(kernel[:kernel_metadata_offset + 8]) - f.write(pk('> 0x00) & 0xFFFF); \ + movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16 + +#define LOAD_IMMEDIATE_64(reg, val) \ + mov reg, #(((val) >> 0x00) & 0xFFFF); \ + movk reg, #(((val) >> 0x10) & 0xFFFF), lsl#16; \ + movk reg, #(((val) >> 0x20) & 0xFFFF), lsl#32; \ + movk reg, #(((val) >> 0x30) & 0xFFFF), lsl#48 + +#define LOAD_FROM_LABEL(reg, label) \ + adr reg, label; \ + ldr reg, [reg] + .section .crt0.text.start, "ax", %progbits .global _start _start: - /* TODO */ - b _start - .word (__metadata_begin - _start) - + b _ZN3ams4kern4init10StartCore0Emm __metadata_begin: .ascii "MSS0" /* Magic */ - .word 0 /* KInitArguments */ - .word 0 /* INI1 base address. 
*/ - .word 0 /* Kernel Loader base address. */ +__metadata_ini_offset: + .quad 0 /* INI1 base address. */ +__metadata_kernelldr_offset: + .quad 0 /* Kernel Loader base address. */ __metadata_kernel_layout: .word _start - _start /* rx_offset */ .word __rodata_start - _start /* rx_end_offset */ @@ -43,6 +55,291 @@ __metadata_kernel_layout: .word _DYNAMIC - _start /* dynamic_offset */ .word __init_array_start - _start /* init_array_offset */ .word __init_array_end - _start /* init_array_end_offset */ -.if (. - __metadata_begin) != 0x40 +.if (. - __metadata_begin) != 0x44 .error "Incorrect Mesosphere Metadata" -.endif \ No newline at end of file +.endif + +/* ams::kern::init::StartCore0(uintptr_t, uintptr_t) */ +.section .crt0.text._ZN3ams4kern4init10StartCore0Emm, "ax", %progbits +.global _ZN3ams4kern4init10StartCore0Emm +.type _ZN3ams4kern4init10StartCore0Emm, %function +_ZN3ams4kern4init10StartCore0Emm: + /* Mask all interrupts. */ + msr daifset, #0xF + + /* Save arguments for later use. */ + mov x19, x0 + mov x20, x1 + + /* Check our current EL. We want to be executing out of EL1. */ + /* If we're in EL2, we'll need to deprivilege ourselves. */ + mrs x1, currentel + cmp x1, #0x4 + b.eq core0_el1 + cmp x1, #0x8 + b.eq core0_el2 +core0_el3: + b core0_el3 +core0_el2: + bl _ZN3ams4kern4init16JumpFromEL2ToEL1Ev +core0_el1: + bl _ZN3ams4kern4init19DisableMmuAndCachesEv + + /* We want to invoke kernel loader. */ + adr x0, _start + adr x1, __metadata_kernel_layout + LOAD_FROM_LABEL(x2, __metadata_ini_offset) + add x2, x0, x2 + LOAD_FROM_LABEL(x3, __metadata_kernelldr_offset) + add x3, x0, x3 + blr x3 + + /* TODO: Finish post-kernelldr init code. */ +1: + b 1b + + +/* ams::kern::init::JumpFromEL2ToEL1() */ +.section .crt0.text._ZN3ams4kern4init16JumpFromEL2ToEL1Ev, "ax", %progbits +.global _ZN3ams4kern4init16JumpFromEL2ToEL1Ev +.type _ZN3ams4kern4init16JumpFromEL2ToEL1Ev, %function +_ZN3ams4kern4init16JumpFromEL2ToEL1Ev: + /* We're going to want to ERET to our caller. 
*/ + msr elr_el2, x30 + + /* Ensure that the cache is coherent. */ + bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv + dsb sy + + bl _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv + dsb sy + + bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv + dsb sy + + /* Invalidate the entire TLB, and ensure instruction consistency. */ + tlbi vmalle1is + dsb sy + isb + + /* Setup system registers for deprivileging. */ + /* ACTLR_EL2: */ + /* - CPUACTLR access control = 1 */ + /* - CPUECTLR access control = 1 */ + /* - L2CTLR access control = 1 */ + /* - L2ECTLR access control = 1 */ + /* - L2ACTLR access control = 1 */ + mov x0, #0x73 + msr actlr_el2, x0 + + /* HCR_EL2: */ + /* - RW = 1 (el1 is aarch64) */ + mov x0, #0x80000000 + msr hcr_el2, x0 + + /* SCTLR_EL1: */ + /* - EOS = 1 */ + /* - EIS = 1 */ + /* - SPAN = 1 */ + LOAD_IMMEDIATE_32(x0, 0x00C00800) + msr sctlr_el1, x0 + + /* DACR32_EL2: */ + /* - Manager access for all D */ + mov x0, #0xFFFFFFFF + msr dacr32_el2, x0 + + /* SPSR_EL2: */ + /* - EL1h */ + /* - IRQ masked */ + /* - FIQ masked */ + mov x0, #0xC5 + msr spsr_el2, x0 + + eret + +/* ams::kern::init::DisableMmuAndCaches() */ +.section .crt0.text._ZN3ams4kern4init19DisableMmuAndCachesEv, "ax", %progbits +.global _ZN3ams4kern4init19DisableMmuAndCachesEv +.type _ZN3ams4kern4init19DisableMmuAndCachesEv, %function +_ZN3ams4kern4init19DisableMmuAndCachesEv: + /* The stack isn't set up, so we'll need to trash a register. */ + mov x22, x30 + + /* Ensure that the cache is coherent. */ + bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv + dsb sy + + bl _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv + dsb sy + + bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv + dsb sy + + /* Invalidate the entire TLB, and ensure instruction consistency. */ + tlbi vmalle1is + dsb sy + isb + + /* Invalidate the instruction cache, and ensure instruction consistency. 
*/ + ic ialluis + dsb sy + isb + + /* Set SCTLR_EL1 to disable the caches and mmu. */ + /* SCTLR_EL1: */ + /* - M = 0 */ + /* - C = 0 */ + /* - I = 0 */ + mrs x0, sctlr_el1 + LOAD_IMMEDIATE_64(x1, ~0x1005) + and x0, x0, x1 + msr sctlr_el1, x0 + + mov x30, x22 + ret + +/* ams::kern::arm64::cpu::FlushEntireDataCacheLocalWithoutStack() */ +.section .crt0.text._ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv, "ax", %progbits +.global _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv +.type _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv, %function +_ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv: + /* The stack isn't set up, so we'll need to trash a register. */ + mov x23, x30 + + /* CacheLineIdAccessor clidr_el1; */ + mrs x10, clidr_el1 + /* const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); */ + ubfx x10, x10, #0x15, 3 + + /* int level = levels_of_unification - 1 */ + sub w9, w10, #1 + + /* while (level >= 0) { */ +begin_flush_cache_local_loop: + cmn w9, #1 + b.eq done_flush_cache_local_loop + + /* FlushEntireDataCacheImplWithoutStack(level); */ + mov w0, w9 + bl _ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv + + /* level--; */ + sub w9, w9, #1 + + /* } */ + b begin_flush_cache_local_loop + +done_flush_cache_local_loop: + mov x30, x23 + ret + +/* ams::kern::arm64::cpu::FlushEntireDataCacheSharedWithoutStack() */ +.section .crt0.text._ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv, "ax", %progbits +.global _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv +.type _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv, %function +_ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv: + /* The stack isn't set up, so we'll need to trash a register. 
*/ + mov x23, x30 + + /* CacheLineIdAccessor clidr_el1; */ + mrs x10, clidr_el1 + /* const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency(); */ + ubfx x9, x10, #0x18, 3 + /* const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); */ + ubfx x10, x10, #0x15, 3 + + /* int level = levels_of_coherency */ + + /* while (level >= levels_of_unification) { */ +begin_flush_cache_shared_loop: + cmp w10, w9 + b.gt done_flush_cache_shared_loop + + /* FlushEntireDataCacheImplWithoutStack(level); */ + mov w0, w9 + bl _ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv + + /* level--; */ + sub w9, w9, #1 + + /* } */ + b begin_flush_cache_shared_loop + +done_flush_cache_shared_loop: + mov x30, x23 + ret + +/* ams::kern::arm64::cpu::FlushEntireDataCacheImplWithoutStack() */ +.section .crt0.text._ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv, "ax", %progbits +.global _ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv +.type _ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv, %function +_ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv: + /* const u64 level_sel_value = static_cast(level << 1); */ + lsl w6, w0, #1 + sxtw x6, w6 + + /* cpu::SetCsselrEl1(level_sel_value); */ + msr csselr_el1, x6 + + /* cpu::InstructionMemoryBarrier(); */ + isb + + /* CacheSizeIdAccessor ccsidr_el1; */ + mrs x3, ccsidr_el1 + + /* const int num_ways = ccsidr_el1.GetAssociativity(); */ + ubfx x7, x3, #3, #0xA + mov w8, w7 + + /* const int line_size = ccsidr_el1.GetLineSize(); */ + and x4, x3, #7 + + /* const u64 way_shift = static_cast(__builtin_clz(num_ways)); */ + clz w7, w7 + + /* const u64 set_shift = static_cast(line_size + 4); */ + add w4, w4, #4 + + /* const int num_sets = ccsidr_el1.GetNumberOfSets(); */ + ubfx w3, w3, #0xD, #0xF + + /* int way = 0; */ + mov x5, #0 + + /* while (way <= num_ways) { */ +begin_flush_cache_impl_way_loop: + cmp w8, w5 + b.lt done_flush_cache_impl_way_loop + + /* int set = 0; */ + mov 
x0, #0 + + /* while (set <= num_sets) { */ +begin_flush_cache_impl_set_loop: + cmp w3, w0 + b.lt done_flush_cache_impl_set_loop + + /* const u64 cisw_value = (static_cast(way) << way_shift) | (static_cast(set) << set_shift) | level_sel_value; */ + lsl x2, x5, x7 + lsl x1, x0, x4 + orr x1, x1, x2 + orr x1, x1, x6 + + /* __asm__ __volatile__("dc cisw, %0" :: "r"(cisw_value) : "memory"); */ + dc cisw, x1 + + /* set++; */ + add x0, x0, #1 + + /* } */ + b begin_flush_cache_impl_set_loop +done_flush_cache_impl_set_loop: + + /* way++; */ + add x5, x5, 1 + + /* } */ + b begin_flush_cache_impl_way_loop +done_flush_cache_impl_way_loop: + ret From 308ddecc9c4f5267454fc6b095bb508df2795129 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 23 Jan 2020 21:57:40 -0800 Subject: [PATCH 05/97] kern: fix bss end align (now gets through kernelldr on hardware) --- mesosphere/kernel/kernel.ld | 3 ++- mesosphere/kernel/source/arch/arm64/start.s | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld index 1a49877b5..765c33ef4 100644 --- a/mesosphere/kernel/kernel.ld +++ b/mesosphere/kernel/kernel.ld @@ -157,10 +157,11 @@ SECTIONS *(COMMON) . = ALIGN(8); } : data - __bss_end__ = .; . 
= ALIGN(0x1000); + __bss_end__ = .; + __end__ = ABSOLUTE(.); /* ================== diff --git a/mesosphere/kernel/source/arch/arm64/start.s b/mesosphere/kernel/source/arch/arm64/start.s index 71118be28..dcac66891 100644 --- a/mesosphere/kernel/source/arch/arm64/start.s +++ b/mesosphere/kernel/source/arch/arm64/start.s @@ -220,7 +220,7 @@ begin_flush_cache_local_loop: cmn w9, #1 b.eq done_flush_cache_local_loop - /* FlushEntireDataCacheImplWithoutStack(level); */ + /* FlushEntireDataCacheImplWithoutStack(level); */ mov w0, w9 bl _ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv @@ -256,7 +256,7 @@ begin_flush_cache_shared_loop: cmp w10, w9 b.gt done_flush_cache_shared_loop - /* FlushEntireDataCacheImplWithoutStack(level); */ + /* FlushEntireDataCacheImplWithoutStack(level); */ mov w0, w9 bl _ZN3ams4kern5arm643cpu36FlushEntireDataCacheImplWithoutStackEv From 866771fdaed1a67c17ca86fcac77ef97c8830ceb Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 23 Jan 2020 22:02:15 -0800 Subject: [PATCH 06/97] kern: reuse data cache code during init --- mesosphere/kernel/source/arch/arm64/start.s | 66 +++++++++++---------- 1 file changed, 34 insertions(+), 32 deletions(-) diff --git a/mesosphere/kernel/source/arch/arm64/start.s b/mesosphere/kernel/source/arch/arm64/start.s index dcac66891..c1a22ce98 100644 --- a/mesosphere/kernel/source/arch/arm64/start.s +++ b/mesosphere/kernel/source/arch/arm64/start.s @@ -107,20 +107,8 @@ _ZN3ams4kern4init16JumpFromEL2ToEL1Ev: /* We're going to want to ERET to our caller. */ msr elr_el2, x30 - /* Ensure that the cache is coherent. */ - bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv - dsb sy - - bl _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv - dsb sy - - bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv - dsb sy - - /* Invalidate the entire TLB, and ensure instruction consistency. 
*/ - tlbi vmalle1is - dsb sy - isb + /* Flush the entire data cache and invalidate the entire TLB. */ + bl _ZN3ams4kern5arm643cpu32FlushEntireDataCacheWithoutStackEv /* Setup system registers for deprivileging. */ /* ACTLR_EL2: */ @@ -166,20 +154,8 @@ _ZN3ams4kern4init19DisableMmuAndCachesEv: /* The stack isn't set up, so we'll need to trash a register. */ mov x22, x30 - /* Ensure that the cache is coherent. */ - bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv - dsb sy - - bl _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv - dsb sy - - bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv - dsb sy - - /* Invalidate the entire TLB, and ensure instruction consistency. */ - tlbi vmalle1is - dsb sy - isb + /* Flush the entire data cache and invalidate the entire TLB. */ + bl _ZN3ams4kern5arm643cpu32FlushEntireDataCacheWithoutStackEv /* Invalidate the instruction cache, and ensure instruction consistency. */ ic ialluis @@ -199,13 +175,39 @@ _ZN3ams4kern4init19DisableMmuAndCachesEv: mov x30, x22 ret +/* ams::kern::arm64::cpu::FlushEntireDataCacheWithoutStack() */ +.section .crt0.text._ZN3ams4kern5arm643cpu32FlushEntireDataCacheWithoutStackEv, "ax", %progbits +.global _ZN3ams4kern5arm643cpu32FlushEntireDataCacheWithoutStackEv +.type _ZN3ams4kern5arm643cpu32FlushEntireDataCacheWithoutStackEv, %function +_ZN3ams4kern5arm643cpu32FlushEntireDataCacheWithoutStackEv: + /* The stack isn't set up, so we'll need to trash a register. */ + mov x23, x30 + + /* Ensure that the cache is coherent. */ + bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv + dsb sy + + bl _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv + dsb sy + + bl _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv + dsb sy + + /* Invalidate the entire TLB, and ensure instruction consistency. 
*/ + tlbi vmalle1is + dsb sy + isb + + mov x30, x23 + ret + /* ams::kern::arm64::cpu::FlushEntireDataCacheLocalWithoutStack() */ .section .crt0.text._ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv, "ax", %progbits .global _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv .type _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv, %function _ZN3ams4kern5arm643cpu37FlushEntireDataCacheLocalWithoutStackEv: /* The stack isn't set up, so we'll need to trash a register. */ - mov x23, x30 + mov x24, x30 /* CacheLineIdAccessor clidr_el1; */ mrs x10, clidr_el1 @@ -231,7 +233,7 @@ begin_flush_cache_local_loop: b begin_flush_cache_local_loop done_flush_cache_local_loop: - mov x30, x23 + mov x30, x24 ret /* ams::kern::arm64::cpu::FlushEntireDataCacheSharedWithoutStack() */ @@ -240,7 +242,7 @@ done_flush_cache_local_loop: .type _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv, %function _ZN3ams4kern5arm643cpu38FlushEntireDataCacheSharedWithoutStackEv: /* The stack isn't set up, so we'll need to trash a register. 
*/ - mov x23, x30 + mov x24, x30 /* CacheLineIdAccessor clidr_el1; */ mrs x10, clidr_el1 @@ -267,7 +269,7 @@ begin_flush_cache_shared_loop: b begin_flush_cache_shared_loop done_flush_cache_shared_loop: - mov x30, x23 + mov x30, x24 ret /* ams::kern::arm64::cpu::FlushEntireDataCacheImplWithoutStack() */ From b3bfd6c4c99118510ee06a164ed3cc0228fa90b8 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 23 Jan 2020 22:37:23 -0800 Subject: [PATCH 07/97] meso: discard .interp section --- mesosphere/kernel/kernel.ld | 3 +-- mesosphere/kernel_ldr/kernel_ldr.ld | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld index 765c33ef4..08648015b 100644 --- a/mesosphere/kernel/kernel.ld +++ b/mesosphere/kernel/kernel.ld @@ -80,7 +80,6 @@ SECTIONS .dynsym : { *(.dynsym) } :rodata .dynstr : { *(.dynstr) } :rodata .rela.dyn : { *(.rela.*) } :rodata - .interp : { *(.interp) } :rodata .hash : { *(.hash) } :rodata .gnu.hash : { *(.gnu.hash) } :rodata .gnu.version : { *(.gnu.version) } :rodata @@ -169,7 +168,7 @@ SECTIONS ================== */ /* Discard sections that difficult post-processing */ - /DISCARD/ : { *(.group .comment .note) } + /DISCARD/ : { *(.group .comment .note .interp) } /* Stabs debugging sections. 
*/ .stab 0 : { *(.stab) } diff --git a/mesosphere/kernel_ldr/kernel_ldr.ld b/mesosphere/kernel_ldr/kernel_ldr.ld index 74d739cbd..c56886133 100644 --- a/mesosphere/kernel_ldr/kernel_ldr.ld +++ b/mesosphere/kernel_ldr/kernel_ldr.ld @@ -77,7 +77,6 @@ SECTIONS .dynsym : { *(.dynsym) } :krnlldr .dynstr : { *(.dynstr) } :krnlldr .rela.dyn : { *(.rela.*) } :krnlldr - .interp : { *(.interp) } :krnlldr .hash : { *(.hash) } :krnlldr .gnu.hash : { *(.gnu.hash) } :krnlldr .gnu.version : { *(.gnu.version) } :krnlldr @@ -167,7 +166,7 @@ SECTIONS ================== */ /* Discard sections that difficult post-processing */ - /DISCARD/ : { *(.group .comment .note) } + /DISCARD/ : { *(.group .comment .note .interp) } /* Stabs debugging sections. */ .stab 0 : { *(.stab) } From 32fb22e361d45e0a6cd8085d69bb01e8ffe33303 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 24 Jan 2020 00:47:43 -0800 Subject: [PATCH 08/97] kern: finish implementing assembly init routines --- libraries/config/common.mk | 8 +- .../libmesosphere/include/mesosphere.hpp | 1 + .../arch/arm64/init/kern_k_init_arguments.hpp | 34 ++++++ .../arm64/init/kern_k_init_page_table.hpp | 27 +++++ .../mesosphere/arch/arm64/kern_cpu.hpp | 13 ++ .../init/kern_init_arguments_select.hpp | 29 +++++ mesosphere/kernel/kernel.ld | 2 +- .../source/arch/arm64/init/kern_init_core.cpp | 46 +++++++ .../source/arch/arm64/{ => init}/start.s | 113 +++++++++++++++++- .../kernel_ldr/source/arch/arm64/start.s | 6 +- .../kernel_ldr/source/kern_init_loader.cpp | 31 +---- 11 files changed, 270 insertions(+), 40 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp create mode 100644 mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp rename mesosphere/kernel/source/arch/arm64/{ => init}/start.s (72%) diff --git a/libraries/config/common.mk b/libraries/config/common.mk index 
937e10d2c..db2074599 100644 --- a/libraries/config/common.mk +++ b/libraries/config/common.mk @@ -76,14 +76,14 @@ TARGET := $(notdir $(CURDIR)) BUILD := build DATA := data INCLUDES := include -SOURCES ?= $(foreach d,$(filter-out source/arch source/board,$(wildcard source)),$(call DIR_WILDCARD,$d) $d) +SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source)),$(call DIR_WILDCARD,$d) $d) ifneq ($(strip $(wildcard source/$(ATMOSPHERE_ARCH_DIR)/.*)),) -SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR)) +SOURCES += source/$(ATMOSPHERE_ARCH_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR)) endif ifneq ($(strip $(wildcard source/$(ATMOSPHERE_BOARD_DIR)/.*)),) -SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR)) +SOURCES += source/$(ATMOSPHERE_BOARD_DIR $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR)) endif ifneq ($(strip $(wildcard source/$(ATMOSPHERE_OS_DIR)/.*)),) -SOURCES += $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR)) +SOURCES += source/$(ATMOSPHERE_OS_DIR $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR)) endif diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index 135b6b5a6..3371f426f 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -32,6 +32,7 @@ #include "mesosphere/init/kern_init_elf.hpp" #include "mesosphere/init/kern_init_layout.hpp" #include "mesosphere/init/kern_init_page_table_select.hpp" +#include "mesosphere/init/kern_init_arguments_select.hpp" /* Core functionality. 
*/ #include "mesosphere/kern_select_interrupts.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp new file mode 100644 index 000000000..cdddbc135 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::kern::init { + + struct KInitArguments { + u64 ttbr0; + u64 ttbr1; + u64 tcr; + u64 mair; + u64 cpuactlr; + u64 cpuectlr; + u64 sctlr; + u64 sp; + u64 entrypoint; + u64 argument; + u64 setup_function; + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 96d7296b5..58a0c153c 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -495,4 +495,31 @@ namespace ams::kern::init { }; + class KInitialPageAllocator : public KInitialPageTable::IPageAllocator { + private: + uintptr_t next_address; + public: + constexpr ALWAYS_INLINE KInitialPageAllocator() : next_address(Null) { /* ... 
*/ } + + ALWAYS_INLINE void Initialize(uintptr_t address) { + this->next_address = address; + } + + ALWAYS_INLINE uintptr_t GetFinalState() { + const uintptr_t final_address = this->next_address; + this->next_address = Null; + return final_address; + } + public: + virtual KPhysicalAddress Allocate() override { + MESOSPHERE_ABORT_UNLESS(this->next_address != Null); + const uintptr_t allocated = this->next_address; + this->next_address += PageSize; + std::memset(reinterpret_cast(allocated), 0, PageSize); + return allocated; + } + + /* No need to override free. The default does nothing, and so would we. */ + }; + } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index 1e1610c95..86412c3a4 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -19,6 +19,19 @@ namespace ams::kern::arm64::cpu { +#if defined(ATMOSPHERE_CPU_ARM_CORTEX_A57) || defined(ATMOSPHERE_CPU_ARM_CORTEX_A53) + constexpr inline size_t InstructionCacheLineSize = 0x40; + constexpr inline size_t DataCacheLineSize = 0x40; +#else + #error "Unknown CPU for cache line sizes" +#endif + +#if defined(ATMOSPHERE_BOARD_NINTENDO_SWITCH) + static constexpr size_t NumCores = 4; +#else + #error "Unknown Board for cpu::NumCores" +#endif + /* Helpers for managing memory state. 
*/ ALWAYS_INLINE void DataSynchronizationBarrier() { __asm__ __volatile__("dsb sy" ::: "memory"); diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp new file mode 100644 index 000000000..6c124528d --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +#ifdef ATMOSPHERE_ARCH_ARM64 + #include "../arch/arm64/init/kern_k_init_arguments.hpp" +#else + #error "Unknown architecture for KInitArguments" +#endif + +namespace ams::kern::init { + + KPhysicalAddress GetInitArgumentsAddress(s32 core_id); + +} diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld index 08648015b..2d6497b27 100644 --- a/mesosphere/kernel/kernel.ld +++ b/mesosphere/kernel/kernel.ld @@ -18,7 +18,7 @@ SECTIONS .crt0 : { - KEEP (*(.crt0)) + KEEP (*(.crt0 .crt0.*)) . 
= ALIGN(8); } :code diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp new file mode 100644 index 000000000..9f8bdf691 --- /dev/null +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2019 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::init { + + namespace { + + /* Global Allocator. */ + KInitialPageAllocator g_initial_page_allocator; + + /* Global initial arguments array. 
*/ + KInitArguments g_init_arguments[cpu::NumCores]; + + } + + void InitializeCore(uintptr_t arg0, uintptr_t initial_page_allocator_state) { + /* TODO */ + } + + KPhysicalAddress GetInitArgumentsAddress(s32 core) { + return KPhysicalAddress(std::addressof(g_init_arguments[core])); + } + + void InitializeDebugRegisters() { + /* TODO */ + } + + void InitializeExceptionVectors() { + /* TODO */ + } + +} \ No newline at end of file diff --git a/mesosphere/kernel/source/arch/arm64/start.s b/mesosphere/kernel/source/arch/arm64/init/start.s similarity index 72% rename from mesosphere/kernel/source/arch/arm64/start.s rename to mesosphere/kernel/source/arch/arm64/init/start.s index c1a22ce98..e769e7c14 100644 --- a/mesosphere/kernel/source/arch/arm64/start.s +++ b/mesosphere/kernel/source/arch/arm64/init/start.s @@ -94,9 +94,116 @@ core0_el1: add x3, x0, x3 blr x3 - /* TODO: Finish post-kernelldr init code. */ -1: - b 1b + /* At this point kernelldr has been invoked, and we are relocated at a random virtual address. */ + /* Next thing to do is to set up our memory management and slabheaps -- all the other core initialization. */ + /* Call ams::kern::init::InitializeCore(uintptr_t, uintptr_t) */ + mov x1, x0 /* Kernelldr returns a KInitialPageAllocator state for the kernel to re-use. */ + mov x0, xzr /* Official kernel always passes zero, when this is non-zero the address is mapped. */ + bl _ZN3ams4kern4init14InitializeCoreEmm + + /* Get the init arguments for core 0. 
*/ + mov x0, xzr + bl _ZN3ams4kern4init23GetInitArgumentsAddressEi + + bl _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE + +/* ams::kern::init::StartOtherCore(const ams::kern::init::KInitArguments *) */ +.section .crt0.text._ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE, "ax", %progbits +.global _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE +.type _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE, %function +_ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE: + /* Preserve the KInitArguments pointer in a register. */ + mov x20, x0 + + /* Check our current EL. We want to be executing out of EL1. */ + /* If we're in EL2, we'll need to deprivilege ourselves. */ + mrs x1, currentel + cmp x1, #0x4 + b.eq othercore_el1 + cmp x1, #0x8 + b.eq othercore_el2 +othercore_el3: + b othercore_el3 +othercore_el2: + bl _ZN3ams4kern4init16JumpFromEL2ToEL1Ev +othercore_el1: + bl _ZN3ams4kern4init19DisableMmuAndCachesEv + + /* Setup system registers using values from our KInitArguments. */ + ldr x1, [x20, #0x00] + msr ttbr0_el1, x1 + ldr x1, [x20, #0x08] + msr ttbr1_el1, x1 + ldr x1, [x20, #0x10] + msr tcr_el1, x1 + ldr x1, [x20, #0x18] + msr mair_el1, x1 + + /* Perform cpu-specific setup. */ + mrs x1, midr_el1 + ubfx x2, x1, #0x18, #0x8 /* Extract implementer bits. */ + cmp x2, #0x41 /* Implementer::ArmLimited */ + b.ne othercore_cpu_specific_setup_end + ubfx x2, x1, #0x4, #0xC /* Extract primary part number. */ + cmp x2, #0xD07 /* PrimaryPartNumber::CortexA57 */ + b.eq othercore_cpu_specific_setup_cortex_a57 + cmp x2, #0xD03 /* PrimaryPartNumber::CortexA53 */ + b.eq othercore_cpu_specific_setup_cortex_a53 + b othercore_cpu_specific_setup_end +othercore_cpu_specific_setup_cortex_a57: +othercore_cpu_specific_setup_cortex_a53: + ldr x1, [x20, #0x20] + msr cpuactlr_el1, x1 + ldr x1, [x20, #0x28] + msr cpuectlr_el1, x1 + +othercore_cpu_specific_setup_end: + /* Ensure instruction consistency. 
*/ + dsb sy + isb + + /* Set sctlr_el1 and ensure instruction consistency. */ + ldr x1, [x20, #0x30] + msr sctlr_el1, x1 + + dsb sy + isb + + /* Jump to the virtual address equivalent to ams::kern::init::InvokeEntrypoint */ + ldr x1, [x20, #0x50] + adr x2, _ZN3ams4kern4init14StartOtherCoreEPKNS1_14KInitArgumentsE + sub x1, x1, x2 + adr x2, _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE + add x1, x1, x2 + mov x0, x20 + br x1 + +/* ams::kern::init::InvokeEntrypoint(const ams::kern::init::KInitArguments *) */ +.section .crt0.text._ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE, "ax", %progbits +.global _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE +.type _ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE, %function +_ZN3ams4kern4init16InvokeEntrypointEPKNS1_14KInitArgumentsE: + /* Preserve the KInitArguments pointer in a register. */ + mov x20, x0 + + /* Clear CPACR_EL1. This will prevent classes of traps (SVE, etc). */ + msr cpacr_el1, xzr + isb + + /* Setup the stack pointer. */ + ldr x1, [x20, #0x38] + mov sp, x1 + + /* Ensure that system debug registers are setup. */ + bl _ZN3ams4kern4init24InitializeDebugRegistersEv + + /* Ensure that the exception vectors are setup. */ + bl _ZN3ams4kern4init26InitializeExceptionVectorsEv + + /* Jump to the entrypoint. 
*/ + ldr x1, [x20, #0x40] + ldr x0, [x20, #0x48] + br x1 /* ams::kern::init::JumpFromEL2ToEL1() */ diff --git a/mesosphere/kernel_ldr/source/arch/arm64/start.s b/mesosphere/kernel_ldr/source/arch/arm64/start.s index eaf209211..7d2eacadc 100644 --- a/mesosphere/kernel_ldr/source/arch/arm64/start.s +++ b/mesosphere/kernel_ldr/source/arch/arm64/start.s @@ -75,10 +75,10 @@ _start: bl _ZN3ams4kern4init6loader4MainEmPNS1_12KernelLayoutEm str x0, [sp, #0x00] - /* Call ams::kern::init::loader::Finalize() */ - bl _ZN3ams4kern4init6loader8FinalizeEv + /* Call ams::kern::init::loader::GetFinalPageAllocatorState() */ + bl _ZN3ams4kern4init6loader26GetFinalPageAllocatorStateEv - /* X0 is now the next address for the page allocator. */ + /* X0 is now the saved state for the page allocator. */ /* We will return this to the kernel. */ /* Return to the newly-relocated kernel. */ diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp index c00477270..ca9b0e272 100644 --- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp +++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp @@ -34,33 +34,6 @@ namespace ams::kern::init::loader { constexpr size_t InitialPageTableRegionSize = 0x200000; - class KInitialPageAllocator : public KInitialPageTable::IPageAllocator { - private: - uintptr_t next_address; - public: - constexpr ALWAYS_INLINE KInitialPageAllocator() : next_address(Null) { /* ... 
*/ } - - ALWAYS_INLINE void Initialize(uintptr_t address) { - this->next_address = address; - } - - ALWAYS_INLINE uintptr_t Finalize() { - const uintptr_t final_address = this->next_address; - this->next_address = Null; - return final_address; - } - public: - virtual KPhysicalAddress Allocate() override { - MESOSPHERE_ABORT_UNLESS(this->next_address != Null); - const uintptr_t allocated = this->next_address; - this->next_address += PageSize; - std::memset(reinterpret_cast(allocated), 0, PageSize); - return allocated; - } - - /* No need to override free. The default does nothing, and so would we. */ - }; - /* Global Allocator. */ KInitialPageAllocator g_initial_page_allocator; @@ -335,8 +308,8 @@ namespace ams::kern::init::loader { return GetInteger(virtual_base_address) - base_address; } - uintptr_t Finalize() { - return g_initial_page_allocator.Finalize(); + uintptr_t GetFinalPageAllocatorState() { + return g_initial_page_allocator.GetFinalState(); } } \ No newline at end of file From 875b62f06af351d5362705be56fddf679cbbb4a1 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 24 Jan 2020 00:50:32 -0800 Subject: [PATCH 09/97] config: fix typos --- libraries/config/common.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/config/common.mk b/libraries/config/common.mk index db2074599..aa88292fd 100644 --- a/libraries/config/common.mk +++ b/libraries/config/common.mk @@ -82,8 +82,8 @@ ifneq ($(strip $(wildcard source/$(ATMOSPHERE_ARCH_DIR)/.*)),) SOURCES += source/$(ATMOSPHERE_ARCH_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR)) endif ifneq ($(strip $(wildcard source/$(ATMOSPHERE_BOARD_DIR)/.*)),) -SOURCES += source/$(ATMOSPHERE_BOARD_DIR $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR)) +SOURCES += source/$(ATMOSPHERE_BOARD_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_BOARD_DIR)) endif ifneq ($(strip $(wildcard source/$(ATMOSPHERE_OS_DIR)/.*)),) -SOURCES += source/$(ATMOSPHERE_OS_DIR $(call 
DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR)) +SOURCES += source/$(ATMOSPHERE_OS_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_OS_DIR)) endif From b2e522c0a0e8120ad95c7bda478eb1a2ebe93420 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 24 Jan 2020 02:12:37 -0800 Subject: [PATCH 10/97] meso: find -exec sed -i'' -e 's/2018-2019 Atmo/2018-2020 Atmo/g' {} + --- .../mesosphere/arch/arm64/init/kern_k_init_arguments.hpp | 2 +- .../include/mesosphere/init/kern_init_arguments_select.hpp | 2 +- libraries/libmesosphere/include/mesosphere/kern_svc.hpp | 2 +- .../include/mesosphere/svc/kern_svc_k_user_pointer.hpp | 2 +- .../include/mesosphere/svc/kern_svc_prototypes.hpp | 2 +- .../libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp | 2 +- .../libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp | 2 +- .../vapours/svc/codegen/svc_codegen_impl_code_generator.hpp | 2 +- .../include/vapours/svc/codegen/svc_codegen_impl_common.hpp | 2 +- .../vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp | 2 +- .../include/vapours/svc/codegen/svc_codegen_impl_layout.hpp | 2 +- .../vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp | 2 +- .../include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp | 2 +- .../include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp | 2 +- .../vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp | 2 +- libraries/libvapours/include/vapours/svc/svc_codegen.hpp | 2 +- mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp index cdddbc135..b0334cb14 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 
Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp index 6c124528d..47e2ec4a7 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp index 6eaa75f9a..694b30165 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp index 368ff39dd..7ea3811f4 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git 
a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp index 3412373ba..c66e4f8ed 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp index 42b4a62bd..0afb1940d 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp index bc5b264fa..ed4a1b15a 100644 --- a/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp +++ b/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp index 6a13d93e0..e435c7569 100644 --- 
a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_code_generator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp index c87b4e7c3..6e2bf8d7f 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_common.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp index 3fffe60fa..58d45246b 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_kernel_svc_wrapper.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp index 132b13ae1..54ef0e020 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp +++ 
b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp index 2e3d95775..d4abc7a18 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_layout_conversion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp index 682c29237..e7544702b 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_meta_code.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp index c97bcb3f1..3f5d74526 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_impl_parameter.hpp @@ 
-1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp index a992442d3..beaf8d318 100644 --- a/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp +++ b/libraries/libvapours/include/vapours/svc/codegen/svc_codegen_kernel_svc_wrapper.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/libraries/libvapours/include/vapours/svc/svc_codegen.hpp b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp index 59e7c1b1a..4095a22a3 100644 --- a/libraries/libvapours/include/vapours/svc/svc_codegen.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_codegen.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index 9f8bdf691..e1529fbe3 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2019 Atmosphère-NX + * Copyright (c) 2018-2020 Atmosphère-NX * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, From 
e7dee2a9fcc8ec52dd84e96a821cda64730ebad7 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Tue, 28 Jan 2020 22:09:47 -0800 Subject: [PATCH 11/97] kern: Implement most of memory init (all cores hit main, but still more to do) --- libraries/config/common.mk | 2 +- .../libmesosphere/include/mesosphere.hpp | 6 +- .../arm64/init/kern_k_init_page_table.hpp | 101 +++-- .../mesosphere/arch/arm64/kern_cpu.hpp | 3 + .../arch/arm64/kern_cpu_system_registers.hpp | 91 +++- .../nintendo/switch/kern_k_system_control.hpp | 2 + .../init/kern_init_arguments_select.hpp | 2 + .../mesosphere/kern_k_memory_layout.hpp | 391 ++++++++++++++++++ .../include/mesosphere/kern_main.hpp | 23 ++ .../include/mesosphere/kern_panic.hpp | 10 +- .../source/arch/arm64/kern_cpu.cpp | 27 +- ..._k_memory_layout.board.nintendo_switch.cpp | 56 +++ .../nintendo/switch/kern_k_system_control.cpp | 34 +- .../nintendo/switch/kern_secure_monitor.cpp | 5 + .../nintendo/switch/kern_secure_monitor.hpp | 1 + .../source/kern_k_memory_layout.cpp | 223 ++++++++++ libraries/libmesosphere/source/kern_main.cpp | 25 ++ .../libvapours/include/vapours/includes.hpp | 2 +- .../util/util_intrusive_red_black_tree.hpp | 24 +- .../source/arch/arm64/init/kern_init_core.cpp | 281 ++++++++++++- .../kernel_ldr/source/arch/arm64/start.s | 10 +- .../kernel_ldr/source/kern_init_loader.cpp | 8 +- 22 files changed, 1246 insertions(+), 81 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_main.hpp create mode 100644 libraries/libmesosphere/source/board/nintendo/switch/kern_k_memory_layout.board.nintendo_switch.cpp create mode 100644 libraries/libmesosphere/source/kern_k_memory_layout.cpp create mode 100644 libraries/libmesosphere/source/kern_main.cpp diff --git a/libraries/config/common.mk b/libraries/config/common.mk index aa88292fd..e71dc1500 100644 --- a/libraries/config/common.mk +++ b/libraries/config/common.mk @@ -76,7 
+76,7 @@ TARGET := $(notdir $(CURDIR)) BUILD := build DATA := data INCLUDES := include -SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source)),$(call DIR_WILDCARD,$d) $d) +SOURCES ?= source $(foreach d,$(filter-out source/arch source/board source,$(wildcard source/*)),$(if $(wildcard $d/.),$(call DIR_WILDCARD,$d) $d,)) ifneq ($(strip $(wildcard source/$(ATMOSPHERE_ARCH_DIR)/.*)),) SOURCES += source/$(ATMOSPHERE_ARCH_DIR) $(call DIR_WILDCARD,source/$(ATMOSPHERE_ARCH_DIR)) diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index 3371f426f..d9d27e02e 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -27,16 +27,20 @@ /* Core pre-initialization includes. */ #include "mesosphere/kern_select_cpu.hpp" +#include "mesosphere/kern_select_k_system_control.hpp" /* Initialization headers. */ #include "mesosphere/init/kern_init_elf.hpp" #include "mesosphere/init/kern_init_layout.hpp" #include "mesosphere/init/kern_init_page_table_select.hpp" #include "mesosphere/init/kern_init_arguments_select.hpp" +#include "mesosphere/kern_k_memory_layout.hpp" /* Core functionality. */ #include "mesosphere/kern_select_interrupts.hpp" -#include "mesosphere/kern_select_k_system_control.hpp" /* Supervisor Calls. */ #include "mesosphere/kern_svc.hpp" + +/* Main functionality. 
*/ +#include "mesosphere/kern_main.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 58a0c153c..e7bc3d758 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -17,7 +17,7 @@ #include #include #include -#include "../kern_cpu.hpp" +#include namespace ams::kern::init { @@ -190,10 +190,14 @@ namespace ams::kern::init { virtual KPhysicalAddress Allocate() { return Null; } virtual void Free(KPhysicalAddress phys_addr) { /* Nothing to do here. */ (void)(phys_addr); } }; + + struct NoClear{}; private: KPhysicalAddress l1_table; public: - constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : l1_table(l1) { + constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1, NoClear) : l1_table(l1) { /* ... */ } + + constexpr ALWAYS_INLINE KInitialPageTable(KPhysicalAddress l1) : KInitialPageTable(l1, NoClear{}) { ClearNewPageTable(this->l1_table); } @@ -224,9 +228,9 @@ namespace ams::kern::init { public: void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, IPageAllocator &allocator) { /* Ensure that addresses and sizes are page aligned. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize)); /* Iteratively map pages until the requested region is mapped. 
*/ while (size > 0) { @@ -309,10 +313,37 @@ namespace ams::kern::init { } } + KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const { + /* Get the L1 entry. */ + const L1PageTableEntry *l1_entry = GetL1Entry(this->l1_table, virt_addr); + + if (l1_entry->IsBlock()) { + return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1)); + } + + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable()); + + /* Get the L2 entry. */ + const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); + + if (l2_entry->IsBlock()) { + return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1)); + } + + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable()); + + /* Get the L3 entry. */ + const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); + + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock()); + + return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1)); + } + bool IsFree(KVirtualAddress virt_addr, size_t size) { /* Ensure that addresses and sizes are page aligned. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize)); const KVirtualAddress end_virt_addr = virt_addr + size; while (virt_addr < end_virt_addr) { @@ -360,8 +391,8 @@ namespace ams::kern::init { cpu::DataSynchronizationBarrierInnerShareable(); /* Ensure that addresses and sizes are page aligned. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize)); /* Iteratively reprotect pages until the requested region is reprotected. 
*/ while (size > 0) { @@ -371,9 +402,9 @@ namespace ams::kern::init { if (l1_entry->IsBlock()) { /* Ensure that we are allowed to have an L1 block here. */ const KPhysicalAddress block = l1_entry->GetBlock(); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L1BlockSize)); - MESOSPHERE_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L1BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L1 block. */ *static_cast(l1_entry) = InvalidPageTableEntry; @@ -389,7 +420,7 @@ namespace ams::kern::init { } /* Not a block, so we must be a table. */ - MESOSPHERE_ABORT_UNLESS(l1_entry->IsTable()); + MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsTable()); L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr); if (l2_entry->IsBlock()) { @@ -397,14 +428,14 @@ namespace ams::kern::init { if (l2_entry->IsContiguous()) { /* Ensure that we are allowed to have a contiguous L2 block here. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2ContiguousBlockSize)); /* Invalidate the existing contiguous L2 block. */ for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) { /* Ensure that the entry is valid. 
*/ - MESOSPHERE_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true)); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, true)); static_cast(l2_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -419,10 +450,10 @@ namespace ams::kern::init { size -= L2ContiguousBlockSize; } else { /* Ensure that we are allowed to have an L2 block here. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L2BlockSize)); - MESOSPHERE_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L2BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L2 block. */ *static_cast(l2_entry) = InvalidPageTableEntry; @@ -440,23 +471,23 @@ namespace ams::kern::init { } /* Not a block, so we must be a table. */ - MESOSPHERE_ABORT_UNLESS(l2_entry->IsTable()); + MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsTable()); /* We must have a mapped l3 entry to reprotect. */ L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr); - MESOSPHERE_ABORT_UNLESS(l3_entry->IsBlock()); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsBlock()); const KPhysicalAddress block = l3_entry->GetBlock(); if (l3_entry->IsContiguous()) { /* Ensure that we are allowed to have a contiguous L3 block here. 
*/ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3ContiguousBlockSize)); /* Invalidate the existing contiguous L3 block. */ for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) { /* Ensure that the entry is valid. */ - MESOSPHERE_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true)); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, true)); static_cast(l3_entry)[i] = InvalidPageTableEntry; } cpu::DataSynchronizationBarrierInnerShareable(); @@ -471,10 +502,10 @@ namespace ams::kern::init { size -= L3ContiguousBlockSize; } else { /* Ensure that we are allowed to have an L3 block here. */ - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, L3BlockSize)); - MESOSPHERE_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, L3BlockSize)); + MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, false)); /* Invalidate the existing L3 block. 
*/ *static_cast(l3_entry) = InvalidPageTableEntry; @@ -505,14 +536,18 @@ namespace ams::kern::init { this->next_address = address; } - ALWAYS_INLINE uintptr_t GetFinalState() { + ALWAYS_INLINE uintptr_t GetFinalNextAddress() { const uintptr_t final_address = this->next_address; this->next_address = Null; return final_address; } + + ALWAYS_INLINE uintptr_t GetFinalState() { + return this->GetFinalNextAddress(); + } public: virtual KPhysicalAddress Allocate() override { - MESOSPHERE_ABORT_UNLESS(this->next_address != Null); + MESOSPHERE_INIT_ABORT_UNLESS(this->next_address != Null); const uintptr_t allocated = this->next_address; this->next_address += PageSize; std::memset(reinterpret_cast(allocated), 0, PageSize); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index 86412c3a4..af5dc71ca 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -59,6 +59,9 @@ namespace ams::kern::arm64::cpu { EnsureInstructionConsistency(); } + /* Synchronization helpers. */ + NOINLINE void SynchronizeAllCores(); + /* Cache management helpers. 
*/ void FlushEntireDataCacheShared(); void FlushEntireDataCacheLocal(); diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp index d7677f9f4..d816b155f 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp @@ -37,8 +37,8 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1) - MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1) @@ -48,19 +48,88 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1) /* Base class for register accessors. */ - class GenericRegisterAccessor { + class GenericRegisterAccessorBase { + NON_COPYABLE(GenericRegisterAccessorBase); + NON_MOVEABLE(GenericRegisterAccessorBase); private: u64 value; public: - ALWAYS_INLINE GenericRegisterAccessor(u64 v) : value(v) { /* ... */ } + constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : value(v) { /* ... */ } protected: + constexpr ALWAYS_INLINE u64 GetValue() const { + return this->value; + } + constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { return (this->value >> offset) & ((1ul << count) - 1); } }; - /* Special code for main id register. */ - class MainIdRegisterAccessor : public GenericRegisterAccessor { + template + class GenericRegisterAccessor : public GenericRegisterAccessorBase { + public: + constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... 
*/ } + protected: + ALWAYS_INLINE void Store() const { + static_cast(this)->Store(); + } + }; + + #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor + + #define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name) \ + ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \ + constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ } \ + \ + ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); } + + /* Accessors. */ + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1) + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1) + + constexpr ALWAYS_INLINE size_t GetT1Size() const { + const size_t shift_value = this->GetBits(16, 6); + return size_t(1) << (size_t(64) - shift_value); + } + }; + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1) + + constexpr ALWAYS_INLINE u64 GetAff0() const { + return this->GetBits(0, 8); + } + + constexpr ALWAYS_INLINE u64 GetAff1() const { + return this->GetBits(8, 8); + } + + constexpr ALWAYS_INLINE u64 GetAff2() const { + return this->GetBits(16, 8); + } + + constexpr ALWAYS_INLINE u64 GetAff3() const { + return this->GetBits(32, 8); + } + + constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const { + constexpr u64 Mask = 0x000000FF00FFFF00ul; + return this->GetValue() & Mask; + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1) + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) { public: enum class Implementer { 
ArmLimited = 0x41, @@ -70,7 +139,7 @@ namespace ams::kern::arm64::cpu { CortexA57 = 0xD07, }; public: - ALWAYS_INLINE MainIdRegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(midr_el1)) { /* ... */ } + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1) public: constexpr ALWAYS_INLINE Implementer GetImplementer() const { return static_cast(this->GetBits(24, 8)); @@ -94,9 +163,9 @@ namespace ams::kern::arm64::cpu { }; /* Accessors for cache registers. */ - class CacheLineIdAccessor : public GenericRegisterAccessor { + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) { public: - ALWAYS_INLINE CacheLineIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(clidr_el1)) { /* ... */ } + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1) public: constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const { return static_cast(this->GetBits(24, 3)); @@ -109,9 +178,9 @@ namespace ams::kern::arm64::cpu { /* TODO: Other bitfield accessors? */ }; - class CacheSizeIdAccessor : public GenericRegisterAccessor { + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) { public: - ALWAYS_INLINE CacheSizeIdAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(ccsidr_el1)) { /* ... */ } + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1) public: constexpr ALWAYS_INLINE int GetNumberOfSets() const { return static_cast(this->GetBits(13, 15)); @@ -128,6 +197,8 @@ namespace ams::kern::arm64::cpu { /* TODO: Other bitfield accessors? 
*/ }; + #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS + #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS #undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS #undef MESOSPHERE_CPU_GET_SYSREG #undef MESOSPHERE_CPU_SET_SYSREG diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp index 449af1f0e..631cc471a 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp @@ -23,8 +23,10 @@ namespace ams::kern { class Init { public: /* Initialization. */ + static size_t GetIntendedMemorySize(); static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address); static bool ShouldIncreaseThreadResourceLimit(); + static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); /* Randomness. */ static void GenerateRandomBytes(void *dst, size_t size); diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp index 47e2ec4a7..9c8858b72 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp @@ -25,5 +25,7 @@ namespace ams::kern::init { KPhysicalAddress GetInitArgumentsAddress(s32 core_id); + void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg); + void StoreInitArguments(); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp new file mode 100644 index 000000000..4aa49dee4 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -0,0 +1,391 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This 
program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern { + + constexpr size_t KernelAslrAlignment = 2_MB; + constexpr size_t KernelVirtualAddressSpaceWidth = size_t(1ul) << 39ul; + constexpr size_t KernelPhysicalAddressSpaceWidth = size_t(1ul) << 48ul; + + constexpr size_t KernelVirtualAddressSpaceBase = 0ul - KernelVirtualAddressSpaceWidth; + constexpr size_t KernelVirtualAddressSpaceEnd = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment); + constexpr size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ul; + constexpr size_t KernelVirtualAddressSpaceSize = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase; + + constexpr size_t KernelPhysicalAddressSpaceBase = 0ul; + constexpr size_t KernelPhysicalAddressSpaceEnd = KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth; + constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul; + constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase; + + enum KMemoryRegionType : u32 { + KMemoryRegionAttr_CarveoutProtected = 0x04000000, + KMemoryRegionAttr_DidKernelMap = 0x08000000, + KMemoryRegionAttr_ShouldKernelMap = 0x10000000, + KMemoryRegionAttr_UserReadOnly = 0x20000000, + KMemoryRegionAttr_NoUserMap = 0x40000000, + KMemoryRegionAttr_LinearMapped = 0x80000000, + + KMemoryRegionType_None = 0, + 
KMemoryRegionType_Kernel = 1, + KMemoryRegionType_Dram = 2, + KMemoryRegionType_CoreLocal = 4, + + KMemoryRegionType_VirtualKernelPtHeap = 0x2A, + KMemoryRegionType_VirtualKernelTraceBuffer = 0x4A, + KMemoryRegionType_VirtualKernelInitPt = 0x19A, + + KMemoryRegionType_Uart = 0x1D, + KMemoryRegionType_InterruptDistributor = 0x4D, + KMemoryRegionType_InterruptController = 0x2D, + + KMemoryRegionType_MemoryController = 0x55, + KMemoryRegionType_MemoryController0 = 0x95, + KMemoryRegionType_MemoryController1 = 0x65, + KMemoryRegionType_PowerManagementController = 0x1A5, + + KMemoryRegionType_KernelAutoMap = KMemoryRegionType_Kernel | KMemoryRegionAttr_ShouldKernelMap, + + KMemoryRegionType_KernelTemp = 0x31, + + KMemoryRegionType_KernelCode = 0x19, + KMemoryRegionType_KernelStack = 0x29, + KMemoryRegionType_KernelMisc = 0x49, + KMemoryRegionType_KernelSlab = 0x89, + + KMemoryRegionType_KernelMiscMainStack = 0xB49, + KMemoryRegionType_KernelMiscMappedDevice = 0xD49, + KMemoryRegionType_KernelMiscIdleStack = 0x1349, + KMemoryRegionType_KernelMiscUnknownDebug = 0x1549, + KMemoryRegionType_KernelMiscExceptionStack = 0x2349, + + KMemoryRegionType_DramLinearMapped = KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped, + + KMemoryRegionType_DramReservedEarly = 0x16 | KMemoryRegionAttr_NoUserMap, + KMemoryRegionType_DramPoolPartition = 0x26 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + + KMemoryRegionType_DramKernel = 0xE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramKernelCode = 0xCE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramKernelSlab = 0x14E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramKernelPtHeap = 0x24E | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramKernelInitPt = 0x44E | KMemoryRegionAttr_NoUserMap | 
KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_LinearMapped, + + /* These regions aren't normally mapped in retail kernel. */ + KMemoryRegionType_KernelTraceBuffer = 0xA6 | KMemoryRegionAttr_UserReadOnly | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_OnMemoryBootImage = 0x156, + KMemoryRegionType_DTB = 0x256, + }; + + constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) { + if (type_id == (type_id | KMemoryRegionType_KernelTraceBuffer)) { + return KMemoryRegionType_VirtualKernelTraceBuffer; + } else if (type_id == (type_id | KMemoryRegionType_DramKernelPtHeap)) { + return KMemoryRegionType_VirtualKernelPtHeap; + } else { + return KMemoryRegionType_Dram; + } + } + + class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode { + NON_COPYABLE(KMemoryBlock); + NON_MOVEABLE(KMemoryBlock); + private: + uintptr_t address; + uintptr_t pair_address; + size_t block_size; + u32 attributes; + u32 type_id; + public: + static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) { + if (lhs.address < rhs.address) { + return -1; + } else if (lhs.address == rhs.address) { + return 0; + } else { + return 1; + } + } + public: + constexpr ALWAYS_INLINE KMemoryBlock() : address(0), pair_address(0), block_size(0), attributes(0), type_id(0) { /* ... */ } + constexpr ALWAYS_INLINE KMemoryBlock(uintptr_t a, size_t bl, uintptr_t p, u32 r, u32 t) : + address(a), pair_address(p), block_size(bl), attributes(r), type_id(t) + { + /* ... */ + } + constexpr ALWAYS_INLINE KMemoryBlock(uintptr_t a, size_t bl, u32 r, u32 t) : KMemoryBlock(a, bl, std::numeric_limits::max(), r, t) { /* ... 
*/ } + + constexpr ALWAYS_INLINE uintptr_t GetAddress() const { + return this->address; + } + + constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const { + return this->pair_address; + } + + constexpr ALWAYS_INLINE size_t GetSize() const { + return this->block_size; + } + + constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const { + return this->GetAddress() + this->GetSize(); + } + + constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const { + return this->GetEndAddress() - 1; + } + + constexpr ALWAYS_INLINE u32 GetAttributes() const { + return this->attributes; + } + + constexpr ALWAYS_INLINE u32 GetType() const { + return this->type_id; + } + + constexpr ALWAYS_INLINE void SetType(u32 type) { + MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type)); + this->type_id = type; + } + + constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const { + return this->GetAddress() <= address && address < this->GetLastAddress(); + } + + constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const { + return (this->GetType() | type) == this->GetType(); + } + + constexpr ALWAYS_INLINE bool HasTypeAttribute(KMemoryRegionType attr) const { + return (this->GetType() | attr) == this->GetType(); + } + + constexpr ALWAYS_INLINE bool CanDerive(u32 type) const { + return (this->GetType() | type) == type; + } + + constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) { + this->pair_address = a; + } + + constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionType attr) { + this->type_id |= attr; + } + }; + static_assert(std::is_trivially_destructible::value); + + class KMemoryBlockTree { + public: + struct DerivedRegionExtents { + const KMemoryBlock *first_block; + const KMemoryBlock *last_block; + }; + private: + using TreeType = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + using value_type = TreeType::value_type; + using size_type = TreeType::size_type; + using difference_type = TreeType::difference_type; + using pointer = TreeType::pointer; + using const_pointer = 
TreeType::const_pointer; + using reference = TreeType::reference; + using const_reference = TreeType::const_reference; + using iterator = TreeType::iterator; + using const_iterator = TreeType::const_iterator; + private: + TreeType tree; + public: + constexpr ALWAYS_INLINE KMemoryBlockTree() : tree() { /* ... */ } + public: + iterator FindContainingBlock(uintptr_t address) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->Contains(address)) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + + iterator FindFirstBlockByTypeAttr(u32 type_id, u32 attr = 0) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->GetType() == type_id && it->GetAttributes() == attr) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) { + DerivedRegionExtents extents = { .first_block = nullptr, .last_block = nullptr }; + + for (auto it = this->cbegin(); it != this->cend(); it++) { + if (it->IsDerivedFrom(type_id)) { + if (extents.first_block == nullptr) { + extents.first_block = std::addressof(*it); + } + extents.last_block = std::addressof(*it); + } + } + + MESOSPHERE_INIT_ABORT_UNLESS(extents.first_block != nullptr); + MESOSPHERE_INIT_ABORT_UNLESS(extents.last_block != nullptr); + + return extents; + } + public: + NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0); + NOINLINE KVirtualAddress GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id); + + ALWAYS_INLINE KVirtualAddress GetRandomAlignedRegionWithGuard(size_t size, size_t alignment, u32 type_id, size_t guard_size) { + return this->GetRandomAlignedRegion(size + 2 * guard_size, alignment, type_id) + guard_size; + } + public: + /* Iterator accessors. 
*/ + iterator begin() { + return this->tree.begin(); + } + + const_iterator begin() const { + return this->tree.begin(); + } + + iterator end() { + return this->tree.end(); + } + + const_iterator end() const { + return this->tree.end(); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + iterator iterator_to(reference ref) { + return this->tree.iterator_to(ref); + } + + const_iterator iterator_to(const_reference ref) const { + return this->tree.iterator_to(ref); + } + + /* Content management. */ + bool empty() const { + return this->tree.empty(); + } + + reference back() { + return this->tree.back(); + } + + const_reference back() const { + return this->tree.back(); + } + + reference front() { + return this->tree.front(); + } + + const_reference front() const { + return this->tree.front(); + } + + /* GCC over-eagerly inlines this operation. */ + NOINLINE iterator insert(reference ref) { + return this->tree.insert(ref); + } + + NOINLINE iterator erase(iterator it) { + return this->tree.erase(it); + } + + iterator find(const_reference ref) const { + return this->tree.find(ref); + } + + iterator nfind(const_reference ref) const { + return this->tree.nfind(ref); + } + }; + + class KMemoryBlockAllocator { + NON_COPYABLE(KMemoryBlockAllocator); + NON_MOVEABLE(KMemoryBlockAllocator); + public: + static constexpr size_t MaxMemoryBlocks = 1000; + friend class KMemoryLayout; + private: + KMemoryBlock block_heap[MaxMemoryBlocks]; + size_t num_blocks; + private: + constexpr ALWAYS_INLINE KMemoryBlockAllocator() : block_heap(), num_blocks() { /* ... */ } + public: + ALWAYS_INLINE KMemoryBlock *Allocate() { + /* Ensure we stay within the bounds of our heap. */ + MESOSPHERE_INIT_ABORT_UNLESS(this->num_blocks < MaxMemoryBlocks); + + return &this->block_heap[this->num_blocks++]; + } + + template + ALWAYS_INLINE KMemoryBlock *Create(Args&&... 
args) { + KMemoryBlock *block = this->Allocate(); + new (block) KMemoryBlock(std::forward(args)...); + return block; + } + }; + + class KMemoryLayout { + private: + static /* constinit */ inline uintptr_t s_linear_phys_to_virt_diff; + static /* constinit */ inline uintptr_t s_linear_virt_to_phys_diff; + static /* constinit */ inline KMemoryBlockAllocator s_block_allocator; + static /* constinit */ inline KMemoryBlockTree s_virtual_tree; + static /* constinit */ inline KMemoryBlockTree s_physical_tree; + static /* constinit */ inline KMemoryBlockTree s_virtual_linear_tree; + static /* constinit */ inline KMemoryBlockTree s_physical_linear_tree; + public: + static ALWAYS_INLINE KMemoryBlockAllocator &GetMemoryBlockAllocator() { return s_block_allocator; } + static ALWAYS_INLINE KMemoryBlockTree &GetVirtualMemoryBlockTree() { return s_virtual_tree; } + static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalMemoryBlockTree() { return s_physical_tree; } + static ALWAYS_INLINE KMemoryBlockTree &GetVirtualLinearMemoryBlockTree() { return s_virtual_linear_tree; } + static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalLinearMemoryBlockTree() { return s_physical_linear_tree; } + + static NOINLINE KVirtualAddress GetMainStackTopAddress(s32 core_id) { + return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast(core_id))->GetEndAddress(); + } + + static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start); + }; + + + namespace init { + + /* These should be generic, regardless of board. */ + void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator); + void SetupPoolPartitionMemoryBlocks(); + + /* These may be implemented in a board-specific manner. 
*/ + void SetupDevicePhysicalMemoryBlocks(); + void SetupDramPhysicalMemoryBlocks(); + + } + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_main.hpp b/libraries/libmesosphere/include/mesosphere/kern_main.hpp new file mode 100644 index 000000000..d01cbbbfe --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_main.hpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + NORETURN void HorizonKernelMain(s32 core_id); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp index 16aa4a190..c245b3904 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp @@ -44,10 +44,18 @@ namespace ams::kern { #define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s", #expr) #define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()"); +#define MESOSPHERE_INIT_ABORT() do { /* ... 
*/ } while (true) #define MESOSPHERE_ABORT_UNLESS(expr) \ ({ \ - if (AMS_UNLIKELY(!(expr))) { \ + if (AMS_UNLIKELY(!(expr))) { \ MESOSPHERE_PANIC("Abort(): %s", #expr); \ } \ }) + +#define MESOSPHERE_INIT_ABORT_UNLESS(expr) \ + ({ \ + if (AMS_UNLIKELY(!(expr))) { \ + MESOSPHERE_INIT_ABORT(); \ + } \ + }) diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp index 2574b6db2..43748f070 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -19,6 +19,8 @@ namespace ams::kern::arm64::cpu { namespace { + std::atomic g_all_core_sync_count; + void FlushEntireDataCacheImpl(int level) { /* Used in multiple locations. */ const u64 level_sel_value = static_cast(level << 1); @@ -28,7 +30,7 @@ namespace ams::kern::arm64::cpu { cpu::InstructionMemoryBarrier(); /* Get cache size id info. */ - CacheSizeIdAccessor ccsidr_el1; + CacheSizeIdRegisterAccessor ccsidr_el1; const int num_sets = ccsidr_el1.GetNumberOfSets(); const int num_ways = ccsidr_el1.GetAssociativity(); const int line_size = ccsidr_el1.GetLineSize(); @@ -49,7 +51,7 @@ namespace ams::kern::arm64::cpu { } void FlushEntireDataCacheShared() { - CacheLineIdAccessor clidr_el1; + CacheLineIdRegisterAccessor clidr_el1; const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency(); const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); @@ -59,11 +61,28 @@ namespace ams::kern::arm64::cpu { } void FlushEntireDataCacheLocal() { - CacheLineIdAccessor clidr_el1; + CacheLineIdRegisterAccessor clidr_el1; const int levels_of_unification = clidr_el1.GetLevelsOfUnification(); for (int level = levels_of_unification - 1; level >= 0; level--) { FlushEntireDataCacheImpl(level); } } -} \ No newline at end of file + + NOINLINE void SynchronizeAllCores() { + /* Wait until the count can be read. */ + while (!(g_all_core_sync_count < static_cast(cpu::NumCores))) { /* ... 
*/ } + + const s32 per_core_idx = g_all_core_sync_count.fetch_add(1); + + /* Loop until it's our turn. This will act on each core in order. */ + while (g_all_core_sync_count != per_core_idx + static_cast(cpu::NumCores)) { /* ... */ } + + if (g_all_core_sync_count != 2 * static_cast(cpu::NumCores) - 1) { + g_all_core_sync_count++; + } else { + g_all_core_sync_count = 0; + } + } + +} diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_memory_layout.board.nintendo_switch.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_memory_layout.board.nintendo_switch.cpp new file mode 100644 index 000000000..794c13887 --- /dev/null +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_memory_layout.board.nintendo_switch.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + constexpr uintptr_t DramPhysicalAddress = 0x80000000; + constexpr size_t ReservedEarlyDramSize = 0x60000; + + } + + namespace init { + + void SetupDevicePhysicalMemoryBlocks() { + /* TODO: Give these constexpr defines somewhere? 
*/ + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x70006000, 0x40, KMemoryRegionType_Uart | KMemoryRegionAttr_ShouldKernelMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x70019000, 0x1000, KMemoryRegionType_MemoryController | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7001C000, 0x1000, KMemoryRegionType_MemoryController0 | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7001D000, 0x1000, KMemoryRegionType_MemoryController1 | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E000, 0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x7000E400, 0xC00, KMemoryRegionType_PowerManagementController | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50040000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50041000, 0x1000, KMemoryRegionType_InterruptDistributor | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50042000, 0x1000, KMemoryRegionType_InterruptController | KMemoryRegionAttr_ShouldKernelMap | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x50043000, 0x1D000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6000F000, 0x1000, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(0x6001DC00, 
0x400, KMemoryRegionType_None | KMemoryRegionAttr_NoUserMap)); + } + + void SetupDramPhysicalMemoryBlocks() { + const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize(); + const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(DramPhysicalAddress); + + /* Insert blocks into the tree. */ + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly)); + } + + } + +} \ No newline at end of file diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp index d28453f4d..881f834eb 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp @@ -24,7 +24,7 @@ namespace ams::kern { /* TODO: Move this into a header for the MC in general. */ constexpr u32 MemoryControllerConfigurationRegister = 0x70019050; u32 config_value; - MESOSPHERE_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0)); + MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0)); return static_cast(config_value & 0x3FFF) << 20; } @@ -40,24 +40,24 @@ namespace ams::kern { return value; } - ALWAYS_INLINE size_t GetIntendedMemorySizeForInit() { - switch (GetKernelConfigurationForInit().Get()) { - case smc::MemorySize_4GB: - default: /* All invalid modes should go to 4GB. */ - return 4_GB; - case smc::MemorySize_6GB: - return 6_GB; - case smc::MemorySize_8GB: - return 8_GB; - } - } - } /* Initialization. 
*/ + size_t KSystemControl::Init::GetIntendedMemorySize() { + switch (GetKernelConfigurationForInit().Get()) { + case smc::MemorySize_4GB: + default: /* All invalid modes should go to 4GB. */ + return 4_GB; + case smc::MemorySize_6GB: + return 6_GB; + case smc::MemorySize_8GB: + return 8_GB; + } + } + KPhysicalAddress KSystemControl::Init::GetKernelPhysicalBaseAddress(uintptr_t base_address) { const size_t real_dram_size = GetRealMemorySizeForInit(); - const size_t intended_dram_size = GetIntendedMemorySizeForInit(); + const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize(); if (intended_dram_size * 2 < real_dram_size) { return base_address; } else { @@ -69,9 +69,13 @@ namespace ams::kern { return GetKernelConfigurationForInit().Get(); } + void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { + smc::init::CpuOn(core_id, entrypoint, arg); + } + /* Randomness for Initialization. */ void KSystemControl::Init::GenerateRandomBytes(void *dst, size_t size) { - MESOSPHERE_ABORT_UNLESS(size <= 0x38); + MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38); smc::init::GenerateRandomBytes(dst, size); } diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp index e60e6bbe1..c57143260 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp @@ -103,6 +103,11 @@ namespace ams::kern::smc { /* SMC functionality needed for init. 
*/ namespace init { + void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { + SecureMonitorArguments args = { FunctionId_CpuOn, core_id, entrypoint, arg }; + CallPrivilegedSecureMonitorFunctionForInit(args); + } + void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) { SecureMonitorArguments args = { FunctionId_GetConfig, static_cast(config_item) }; CallPrivilegedSecureMonitorFunctionForInit(args); diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp index 434dba413..1417c29e8 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp @@ -79,6 +79,7 @@ namespace ams::kern::smc { namespace init { + void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item); void GenerateRandomBytes(void *dst, size_t size); bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value); diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.cpp new file mode 100644 index 000000000..8cb084bda --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_memory_layout.cpp @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern { + + bool KMemoryBlockTree::Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) { + /* Locate the memory block that contains the address. */ + auto it = this->FindContainingBlock(address); + + /* We require that the old attr is correct. */ + if (it->GetAttributes() != old_attr) { + return false; + } + + /* We further require that the block can be split from the old block. */ + const uintptr_t inserted_block_end = address + size; + const uintptr_t inserted_block_last = inserted_block_end - 1; + if (it->GetLastAddress() < inserted_block_last) { + return false; + } + + /* Further, we require that the type id is a valid transformation. */ + if (!it->CanDerive(type_id)) { + return false; + } + + /* Cache information from the block before we remove it. */ + KMemoryBlock *cur_block = std::addressof(*it); + const uintptr_t old_address = it->GetAddress(); + const size_t old_size = it->GetSize(); + const uintptr_t old_end = old_address + old_size; + const uintptr_t old_last = old_end - 1; + const uintptr_t old_pair = it->GetPairAddress(); + const u32 old_type = it->GetType(); + + /* Erase the existing block from the tree. */ + this->erase(it); + + /* If we need to insert a block before the region, do so. */ + if (old_address != address) { + new (cur_block) KMemoryBlock(old_address, address - old_address, old_pair, old_attr, old_type); + this->insert(*cur_block); + cur_block = KMemoryLayout::GetMemoryBlockAllocator().Allocate(); + } + + /* Insert a new block. */ + const uintptr_t new_pair = (old_pair != std::numeric_limits::max()) ? old_pair + (address - old_address) : old_pair; + new (cur_block) KMemoryBlock(address, size, new_pair, new_attr, type_id); + this->insert(*cur_block); + + /* If we need to insert a block after the region, do so. */ + if (old_last != inserted_block_last) { + const uintptr_t after_pair = (old_pair != std::numeric_limits::max()) ? 
old_pair + (inserted_block_end - old_address) : old_pair; + this->insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(inserted_block_end, old_end - inserted_block_end, after_pair, old_attr, old_type)); + } + + return true; + } + + KVirtualAddress KMemoryBlockTree::GetRandomAlignedRegion(size_t size, size_t alignment, u32 type_id) { + /* We want to find the total extents of the type id. */ + const auto extents = this->GetDerivedRegionExtents(type_id); + + /* Ensure that our alignment is correct. */ + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(extents.first_block->GetAddress(), alignment)); + + const uintptr_t first_address = extents.first_block->GetAddress(); + const uintptr_t last_address = extents.last_block->GetLastAddress(); + + while (true) { + const uintptr_t candidate = util::AlignDown(KSystemControl::Init::GenerateRandomRange(first_address, last_address), alignment); + + /* Ensure that the candidate doesn't overflow with the size. */ + if (!(candidate < candidate + size)) { + continue; + } + + const uintptr_t candidate_last = candidate + size - 1; + + /* Ensure that the candidate fits within the region. */ + if (candidate_last > last_address) { + continue; + } + + /* Locate the candidate block, and ensure it fits. */ + const KMemoryBlock *candidate_block = std::addressof(*this->FindContainingBlock(candidate)); + if (candidate_last > candidate_block->GetLastAddress()) { + continue; + } + + /* Ensure that the block has the correct type id. */ + if (candidate_block->GetType() != type_id) + continue; + + return candidate; + } + } + + void KMemoryLayout::InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start) { + /* Set static differences. */ + s_linear_phys_to_virt_diff = GetInteger(linear_virtual_start) - GetInteger(aligned_linear_phys_start); + s_linear_virt_to_phys_diff = GetInteger(aligned_linear_phys_start) - GetInteger(linear_virtual_start); + + /* Initialize linear trees. 
*/ + for (auto &block : GetPhysicalMemoryBlockTree()) { + if (!block.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { + continue; + } + GetPhysicalLinearMemoryBlockTree().insert(*GetMemoryBlockAllocator().Create(block.GetAddress(), block.GetSize(), block.GetAttributes(), block.GetType())); + } + + for (auto &block : GetVirtualMemoryBlockTree()) { + if (!block.IsDerivedFrom(KMemoryRegionType_Dram)) { + continue; + } + GetVirtualLinearMemoryBlockTree().insert(*GetMemoryBlockAllocator().Create(block.GetAddress(), block.GetSize(), block.GetAttributes(), block.GetType())); + } + } + + namespace init { + + namespace { + + + constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable); + + constexpr size_t CoreLocalRegionAlign = PageSize; + constexpr size_t CoreLocalRegionSize = PageSize * (1 + cpu::NumCores); + constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize; + constexpr size_t CoreLocalRegionBoundsAlign = 1_GB; + /* TODO: static_assert(CoreLocalRegionSize == sizeof(KCoreLocalRegion)); */ + + KVirtualAddress GetCoreLocalRegionVirtualAddress() { + while (true) { + const uintptr_t candidate_start = GetInteger(KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(CoreLocalRegionSizeWithGuards, CoreLocalRegionAlign, KMemoryRegionType_None)); + const uintptr_t candidate_end = candidate_start + CoreLocalRegionSizeWithGuards; + const uintptr_t candidate_last = candidate_end - 1; + + const KMemoryBlock *containing_block = std::addressof(*KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(candidate_start)); + + if (candidate_last > containing_block->GetLastAddress()) { + continue; + } + + if (containing_block->GetType() != KMemoryRegionType_None) { + continue; + } + + if (util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign) != util::AlignDown(candidate_last, CoreLocalRegionBoundsAlign)) { + continue; + 
} + + if (containing_block->GetAddress() > util::AlignDown(candidate_start, CoreLocalRegionBoundsAlign)) { + continue; + } + + if (util::AlignUp(candidate_last, CoreLocalRegionBoundsAlign) - 1 > containing_block->GetLastAddress()) { + continue; + } + + return candidate_start + PageSize; + } + + } + + } + + void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator) { + const KVirtualAddress core_local_virt_start = GetCoreLocalRegionVirtualAddress(); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(core_local_virt_start), CoreLocalRegionSize, KMemoryRegionType_CoreLocal)); + + /* Allocate a page for each core. */ + KPhysicalAddress core_local_region_start_phys[cpu::NumCores] = {}; + for (size_t i = 0; i < cpu::NumCores; i++) { + core_local_region_start_phys[i] = page_allocator.Allocate(); + } + + /* Allocate an l1 page table for each core. */ + KPhysicalAddress core_l1_ttbr1_phys[cpu::NumCores] = {}; + core_l1_ttbr1_phys[0] = util::AlignDown(cpu::GetTtbr1El1(), PageSize); + for (size_t i = 1; i < cpu::NumCores; i++) { + core_l1_ttbr1_phys[i] = page_allocator.Allocate(); + std::memcpy(reinterpret_cast(GetInteger(core_l1_ttbr1_phys[i])), reinterpret_cast(GetInteger(core_l1_ttbr1_phys[0])), PageSize); + } + + /* Use the l1 page table for each core to map the core local region for each core. */ + for (size_t i = 0; i < cpu::NumCores; i++) { + KInitialPageTable temp_pt(core_l1_ttbr1_phys[i], KInitialPageTable::NoClear{}); + temp_pt.Map(core_local_virt_start, PageSize, core_l1_ttbr1_phys[i], KernelRwDataAttribute, page_allocator); + for (size_t j = 0; j < cpu::NumCores; j++) { + temp_pt.Map(core_local_virt_start + (j + 1) * PageSize, PageSize, core_l1_ttbr1_phys[j], KernelRwDataAttribute, page_allocator); + } + + /* Setup the InitArguments. 
*/ + SetInitArguments(static_cast(i), core_local_region_start_phys[i], GetInteger(core_l1_ttbr1_phys[i])); + } + + /* Ensure the InitArguments are flushed to cache. */ + StoreInitArguments(); + } + + void SetupPoolPartitionMemoryBlocks() { + /* TODO */ + } + + } + + +} diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp new file mode 100644 index 000000000..da4c7de80 --- /dev/null +++ b/libraries/libmesosphere/source/kern_main.cpp @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + NORETURN void HorizonKernelMain(s32 core_id) { + cpu::SynchronizeAllCores(); + while (true) { /* ... */ } + } + +} diff --git a/libraries/libvapours/include/vapours/includes.hpp b/libraries/libvapours/include/vapours/includes.hpp index 8d4e73436..7e1bc682b 100644 --- a/libraries/libvapours/include/vapours/includes.hpp +++ b/libraries/libvapours/include/vapours/includes.hpp @@ -31,13 +31,13 @@ #include #include #include +#include #include /* Stratosphere wants stdlib headers, others do not.. */ #ifdef ATMOSPHERE_IS_STRATOSPHERE /* C++ headers. 
*/ -#include #include #include #include diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp index 538dd5ee5..ad2212076 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp @@ -133,7 +133,7 @@ namespace ams::util { } /* Define accessors using RB_* functions. */ - void InitializeImpl() { + constexpr ALWAYS_INLINE void InitializeImpl() { RB_INIT(&this->root); } @@ -166,7 +166,7 @@ namespace ams::util { } public: - IntrusiveRedBlackTree() { + constexpr ALWAYS_INLINE IntrusiveRedBlackTree() : root() { this->InitializeImpl(); } @@ -187,6 +187,14 @@ namespace ams::util { return const_iterator(Traits::GetParent(static_cast(nullptr))); } + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + iterator iterator_to(reference ref) { return iterator(&ref); } @@ -201,19 +209,19 @@ namespace ams::util { } reference back() { - return Traits::GetParent(this->GetMaxImpl()); + return *Traits::GetParent(this->GetMaxImpl()); } const_reference back() const { - return Traits::GetParent(this->GetMaxImpl()); + return *Traits::GetParent(this->GetMaxImpl()); } reference front() { - return Traits::GetParent(this->GetMinImpl()); + return *Traits::GetParent(this->GetMinImpl()); } const_reference front() const { - return Traits::GetParent(this->GetMinImpl()); + return *Traits::GetParent(this->GetMinImpl()); } iterator insert(reference ref) { @@ -244,7 +252,7 @@ namespace ams::util { class IntrusiveRedBlackTreeMemberTraits { public: template - using ListType = IntrusiveRedBlackTree; + using TreeType = IntrusiveRedBlackTree; private: template friend class IntrusiveRedBlackTree; @@ -276,7 +284,7 @@ namespace ams::util { class IntrusiveRedBlackTreeBaseTraits { public: template - using ListType = 
IntrusiveRedBlackTree; + using TreeType = IntrusiveRedBlackTree; private: template friend class IntrusiveRedBlackTree; diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index e1529fbe3..33b020a87 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -15,24 +15,297 @@ */ #include +extern "C" void _start(); +extern "C" void __end__(); + namespace ams::kern::init { + /* Prototypes for functions declared in ASM that we need to reference. */ + void StartOtherCore(const ams::kern::init::KInitArguments *init_args); + namespace { + constexpr size_t KernelResourceRegionSize = 0x1728000; + constexpr size_t ExtraKernelResourceSize = 0x68000; + static_assert(ExtraKernelResourceSize + KernelResourceRegionSize == 0x1790000); + /* Global Allocator. */ KInitialPageAllocator g_initial_page_allocator; /* Global initial arguments array. */ - KInitArguments g_init_arguments[cpu::NumCores]; + KPhysicalAddress g_init_arguments_phys_addr[cpu::NumCores]; + + /* Page table attributes. 
*/ + constexpr PageTableEntry KernelRoDataAttribute(PageTableEntry::Permission_KernelR, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable); + constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable); + constexpr PageTableEntry KernelMmioAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_Device_nGnRE, PageTableEntry::Shareable_OuterShareable); + + void MapStackForCore(KInitialPageTable &page_table, KMemoryRegionType type, u32 core_id) { + constexpr size_t StackSize = PageSize; + constexpr size_t StackAlign = PageSize; + const KVirtualAddress stack_start_virt = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(StackSize, StackAlign, KMemoryRegionType_KernelMisc, PageSize); + const KPhysicalAddress stack_start_phys = g_initial_page_allocator.Allocate(); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(stack_start_virt), StackSize, type, core_id)); + + page_table.Map(stack_start_virt, StackSize, stack_start_phys, KernelRwDataAttribute, g_initial_page_allocator); + } + + void StoreDataCache(const void *addr, size_t size) { + uintptr_t start = util::AlignDown(reinterpret_cast(addr), cpu::DataCacheLineSize); + uintptr_t end = reinterpret_cast(addr) + size; + for (uintptr_t cur = start; cur < end; cur += cpu::DataCacheLineSize) { + __asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(cur) : "memory"); + } + cpu::DataSynchronizationBarrier(); + } + + void TurnOnAllCores(uintptr_t start_other_core_phys) { + cpu::MultiprocessorAffinityRegisterAccessor mpidr; + const auto arg = mpidr.GetCpuOnArgument(); + const auto current_core = mpidr.GetAff0(); + + for (s32 i = 0; i < static_cast(cpu::NumCores); i++) { + if (static_cast(current_core) != i) { + KSystemControl::Init::CpuOn(arg | i, start_other_core_phys, 
GetInteger(g_init_arguments_phys_addr[i])); + } + } + } } - void InitializeCore(uintptr_t arg0, uintptr_t initial_page_allocator_state) { + void InitializeCore(uintptr_t misc_unk_debug_phys_addr, uintptr_t initial_page_allocator_state) { + /* Ensure our first argument is page aligned (as we will map it if it is non-zero). */ + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(misc_unk_debug_phys_addr, PageSize)); + + /* Clear TPIDR_EL1 to zero. */ + cpu::ThreadIdRegisterAccessor(0).Store(); + + /* Restore the page allocator state setup by kernel loader. */ + g_initial_page_allocator.Initialize(initial_page_allocator_state); + + /* Ensure that the T1SZ is correct (and what we expect). */ + MESOSPHERE_INIT_ABORT_UNLESS((cpu::TranslationControlRegisterAccessor().GetT1Size() / L1BlockSize) == MaxPageTableEntries); + + /* Create page table object for use during initialization. */ + KInitialPageTable ttbr1_table(util::AlignDown(cpu::GetTtbr1El1(), PageSize), KInitialPageTable::NoClear{}); + + /* Initialize the slab allocator counts. */ /* TODO */ + + /* Insert the root block for the virtual memory tree, from which all other blocks will derive. */ + KMemoryLayout::GetVirtualMemoryBlockTree().insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(KernelVirtualAddressSpaceBase, KernelVirtualAddressSpaceSize, 0, 0)); + + /* Insert the root block for the physical memory tree, from which all other blocks will derive. */ + KMemoryLayout::GetPhysicalMemoryBlockTree().insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(KernelPhysicalAddressSpaceBase, KernelPhysicalAddressSpaceSize, 0, 0)); + + /* Save start and end for ease of use. */ + const uintptr_t code_start_virt_addr = reinterpret_cast(_start); + const uintptr_t code_end_virt_addr = reinterpret_cast(__end__); + + /* Setup the containing kernel region. 
*/ + constexpr size_t KernelRegionSize = 1_GB; + constexpr size_t KernelRegionAlign = 1_GB; + const KVirtualAddress kernel_region_start = util::AlignDown(code_start_virt_addr, KernelRegionAlign); + size_t kernel_region_size = KernelRegionSize; + if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) { + kernel_region_size = KernelVirtualAddressSpaceEnd - GetInteger(kernel_region_start); + } + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(kernel_region_start), kernel_region_size, KMemoryRegionType_Kernel)); + + /* Setup the code region. */ + constexpr size_t CodeRegionAlign = PageSize; + const KVirtualAddress code_region_start = util::AlignDown(code_start_virt_addr, CodeRegionAlign); + const KVirtualAddress code_region_end = util::AlignUp(code_end_virt_addr, CodeRegionAlign); + const size_t code_region_size = GetInteger(code_region_end) - GetInteger(code_region_start); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(code_region_start), code_region_size, KMemoryRegionType_KernelCode)); + + /* Setup the misc region. */ + constexpr size_t MiscRegionSize = 32_MB; + constexpr size_t MiscRegionAlign = KernelAslrAlignment; + const KVirtualAddress misc_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(MiscRegionSize, MiscRegionAlign, KMemoryRegionType_Kernel); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(misc_region_start), MiscRegionSize, KMemoryRegionType_KernelMisc)); + + /* Setup the stack region. 
*/ + constexpr size_t StackRegionSize = 14_MB; + constexpr size_t StackRegionAlign = KernelAslrAlignment; + const KVirtualAddress stack_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(stack_region_start), StackRegionSize, KMemoryRegionType_KernelStack)); + + /* Decide if Kernel should have enlarged resource region (slab region + page table heap region). */ + const bool use_extra_resources = KSystemControl::Init::ShouldIncreaseThreadResourceLimit(); + const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0); + + /* Determine the size of the slab region. */ + const size_t slab_region_size = 0x647000; /* TODO: Calculate this on the fly. */ + MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size); + + /* Setup the slab region. */ + const KPhysicalAddress code_start_phys_addr = ttbr1_table.GetPhysicalAddress(code_start_virt_addr); + const KPhysicalAddress code_end_phys_addr = code_start_phys_addr + (code_end_virt_addr - code_start_virt_addr); + const KPhysicalAddress slab_start_phys_addr = code_end_phys_addr; + const KPhysicalAddress slab_end_phys_addr = slab_start_phys_addr + slab_region_size; + constexpr size_t SlabRegionAlign = KernelAslrAlignment; + const size_t slab_region_needed_size = util::AlignUp(GetInteger(code_end_phys_addr) + slab_region_size, SlabRegionAlign) - util::AlignDown(GetInteger(code_end_phys_addr), SlabRegionAlign); + const KVirtualAddress slab_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) + (GetInteger(code_end_phys_addr) % SlabRegionAlign); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(slab_region_start), slab_region_size, 
KMemoryRegionType_KernelSlab)); + + /* Set the slab region's pair block. */ + KMemoryLayout::GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelSlab)->SetPairAddress(GetInteger(slab_start_phys_addr)); + + /* Setup the temp region. */ + constexpr size_t TempRegionSize = 128_MB; + constexpr size_t TempRegionAlign = KernelAslrAlignment; + const KVirtualAddress temp_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegion(TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(temp_region_start), TempRegionSize, KMemoryRegionType_KernelTemp)); + + /* Setup the Misc Unknown Debug region, if it's not zero. */ + if (misc_unk_debug_phys_addr) { + constexpr size_t MiscUnknownDebugRegionSize = PageSize; + constexpr size_t MiscUnknownDebugRegionAlign = PageSize; + const KVirtualAddress misc_unk_debug_virt_addr = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(MiscUnknownDebugRegionSize, MiscUnknownDebugRegionAlign, KMemoryRegionType_KernelMisc, PageSize); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(misc_unk_debug_virt_addr), MiscUnknownDebugRegionSize, KMemoryRegionType_KernelMiscUnknownDebug)); + ttbr1_table.Map(misc_unk_debug_virt_addr, MiscUnknownDebugRegionSize, misc_unk_debug_phys_addr, KernelRoDataAttribute, g_initial_page_allocator); + } + + /* Setup board-specific device physical blocks. */ + SetupDevicePhysicalMemoryBlocks(); + + /* Automatically map in devices that have auto-map attributes. */ + for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) { + /* We only care about automatically-mapped blocks. */ + if (!block.IsDerivedFrom(KMemoryRegionType_KernelAutoMap)) { + continue; + } + + /* If this block has already been mapped, no need to consider it. 
*/ + if (block.HasTypeAttribute(KMemoryRegionAttr_DidKernelMap)) { + continue; + } + + /* Set the attribute to note we've mapped this block. */ + block.SetTypeAttribute(KMemoryRegionAttr_DidKernelMap); + + /* Create a virtual pair block and insert it into the tree. */ + const KPhysicalAddress map_phys_addr = util::AlignDown(block.GetAddress(), PageSize); + const size_t map_size = util::AlignUp(block.GetEndAddress(), PageSize) - GetInteger(map_phys_addr); + const KVirtualAddress map_virt_addr = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(map_virt_addr), map_size, KMemoryRegionType_KernelMiscMappedDevice)); + block.SetPairAddress(GetInteger(map_virt_addr) + block.GetAddress() - GetInteger(map_phys_addr)); + + /* Map the page in to our page table. */ + ttbr1_table.Map(map_virt_addr, map_size, map_phys_addr, KernelMmioAttribute, g_initial_page_allocator); + } + + /* Setup the basic DRAM blocks. */ + SetupDramPhysicalMemoryBlocks(); + + /* Insert a physical block for the kernel code region. */ + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(code_start_phys_addr), (code_end_virt_addr - code_start_virt_addr), KMemoryRegionType_DramKernelCode)); + KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_DramKernelCode)->SetPairAddress(code_start_virt_addr); + + /* Insert a physical block for the kernel slab region. */ + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(slab_start_phys_addr), slab_region_size, KMemoryRegionType_DramKernelSlab)); + KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_DramKernelSlab)->SetPairAddress(GetInteger(slab_region_start)); + + /* Map and clear the slab region. 
*/ + ttbr1_table.Map(slab_region_start, slab_region_size, slab_start_phys_addr, KernelRwDataAttribute, g_initial_page_allocator); + std::memset(GetVoidPointer(slab_region_start), 0, slab_region_size); + + /* Determine size available for kernel page table heaps, requiring > 8 MB. */ + const KPhysicalAddress resource_end_phys_addr = slab_start_phys_addr + resource_region_size; + const size_t page_table_heap_size = GetInteger(resource_end_phys_addr) - GetInteger(slab_end_phys_addr); + MESOSPHERE_INIT_ABORT_UNLESS(page_table_heap_size / 4_MB > 2); + + /* Insert a physical block for the kernel page table heap region */ + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(slab_end_phys_addr), page_table_heap_size, KMemoryRegionType_DramKernelPtHeap)); + + /* All DRAM blocks that we haven't tagged by this point will be mapped under the linear mapping. Tag them. */ + for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) { + if (block.GetType() == KMemoryRegionType_Dram) { + block.SetTypeAttribute(KMemoryRegionAttr_LinearMapped); + } + } + + /* Setup the linear mapping region. */ + constexpr size_t LinearRegionAlign = 1_GB; + const auto linear_extents = KMemoryLayout::GetPhysicalMemoryBlockTree().GetDerivedRegionExtents(KMemoryRegionAttr_LinearMapped); + const KPhysicalAddress aligned_linear_phys_start = util::AlignDown(linear_extents.first_block->GetAddress(), LinearRegionAlign); + const size_t linear_region_size = util::AlignUp(linear_extents.last_block->GetEndAddress(), LinearRegionAlign) - GetInteger(aligned_linear_phys_start); + const KVirtualAddress linear_region_start = KMemoryLayout::GetVirtualMemoryBlockTree().GetRandomAlignedRegionWithGuard(linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign); + + const uintptr_t linear_region_phys_to_virt_diff = GetInteger(linear_region_start) - GetInteger(aligned_linear_phys_start); + + /* Map and create blocks for all the linearly-mapped data. 
*/ + for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) { + if (!block.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) { + continue; + } + + const uintptr_t block_virt_addr = block.GetAddress() + linear_region_phys_to_virt_diff; + ttbr1_table.Map(block_virt_addr, block.GetSize(), block.GetAddress(), KernelRwDataAttribute, g_initial_page_allocator); + + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(block_virt_addr, block.GetSize(), GetTypeForVirtualLinearMapping(block.GetType()))); + KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(block_virt_addr)->SetPairAddress(block.GetAddress()); + } + + /* Create blocks for and map all core-specific stacks. */ + for (size_t i = 0; i < cpu::NumCores; i++) { + MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscMainStack, i); + MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscIdleStack, i); + MapStackForCore(ttbr1_table, KMemoryRegionType_KernelMiscExceptionStack, i); + } + + /* Setup the KCoreLocalRegion blocks. */ + SetupCoreLocalRegionMemoryBlocks(ttbr1_table, g_initial_page_allocator); + + /* Finalize the page allocator, we're done allocating at this point. */ + const KPhysicalAddress final_init_page_table_end_address = g_initial_page_allocator.GetFinalNextAddress(); + const size_t init_page_table_region_size = GetInteger(final_init_page_table_end_address) - GetInteger(resource_end_phys_addr); + + /* Insert blocks for the initial page table region. 
*/ + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(GetInteger(resource_end_phys_addr), init_page_table_region_size, KMemoryRegionType_DramKernelInitPt)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(GetInteger(resource_end_phys_addr) + linear_region_phys_to_virt_diff, init_page_table_region_size, KMemoryRegionType_VirtualKernelInitPt)); + + /* All linear-mapped DRAM blocks that we haven't tagged by this point will be allocated to some pool partition. Tag them. */ + for (auto &block : KMemoryLayout::GetPhysicalMemoryBlockTree()) { + if (block.GetType() == KMemoryRegionType_DramLinearMapped) { + block.SetType(KMemoryRegionType_DramPoolPartition); + } + } + + /* Setup all other memory blocks needed to arrange the pool partitions. */ + SetupPoolPartitionMemoryBlocks(); + + /* Cache all linear blocks in their own trees for faster access, later. */ + KMemoryLayout::InitializeLinearMemoryBlockTrees(aligned_linear_phys_start, linear_region_start); + + /* Turn on all other cores. 
*/ + TurnOnAllCores(GetInteger(ttbr1_table.GetPhysicalAddress(reinterpret_cast(::ams::kern::init::StartOtherCore)))); } - KPhysicalAddress GetInitArgumentsAddress(s32 core) { - return KPhysicalAddress(std::addressof(g_init_arguments[core])); + KPhysicalAddress GetInitArgumentsAddress(s32 core_id) { + return g_init_arguments_phys_addr[core_id]; + } + + void SetInitArguments(s32 core_id, KPhysicalAddress address, uintptr_t arg) { + KInitArguments *init_args = reinterpret_cast(GetInteger(address)); + init_args->ttbr0 = cpu::GetTtbr0El1(); + init_args->ttbr1 = arg; + init_args->tcr = cpu::GetTcrEl1(); + init_args->mair = cpu::GetMairEl1(); + init_args->cpuactlr = cpu::GetCpuActlrEl1(); + init_args->cpuectlr = cpu::GetCpuEctlrEl1(); + init_args->sctlr = cpu::GetSctlrEl1(); + init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(core_id)); + init_args->entrypoint = reinterpret_cast(::ams::kern::HorizonKernelMain); + init_args->argument = static_cast(core_id); + init_args->setup_function = reinterpret_cast(::ams::kern::init::StartOtherCore); + g_init_arguments_phys_addr[core_id] = address; + } + + + void StoreInitArguments() { + StoreDataCache(g_init_arguments_phys_addr, sizeof(g_init_arguments_phys_addr)); } void InitializeDebugRegisters() { diff --git a/mesosphere/kernel_ldr/source/arch/arm64/start.s b/mesosphere/kernel_ldr/source/arch/arm64/start.s index 7d2eacadc..2d9c3dffd 100644 --- a/mesosphere/kernel_ldr/source/arch/arm64/start.s +++ b/mesosphere/kernel_ldr/source/arch/arm64/start.s @@ -40,9 +40,10 @@ _start: /* Stack is now set up. */ /* Apply relocations and call init array for KernelLdr. */ - sub sp, sp, #0x20 + sub sp, sp, #0x30 stp x0, x1, [sp, #0x00] stp x2, x30, [sp, #0x10] + stp xzr, xzr, [sp, #0x20] adr x0, _start adr x1, __external_references ldr x1, [x1, #0x18] /* .dynamic. */ @@ -75,6 +76,11 @@ _start: bl _ZN3ams4kern4init6loader4MainEmPNS1_12KernelLayoutEm str x0, [sp, #0x00] + /* Get ams::kern::init::loader::AllocateKernelInitStack(). 
*/ + bl _ZN3ams4kern4init6loader23AllocateKernelInitStackEv + str x0, [sp, #0x20] + + /* Call ams::kern::init::loader::GetFinalPageAllocatorState() */ bl _ZN3ams4kern4init6loader26GetFinalPageAllocatorStateEv @@ -85,6 +91,8 @@ _start: ldr x1, [sp, #0x18] /* Return address to Kernel */ ldr x2, [sp, #0x00] /* Relocated kernel base address diff. */ add x1, x2, x1 + ldr x2, [sp, #0x20] + mov sp, x2 br x1 diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp index ca9b0e272..9eaefb116 100644 --- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp +++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp @@ -97,8 +97,8 @@ namespace ams::kern::init::loader { /* TODO: Define these bits properly elsewhere, document exactly what each bit set is doing .*/ constexpr u64 MairValue = 0x0000000044FF0400ul; constexpr u64 TcrValue = 0x00000011B5193519ul; - cpu::SetMairEl1(MairValue); - cpu::SetTcrEl1(TcrValue); + cpu::MemoryAccessIndirectionRegisterAccessor(MairValue).Store(); + cpu::TranslationControlRegisterAccessor(TcrValue).Store(); /* Perform cpu-specific setup. 
*/ { @@ -308,6 +308,10 @@ namespace ams::kern::init::loader { return GetInteger(virtual_base_address) - base_address; } + KPhysicalAddress AllocateKernelInitStack() { + return g_initial_page_allocator.Allocate() + PageSize; + } + uintptr_t GetFinalPageAllocatorState() { return g_initial_page_allocator.GetFinalState(); } From 7c703903ea947fdb3a8ecb475af132cd5365d3f6 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 01:49:04 -0800 Subject: [PATCH 12/97] kern: implement pool partition memblock setup --- .../libmesosphere/include/mesosphere.hpp | 2 + .../nintendo/switch/kern_k_system_control.hpp | 3 + .../mesosphere/kern_k_memory_layout.hpp | 25 ++++++- .../mesosphere/kern_k_memory_manager.hpp | 35 ++++++++++ .../include/mesosphere/kern_k_page_heap.hpp | 60 +++++++++++++++++ .../nintendo/switch/kern_k_system_control.cpp | 65 ++++++++++++++++++ .../nintendo/switch/kern_secure_monitor.hpp | 9 +++ .../source/kern_k_memory_layout.cpp | 66 ++++++++++++++++++- .../source/kern_k_memory_manager.cpp | 34 ++++++++++ .../libmesosphere/source/kern_k_page_heap.cpp | 28 ++++++++ .../source/arch/arm64/init/kern_init_core.cpp | 1 + 11 files changed, 325 insertions(+), 3 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp create mode 100644 libraries/libmesosphere/source/kern_k_memory_manager.cpp create mode 100644 libraries/libmesosphere/source/kern_k_page_heap.cpp diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index d9d27e02e..cd979eb25 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -38,6 +38,8 @@ /* Core functionality. */ #include "mesosphere/kern_select_interrupts.hpp" +#include "mesosphere/kern_k_page_heap.hpp" +#include "mesosphere/kern_k_memory_manager.hpp" /* Supervisor Calls. 
*/ #include "mesosphere/kern_svc.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp index 631cc471a..f568ac270 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp @@ -27,6 +27,9 @@ namespace ams::kern { static KPhysicalAddress GetKernelPhysicalBaseAddress(uintptr_t base_address); static bool ShouldIncreaseThreadResourceLimit(); static void CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg); + static size_t GetApplicationPoolSize(); + static size_t GetAppletPoolSize(); + static size_t GetMinimumNonSecureSystemPoolSize(); /* Randomness. */ static void GenerateRandomBytes(void *dst, size_t size); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index 4aa49dee4..d1bd1d8f3 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -50,6 +50,12 @@ namespace ams::kern { KMemoryRegionType_VirtualKernelTraceBuffer = 0x4A, KMemoryRegionType_VirtualKernelInitPt = 0x19A, + KMemoryRegionType_VirtualDramMetadataPool = 0x29A, + KMemoryRegionType_VirtualDramApplicationPool = 0x271A, + KMemoryRegionType_VirtualDramAppletPool = 0x1B1A, + KMemoryRegionType_VirtualDramSystemNonSecurePool = 0x331A, + KMemoryRegionType_VirtualDramSystemPool = 0x2B1A, + KMemoryRegionType_Uart = 0x1D, KMemoryRegionType_InterruptDistributor = 0x4D, KMemoryRegionType_InterruptController = 0x2D, @@ -76,8 +82,13 @@ namespace ams::kern { KMemoryRegionType_DramLinearMapped = KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped, - KMemoryRegionType_DramReservedEarly = 0x16 | KMemoryRegionAttr_NoUserMap, - 
KMemoryRegionType_DramPoolPartition = 0x26 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramReservedEarly = 0x16 | KMemoryRegionAttr_NoUserMap, + KMemoryRegionType_DramPoolPartition = 0x26 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramMetadataPool = 0x166 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_CarveoutProtected, + KMemoryRegionType_DramApplicationPool = 0x7A6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramAppletPool = 0xBA6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramSystemNonSecurePool = 0xDA6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped, + KMemoryRegionType_DramSystemPool = 0x13A6 | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_CarveoutProtected, KMemoryRegionType_DramKernel = 0xE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, KMemoryRegionType_DramKernelCode = 0xCE | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected, @@ -228,6 +239,16 @@ namespace ams::kern { MESOSPHERE_INIT_ABORT(); } + iterator FindFirstDerivedBlock(u32 type_id) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->IsDerivedFrom(type_id)) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + + DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) { DerivedRegionExtents extents = { .first_block = nullptr, .last_block = nullptr }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp new file mode 100644 index 000000000..f958d4e34 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms 
and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KMemoryManager { + public: + static constexpr size_t PageSize = 0x1000; /* TODO: Elsewhere? */ + private: + class Impl { + public: + static size_t CalculateMetadataOverheadSize(size_t region_size); + }; + public: + static size_t CalculateMetadataOverheadSize(size_t region_size) { + return Impl::CalculateMetadataOverheadSize(region_size); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp new file mode 100644 index 000000000..2a74efdc3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KPageHeap { + private: + class Block { + private: + class Bitmap { + /* TODO: This is a four-level bitmap tracking page usage. 
*/ + private: + static constexpr s32 GetRequiredDepth(size_t region_size) { + s32 depth = 0; + while (true) { + region_size /= BITSIZEOF(u64); + depth++; + if (region_size == 0) { + return depth; + } + } + } + public: + static constexpr size_t CalculateMetadataOverheadSize(size_t region_size) { + size_t overhead_bits = 0; + for (s32 depth = GetRequiredDepth(region_size) - 1; depth >= 0; depth--) { + region_size = util::AlignUp(region_size, BITSIZEOF(u64)) / BITSIZEOF(u64); + overhead_bits += region_size; + } + return overhead_bits * sizeof(u64); + } + }; + public: + static constexpr size_t CalculateMetadataOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) { + const size_t cur_block_size = (1ul << cur_block_shift); + const size_t next_block_size = (1ul << next_block_shift); + const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size; + return Bitmap::CalculateMetadataOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size); + } + }; + public: + static size_t CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts); + }; + +} diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp index 881f834eb..6a73bad56 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp @@ -34,6 +34,30 @@ namespace ams::kern { return util::BitPack32{static_cast(value)}; } + ALWAYS_INLINE u32 GetMemoryModeForInit() { + u64 value = 0; + smc::init::GetConfig(&value, 1, smc::ConfigItem::MemoryMode); + return static_cast(value); + } + + ALWAYS_INLINE smc::MemoryArrangement GetMemoryArrangeForInit() { + switch(GetMemoryModeForInit() & 0x3F) { + case 0x01: + default: + return smc::MemoryArrangement_4GB; + case 0x02: + return 
smc::MemoryArrangement_4GBForAppletDev; + case 0x03: + return smc::MemoryArrangement_4GBForSystemDev; + case 0x11: + return smc::MemoryArrangement_6GB; + case 0x12: + return smc::MemoryArrangement_6GBForAppletDev; + case 0x21: + return smc::MemoryArrangement_8GB; + } + } + ALWAYS_INLINE u64 GenerateRandomU64ForInit() { u64 value; smc::init::GenerateRandomBytes(&value, sizeof(value)); @@ -69,6 +93,47 @@ namespace ams::kern { return GetKernelConfigurationForInit().Get(); } + size_t KSystemControl::Init::GetApplicationPoolSize() { + switch (GetMemoryArrangeForInit()) { + case smc::MemoryArrangement_4GB: + default: + return 3285_MB; + case smc::MemoryArrangement_4GBForAppletDev: + return 2048_MB; + case smc::MemoryArrangement_4GBForSystemDev: + return 3285_MB; + case smc::MemoryArrangement_6GB: + return 4916_MB; + case smc::MemoryArrangement_6GBForAppletDev: + return 3285_MB; + case smc::MemoryArrangement_8GB: + return 4916_MB; + } + } + + size_t KSystemControl::Init::GetAppletPoolSize() { + switch (GetMemoryArrangeForInit()) { + case smc::MemoryArrangement_4GB: + default: + return 507_MB; + case smc::MemoryArrangement_4GBForAppletDev: + return 1554_MB; + case smc::MemoryArrangement_4GBForSystemDev: + return 448_MB; + case smc::MemoryArrangement_6GB: + return 562_MB; + case smc::MemoryArrangement_6GBForAppletDev: + return 2193_MB; + case smc::MemoryArrangement_8GB: + return 2193_MB; + } + } + + size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() { + /* TODO: Where does this constant actually come from? 
*/ + return 0x29C8000; + } + void KSystemControl::Init::CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) { smc::init::CpuOn(core_id, entrypoint, arg); } diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp index 1417c29e8..4d0a43315 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp @@ -25,6 +25,15 @@ namespace ams::kern::smc { MemorySize_8GB = 2, }; + enum MemoryArrangement { + MemoryArrangement_4GB = 0, + MemoryArrangement_4GBForAppletDev = 1, + MemoryArrangement_4GBForSystemDev = 2, + MemoryArrangement_6GB = 3, + MemoryArrangement_6GBForAppletDev = 4, + MemoryArrangement_8GB = 5, + }; + enum class ConfigItem : u32 { /* Standard config items. */ DisableProgramVerification = 1, diff --git a/libraries/libmesosphere/source/kern_k_memory_layout.cpp b/libraries/libmesosphere/source/kern_k_memory_layout.cpp index 8cb084bda..3f185e6fc 100644 --- a/libraries/libmesosphere/source/kern_k_memory_layout.cpp +++ b/libraries/libmesosphere/source/kern_k_memory_layout.cpp @@ -138,6 +138,9 @@ namespace ams::kern { constexpr PageTableEntry KernelRwDataAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable); + constexpr size_t CarveoutAlignment = 0x20000; + constexpr size_t CarveoutSizeMax = 512_MB - CarveoutAlignment; + constexpr size_t CoreLocalRegionAlign = PageSize; constexpr size_t CoreLocalRegionSize = PageSize * (1 + cpu::NumCores); constexpr size_t CoreLocalRegionSizeWithGuards = CoreLocalRegionSize + 2 * PageSize; @@ -177,6 +180,12 @@ namespace ams::kern { } + void InsertPoolPartitionBlockIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) { + const u32 attr = cur_attr++; + 
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryBlockTree().Insert(start, size, phys_type, attr)); + MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstBlockByTypeAttr(phys_type, attr)->GetPairAddress(), size, virt_type, attr)); + } + } void SetupCoreLocalRegionMemoryBlocks(KInitialPageTable &page_table, KInitialPageAllocator &page_allocator) { @@ -214,7 +223,62 @@ namespace ams::kern { } void SetupPoolPartitionMemoryBlocks() { - /* TODO */ + /* Start by identifying the extents of the DRAM memory region. */ + const auto dram_extents = KMemoryLayout::GetPhysicalMemoryBlockTree().GetDerivedRegionExtents(KMemoryRegionType_Dram); + + /* Get Application and Applet pool sizes. */ + const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize(); + const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize(); + const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize(); + + /* Find the start of the kernel DRAM region. */ + const uintptr_t kernel_dram_start = KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstDerivedBlock(KMemoryRegionType_DramKernel)->GetAddress(); + MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment)); + + /* Find the start of the pool partitions region. */ + const uintptr_t pool_partitions_start = KMemoryLayout::GetPhysicalMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_DramPoolPartition)->GetAddress(); + + /* Decide on starting addresses for our pools. 
*/ + const uintptr_t application_pool_start = dram_extents.last_block->GetEndAddress() - application_pool_size; + const uintptr_t applet_pool_start = application_pool_start - applet_pool_size; + const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment)); + const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start; + + /* We want to arrange application pool depending on where the middle of dram is. */ + const uintptr_t dram_midpoint = (dram_extents.first_block->GetAddress() + dram_extents.last_block->GetEndAddress()) / 2; + u32 cur_pool_attr = 0; + size_t total_overhead_size = 0; + if (dram_extents.last_block->GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) { + InsertPoolPartitionBlockIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr); + total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(application_pool_size); + } else { + const size_t first_application_pool_size = dram_midpoint - application_pool_start; + const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint; + InsertPoolPartitionBlockIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr); + InsertPoolPartitionBlockIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr); + total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(first_application_pool_size); + total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(second_application_pool_size); + } + + /* Insert the applet pool. 
*/ + InsertPoolPartitionBlockIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr); + total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(applet_pool_size); + + /* Insert the nonsecure system pool. */ + InsertPoolPartitionBlockIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr); + total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize(unsafe_system_pool_size); + + /* Insert the metadata pool. */ + total_overhead_size += KMemoryManager::CalculateMetadataOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size); + const uintptr_t metadata_pool_start = unsafe_system_pool_start - total_overhead_size; + const size_t metadata_pool_size = total_overhead_size; + u32 metadata_pool_attr = 0; + InsertPoolPartitionBlockIntoBothTrees(metadata_pool_start, metadata_pool_size, KMemoryRegionType_DramMetadataPool, KMemoryRegionType_VirtualDramMetadataPool, metadata_pool_attr); + + /* Insert the system pool. */ + const uintptr_t system_pool_size = metadata_pool_start - pool_partitions_start; + InsertPoolPartitionBlockIntoBothTrees(pool_partitions_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr); + } } diff --git a/libraries/libmesosphere/source/kern_k_memory_manager.cpp b/libraries/libmesosphere/source/kern_k_memory_manager.cpp new file mode 100644 index 000000000..57483fa69 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_memory_manager.cpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + namespace { + + constexpr size_t g_memory_block_page_shifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E }; + constexpr size_t NumMemoryBlockPageShifts = util::size(g_memory_block_page_shifts); + + } + + size_t KMemoryManager::Impl::CalculateMetadataOverheadSize(size_t region_size) { + const size_t ref_count_size = (region_size / PageSize) * sizeof(u16); + const size_t bitmap_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64); + const size_t page_heap_size = KPageHeap::CalculateMetadataOverheadSize(region_size, g_memory_block_page_shifts, NumMemoryBlockPageShifts); + return util::AlignUp(page_heap_size + bitmap_size + ref_count_size, PageSize); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp new file mode 100644 index 000000000..6dce64507 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern { + + size_t KPageHeap::CalculateMetadataOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) { + size_t overhead_size = 0; + for (size_t i = 0; i < num_block_shifts; i++) { + overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, block_shifts[i], (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0); + } + return util::AlignUp(overhead_size, KMemoryManager::PageSize); + } + +} diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index 33b020a87..bdae68d1c 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -245,6 +245,7 @@ namespace ams::kern::init { ttbr1_table.Map(block_virt_addr, block.GetSize(), block.GetAddress(), KernelRwDataAttribute, g_initial_page_allocator); MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().Insert(block_virt_addr, block.GetSize(), GetTypeForVirtualLinearMapping(block.GetType()))); + block.SetPairAddress(block_virt_addr); KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(block_virt_addr)->SetPairAddress(block.GetAddress()); } From 7820e5b759ec8a8708017fff0f96a71701ab8f30 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 02:42:41 -0800 Subject: [PATCH 13/97] kern: implement debug register/vectors init --- .../arch/arm64/kern_cpu_system_registers.hpp | 88 +++++++++++ .../source/arch/arm64/exception_vectors.s | 141 ++++++++++++++++++ .../source/arch/arm64/init/kern_init_core.cpp | 70 ++++++++- 3 files changed, 297 insertions(+), 2 deletions(-) create mode 100644 mesosphere/kernel/source/arch/arm64/exception_vectors.s diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp index d816b155f..408f0518e 
100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp @@ -40,6 +40,8 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(VbarEl1, vbar_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuActlrEl1, s3_1_c15_c2_0) @@ -47,6 +49,24 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(OslarEl1, oslar_el1) + + #define FOR_I_IN_0_TO_15(HANDLER, ...) \ + HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \ + HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \ + HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \ + HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) \ + + #define MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS(ID, ...) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWcr##ID##El1, dbgwcr##ID##_el1) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWvr##ID##El1, dbgwvr##ID##_el1) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBcr##ID##El1, dbgbcr##ID##_el1) \ + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBvr##ID##El1, dbgbvr##ID##_el1) + + FOR_I_IN_0_TO_15(MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS) + + #undef MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS + /* Base class for register accessors. 
*/ class GenericRegisterAccessorBase { NON_COPYABLE(GenericRegisterAccessorBase); @@ -63,6 +83,21 @@ namespace ams::kern::arm64::cpu { constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const { return (this->value >> offset) & ((1ul << count) - 1); } + + constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) { + const u64 mask = ((1ul << count) - 1) << offset; + this->value &= ~mask; + this->value |= (value & mask) << offset; + } + + constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) { + const u64 mask = 1ul << offset; + if (enabled) { + this->value |= mask; + } else { + this->value &= ~mask; + } + } }; template @@ -98,6 +133,43 @@ namespace ams::kern::arm64::cpu { return size_t(1) << (size_t(64) - shift_value); } }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(DebugFeature) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(DebugFeature, id_aa64dfr0_el1) + + constexpr ALWAYS_INLINE size_t GetNumWatchpoints() const { + return this->GetBits(20, 4); + } + + constexpr ALWAYS_INLINE size_t GetNumBreakpoints() const { + return this->GetBits(12, 4); + } + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MonitorDebugSystemControl) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MonitorDebugSystemControl, mdscr_el1) + + constexpr ALWAYS_INLINE bool GetMde() const { + return this->GetBits(15, 1) != 0; + } + + constexpr ALWAYS_INLINE size_t GetTdcc() const { + return this->GetBits(12, 1) != 0; + } + + constexpr ALWAYS_INLINE decltype(auto) SetMde(bool set) { + this->SetBit(15, set); + return *this; + } + + constexpr ALWAYS_INLINE decltype(auto) SetTdcc(bool set) { + this->SetBit(12, set); + return *this; + } + }; + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) { public: MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1) @@ -129,6 +201,21 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1) }; + 
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(OsLockAccess) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(OsLockAccess, oslar_el1) + }; + + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ContextId) { + public: + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ContextId, contextidr_el1) + + constexpr ALWAYS_INLINE decltype(auto) SetProcId(u32 proc_id) { + this->SetBits(0, BITSIZEOF(proc_id), proc_id); + return *this; + } + }; + MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) { public: enum class Implementer { @@ -197,6 +284,7 @@ namespace ams::kern::arm64::cpu { /* TODO: Other bitfield accessors? */ }; + #undef FOR_I_IN_0_TO_15 #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS #undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS #undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS diff --git a/mesosphere/kernel/source/arch/arm64/exception_vectors.s b/mesosphere/kernel/source/arch/arm64/exception_vectors.s new file mode 100644 index 000000000..89fd0de6d --- /dev/null +++ b/mesosphere/kernel/source/arch/arm64/exception_vectors.s @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* Some macros taken from https://github.com/ARM-software/arm-trusted-firmware/blob/master/include/common/aarch64/asm_macros.S */ +/* + * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/* + * Declare the exception vector table, enforcing it is aligned on a + * 2KB boundary, as required by the ARMv8 architecture. + * Use zero bytes as the fill value to be stored in the padding bytes + * so that it inserts illegal AArch64 instructions. This increases + * security, robustness and potentially facilitates debugging. + */ +.macro vector_base label, section_name=.vectors +.section \section_name, "ax" +.align 11, 0 +\label: +.endm + +/* + * Create an entry in the exception vector table, enforcing it is + * aligned on a 128-byte boundary, as required by the ARMv8 architecture. + * Use zero bytes as the fill value to be stored in the padding bytes + * so that it inserts illegal AArch64 instructions. This increases + * security, robustness and potentially facilitates debugging. + */ +.macro vector_entry label, section_name=.vectors +.cfi_sections .debug_frame +.section \section_name, "ax" +.align 7, 0 +.type \label, %function +.func \label +.cfi_startproc +\label: +.endm + +/* + * This macro verifies that the given vector doesnt exceed the + * architectural limit of 32 instructions. This is meant to be placed + * immediately after the last instruction in the vector. It takes the + * vector entry as the parameter + */ +.macro check_vector_size since + .endfunc + .cfi_endproc + .if (. - \since) > (32 * 4) + .error "Vector exceeds 32 instructions" + .endif +.endm + +/* Actual Vectors for Kernel. */ +.global _ZN3ams4kern16ExceptionVectorsEv +vector_base _ZN3ams4kern16ExceptionVectorsEv + +/* Current EL, SP0 */ +.global unknown_exception +unknown_exception: +vector_entry synch_sp0 + /* Just infinite loop. 
*/ + b unknown_exception + check_vector_size synch_sp0 + +vector_entry irq_sp0 + b unknown_exception + check_vector_size irq_sp0 + +vector_entry fiq_sp0 + b unknown_exception + check_vector_size fiq_sp0 + +vector_entry serror_sp0 + b unknown_exception + check_vector_size serror_sp0 + +/* Current EL, SPx */ +vector_entry synch_spx + b unknown_exception + check_vector_size synch_spx + +vector_entry irq_spx + b unknown_exception + check_vector_size irq_spx + +vector_entry fiq_spx + b unknown_exception + check_vector_size fiq_spx + +vector_entry serror_spx + b unknown_exception + check_vector_size serror_spx + +/* Lower EL, A64 */ +vector_entry synch_a64 + b unknown_exception + check_vector_size synch_a64 + +vector_entry irq_a64 + b unknown_exception + check_vector_size irq_a64 + +vector_entry fiq_a64 + b unknown_exception + check_vector_size fiq_a64 + +vector_entry serror_a64 + b unknown_exception + check_vector_size serror_a64 + +/* Lower EL, A32 */ +vector_entry synch_a32 + b unknown_exception + check_vector_size synch_a32 + +vector_entry irq_a32 + b unknown_exception + check_vector_size irq_a32 + +vector_entry fiq_a32 + b unknown_exception + check_vector_size fiq_a32 + +vector_entry serror_a32 + b unknown_exception + check_vector_size serror_a32 \ No newline at end of file diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index bdae68d1c..513727ab7 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -18,6 +18,12 @@ extern "C" void _start(); extern "C" void __end__(); +namespace ams::kern { + + void ExceptionVectors(); + +} + namespace ams::kern::init { /* Prototypes for functions declared in ASM that we need to reference. 
*/ @@ -310,11 +316,71 @@ namespace ams::kern::init { } void InitializeDebugRegisters() { - /* TODO */ + /* Determine how many watchpoints and breakpoints we have */ + cpu::DebugFeatureRegisterAccessor aa64dfr0; + const auto num_watchpoints = aa64dfr0.GetNumWatchpoints(); + const auto num_breakpoints = aa64dfr0.GetNumBreakpoints(); + cpu::EnsureInstructionConsistency(); + + /* Clear the debug monitor register and the os lock access register. */ + cpu::MonitorDebugSystemControlRegisterAccessor(0).Store(); + cpu::EnsureInstructionConsistency(); + cpu::OsLockAccessRegisterAccessor(0).Store(); + cpu::EnsureInstructionConsistency(); + + /* Clear all debug watchpoints/breakpoints. */ + #define FOR_I_IN_15_TO_1(HANDLER, ...) \ + HANDLER(15, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(12, ## __VA_ARGS__) \ + HANDLER(11, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(8, ## __VA_ARGS__) \ + HANDLER(7, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(4, ## __VA_ARGS__) \ + HANDLER(3, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) + + #define MESOSPHERE_INITIALIZE_WATCHPOINT_CASE(ID, ...) \ + case ID: \ + cpu::SetDbgWcr##ID##El1(__VA_ARGS__); \ + cpu::SetDbgWvr##ID##El1(__VA_ARGS__); \ + + #define MESOSPHERE_INITIALIZE_BREAKPOINT_CASE(ID, ...) 
\ + case ID: \ + cpu::SetDbgBcr##ID##El1(__VA_ARGS__); \ + cpu::SetDbgBvr##ID##El1(__VA_ARGS__); \ + [[fallthrough]]; + + + switch (num_watchpoints) { + FOR_I_IN_15_TO_1(MESOSPHERE_INITIALIZE_WATCHPOINT_CASE, 0) + default: + break; + } + cpu::SetDbgWcr0El1(0); + cpu::SetDbgWvr0El1(0); + + switch (num_breakpoints) { + FOR_I_IN_15_TO_1(MESOSPHERE_INITIALIZE_BREAKPOINT_CASE, 0) + default: + break; + } + cpu::SetDbgBcr0El1(0); + cpu::SetDbgBvr0El1(0); + + #undef MESOSPHERE_INITIALIZE_WATCHPOINT_CASE + #undef MESOSPHERE_INITIALIZE_BREAKPOINT_CASE + #undef FOR_I_IN_15_TO_1 + + cpu::EnsureInstructionConsistency(); + + /* Initialize the context id register to all 1s. */ + cpu::ContextIdRegisterAccessor(0).SetProcId(std::numeric_limits::max()).Store(); + cpu::EnsureInstructionConsistency(); + + /* Configure the debug monitor register. */ + cpu::MonitorDebugSystemControlRegisterAccessor(0).SetMde(true).SetTdcc(true).Store(); + cpu::EnsureInstructionConsistency(); } void InitializeExceptionVectors() { - /* TODO */ + cpu::SetVbarEl1(reinterpret_cast(::ams::kern::ExceptionVectors)); + cpu::EnsureInstructionConsistency(); } } \ No newline at end of file From 507ab467097a64a87daa4a3e68075ed9347e2cb2 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 04:36:18 -0800 Subject: [PATCH 14/97] cpu: optimize core barrier --- .../source/arch/arm64/kern_cpu.cpp | 29 ++++----- .../source/arch/arm64/kern_cpu_asm.s | 63 +++++++++++++++++++ 2 files changed, 78 insertions(+), 14 deletions(-) create mode 100644 libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp index 43748f070..ec4097944 100644 --- a/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp @@ -17,9 +17,14 @@ namespace ams::kern::arm64::cpu { + /* Declare prototype to be implemented in asm. 
*/ + void SynchronizeAllCoresImpl(s32 *sync_var, s32 num_cores); + + namespace { - std::atomic g_all_core_sync_count; + /* Expose this as a global, for asm to use. */ + s32 g_all_core_sync_count; void FlushEntireDataCacheImpl(int level) { /* Used in multiple locations. */ @@ -48,6 +53,14 @@ namespace ams::kern::arm64::cpu { } } + ALWAYS_INLINE void SetEventLocally() { + __asm__ __volatile__("sevl" ::: "memory"); + } + + ALWAYS_INLINE void WaitForEvent() { + __asm__ __volatile__("wfe" ::: "memory"); + } + } void FlushEntireDataCacheShared() { @@ -70,19 +83,7 @@ namespace ams::kern::arm64::cpu { } NOINLINE void SynchronizeAllCores() { - /* Wait until the count can be read. */ - while (!(g_all_core_sync_count < static_cast(cpu::NumCores))) { /* ... */ } - - const s32 per_core_idx = g_all_core_sync_count.fetch_add(1); - - /* Loop until it's our turn. This will act on each core in order. */ - while (g_all_core_sync_count != per_core_idx + static_cast(cpu::NumCores)) { /* ... */ } - - if (g_all_core_sync_count != 2 * static_cast(cpu::NumCores) - 1) { - g_all_core_sync_count++; - } else { - g_all_core_sync_count = 0; - } + SynchronizeAllCoresImpl(&g_all_core_sync_count, static_cast(cpu::NumCores)); } } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s new file mode 100644 index 000000000..fbc000e8b --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_cpu_asm.s @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::arm64::cpu::SynchronizeAllCoresImpl(int *sync_var, int num_cores) */ +.section .text._ZN3ams4kern5arm643cpu23SynchronizeAllCoresImplEPii, "ax", %progbits +.global _ZN3ams4kern5arm643cpu23SynchronizeAllCoresImplEPii +.type _ZN3ams4kern5arm643cpu23SynchronizeAllCoresImplEPii, %function +_ZN3ams4kern5arm643cpu23SynchronizeAllCoresImplEPii: + /* Loop until the sync var is less than num cores. */ + sevl +1: + wfe + ldaxr w2, [x0] + cmp w2, w1 + b.gt 1b + + /* Increment the sync var. */ +2: + ldaxr w2, [x0] + add w3, w2, #1 + stlxr w4, w3, [x0] + cbnz w4, 2b + + /* Loop until the sync var matches our ticket. */ + add w3, w2, w1 + sevl +3: + wfe + ldaxr w2, [x0] + cmp w2, w3 + b.ne 3b + + /* Check if the ticket is the last. */ + sub w2, w1, #1 + add w2, w2, w1 + cmp w3, w2 + b.eq 5f + + /* Our ticket is not the last one. Increment. */ +4: + ldaxr w2, [x0] + add w3, w2, #1 + stlxr w4, w3, [x0] + cbnz w4, 4b + ret + + /* Our ticket is the last one. 
*/ +5: + stlr wzr, [x0] + ret From ad0d2faa6c4924fdae959b7a2cec758259f6c765 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 05:37:14 -0800 Subject: [PATCH 15/97] meso: makefile improvements, sysreg accessors --- .../include/mesosphere/arch/arm64/kern_cpu.hpp | 12 ++++++++++++ .../arch/arm64/kern_cpu_system_registers.hpp | 1 + .../include/mesosphere/kern_k_memory_layout.hpp | 9 +++++++++ mesosphere/Makefile | 15 ++++++++++----- 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index af5dc71ca..a1cf21ac2 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -71,4 +71,16 @@ namespace ams::kern::arm64::cpu { EnsureInstructionConsistency(); } + ALWAYS_INLINE uintptr_t GetCoreLocalRegionAddress() { + register uintptr_t x18 asm("x18"); + __asm__ __volatile__("" : [x18]"=r"(x18)); + return x18; + } + + ALWAYS_INLINE void SetCoreLocalRegionAddress(uintptr_t value) { + register uintptr_t x18 asm("x18") = value; + __asm__ __volatile__("":: [x18]"r"(x18)); + SetTpidrEl1(value); + } + } diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp index 408f0518e..3debbfcff 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp @@ -39,6 +39,7 @@ namespace ams::kern::arm64::cpu { MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1) + MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrEl1, tpidr_el1) MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(VbarEl1, vbar_el1) diff --git 
a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index d1bd1d8f3..020757b5d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -239,6 +239,15 @@ namespace ams::kern { MESOSPHERE_INIT_ABORT(); } + iterator FindFirstBlockByType(u32 type_id) { + for (auto it = this->begin(); it != this->end(); it++) { + if (it->GetType() == type_id) { + return it; + } + } + MESOSPHERE_INIT_ABORT(); + } + iterator FindFirstDerivedBlock(u32 type_id) { for (auto it = this->begin(); it != this->end(); it++) { if (it->IsDerivedFrom(type_id)) { diff --git a/mesosphere/Makefile b/mesosphere/Makefile index f511441d1..5a02471ac 100644 --- a/mesosphere/Makefile +++ b/mesosphere/Makefile @@ -1,5 +1,5 @@ -TARGETS := kernel kernel_ldr -CLEAN_TARGETS := $(foreach target,$(TARGETS),$(target)-clean) +TARGETS := kernel.bin kernel_ldr.bin +CLEAN_TARGETS := $(foreach target,$(TARGETS),$(target:.bin=)-clean) SUBFOLDERS := $(MODULES) @@ -12,10 +12,15 @@ mesosphere.bin: $(TARGETS) @python build_mesosphere.py @echo "Built mesosphere.bin..." 
-$(TARGETS): - $(MAKE) -C $@ +$(TARGETS): check_libmeso + $(MAKE) -C $(@:.bin=) + @cp $(@:.bin=)/$(@) $(@) + +check_libmeso: + @$(MAKE) --no-print-directory -C ../libraries/libmesosphere $(CLEAN_TARGETS): $(MAKE) -C $(@:-clean=) clean + @rm -f $(@:-clean=).bin -.PHONY: all clean $(TARGETS) $(CLEAN_TARGETS) +.PHONY: all clean $(CLEAN_TARGETS) From 981bb1f15d7a09f16973f24876a81455bc4a6872 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 14:26:24 -0800 Subject: [PATCH 16/97] kern: skeleton enough types to init KCoreLocalRegion in main() --- .../libmesosphere/include/mesosphere.hpp | 6 +- .../arch/arm64/kern_k_hardware_timer.hpp | 33 +++++ .../arm64/kern_k_interrupt_controller.hpp | 122 ++++++++++++++++++ .../arch/arm64/kern_k_interrupt_manager.hpp | 82 ++++++++++++ .../arch/arm64/kern_k_spin_lock.hpp | 111 ++++++++++++++++ .../mesosphere/kern_k_core_local_region.hpp | 61 +++++++++ .../mesosphere/kern_k_current_context.hpp | 89 +++++++++++++ .../mesosphere/kern_k_hardware_timer_base.hpp | 41 ++++++ .../mesosphere/kern_k_interrupt_task.hpp | 44 +++++++ .../kern_k_interrupt_task_manager.hpp | 46 +++++++ .../mesosphere/kern_k_memory_layout.hpp | 12 ++ .../include/mesosphere/kern_k_scheduler.hpp | 45 +++++++ .../include/mesosphere/kern_k_spin_lock.hpp | 74 +++++++++++ .../include/mesosphere/kern_k_thread.hpp | 25 ++++ .../include/mesosphere/kern_k_timer_task.hpp | 47 +++++++ .../include/mesosphere/kern_kernel.hpp | 40 ++++++ .../mesosphere/kern_select_hardware_timer.hpp | 31 +++++ .../kern_select_interrupt_controller.hpp | 31 +++++ ....hpp => kern_select_interrupt_manager.hpp} | 28 +++- .../arch/arm64/kern_k_hardware_timer.cpp | 24 ++++ .../source/kern_k_interrupt_task_manager.cpp | 28 ++-- ...ped_interrupt.cpp => kern_k_scheduler.cpp} | 23 ++-- .../libmesosphere/source/kern_kernel.cpp | 49 +++++++ libraries/libmesosphere/source/kern_main.cpp | 7 + 24 files changed, 1066 insertions(+), 33 deletions(-) create mode 100644 
libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_kernel.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp rename libraries/libmesosphere/include/mesosphere/{kern_select_interrupts.hpp => kern_select_interrupt_manager.hpp} (58%) create mode 100644 libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp rename mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp => libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp (54%) rename libraries/libmesosphere/source/{kern_k_scoped_interrupt.cpp => kern_k_scheduler.cpp} (60%) create mode 100644 libraries/libmesosphere/source/kern_kernel.cpp diff --git 
a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index cd979eb25..ea3442e7e 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -37,9 +37,13 @@ #include "mesosphere/kern_k_memory_layout.hpp" /* Core functionality. */ -#include "mesosphere/kern_select_interrupts.hpp" +#include "mesosphere/kern_select_interrupt_manager.hpp" +#include "mesosphere/kern_k_spin_lock.hpp" #include "mesosphere/kern_k_page_heap.hpp" #include "mesosphere/kern_k_memory_manager.hpp" +#include "mesosphere/kern_k_interrupt_task_manager.hpp" +#include "mesosphere/kern_k_core_local_region.hpp" +#include "mesosphere/kern_kernel.hpp" /* Supervisor Calls. */ #include "mesosphere/kern_svc.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp new file mode 100644 index 000000000..6f912bf22 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include + +namespace ams::kern::arm64 { + + class KHardwareTimer : public KHardwareTimerBase { + public: + static constexpr s32 InterruptId = 30; /* Nintendo uses the non-secure timer interrupt. 
*/ + public: + constexpr KHardwareTimer() : KHardwareTimerBase() { /* ... */ } + + virtual void DoTask() override; + + /* TODO: Actually implement more of KHardwareTimer, */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp new file mode 100644 index 000000000..e76df1cf3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern::arm64 { + + struct GicDistributor { + u32 ctlr; + u32 typer; + u32 iidr; + u32 reserved_0x0c; + u32 statusr; + u32 reserved_0x14[3]; + u32 impldef_0x20[8]; + u32 setspi_nsr; + u32 reserved_0x44; + u32 clrspi_nsr; + u32 reserved_0x4c; + u32 setspi_sr; + u32 reserved_0x54; + u32 clrspi_sr; + u32 reserved_0x5c[9]; + u32 igroupr[32]; + u32 isenabler[32]; + u32 icenabler[32]; + u32 ispendr[32]; + u32 icpendr[32]; + u32 isactiver[32]; + u32 icactiver[32]; + u8 ipriorityr[1020]; + u32 _0x7fc; + u8 itargetsr[1020]; + u32 _0xbfc; + u32 icfgr[64]; + u32 igrpmodr[32]; + u32 _0xd80[32]; + u32 nsacr[64]; + u32 sgir; + u32 _0xf04[3]; + u32 cpendsgir[4]; + u32 spendsgir[4]; + u32 reserved_0xf30[52]; + }; + static_assert(std::is_pod::value); + static_assert(sizeof(GicDistributor) == 0x1000); + + struct GicController { + u32 ctlr; + u32 pmr; + u32 bpr; + u32 iar; + u32 eoir; + u32 rpr; + u32 hppir; + u32 abpr; + u32 aiar; + u32 aeoir; + u32 ahppir; + u32 statusr; + u32 reserved_30[4]; + u32 impldef_40[36]; + u32 apr[4]; + u32 nsapr[4]; + u32 reserved_f0[3]; + u32 iidr; + u32 reserved_100[960]; + u32 dir; + u32 _0x1004[1023]; + }; + static_assert(std::is_pod::value); + static_assert(sizeof(GicController) == 0x2000); + + struct KInterruptController { + NON_COPYABLE(KInterruptController); + NON_MOVEABLE(KInterruptController); + public: + static constexpr size_t NumLocalInterrupts = 32; + static constexpr size_t NumGlobalInterrupts = 988; + static constexpr size_t NumInterrupts = NumLocalInterrupts + NumGlobalInterrupts; + public: + struct LocalState { + u32 local_isenabler[NumLocalInterrupts / 32]; + u32 local_ipriorityr[NumLocalInterrupts / 4]; + u32 local_targetsr[NumLocalInterrupts / 4]; + u32 local_icfgr[NumLocalInterrupts / 16]; + }; + + struct GlobalState { + u32 global_isenabler[NumGlobalInterrupts / 32]; + u32 global_ipriorityr[NumGlobalInterrupts / 4]; + u32 global_targetsr[NumGlobalInterrupts / 4]; + 
u32 global_icfgr[NumGlobalInterrupts / 16]; + }; + private: + static inline volatile GicDistributor *s_gicd; + static inline volatile GicController *s_gicc; + static inline u32 s_mask[cpu::NumCores]; + private: + volatile GicDistributor *gicd; + volatile GicController *gicc; + public: + KInterruptController() { /* Don't initialize anything -- this will be taken care of by ::Initialize() */ } + + /* TODO: Actually implement KInterruptController functionality. */ + }; +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp new file mode 100644 index 000000000..132e24eee --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern::arm64 { + + class KInterruptManager { + NON_COPYABLE(KInterruptManager); + NON_MOVEABLE(KInterruptManager); + private: + struct KCoreLocalInterruptEntry { + KInterruptHandler *handler; + bool manually_cleared; + bool needs_clear; + u8 priority; + }; + + struct KGlobalInterruptEntry { + KInterruptHandler *handler; + bool manually_cleared; + bool needs_clear; + }; + private: + static inline KSpinLock s_lock; + static inline KGlobalInterruptEntry s_global_interrupts[KInterruptController::NumGlobalInterrupts]; + static inline KInterruptController::GlobalState s_global_state; + static inline bool s_global_state_saved; + private: + KCoreLocalInterruptEntry core_local_interrupts[KInterruptController::NumLocalInterrupts]; + KInterruptController interrupt_controller; + KInterruptController::LocalState local_state; + bool local_state_saved; + public: + KInterruptManager() : local_state_saved(false) { /* Leave things mostly uninitalized. We'll call ::Initialize() later. */ } + /* TODO: Actually implement KInterruptManager functionality. 
*/ + public: + static ALWAYS_INLINE u32 DisableInterrupts() { + u64 intr_state; + __asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state)); + __asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"(intr_state | 0x80)); + return intr_state; + } + + static ALWAYS_INLINE u32 EnableInterrupts() { + u64 intr_state; + __asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state)); + __asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"(intr_state & 0x7F)); + return intr_state; + } + + static ALWAYS_INLINE void RestoreInterrupts(u32 intr_state) { + u64 cur_state; + __asm__ __volatile__("mrs %[cur_state], daif" : [cur_state]"=r"(cur_state)); + __asm__ __volatile__("msr daif, %[intr_state]" :: [intr_state]"r"((cur_state & 0x7F) | (intr_state & 0x80))); + } + + static ALWAYS_INLINE bool AreInterruptsEnabled() { + u64 intr_state; + __asm__ __volatile__("mrs %[intr_state], daif" : [intr_state]"=r"(intr_state)); + return (intr_state & 0x80) == 0; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp new file mode 100644 index 000000000..adf87720e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
 + */ +#pragma once +#include +#include + +namespace ams::kern::arm64 { + + class KNotAlignedSpinLock { + private: + u32 packed_tickets; + public: + constexpr KNotAlignedSpinLock() : packed_tickets(0) { /* ... */ } + + void Lock() { + u32 tmp0, tmp1; + + __asm__ __volatile__( + " prfm pstl1keep, %[packed_tickets]\n" + "loop1:\n" + " ldaxr %w[tmp0], %[packed_tickets]\n" + " add %w[tmp0], %w[tmp0], #0x10000\n" + " stxr %w[tmp1], %w[tmp0], %[packed_tickets]\n" + " cbnz %w[tmp1], loop1\n" + " \n" + " and %w[tmp1], %w[tmp0], #0xFFFF\n" + " cmp %w[tmp1], %w[tmp0], lsr #16\n" + " b.eq done\n" + " sevl\n" + "loop2:\n" + " wfe\n" + " ldaxrh %w[tmp1], %[packed_tickets]\n" + " cmp %w[tmp1], %w[tmp0], lsr #16\n" + " b.ne loop2\n" + "done:\n" + : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets) + : + : "cc", "memory" + ); + } + + void Unlock() { + const u32 value = this->packed_tickets + 1; + __asm__ __volatile__( + " stlrh %w[value], %[packed_tickets]\n" + : [packed_tickets]"+Q"(this->packed_tickets) + : [value]"r"(value) + : "memory" + ); + } + }; + static_assert(sizeof(KNotAlignedSpinLock) == sizeof(u32)); + + class KAlignedSpinLock { + private: + alignas(cpu::DataCacheLineSize) u16 current_ticket; + alignas(cpu::DataCacheLineSize) u16 next_ticket; + public: + constexpr KAlignedSpinLock() : current_ticket(0), next_ticket(0) { /* ... 
*/ } + + void Lock() { + u32 tmp0, tmp1, got_lock; + + __asm__ __volatile__( + " prfm pstl1keep, %[next_ticket]\n" + "loop1:\n" + " ldaxrh %w[tmp0], %[next_ticket]\n" + " add %w[tmp1], %w[tmp0], #0x1\n" + " stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n" + " cbnz %w[got_lock], loop1\n" + " \n" + " sevl\n" + "loop2:\n" + " wfe\n" + " ldaxrh %w[tmp1], %[current_ticket]\n" + " cmp %w[tmp1], %w[tmp0]\n" + " b.ne loop2\n" + : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket) + : [current_ticket]"Q"(this->current_ticket) + : "cc", "memory" + ); + } + + void Unlock() { + const u32 value = this->current_ticket + 1; + __asm__ __volatile__( + " stlrh %w[value], %[current_ticket]\n" + : [current_ticket]"+Q"(this->current_ticket) + : [value]"r"(value) + : "memory" + ); + } + }; + static_assert(sizeof(KAlignedSpinLock) == 2 * cpu::DataCacheLineSize); + + using KSpinLock = KAlignedSpinLock; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp new file mode 100644 index 000000000..4422df408 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + struct KCoreLocalContext { + KCurrentContext current; + KScheduler scheduler; + KInterruptTaskManager interrupt_task_manager; + KInterruptManager interrupt_manager; + KHardwareTimer hardware_timer; + /* Everything after this point is for debugging. */ + /* Retail kernel doesn't even consistently update these fields. */ + u64 num_sw_interrupts; + u64 num_hw_interrupts; + std::atomic num_svc; + u64 num_process_switches; + u64 num_thread_switches; + u64 num_fpu_switches; + u64 num_scheduler_updates; + u64 num_invoked_scheduler_updates; + std::atomic num_specific_svc[0x80]; + u32 perf_counters[6]; + }; + static_assert(sizeof(KCoreLocalContext) < KMemoryManager::PageSize); + + struct KCoreLocalPage { + KCoreLocalContext context; + u8 padding[KMemoryManager::PageSize - sizeof(KCoreLocalContext)]; + }; + static_assert(sizeof(KCoreLocalPage) == KMemoryManager::PageSize); + + struct KCoreLocalRegion { + KCoreLocalPage current; + KCoreLocalPage absolute[cpu::NumCores]; + }; + static_assert(sizeof(KCoreLocalRegion) == KMemoryManager::PageSize * (1 + cpu::NumCores)); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp new file mode 100644 index 000000000..1cf1abb69 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KThread; + class KProcess; + class KScheduler; + class KInterruptTaskManager; + + struct KCurrentContext { + KThread *current_thread; + KProcess *current_process; + KScheduler *scheduler; + KInterruptTaskManager *interrupt_task_manager; + s32 core_id; + void *exception_stack_bottom; + }; + static_assert(std::is_pod::value); + static_assert(sizeof(KCurrentContext) <= cpu::DataCacheLineSize); + + namespace impl { + + ALWAYS_INLINE KCurrentContext &GetCurrentContext() { + return *reinterpret_cast(cpu::GetCoreLocalRegionAddress()); + } + + } + + ALWAYS_INLINE KThread *GetCurrentThreadPointer() { + return impl::GetCurrentContext().current_thread; + } + + ALWAYS_INLINE KThread &GetCurrentThread() { + return *GetCurrentThreadPointer(); + } + + ALWAYS_INLINE KProcess *GetCurrentProcessPointer() { + return impl::GetCurrentContext().current_process; + } + + ALWAYS_INLINE KProcess &GetCurrentProcess() { + return *GetCurrentProcessPointer(); + } + + ALWAYS_INLINE KScheduler *GetCurrentSchedulerPointer() { + return impl::GetCurrentContext().scheduler; + } + + ALWAYS_INLINE KScheduler &GetCurrentScheduler() { + return *GetCurrentSchedulerPointer(); + } + + ALWAYS_INLINE KInterruptTaskManager *GetCurrentInterruptTaskManagerPointer() { + return impl::GetCurrentContext().interrupt_task_manager; + } + + ALWAYS_INLINE KInterruptTaskManager &GetCurrentInterruptTaskManager() { + return *GetCurrentInterruptTaskManagerPointer(); + } + + ALWAYS_INLINE s32 GetCurrentCoreId() { + return impl::GetCurrentContext().core_id; + } + + ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) { + impl::GetCurrentContext().current_thread = new_thread; + } + + ALWAYS_INLINE void SetCurrentProcess(KProcess *new_process) { + impl::GetCurrentContext().current_process = 
new_process; + } + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp new file mode 100644 index 000000000..c03d372ca --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_hardware_timer_base.hpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KHardwareTimerBase : public KInterruptTask { + private: + using TimerTaskTree = util::IntrusiveRedBlackTreeBaseTraits::TreeType; + private: + KSpinLock lock; + TimerTaskTree task_tree; + KTimerTask *next_task; + public: + constexpr KHardwareTimerBase() : lock(), task_tree(), next_task(nullptr) { /* ... 
*/ } + + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override { return this; } + protected: + KSpinLock &GetLock() { return this->lock; } + + /* TODO: Actually implement more of KHardwareTimerBase */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp new file mode 100644 index 000000000..642fa3203 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::kern { + + class KInterruptTask; + + class KInterruptHandler { + public: + virtual KInterruptTask *OnInterrupt(s32 interrupt_id) = 0; + }; + + class KInterruptTask : public KInterruptHandler { + private: + KInterruptTask *next_task; + public: + constexpr ALWAYS_INLINE KInterruptTask() : next_task(nullptr) { /* ... 
*/ } + + ALWAYS_INLINE KInterruptTask *GetNextTask() const { + return this->next_task; + } + + ALWAYS_INLINE void SetNextTask(KInterruptTask *t) { + this->next_task = t; + } + + virtual void DoTask() = 0; + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp new file mode 100644 index 000000000..cc3111b7a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KThread; + + class KInterruptTaskManager { + private: + class TaskQueue { + private: + KInterruptTask *head; + KInterruptTask *tail; + public: + constexpr TaskQueue() : head(nullptr), tail(nullptr) { /* ... */ } + + ALWAYS_INLINE KInterruptTask *GetHead() { return this->head; } + ALWAYS_INLINE bool IsEmpty() const { return this->head == nullptr; } + ALWAYS_INLINE void Clear() { this->head = nullptr; this->tail = nullptr; } + + void Enqueue(KInterruptTask *task); + void Dequeue(); + }; + private: + TaskQueue task_queue; + KThread *thread; + public: + /* TODO: Actually implement KInterruptTaskManager. This is a placeholder. 
*/ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index 020757b5d..cabb55c4c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -402,6 +402,18 @@ namespace ams::kern { return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast(core_id))->GetEndAddress(); } + static NOINLINE KVirtualAddress GetIdleStackTopAddress(s32 core_id) { + return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscIdleStack, static_cast(core_id))->GetEndAddress(); + } + + static NOINLINE KVirtualAddress GetExceptionStackBottomAddress(s32 core_id) { + return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscExceptionStack, static_cast(core_id))->GetAddress(); + } + + static NOINLINE KVirtualAddress GetCoreLocalRegionAddress() { + return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_CoreLocal)->GetAddress(); + } + static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start); }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp new file mode 100644 index 000000000..7476d1aea --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KScheduler { + NON_COPYABLE(KScheduler); + NON_MOVEABLE(KScheduler); + public: + struct SchedulingState { + std::atomic needs_scheduling; + bool interrupt_task_thread_runnable; + bool should_count_idle; + u64 idle_count; + KThread *highest_priority_thread; + void *idle_thread_stack; + }; + private: + SchedulingState state; + bool is_active; + s32 core_id; + KThread *prev_thread; + u64 last_context_switch_time; + KThread *idle_thread; + public: + KScheduler(); + /* TODO: Actually implement KScheduler. This is a placeholder. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp new file mode 100644 index 000000000..55706313c --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
 + */ +#pragma once +#include +#include "kern_panic.hpp" + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arm64::KAlignedSpinLock; + using ams::kern::arm64::KNotAlignedSpinLock; + using ams::kern::arm64::KSpinLock; + } + +#else + + #error "Unknown architecture for KSpinLock" + +#endif + + +namespace ams::kern { + + class KScopedSpinLock { + private: + KSpinLock *lock_ptr; + public: + explicit ALWAYS_INLINE KScopedSpinLock(KSpinLock *l) : lock_ptr(l) { + this->lock_ptr->Lock(); + } + ALWAYS_INLINE ~KScopedSpinLock() { + this->lock_ptr->Unlock(); + } + }; + + class KScopedAlignedSpinLock { + private: + KAlignedSpinLock *lock_ptr; + public: + explicit ALWAYS_INLINE KScopedAlignedSpinLock(KAlignedSpinLock *l) : lock_ptr(l) { + this->lock_ptr->Lock(); + } + ALWAYS_INLINE ~KScopedAlignedSpinLock() { + this->lock_ptr->Unlock(); + } + }; + + class KScopedNotAlignedSpinLock { + private: + KNotAlignedSpinLock *lock_ptr; + public: + explicit ALWAYS_INLINE KScopedNotAlignedSpinLock(KNotAlignedSpinLock *l) : lock_ptr(l) { + this->lock_ptr->Lock(); + } + ALWAYS_INLINE ~KScopedNotAlignedSpinLock() { + this->lock_ptr->Unlock(); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp new file mode 100644 index 000000000..8959a381e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +namespace ams::kern { + + + class KThread { + /* TODO: This should be a KAutoObject, and this is a placeholder definition. */ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp new file mode 100644 index 000000000..d57899ce8 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KTimerTask : public util::IntrusiveRedBlackTreeBaseNode { + private: + s64 time; + public: + static constexpr ALWAYS_INLINE int Compare(const KTimerTask &lhs, const KTimerTask &rhs) { + if (lhs.GetTime() < rhs.GetTime()) { + return -1; + } else { + return 1; + } + } + public: + constexpr ALWAYS_INLINE KTimerTask() : time(0) { /* ... 
*/ } + + constexpr ALWAYS_INLINE void SetTime(s64 t) { + this->time = t; + } + + constexpr ALWAYS_INLINE s64 GetTime() const { + return this->time; + } + + virtual void OnTimer() = 0; + + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp new file mode 100644 index 000000000..4eb1e97d5 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class Kernel { + public: + enum class State : u8 { + Invalid = 0, + Initializing = 1, + Initialized = 2, + }; + private: + static inline State s_state = State::Invalid; + public: + static void Initialize(s32 core_id); + + static ALWAYS_INLINE State GetState() { return s_state; } + static ALWAYS_INLINE void SetState(State state) { s_state = state; } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp new file mode 100644 index 000000000..db8d6e4f4 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include "kern_panic.hpp" + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arm64::KHardwareTimer; + } + +#else + + #error "Unknown architecture for KHardwareTimer" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp new file mode 100644 index 000000000..e9bbd2ff6 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include "kern_panic.hpp" + +#if defined(ATMOSPHERE_ARCH_ARM64) + + #include + namespace ams::kern { + using ams::kern::arm64::KInterruptController; + } + +#else + + #error "Unknown architecture for KInterruptController" + +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupts.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp similarity index 58% rename from libraries/libmesosphere/include/mesosphere/kern_select_interrupts.hpp rename to libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp index 312450b7c..cab08dd0d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_interrupts.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp @@ -17,26 +17,42 @@ #include #include "kern_panic.hpp" -namespace ams::kern { +#if defined(ATMOSPHERE_ARCH_ARM64) - /* TODO: Actually select between architecture-specific interrupt code. */ + #include + namespace ams::kern { + using ams::kern::arm64::KInterruptManager; + } + +#else + + #error "Unknown architecture for KInterruptManager" + +#endif + + +namespace ams::kern { /* Enable or disable interrupts for the lifetime of an object. */ class KScopedInterruptDisable { NON_COPYABLE(KScopedInterruptDisable); NON_MOVEABLE(KScopedInterruptDisable); + private: + u32 prev_intr_state; public: - KScopedInterruptDisable(); - ~KScopedInterruptDisable(); + ALWAYS_INLINE KScopedInterruptDisable() : prev_intr_state(KInterruptManager::DisableInterrupts()) { /* ... */ } + ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(prev_intr_state); } }; class KScopedInterruptEnable { NON_COPYABLE(KScopedInterruptEnable); NON_MOVEABLE(KScopedInterruptEnable); + private: + u32 prev_intr_state; public: - KScopedInterruptEnable(); - ~KScopedInterruptEnable(); + ALWAYS_INLINE KScopedInterruptEnable() : prev_intr_state(KInterruptManager::EnableInterrupts()) { /* ... 
*/ } + ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(prev_intr_state); } }; } diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp new file mode 100644 index 000000000..6dbf242b3 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arm64 { + + void KHardwareTimer::DoTask() { + /* TODO: Actually implement this. */ + } + +} diff --git a/mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp similarity index 54% rename from mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp rename to libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp index 00ef6f718..8daa3d49d 100644 --- a/mesosphere/kernel_ldr/source/kern_k_scoped_interrupt.cpp +++ b/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp @@ -17,20 +17,24 @@ namespace ams::kern { - inline KScopedInterruptDisable::KScopedInterruptDisable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ + void KInterruptTaskManager::TaskQueue::Enqueue(KInterruptTask *task) { + /* Insert the task into the queue. 
*/ + if (this->tail != nullptr) { + this->tail->SetNextTask(task); + } else { + this->head = task; + } + + this->tail = task; } - inline KScopedInterruptDisable::~KScopedInterruptDisable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ - } - - inline KScopedInterruptEnable::KScopedInterruptEnable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ - } - - inline KScopedInterruptEnable::~KScopedInterruptEnable() { - /* Intentionally do nothing, KernelLdr doesn't have interrupts set up. */ + void KInterruptTaskManager::TaskQueue::Dequeue() { + if (this->head == this->tail) { + this->head = nullptr; + this->tail = nullptr; + } else { + this->head = this->head->GetNextTask(); + } } } diff --git a/libraries/libmesosphere/source/kern_k_scoped_interrupt.cpp b/libraries/libmesosphere/source/kern_k_scheduler.cpp similarity index 60% rename from libraries/libmesosphere/source/kern_k_scoped_interrupt.cpp rename to libraries/libmesosphere/source/kern_k_scheduler.cpp index b342684db..1ec51bd8a 100644 --- a/libraries/libmesosphere/source/kern_k_scoped_interrupt.cpp +++ b/libraries/libmesosphere/source/kern_k_scheduler.cpp @@ -17,20 +17,15 @@ namespace ams::kern { - WEAK_SYMBOL KScopedInterruptDisable::KScopedInterruptDisable() { - /* TODO: Disable interrupts. */ - } - - WEAK_SYMBOL KScopedInterruptDisable::~KScopedInterruptDisable() { - /* TODO: un-disable interrupts. */ - } - - WEAK_SYMBOL KScopedInterruptEnable::KScopedInterruptEnable() { - /* TODO: Enable interrupts. */ - } - - WEAK_SYMBOL KScopedInterruptEnable::~KScopedInterruptEnable() { - /* TODO: un-enable interrupts. 
*/ + KScheduler::KScheduler() + : is_active(false), core_id(0), prev_thread(nullptr), last_context_switch_time(0), idle_thread(nullptr) + { + this->state.needs_scheduling = true; + this->state.interrupt_task_thread_runnable = false; + this->state.should_count_idle = false; + this->state.idle_count = 0; + this->state.idle_thread_stack = nullptr; + this->state.highest_priority_thread = nullptr; } } diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp new file mode 100644 index 000000000..e00cdb563 --- /dev/null +++ b/libraries/libmesosphere/source/kern_kernel.cpp @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + NOINLINE void Kernel::Initialize(s32 core_id) { + /* Construct the core local region object in place. */ + KCoreLocalContext *clc = GetPointer(KMemoryLayout::GetCoreLocalRegionAddress()); + new (clc) KCoreLocalContext; + + /* Set the core local region address into the global register. */ + cpu::SetCoreLocalRegionAddress(reinterpret_cast(clc)); + + /* Initialize current context. 
*/ + clc->current.current_thread = nullptr; + clc->current.current_process = nullptr; + clc->current.scheduler = std::addressof(clc->scheduler); + clc->current.interrupt_task_manager = std::addressof(clc->interrupt_task_manager); + clc->current.core_id = core_id; + clc->current.exception_stack_bottom = GetVoidPointer(KMemoryLayout::GetExceptionStackBottomAddress(core_id)); + + /* Clear debugging counters. */ + clc->num_sw_interrupts = 0; + clc->num_hw_interrupts = 0; + clc->num_svc = 0; + clc->num_process_switches = 0; + clc->num_thread_switches = 0; + clc->num_fpu_switches = 0; + + for (size_t i = 0; i < util::size(clc->perf_counters); i++) { + clc->perf_counters[i] = 0; + } + } + +} diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp index da4c7de80..0906fe3fc 100644 --- a/libraries/libmesosphere/source/kern_main.cpp +++ b/libraries/libmesosphere/source/kern_main.cpp @@ -18,7 +18,14 @@ namespace ams::kern { NORETURN void HorizonKernelMain(s32 core_id) { + /* Setup the Core Local Region, and note that we're initializing. */ + Kernel::Initialize(core_id); + Kernel::SetState(Kernel::State::Initializing); + + /* Ensure that all cores get to this point before proceeding. */ cpu::SynchronizeAllCores(); + + /* TODO: Implement more of Main() */ while (true) { /* ... 
*/ } } From bb4ade30e40adfc974e57a58272d93c611ecebaf Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 14:55:26 -0800 Subject: [PATCH 17/97] kern: optimize memory block tree lookups --- .../mesosphere/kern_k_memory_layout.hpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index cabb55c4c..1db2aa76d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -123,12 +123,12 @@ namespace ams::kern { u32 type_id; public: static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) { - if (lhs.address < rhs.address) { + if (lhs.GetAddress() < rhs.GetAddress()) { return -1; - } else if (lhs.address == rhs.address) { - return 0; - } else { + } else if (lhs.GetLastAddress() > rhs.GetLastAddress()) { return 1; + } else { + return 0; } } public: @@ -222,12 +222,11 @@ namespace ams::kern { constexpr ALWAYS_INLINE KMemoryBlockTree() : tree() { /* ... 
*/ } public: iterator FindContainingBlock(uintptr_t address) { - for (auto it = this->begin(); it != this->end(); it++) { - if (it->Contains(address)) { - return it; - } - } - MESOSPHERE_INIT_ABORT(); + auto it = this->find(KMemoryBlock(address, 1, 0, 0)); + MESOSPHERE_INIT_ABORT_UNLESS(it != this->end()); + MESOSPHERE_INIT_ABORT_UNLESS(it->Contains(address)); + + return it; } iterator FindFirstBlockByTypeAttr(u32 type_id, u32 attr = 0) { From 2faf3d33b52e93fac375619a865675f34a12bbc5 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 22:06:25 -0800 Subject: [PATCH 18/97] kern: Implement KAutoObject, KSlabHeap, KLightLock --- .../libmesosphere/include/mesosphere.hpp | 5 + .../include/mesosphere/kern_k_auto_object.hpp | 217 ++++++++++++++++++ .../kern_k_auto_object_container.hpp | 65 ++++++ .../include/mesosphere/kern_k_class_token.hpp | 127 ++++++++++ .../include/mesosphere/kern_k_light_lock.hpp | 73 ++++++ .../include/mesosphere/kern_k_slab_heap.hpp | 183 +++++++++++++++ .../include/mesosphere/kern_k_thread.hpp | 15 +- .../include/mesosphere/kern_kernel.hpp | 3 +- .../include/mesosphere/kern_slab_helpers.hpp | 116 ++++++++++ .../source/kern_k_auto_object.cpp | 25 ++ .../source/kern_k_auto_object_container.cpp | 51 ++++ .../source/kern_k_light_lock.cpp | 28 +++ .../libmesosphere/source/kern_kernel.cpp | 7 +- libraries/libmesosphere/source/kern_main.cpp | 9 + .../util/util_intrusive_red_black_tree.hpp | 2 +- .../source/arch/arm64/init/kern_init_core.cpp | 2 +- 16 files changed, 923 insertions(+), 5 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp create mode 
100644 libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp create mode 100644 libraries/libmesosphere/source/kern_k_auto_object.cpp create mode 100644 libraries/libmesosphere/source/kern_k_auto_object_container.cpp create mode 100644 libraries/libmesosphere/source/kern_k_light_lock.cpp diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index ea3442e7e..88e755c83 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -43,8 +43,13 @@ #include "mesosphere/kern_k_memory_manager.hpp" #include "mesosphere/kern_k_interrupt_task_manager.hpp" #include "mesosphere/kern_k_core_local_region.hpp" +#include "mesosphere/kern_k_slab_heap.hpp" +#include "mesosphere/kern_k_light_lock.hpp" #include "mesosphere/kern_kernel.hpp" +/* Auto Objects. */ +#include "mesosphere/kern_k_auto_object.hpp" + /* Supervisor Calls. */ #include "mesosphere/kern_svc.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp new file mode 100644 index 000000000..19bcf949d --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp @@ -0,0 +1,217 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KProcess; + + #define MESOSPHERE_AUTOOBJECT_TRAITS(CLASS) \ + private: \ + friend class KClassTokenGenerator; \ + static constexpr inline auto ObjectType = KClassTokenGenerator::ObjectType::CLASS; \ + static constexpr inline const char * const TypeName = #CLASS; \ + static constexpr inline ClassTokenType ClassToken = ClassToken; \ + public: \ + static constexpr ALWAYS_INLINE TypeObj GetStaticTypeObj() { return TypeObj(TypeName, ClassToken); } \ + static constexpr ALWAYS_INLINE const char *GetStaticTypeName() { return TypeName; } \ + virtual TypeObj GetTypeObj() const { return TypeObj(TypeName, ClassToken); } \ + virtual const char *GetTypeName() { return TypeName; } \ + private: + + + + class KAutoObject { + NON_COPYABLE(KAutoObject); + NON_MOVEABLE(KAutoObject); + protected: + class TypeObj { + private: + const char *name; + ClassTokenType class_token; + public: + constexpr explicit TypeObj(const char *n, ClassTokenType tok) : name(n), class_token(tok) { /* ... */ } + + constexpr ALWAYS_INLINE const char *GetName() const { return this->name; } + constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return this->class_token; } + + constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) { + return this->GetClassToken() == rhs.GetClassToken(); + } + + constexpr ALWAYS_INLINE bool operator!=(const TypeObj &rhs) { + return this->GetClassToken() != rhs.GetClassToken(); + } + + constexpr ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) { + return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken(); + } + }; + private: + std::atomic ref_count; + public: + static KAutoObject *Create(KAutoObject *ptr); + public: + constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { /* ... */ } + virtual ~KAutoObject() { /* ... */ } + + /* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. 
*/ + virtual void Destroy() { /* ... */ } + + /* Finalize is responsible for cleaning up resource, but does not destroy the object. */ + virtual void Finalize() { /* ... */ } + + virtual KProcess *GetOwner() const { return nullptr; } + + u32 GetReferenceCount() const { + return this->ref_count; + } + + ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const { + return this->GetTypeObj().IsDerivedFrom(rhs); + } + + ALWAYS_INLINE bool IsDerivedFrom(const KAutoObject &rhs) const { + return this->IsDerivedFrom(rhs.GetTypeObj()); + } + + template + ALWAYS_INLINE Derived DynamicCast() { + static_assert(std::is_pointer::value); + using DerivedType = typename std::remove_pointer::type; + + if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) { + return static_cast(this); + } else { + return nullptr; + } + } + + template + ALWAYS_INLINE const Derived DynamicCast() const { + static_assert(std::is_pointer::value); + using DerivedType = typename std::remove_pointer::type; + + if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) { + return static_cast(this); + } else { + return nullptr; + } + } + + ALWAYS_INLINE bool Open() { + /* Atomically increment the reference count, only if it's positive. */ + u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); + do { + if (AMS_UNLIKELY(cur_ref_count == 0)) { + return false; + } + MESOSPHERE_ABORT_UNLESS(cur_ref_count < cur_ref_count + 1); + } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count + 1, std::memory_order_relaxed)); + + return true; + } + + ALWAYS_INLINE void Close() { + /* Atomically decrement the reference count, not allowing it to become negative. */ + u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); + do { + MESOSPHERE_ABORT_UNLESS(cur_ref_count > 0); + } while (!this->ref_count.compare_exchange_weak(cur_ref_count, cur_ref_count - 1, std::memory_order_relaxed)); + + /* If ref count hits zero, destroy the object. 
*/ + if (cur_ref_count - 1 == 0) { + this->Destroy(); + } + } + + /* Ensure that we have functional type object getters. */ + MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject); + }; + + class KAutoObjectWithListContainer; + + class KAutoObjectWithList : public KAutoObject { + private: + friend class KAutoObjectWithListContainer; + private: + util::IntrusiveRedBlackTreeNode list_node; + public: + static ALWAYS_INLINE int Compare(const KAutoObjectWithList &lhs, const KAutoObjectWithList &rhs) { + const u64 lid = lhs.GetId(); + const u64 rid = rhs.GetId(); + + if (lid < rid) { + return -1; + } else if (lid > rid) { + return 1; + } else { + return 0; + } + } + public: + virtual u64 GetId() const { + return reinterpret_cast(this); + } + }; + + template + class KScopedAutoObject { + static_assert(std::is_base_of::value); + NON_COPYABLE(KScopedAutoObject); + private: + T *obj; + private: + constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) { + /* TODO: C++20 constexpr std::swap */ + T *tmp = rhs.obj; + rhs.obj = this->obj; + this->obj = tmp; + } + public: + constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) { /* ... 
*/ } + ALWAYS_INLINE ~KScopedAutoObject() { + if (this->obj != nullptr) { + this->obj->Close(); + } + this->obj = nullptr; + } + + constexpr ALWAYS_INLINE KScopedAutoObject(KScopedAutoObject &&rhs) { + this->obj = rhs.obj; + rhs.obj = nullptr; + } + + constexpr ALWAYS_INLINE KScopedAutoObject &operator=(KScopedAutoObject &&rhs) { + rhs.Swap(*this); + return *this; + } + + constexpr ALWAYS_INLINE T *operator->() { return this->obj; } + constexpr ALWAYS_INLINE T &operator*() { return *this->obj; } + + constexpr ALWAYS_INLINE void Reset(T *o) { + KScopedAutoObject(o).Swap(*this); + } + }; + + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp new file mode 100644 index 000000000..52504f667 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KAutoObjectWithListContainer { + NON_COPYABLE(KAutoObjectWithListContainer); + NON_MOVEABLE(KAutoObjectWithListContainer); + private: + using ListType = util::IntrusiveRedBlackTreeMemberTraits<&KAutoObjectWithList::list_node>::TreeType; + public: + class ListAccessor : public KScopedLightLock { + private: + ListType &list; + public: + explicit ListAccessor(KAutoObjectWithListContainer *container) : KScopedLightLock(container->lock), list(container->object_list) { /* ... */ } + explicit ListAccessor(KAutoObjectWithListContainer &container) : KScopedLightLock(container.lock), list(container.object_list) { /* ... */ } + + typename ListType::iterator begin() const { + return this->list.begin(); + } + + typename ListType::iterator end() const { + return this->list.end(); + } + + typename ListType::iterator find(typename ListType::const_reference ref) const { + return this->list.find(ref); + } + }; + + friend class ListAccessor; + private: + KLightLock lock; + ListType object_list; + public: + constexpr KAutoObjectWithListContainer() : lock(), object_list() { /* ... */ } + + void Initialize() { /* Nothing to do. */ } + void Finalize() { /* Nothing to do. */ } + + Result Register(KAutoObjectWithList *obj); + Result Unregister(KAutoObjectWithList *obj); + size_t GetOwnedCount(KProcess *owner); + }; + + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp new file mode 100644 index 000000000..b1640f6a8 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KAutoObject; + + class KClassTokenGenerator { + public: + using TokenBaseType = u16; + public: + static constexpr size_t BaseClassBits = 8; + static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits; + /* One bit per base class. */ + static constexpr size_t NumBaseClasses = BaseClassBits; + /* Final classes are permutations of three bits. */ + static constexpr size_t NumFinalClasses = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + index++; + } + } + } + return index; + }(); + private: + template + static constexpr inline TokenBaseType BaseClassToken = BIT(Index); + + template + static constexpr inline TokenBaseType FinalClassToken = [] { + TokenBaseType index = 0; + for (size_t i = 0; i < FinalClassBits; i++) { + for (size_t j = i + 1; j < FinalClassBits; j++) { + for (size_t k = j + 1; k < FinalClassBits; k++) { + if ((index++) == Index) { + return ((1ul << i) | (1ul << j) | (1ul << k)) << BaseClassBits; + } + } + } + } + __builtin_unreachable(); + }(); + + template + static constexpr inline TokenBaseType GetClassToken() { + static_assert(std::is_base_of::value); + if constexpr (std::is_same::value) { + static_assert(T::ObjectType == ObjectType::BaseClassesStart); + return BaseClassToken<0>; + } else if constexpr (!std::is_final::value) { + static_assert(ObjectType::BaseClassesStart < T::ObjectType && T::ObjectType < ObjectType::BaseClassesEnd); + 
constexpr auto ClassIndex = static_cast(T::ObjectType) - static_cast(ObjectType::BaseClassesStart); + return BaseClassToken | GetClassToken(); + } else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && T::ObjectType < ObjectType::FinalClassesEnd) { + constexpr auto ClassIndex = static_cast(T::ObjectType) - static_cast(ObjectType::FinalClassesStart); + return FinalClassToken | GetClassToken(); + } else { + static_assert(!std::is_same::value, "GetClassToken: Invalid Type"); + } + }; + public: + enum class ObjectType { + BaseClassesStart = 0, + + KAutoObject = BaseClassesStart, + KSynchronizationObject, + KReadableEvent, + + BaseClassesEnd, + + FinalClassesStart = BaseClassesEnd, + + KInterruptEvent = FinalClassesStart, + KDebug, + KThread, + KServerPort, + KServerSession, + KClientPort, + KClientSession, + KProcess, + KResourceLimit, + KLightSession, + KPort, + KSession, + KSharedMemory, + KEvent, + KWritableEvent, + KLightClientSession, + KLightServerSession, + KTransferMemory, + KDeviceAddressSpace, + KSessionRequest, + KCodeMemory, + + FinalClassesEnd = FinalClassesStart + NumFinalClasses, + }; + + template + static constexpr inline TokenBaseType ClassToken = GetClassToken(); + }; + + using ClassTokenType = KClassTokenGenerator::TokenBaseType; + + template + static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp new file mode 100644 index 000000000..eb0cab5e1 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KLightLock { + private: + std::atomic tag; + public: + constexpr KLightLock() : tag(0) { /* ... */ } + + void Lock() { + const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); + + while (true) { + uintptr_t old_tag = this->tag.load(std::memory_order_relaxed); + + while (!this->tag.compare_exchange_weak(old_tag, (old_tag == 0) ? cur_thread : old_tag | 1, std::memory_order_acquire)) { + /* ... */ + } + + if ((old_tag == 0) || ((old_tag | 1) == (cur_thread | 1))) { + break; + } + + this->LockSlowPath(old_tag | 1, cur_thread); + } + } + + void Unlock() { + const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); + uintptr_t expected = cur_thread; + if (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)) { + this->UnlockSlowPath(cur_thread); + } + } + + void LockSlowPath(uintptr_t owner, uintptr_t cur_thread); + void UnlockSlowPath(uintptr_t cur_thread); + }; + + class KScopedLightLock { + private: + KLightLock *lock; + public: + explicit ALWAYS_INLINE KScopedLightLock(KLightLock *l) : lock(l) { + this->lock->Lock(); + } + ALWAYS_INLINE ~KScopedLightLock() { + this->lock->Unlock(); + } + + explicit ALWAYS_INLINE KScopedLightLock(KLightLock &l) : KScopedLightLock(std::addressof(l)) { /* ... 
*/ } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp new file mode 100644 index 000000000..e504ef539 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + namespace impl { + + class KSlabHeapImpl { + NON_COPYABLE(KSlabHeapImpl); + NON_MOVEABLE(KSlabHeapImpl); + public: + struct Node { + Node *next; + }; + private: + std::atomic head; + size_t obj_size; + public: + constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { /* ... 
*/ } + + void Initialize(size_t size) { + MESOSPHERE_INIT_ABORT_UNLESS(this->head == nullptr); + this->obj_size = size; + } + + Node *GetHead() const { + return this->head; + } + + size_t GetObjectSize() const { + return this->obj_size; + } + + void *Allocate() { + Node *ret = this->head.load(); + + do { + if (AMS_UNLIKELY(ret == nullptr)) { + break; + } + } while (!this->head.compare_exchange_weak(ret, ret->next)); + + return ret; + } + + void Free(void *obj) { + Node *node = reinterpret_cast(obj); + + Node *cur_head = this->head.load(); + do { + node->next = cur_head; + } while (!this->head.compare_exchange_weak(cur_head, node)); + } + }; + + } + + class KSlabHeapBase { + NON_COPYABLE(KSlabHeapBase); + NON_MOVEABLE(KSlabHeapBase); + private: + using Impl = impl::KSlabHeapImpl; + private: + Impl impl; + uintptr_t peak; + uintptr_t start; + uintptr_t end; + private: + ALWAYS_INLINE Impl *GetImpl() { + return std::addressof(this->impl); + } + ALWAYS_INLINE const Impl *GetImpl() const { + return std::addressof(this->impl); + } + public: + constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { /* ... */ } + + ALWAYS_INLINE bool Contains(uintptr_t address) const { + return this->start <= address && address < this->end; + } + + void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) { + /* Ensure we don't initialize a slab using null memory. */ + MESOSPHERE_ABORT_UNLESS(memory != nullptr); + + /* Initialize the base allocator. */ + this->GetImpl()->Initialize(obj_size); + + /* Set our tracking variables. */ + const size_t num_obj = (memory_size / obj_size); + this->start = reinterpret_cast(memory); + this->end = this->start + num_obj * obj_size; + this->peak = this->start; + + /* Free the objects. 
*/ + u8 *cur = reinterpret_cast(this->end); + + for (size_t i = 0; i < num_obj; i++) { + cur -= obj_size; + this->GetImpl()->Free(cur); + } + } + + size_t GetSlabHeapSize() const { + return (this->end - this->start) / this->GetObjectSize(); + } + + size_t GetObjectSize() const { + return this->GetImpl()->GetObjectSize(); + } + + void *AllocateImpl() { + void *obj = this->GetImpl()->Allocate(); + + /* TODO: under some debug define, track the peak for statistics, as N does? */ + + return obj; + } + + void FreeImpl(void *obj) { + /* Don't allow freeing an object that wasn't allocated from this heap. */ + MESOSPHERE_ABORT_UNLESS(this->Contains(reinterpret_cast(obj))); + + this->GetImpl()->Free(obj); + } + + size_t GetObjectIndexImpl(const void *obj) const { + return (reinterpret_cast(obj) - this->start) / this->GetObjectSize(); + } + + size_t GetPeakIndex() const { + return this->GetObjectIndexImpl(reinterpret_cast(this->peak)); + } + + uintptr_t GetSlabHeapAddress() const { + return this->start; + } + }; + + template + class KSlabHeap : public KSlabHeapBase { + public: + constexpr KSlabHeap() : KSlabHeapBase() { /* ... */ } + + void Initialize(void *memory, size_t memory_size) { + this->InitializeImpl(sizeof(T), memory, memory_size); + } + + T *Allocate() { + T *obj = reinterpret_cast(this->AllocateImpl()); + if (AMS_LIKELY(obj != nullptr)) { + new (obj) T(); + } + return obj; + } + + void Free(T *obj) { + this->FreeImpl(obj); + } + + size_t GetObjectIndex(const T *obj) const { + return this->GetObjectIndexImpl(obj); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 8959a381e..b6f35639a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -14,11 +14,24 @@ * along with this program. If not, see . 
*/ #pragma once +#include namespace ams::kern { - class KThread { + class KThread : KAutoObjectWithSlabHeapAndContainer { + public: + struct StackParameters { + alignas(0x10) u8 svc_permission[0x10]; + std::atomic dpc_flags; + u8 current_svc_id; + bool is_calling_svc; + bool is_in_exception_handler; + bool has_exception_svc_perms; + s32 disable_count; + void *context; /* TODO: KThreadContext * */ + }; + static_assert(alignof(StackParameters) == 0x10); /* TODO: This should be a KAutoObject, and this is a placeholder definition. */ }; diff --git a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp index 4eb1e97d5..c1b95e15a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp @@ -31,7 +31,8 @@ namespace ams::kern { private: static inline State s_state = State::Invalid; public: - static void Initialize(s32 core_id); + static NOINLINE void Initialize(s32 core_id); + static NOINLINE void InitializeCoreThreads(s32 core_id); static ALWAYS_INLINE State GetState() { return s_state; } static ALWAYS_INLINE void SetState(State state) { s_state = state; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp new file mode 100644 index 000000000..31cf1613a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include + +namespace ams::kern { + + template + class KSlabAllocated { + private: + static inline KSlabHeap s_slab_heap; + public: + constexpr KSlabAllocated() { /* ... */ } + + size_t GetSlabIndex() const { + return s_slab_heap.GetIndex(static_cast(this)); + } + public: + static void InitializeSlabHeap(void *memory, size_t memory_size) { + s_slab_heap.Initialize(memory, memory_size); + } + + static ALWAYS_INLINE Derived *Allocate() { + return s_slab_heap.Allocate(); + } + + static ALWAYS_INLINE void Free(Derived *obj) { + s_slab_heap.Free(obj); + } + + static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); } + static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); } + static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); } + static uintptr_t GetSlabHeapAddress() { return s_slab_heap.GetSlabHeapAddress(); } + }; + + template + class KAutoObjectWithSlabHeapAndContainer : public Base { + static_assert(std::is_base_of::value); + private: + static inline KSlabHeap s_slab_heap; + static inline KAutoObjectWithListContainer s_container; + private: + static ALWAYS_INLINE Derived *Allocate() { + return s_slab_heap.Allocate(); + } + + static ALWAYS_INLINE void Free(Derived *obj) { + s_slab_heap.Free(obj); + } + public: + constexpr KAutoObjectWithSlabHeapAndContainer() : Base() { /* ... */ } + virtual ~KAutoObjectWithSlabHeapAndContainer() { /* ... 
*/ } + + virtual void Destroy() override { + const bool is_initialized = this->IsInitialized(); + uintptr_t arg = 0; + if (is_initialized) { + s_container.Unregister(this); + arg = this->GetPostDestroyArgument(); + this->Finalize(); + } + Free(static_cast(this)); + if (is_initialized) { + Derived::PostDestroy(arg); + } + } + + virtual bool IsInitialized() const { return true; } + virtual uintptr_t GetPostDestroyArgument() const { return 0; } + + size_t GetSlabIndex() const { + return s_slab_heap.GetIndex(static_cast(this)); + } + public: + static void InitializeSlabHeap(void *memory, size_t memory_size) { + s_slab_heap.Initialize(memory, memory_size); + s_container.Initialize(); + } + + static Derived *Create() { + Derived *obj = Allocate(); + if (AMS_LIKELY(obj != nullptr)) { + KAutoObject::Create(obj); + } + return obj; + } + + static Result Register(Derived *obj) { + return s_container.Register(obj); + } + + static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); } + static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); } + static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); } + static uintptr_t GetSlabHeapAddress() { return s_slab_heap.GetSlabHeapAddress(); } + }; + +} diff --git a/libraries/libmesosphere/source/kern_k_auto_object.cpp b/libraries/libmesosphere/source/kern_k_auto_object.cpp new file mode 100644 index 000000000..5a023d5bb --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_auto_object.cpp @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + KAutoObject *KAutoObject::Create(KAutoObject *obj) { + obj->ref_count = 1; + return obj; + } + +} diff --git a/libraries/libmesosphere/source/kern_k_auto_object_container.cpp b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp new file mode 100644 index 000000000..f6cea8023 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern { + + + Result KAutoObjectWithListContainer::Register(KAutoObjectWithList *obj) { + KScopedLightLock lk(this->lock); + + this->object_list.insert(*obj); + + return ResultSuccess(); + } + + Result KAutoObjectWithListContainer::Unregister(KAutoObjectWithList *obj) { + KScopedLightLock lk(this->lock); + + this->object_list.erase(this->object_list.iterator_to(*obj)); + + return ams::svc::ResultNotFound(); + } + + size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess *owner) { + KScopedLightLock lk(this->lock); + + size_t count = 0; + + for (auto &obj : this->object_list) { + if (obj.GetOwner() == owner) { + count++; + } + } + + return count; + } + +} diff --git a/libraries/libmesosphere/source/kern_k_light_lock.cpp b/libraries/libmesosphere/source/kern_k_light_lock.cpp new file mode 100644 index 000000000..b77bcf888 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_light_lock.cpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#include + +namespace ams::kern { + + void KLightLock::LockSlowPath(uintptr_t owner, uintptr_t cur_thread) { + /* TODO: Implement (requires KThread, KScheduler) */ + } + + void KLightLock::UnlockSlowPath(uintptr_t cur_thread) { + /* TODO: Implement (requires KThread, KScheduler) */ + } + +} diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp index e00cdb563..ea5b5a4f5 100644 --- a/libraries/libmesosphere/source/kern_kernel.cpp +++ b/libraries/libmesosphere/source/kern_kernel.cpp @@ -17,7 +17,7 @@ namespace ams::kern { - NOINLINE void Kernel::Initialize(s32 core_id) { + void Kernel::Initialize(s32 core_id) { /* Construct the core local region object in place. */ KCoreLocalContext *clc = GetPointer(KMemoryLayout::GetCoreLocalRegionAddress()); new (clc) KCoreLocalContext; @@ -46,4 +46,9 @@ namespace ams::kern { } } + void Kernel::InitializeCoreThreads(s32 core_id) { + /* TODO: This function wants to setup the main thread and the idle thread. */ + /* It also wants to initialize the scheduler/interrupt manager/hardware timer. */ + } + } diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp index 0906fe3fc..26a891bf5 100644 --- a/libraries/libmesosphere/source/kern_main.cpp +++ b/libraries/libmesosphere/source/kern_main.cpp @@ -25,6 +25,15 @@ namespace ams::kern { /* Ensure that all cores get to this point before proceeding. */ cpu::SynchronizeAllCores(); + /* Initialize the main and idle thread for each core. */ + /* Synchronize after each init to ensure the cores go in order. */ + for (size_t i = 0; i < cpu::NumCores; i++) { + if (static_cast(i) == core_id) { + Kernel::InitializeCoreThreads(core_id); + } + cpu::SynchronizeAllCores(); + } + /* TODO: Implement more of Main() */ while (true) { /* ... 
*/ } } diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp index ad2212076..93d244cb5 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_red_black_tree.hpp @@ -274,7 +274,7 @@ namespace ams::util { } private: static constexpr TYPED_STORAGE(Derived) DerivedStorage = {}; - static_assert(std::addressof(GetParent(GetNode(GetPointer(DerivedStorage)))) == GetPointer(DerivedStorage)); + static_assert(GetParent(GetNode(GetPointer(DerivedStorage))) == GetPointer(DerivedStorage)); }; template diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index 513727ab7..8c555c259 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -303,7 +303,7 @@ namespace ams::kern::init { init_args->cpuactlr = cpu::GetCpuActlrEl1(); init_args->cpuectlr = cpu::GetCpuEctlrEl1(); init_args->sctlr = cpu::GetSctlrEl1(); - init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(core_id)); + init_args->sp = GetInteger(KMemoryLayout::GetMainStackTopAddress(core_id)) - sizeof(KThread::StackParameters); init_args->entrypoint = reinterpret_cast(::ams::kern::HorizonKernelMain); init_args->argument = static_cast(core_id); init_args->setup_function = reinterpret_cast(::ams::kern::init::StartOtherCore); From 7d6b16d7fb0965e578b6862defc27437abd512ca Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Wed, 29 Jan 2020 23:46:55 -0800 Subject: [PATCH 19/97] kern: Skeleton KSynchronizationObject --- .../include/mesosphere/kern_k_auto_object.hpp | 26 +- .../include/mesosphere/kern_k_linked_list.hpp | 225 ++++++++++++++++++ .../kern_k_synchronization_object.hpp | 52 ++++ .../include/mesosphere/kern_k_thread.hpp | 7 +- 
.../source/kern_k_synchronization_object.cpp | 54 +++++ .../vapours/util/util_intrusive_list.hpp | 2 +- 6 files changed, 350 insertions(+), 16 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp create mode 100644 libraries/libmesosphere/source/kern_k_synchronization_object.cpp diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp index 19bcf949d..ea38cc063 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp @@ -23,24 +23,27 @@ namespace ams::kern { class KProcess; - #define MESOSPHERE_AUTOOBJECT_TRAITS(CLASS) \ + #define MESOSPHERE_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \ + NON_COPYABLE(CLASS); \ + NON_MOVEABLE(CLASS); \ private: \ friend class KClassTokenGenerator; \ - static constexpr inline auto ObjectType = KClassTokenGenerator::ObjectType::CLASS; \ + static constexpr inline auto ObjectType = ::ams::kern::KClassTokenGenerator::ObjectType::CLASS; \ static constexpr inline const char * const TypeName = #CLASS; \ - static constexpr inline ClassTokenType ClassToken = ClassToken; \ + static constexpr inline ClassTokenType ClassToken() { return ::ams::kern::ClassToken; } \ public: \ - static constexpr ALWAYS_INLINE TypeObj GetStaticTypeObj() { return TypeObj(TypeName, ClassToken); } \ + using BaseClass = BASE_CLASS; \ + static constexpr ALWAYS_INLINE TypeObj GetStaticTypeObj() { \ + constexpr ClassTokenType Token = ClassToken(); \ + return TypeObj(TypeName, Token); \ + } \ static constexpr ALWAYS_INLINE const char *GetStaticTypeName() { return TypeName; } \ - virtual TypeObj GetTypeObj() const { return TypeObj(TypeName, ClassToken); } \ - virtual const char *GetTypeName() { return TypeName; } \ + virtual TypeObj GetTypeObj() const { return 
GetStaticTypeObj(); } \ + virtual const char *GetTypeName() { return GetStaticTypeName(); } \ private: - class KAutoObject { - NON_COPYABLE(KAutoObject); - NON_MOVEABLE(KAutoObject); protected: class TypeObj { private: @@ -64,6 +67,8 @@ namespace ams::kern { return (this->GetClassToken() | rhs.GetClassToken()) == this->GetClassToken(); } }; + private: + MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject); private: std::atomic ref_count; public: @@ -141,9 +146,6 @@ namespace ams::kern { this->Destroy(); } } - - /* Ensure that we have functional type object getters. */ - MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject); }; class KAutoObjectWithListContainer; diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp new file mode 100644 index 000000000..80a5f463f --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KLinkedListNode : public util::IntrusiveListBaseNode, public KSlabAllocated { + private: + void *item; + public: + constexpr KLinkedListNode() : util::IntrusiveListBaseNode(), item(nullptr) { /* ... 
*/ } + + constexpr void Initialize(void *it) { + this->item = it; + } + + constexpr void *GetItem() const { + return this->item; + } + }; + static_assert(sizeof(KLinkedListNode) == sizeof(util::IntrusiveListNode) + sizeof(void *)); + + template + class KLinkedList : private util::IntrusiveListBaseTraits::ListType { + private: + using BaseList = util::IntrusiveListBaseTraits::ListType; + public: + template + class Iterator; + + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + using pointer = value_type *; + using const_pointer = const value_type *; + using reference = value_type &; + using const_reference = const value_type &; + using iterator = Iterator; + using const_iterator = Iterator; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + + template + class Iterator { + private: + using BaseIterator = BaseList::Iterator; + friend class KLinkedList; + public: + using iterator_category = std::bidirectional_iterator_tag; + using value_type = typename KLinkedList::value_type; + using difference_type = typename KLinkedList::difference_type; + using pointer = typename std::conditional::type; + using reference = typename std::conditional::type; + private: + BaseIterator base_it; + public: + explicit Iterator(BaseIterator it) : base_it(it) { /* ... 
*/ } + + T *GetItem() const { + static_cast(this->base_it->GetItem()); + } + + bool operator==(const Iterator &rhs) const { + return this->base_it == rhs.base_it; + } + + bool operator!=(const Iterator &rhs) const { + return !(*this == rhs); + } + + pointer operator->() const { + return this->GetItem(); + } + + reference operator*() const { + return *this->GetItem(); + } + + Iterator &operator++() { + ++this->base_it; + return *this; + } + + Iterator &operator--() { + --this->base_it; + return *this; + } + + Iterator operator++(int) { + const Iterator it{*this}; + ++(*this); + return it; + } + + Iterator operator--(int) { + const Iterator it{*this}; + --(*this); + return it; + } + + operator Iterator() const { + return Iterator(this->base_it); + } + }; + public: + constexpr KLinkedList() : BaseList() { /* ... */ } + + /* Iterator accessors. */ + iterator begin() { + return iterator(BaseList::begin()); + } + + const_iterator begin() const { + return const_iterator(BaseList::begin()); + } + + iterator end() { + return iterator(BaseList::end()); + } + + const_iterator end() const { + return const_iterator(BaseList::end()); + } + + const_iterator cbegin() const { + return this->begin(); + } + + const_iterator cend() const { + return this->end(); + } + + reverse_iterator rbegin() { + return reverse_iterator(this->end()); + } + + const_reverse_iterator rbegin() const { + return const_reverse_iterator(this->end()); + } + + reverse_iterator rend() { + return reverse_iterator(this->begin()); + } + + const_reverse_iterator rend() const { + return const_reverse_iterator(this->begin()); + } + + const_reverse_iterator crbegin() const { + return this->rbegin(); + } + + const_reverse_iterator crend() const { + return this->rend(); + } + + /* Content management. 
*/ + using BaseList::empty; + using BaseList::size; + + reference back() { + return *(--this->end()); + } + + const_reference back() const { + return *(--this->end()); + } + + reference front() { + return *this->begin(); + } + + const_reference front() const { + return *this->begin(); + } + + iterator insert(const_iterator pos, reference ref) { + KLinkedListNode *node = KLinkedListNode::Allocate(); + MESOSPHERE_ABORT_UNLESS(node != nullptr); + node->Initialize(std::addressof(ref)); + return iterator(BaseList::insert(pos.base_it, *node)); + } + + void push_back(reference ref) { + this->insert(this->end(), ref); + } + + void push_front(reference ref) { + this->insert(this->begin(), ref); + } + + void pop_back() { + this->erase(--this->end()); + } + + void pop_front() { + this->erase(this->begin()); + } + + iterator erase(const iterator pos) { + KLinkedListNode *freed_node = std::addressof(*pos.base_it); + iterator ret = iterator(BaseList::erase(pos.base_it)); + KLinkedListNode::Free(freed_node); + + return ret; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp new file mode 100644 index 000000000..929aaca66 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KThread; + + class KSynchronizationObject : public KAutoObjectWithList { + MESOSPHERE_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject); + public: + using ThreadList = KLinkedList; + using iterator = ThreadList::iterator; + private: + ThreadList thread_list; + protected: + constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), thread_list() { /* ... */ } + virtual ~KSynchronizationObject() { /* ... */ } + + virtual void OnFinalizeSynchronizationObject() { /* ... */ } + + void NotifyAvailable(); + void NotifyAbort(Result abort_reason); + public: + virtual void Finalize() override; + virtual bool IsSignaled() const = 0; + virtual void DebugWaiters(); + + iterator AddWaiterThread(KThread *thread); + iterator RemoveWaiterThread(iterator it); + + iterator begin(); + iterator end(); + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index b6f35639a..c1345698a 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -15,11 +15,12 @@ */ #pragma once #include +#include namespace ams::kern { - - class KThread : KAutoObjectWithSlabHeapAndContainer { + class KThread final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject); public: struct StackParameters { alignas(0x10) u8 svc_permission[0x10]; @@ -32,7 +33,7 @@ namespace ams::kern { void *context; /* TODO: KThreadContext * */ }; static_assert(alignof(StackParameters) == 0x10); - /* TODO: This should be a KAutoObject, and this is a placeholder definition. */ + /* TODO: This is a placeholder definition. 
*/ }; } diff --git a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp new file mode 100644 index 000000000..942bf2759 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + void NotifyAvailable() { + /* TODO: Implement this. */ + MESOSPHERE_ABORT(); + } + + void NotifyAbort(Result abort_reason) { + MESOSPHERE_ABORT(); + } + + void KSynchronizationObject::Finalize() { + this->OnFinalizeSynchronizationObject(); + KAutoObject::Finalize(); + } + + void KSynchronizationObject::DebugWaiters() { + /* TODO: Do useful debug operation here. 
*/ + } + + KSynchronizationObject::iterator KSynchronizationObject::AddWaiterThread(KThread *thread) { + return this->thread_list.insert(this->thread_list.end(), *thread); + } + + KSynchronizationObject::iterator KSynchronizationObject::RemoveWaiterThread(KSynchronizationObject::iterator it) { + return this->thread_list.erase(it); + } + + KSynchronizationObject::iterator KSynchronizationObject::begin() { + return this->thread_list.begin(); + } + + KSynchronizationObject::iterator KSynchronizationObject::end() { + return this->thread_list.end(); + } + +} diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp index 0c1cf1468..3a88f6f03 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp @@ -183,7 +183,7 @@ namespace ams::util { } }; public: - IntrusiveListImpl() : root_node() { /* ... */ } + constexpr IntrusiveListImpl() : root_node() { /* ... */ } /* Iterator accessors. 
*/ iterator begin() { From d5a4c17ee787f86eba3d1b6804959221d41b84e3 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 30 Jan 2020 01:41:59 -0800 Subject: [PATCH 20/97] kern: fully implement slabheap init --- .../libmesosphere/include/mesosphere.hpp | 1 + .../arch/arm64/kern_k_spin_lock.hpp | 16 +- .../nintendo/switch/kern_k_system_control.hpp | 4 + .../mesosphere/init/kern_init_slab_setup.hpp | 45 +++++ .../mesosphere/kern_k_memory_layout.hpp | 6 +- .../include/mesosphere/kern_k_spin_lock.hpp | 5 + .../nintendo/switch/kern_k_system_control.cpp | 26 +++ .../nintendo/switch/kern_secure_monitor.cpp | 27 ++- .../nintendo/switch/kern_secure_monitor.hpp | 1 + .../source/init/kern_init_slab_setup.cpp | 190 ++++++++++++++++++ libraries/libmesosphere/source/kern_main.cpp | 7 + .../source/arch/arm64/init/kern_init_core.cpp | 4 +- 12 files changed, 319 insertions(+), 13 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp create mode 100644 libraries/libmesosphere/source/init/kern_init_slab_setup.cpp diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index 88e755c83..e00f1659b 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -32,6 +32,7 @@ /* Initialization headers. 
*/ #include "mesosphere/init/kern_init_elf.hpp" #include "mesosphere/init/kern_init_layout.hpp" +#include "mesosphere/init/kern_init_slab_setup.hpp" #include "mesosphere/init/kern_init_page_table_select.hpp" #include "mesosphere/init/kern_init_arguments_select.hpp" #include "mesosphere/kern_k_memory_layout.hpp" diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp index adf87720e..303a3702a 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp @@ -30,21 +30,21 @@ namespace ams::kern::arm64 { __asm__ __volatile__( " prfm pstl1keep, %[packed_tickets]\n" - "loop1:\n" + "1:\n" " ldaxr %w[tmp0], %[packed_tickets]\n" " add %w[tmp0], %w[tmp0], #0x10000\n" " stxr %w[tmp1], %w[tmp0], %[packed_tickets]\n" - " cbnz %w[tmp1], loop1\n" + " cbnz %w[tmp1], 1b\n" " \n" " and %w[tmp1], %w[tmp0], #0xFFFF\n" " cmp %w[tmp1], %w[tmp0], lsr #16\n" " b.eq done" " sevl\n" - "loop2:\n" + "2:\n" " wfe\n" " ldaxrh %w[tmp1], %[packed_tickets]\n" " cmp %w[tmp1], %w[tmp0], lsr #16\n" - " b.ne loop2\n" + " b.ne 2b\n" "done:\n" : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [packed_tickets]"+Q"(this->packed_tickets) : @@ -76,18 +76,18 @@ namespace ams::kern::arm64 { __asm__ __volatile__( " prfm pstl1keep, %[next_ticket]\n" - "loop1:\n" + "1:\n" " ldaxrh %w[tmp0], %[next_ticket]\n" " add %w[tmp1], %w[tmp0], #0x1\n" " stxrh %w[got_lock], %w[tmp1], %[next_ticket]\n" - " cbnz %w[got_lock], loop1\n" + " cbnz %w[got_lock], 1b\n" " \n" " sevl\n" - "loop2:\n" + "2:\n" " wfe\n" " ldaxrh %w[tmp1], %[current_ticket]\n" " cmp %w[tmp1], %w[tmp0]\n" - " b.ne loop2\n" + " b.ne 2b\n" : [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [next_ticket]"+Q"(this->next_ticket) : [current_ticket]"Q"(this->current_ticket) : "cc", "memory" diff --git 
a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp index f568ac270..88e2b8374 100644 --- a/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/board/nintendo/switch/kern_k_system_control.hpp @@ -36,6 +36,10 @@ namespace ams::kern { static u64 GenerateRandomRange(u64 min, u64 max); }; public: + /* Randomness. */ + static void GenerateRandomBytes(void *dst, size_t size); + static u64 GenerateRandomRange(u64 min, u64 max); + /* Panic. */ static NORETURN void StopSystem(); }; diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp new file mode 100644 index 000000000..cec7a44ab --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_slab_setup.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern::init { + + struct KSlabResourceCounts { + size_t num_KProcess; + size_t num_KThread; + size_t num_KEvent; + size_t num_KInterruptEvent; + size_t num_KPort; + size_t num_KSharedMemory; + size_t num_KTransferMemory; + size_t num_KCodeMemory; + size_t num_KDeviceAddressSpace; + size_t num_KSession; + size_t num_KLightSession; + size_t num_KObjectName; + size_t num_KResourceLimit; + size_t num_KDebug; + }; + + NOINLINE void InitializeSlabResourceCounts(); + const KSlabResourceCounts &GetSlabResourceCounts(); + + size_t CalculateTotalSlabHeapSize(); + NOINLINE void InitializeSlabHeaps(); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index 1db2aa76d..c72eee498 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -409,8 +409,12 @@ namespace ams::kern { return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscExceptionStack, static_cast(core_id))->GetAddress(); } + static NOINLINE KVirtualAddress GetSlabRegionAddress() { + return GetVirtualMemoryBlockTree().FindFirstBlockByType(KMemoryRegionType_KernelSlab)->GetAddress(); + } + static NOINLINE KVirtualAddress GetCoreLocalRegionAddress() { - return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_CoreLocal)->GetAddress(); + return GetVirtualMemoryBlockTree().FindFirstBlockByType(KMemoryRegionType_CoreLocal)->GetAddress(); } static void InitializeLinearMemoryBlockTrees(KPhysicalAddress aligned_linear_phys_start, KVirtualAddress linear_virtual_start); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp index 55706313c..915c94fe9 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp +++ 
b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp @@ -45,6 +45,8 @@ namespace ams::kern { ALWAYS_INLINE ~KScopedSpinLock() { this->lock_ptr->Unlock(); } + + explicit ALWAYS_INLINE KScopedSpinLock(KSpinLock &l) : KScopedSpinLock(std::addressof(l)) { /* ... */ } }; class KScopedAlignedSpinLock { @@ -57,6 +59,7 @@ namespace ams::kern { ALWAYS_INLINE ~KScopedAlignedSpinLock() { this->lock_ptr->Unlock(); } + explicit ALWAYS_INLINE KScopedAlignedSpinLock(KAlignedSpinLock &l) : KScopedAlignedSpinLock(std::addressof(l)) { /* ... */ } }; class KScopedNotAlignedSpinLock { @@ -69,6 +72,8 @@ namespace ams::kern { ALWAYS_INLINE ~KScopedNotAlignedSpinLock() { this->lock_ptr->Unlock(); } + + explicit ALWAYS_INLINE KScopedNotAlignedSpinLock(KNotAlignedSpinLock &l) : KScopedNotAlignedSpinLock(std::addressof(l)) { /* ... */ } }; } diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp index 6a73bad56..5f501b6d2 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_k_system_control.cpp @@ -20,6 +20,12 @@ namespace ams::kern { namespace { + /* Global variables for randomness. */ + /* Incredibly, N really does use std:: randomness... */ + bool g_initialized_random_generator; + std::mt19937 g_random_generator; + KSpinLock g_random_lock; + ALWAYS_INLINE size_t GetRealMemorySizeForInit() { /* TODO: Move this into a header for the MC in general. */ constexpr u32 MemoryControllerConfigurationRegister = 0x70019050; @@ -154,6 +160,26 @@ namespace ams::kern { } } + /* Randomness. 
*/ + void KSystemControl::GenerateRandomBytes(void *dst, size_t size) { + MESOSPHERE_INIT_ABORT_UNLESS(size <= 0x38); + smc::GenerateRandomBytes(dst, size); + } + + u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) { + KScopedInterruptDisable intr_disable; + KScopedSpinLock lk(g_random_lock); + + if (AMS_UNLIKELY(!g_initialized_random_generator)) { + u64 seed; + GenerateRandomBytes(&seed, sizeof(seed)); + g_random_generator.seed(seed); + g_initialized_random_generator = true; + } + + return (std::uniform_int_distribution(min, max))(g_random_generator); + } + void KSystemControl::StopSystem() { /* Display a panic screen via exosphere. */ smc::Panic(0xF00); diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp index c57143260..8eb223ab0 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.cpp @@ -55,7 +55,9 @@ namespace ams::kern::smc { : : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory" ); - /* TODO: Restore X18 */ + + /* Restore the CoreLocalRegion into X18. */ + cpu::SetCoreLocalRegionAddress(cpu::GetTpidrEl1()); } /* Store arguments to output. */ @@ -98,6 +100,9 @@ namespace ams::kern::smc { args.x[7] = x7; } + /* Global lock for generate random bytes. */ + KSpinLock g_generate_random_lock; + } /* SMC functionality needed for init. */ @@ -119,9 +124,9 @@ namespace ams::kern::smc { void GenerateRandomBytes(void *dst, size_t size) { /* Call SmcGenerateRandomBytes() */ - /* TODO: Lock this to ensure only one core calls at once. 
*/ SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size }; MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0])); + CallPrivilegedSecureMonitorFunctionForInit(args); MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); @@ -138,6 +143,24 @@ namespace ams::kern::smc { } + + void GenerateRandomBytes(void *dst, size_t size) { + /* Setup for call. */ + SecureMonitorArguments args = { FunctionId_GenerateRandomBytes, size }; + MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.x[0])); + + /* Make call. */ + { + KScopedInterruptDisable intr_disable; + KScopedSpinLock lk(g_generate_random_lock); + CallPrivilegedSecureMonitorFunction(args); + } + MESOSPHERE_ABORT_UNLESS((static_cast(args.x[0]) == SmcResult::Success)); + + /* Copy output. */ + std::memcpy(dst, &args.x[1], size); + } + void NORETURN Panic(u32 color) { SecureMonitorArguments args = { FunctionId_Panic, color }; CallPrivilegedSecureMonitorFunction(args); diff --git a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp index 4d0a43315..d6bce37f2 100644 --- a/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp +++ b/libraries/libmesosphere/source/board/nintendo/switch/kern_secure_monitor.hpp @@ -84,6 +84,7 @@ namespace ams::kern::smc { }; /* TODO: Rest of Secure Monitor API. 
*/ + void GenerateRandomBytes(void *dst, size_t size); void NORETURN Panic(u32 color); namespace init { diff --git a/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp new file mode 100644 index 000000000..a4dcb37f3 --- /dev/null +++ b/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::init { + + #define SLAB_COUNT(CLASS) g_slab_resource_counts.num_##CLASS + + #define FOREACH_SLAB_TYPE(HANDLER, ...) 
\ + HANDLER(KProcess, (SLAB_COUNT(KProcess)), ## __VA_ARGS__) \ + HANDLER(KThread, (SLAB_COUNT(KThread)), ## __VA_ARGS__) \ + HANDLER(KLinkedListNode, (SLAB_COUNT(KThread) * 17), ## __VA_ARGS__) \ + HANDLER(KEvent, (SLAB_COUNT(KEvent)), ## __VA_ARGS__) \ + HANDLER(KInterruptEvent, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \ + HANDLER(KInterruptEventTask, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \ + HANDLER(KPort, (SLAB_COUNT(KPort)), ## __VA_ARGS__) \ + HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ## __VA_ARGS__) \ + HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ## __VA_ARGS__) \ + HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ## __VA_ARGS__) \ + HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ## __VA_ARGS__) \ + HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ## __VA_ARGS__) \ + HANDLER(KSession, (SLAB_COUNT(KSession)), ## __VA_ARGS__) \ + HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ## __VA_ARGS__) \ + HANDLER(KLightSession, (SLAB_COUNT(KLightSession)), ## __VA_ARGS__) \ + HANDLER(KThreadLocalPage, (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), ## __VA_ARGS__) \ + HANDLER(KObjectName, (SLAB_COUNT(KObjectName)), ## __VA_ARGS__) \ + HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ## __VA_ARGS__) \ + HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ## __VA_ARGS__) \ + HANDLER(KDebug, (SLAB_COUNT(KDebug)), ## __VA_ARGS__) + + namespace { + + + #define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME, + + enum KSlabType : u32 { + FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER) + KSlabType_Count, + }; + + #undef DEFINE_SLAB_TYPE_ENUM_MEMBER + + /* Constexpr counts. 
*/ + constexpr size_t SlabCountKProcess = 80; + constexpr size_t SlabCountKThread = 800; + constexpr size_t SlabCountKEvent = 700; + constexpr size_t SlabCountKInterruptEvent = 100; + constexpr size_t SlabCountKPort = 256; + constexpr size_t SlabCountKSharedMemory = 80; + constexpr size_t SlabCountKTransferMemory = 200; + constexpr size_t SlabCountKCodeMemory = 10; + constexpr size_t SlabCountKDeviceAddressSpace = 300; + constexpr size_t SlabCountKSession = 900; + constexpr size_t SlabCountKLightSession = 100; + constexpr size_t SlabCountKObjectName = 7; + constexpr size_t SlabCountKResourceLimit = 5; + constexpr size_t SlabCountKDebug = cpu::NumCores; + + constexpr size_t SlabCountExtraKThread = 160; + + /* This is used for gaps between the slab allocators. */ + constexpr size_t SlabRegionReservedSize = 2_MB; + + /* Global to hold our resource counts. */ + KSlabResourceCounts g_slab_resource_counts = { + .num_KProcess = SlabCountKProcess, + .num_KThread = SlabCountKThread, + .num_KEvent = SlabCountKEvent, + .num_KInterruptEvent = SlabCountKInterruptEvent, + .num_KPort = SlabCountKPort, + .num_KSharedMemory = SlabCountKSharedMemory, + .num_KTransferMemory = SlabCountKTransferMemory, + .num_KCodeMemory = SlabCountKCodeMemory, + .num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace, + .num_KSession = SlabCountKSession, + .num_KLightSession = SlabCountKLightSession, + .num_KObjectName = SlabCountKObjectName, + .num_KResourceLimit = SlabCountKResourceLimit, + .num_KDebug = SlabCountKDebug, + }; + + template + NOINLINE KVirtualAddress InitializeSlabHeap(KVirtualAddress address, size_t num_objects) { + const size_t size = util::AlignUp(sizeof(T) * num_objects, alignof(void *)); + KVirtualAddress start = util::AlignUp(GetInteger(address), alignof(T)); + + if (size > 0) { + MESOSPHERE_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryBlockTree().FindContainingBlock(GetInteger(start) + size - 1)->IsDerivedFrom(KMemoryRegionType_KernelSlab)); + 
T::InitializeSlabHeap(GetVoidPointer(start), size); + } + + return start + size; + } + + } + + + const KSlabResourceCounts &GetSlabResourceCounts() { + return g_slab_resource_counts; + } + + void InitializeSlabResourceCounts() { + /* Note: Nintendo initializes all fields here, but we initialize all constants at compile-time. */ + if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) { + g_slab_resource_counts.num_KThread += SlabCountExtraKThread; + } + } + + size_t CalculateTotalSlabHeapSize() { + size_t size = 0; + + #define ADD_SLAB_SIZE(NAME, COUNT, ...) ({ \ + size += alignof(NAME); \ + size += util::AlignUp(sizeof(NAME) * (COUNT), alignof(void *)); \ + }); + + /* NOTE: This can't be used right now because we don't have all these types implemented. */ + /* Once we do, uncomment the following and stop using the hardcoded size. */ + /* TODO: FOREACH_SLAB_TYPE(ADD_SLAB_SIZE) */ + size = 0x647000; + + return size; + } + + void InitializeSlabHeaps() { + /* Get the start of the slab region, since that's where we'll be working. */ + KVirtualAddress address = KMemoryLayout::GetSlabRegionAddress(); + + /* Initialize slab type array to be in sorted order. */ + KSlabType slab_types[KSlabType_Count]; + for (size_t i = 0; i < util::size(slab_types); i++) { slab_types[i] = static_cast(i); } + + /* N shuffles the slab type array with the following simple algorithm. */ + for (size_t i = 0; i < util::size(slab_types); i++) { + const size_t rnd = KSystemControl::GenerateRandomRange(0, util::size(slab_types)); + std::swap(slab_types[i], slab_types[rnd]); + } + + /* Create an array to represent the gaps between the slabs. */ + size_t slab_gaps[util::size(slab_types)]; + for (size_t i = 0; i < util::size(slab_gaps); i++) { + /* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */ + /* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. 
*/ + slab_gaps[i] = KSystemControl::GenerateRandomRange(0, SlabRegionReservedSize); + } + + /* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */ + for (size_t i = 1; i < util::size(slab_gaps); i++) { + for (size_t j = i; j > 0 && slab_gaps[j-1] > slab_gaps[j]; j--) { + std::swap(slab_gaps[j], slab_gaps[j-1]); + } + } + + for (size_t i = 0; i < util::size(slab_types); i++) { + /* Add the random gap to the address. */ + address += (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1]; + + #define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \ + case KSlabType_##NAME: \ + address = InitializeSlabHeap(address, COUNT); \ + break; + + /* Initialize the slabheap. */ + switch (slab_types[i]) { + /* NOTE: This can't be used right now because we don't have all these types implemented. */ + /* Once we do, uncomment the following. */ + /* TODO: FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP) */ + case KSlabType_KThread: + address = InitializeSlabHeap(address, SLAB_COUNT(KThread)); + break; + default: + MESOSPHERE_ABORT(); + } + } + } + +} \ No newline at end of file diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp index 26a891bf5..bad569bfc 100644 --- a/libraries/libmesosphere/source/kern_main.cpp +++ b/libraries/libmesosphere/source/kern_main.cpp @@ -34,6 +34,13 @@ namespace ams::kern { cpu::SynchronizeAllCores(); } + if (core_id == 0) { + /* Note: this is not actually done here, it's done later in main after more stuff is setup. */ + /* However, for testing (and to manifest this code in the produced binary, this is here for now. */ + /* TODO: Do this better. */ + init::InitializeSlabHeaps(); + } + /* TODO: Implement more of Main() */ while (true) { /* ... 
*/ } } diff --git a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp index 8c555c259..9f011c316 100644 --- a/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp +++ b/mesosphere/kernel/source/arch/arm64/init/kern_init_core.cpp @@ -96,7 +96,7 @@ namespace ams::kern::init { KInitialPageTable ttbr1_table(util::AlignDown(cpu::GetTtbr1El1(), PageSize), KInitialPageTable::NoClear{}); /* Initialize the slab allocator counts. */ - /* TODO */ + InitializeSlabResourceCounts(); /* Insert the root block for the virtual memory tree, from which all other blocks will derive. */ KMemoryLayout::GetVirtualMemoryBlockTree().insert(*KMemoryLayout::GetMemoryBlockAllocator().Create(KernelVirtualAddressSpaceBase, KernelVirtualAddressSpaceSize, 0, 0)); @@ -142,7 +142,7 @@ namespace ams::kern::init { const size_t resource_region_size = KernelResourceRegionSize + (use_extra_resources ? ExtraKernelResourceSize : 0); /* Determine the size of the slab region. */ - const size_t slab_region_size = 0x647000; /* TODO: Calculate this on the fly. */ + const size_t slab_region_size = CalculateTotalSlabHeapSize(); MESOSPHERE_INIT_ABORT_UNLESS(slab_region_size <= resource_region_size); /* Setup the slab region. 
*/ From 484f1326516d11c91cca6d9745ca52d79a23531e Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 30 Jan 2020 15:29:51 -0800 Subject: [PATCH 21/97] kern: implement KHandleTable, other cleanup --- .../libmesosphere/include/mesosphere.hpp | 48 +-- .../arm64/init/kern_k_init_page_table.hpp | 2 +- .../mesosphere/arch/arm64/kern_cpu.hpp | 2 +- .../init/kern_init_arguments_select.hpp | 2 +- .../include/mesosphere/init/kern_init_elf.hpp | 20 +- .../init/kern_init_page_table_select.hpp | 2 +- .../include/mesosphere/kern_common.hpp | 19 ++ .../mesosphere/kern_initial_process.hpp | 3 +- .../include/mesosphere/kern_k_auto_object.hpp | 21 +- .../kern_k_auto_object_container.hpp | 8 +- .../include/mesosphere/kern_k_class_token.hpp | 3 +- .../mesosphere/kern_k_core_local_region.hpp | 2 +- .../mesosphere/kern_k_handle_table.hpp | 283 ++++++++++++++++++ .../include/mesosphere/kern_k_light_lock.hpp | 6 +- .../include/mesosphere/kern_k_linked_list.hpp | 5 +- .../mesosphere/kern_k_memory_layout.hpp | 2 +- .../mesosphere/kern_k_memory_manager.hpp | 2 +- .../include/mesosphere/kern_k_page_heap.hpp | 2 +- .../include/mesosphere/kern_k_slab_heap.hpp | 17 +- .../include/mesosphere/kern_k_spin_lock.hpp | 3 +- .../kern_k_synchronization_object.hpp | 6 +- .../include/mesosphere/kern_k_thread.hpp | 22 ++ .../include/mesosphere/kern_k_timer_task.hpp | 2 +- .../mesosphere/kern_k_typed_address.hpp | 2 +- .../include/mesosphere/kern_main.hpp | 2 +- .../include/mesosphere/kern_panic.hpp | 10 +- .../include/mesosphere/kern_select_cpu.hpp | 2 +- .../mesosphere/kern_select_hardware_timer.hpp | 3 +- .../kern_select_interrupt_controller.hpp | 3 +- .../kern_select_interrupt_manager.hpp | 3 +- .../kern_select_k_system_control.hpp | 2 +- .../include/mesosphere/kern_slab_helpers.hpp | 3 +- .../include/mesosphere/kern_svc.hpp | 7 +- .../svc/kern_svc_k_user_pointer.hpp | 1 + .../mesosphere/svc/kern_svc_prototypes.hpp | 3 +- .../mesosphere/svc/kern_svc_results.hpp | 70 +++++ 
.../mesosphere/svc/kern_svc_tables.hpp | 1 + .../source/kern_k_auto_object_container.cpp | 6 + .../source/kern_k_handle_table.cpp | 155 ++++++++++ .../source/kern_k_synchronization_object.cpp | 17 ++ .../include/vapours/svc/svc_common.hpp | 27 +- 41 files changed, 710 insertions(+), 89 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp create mode 100644 libraries/libmesosphere/source/kern_k_handle_table.cpp diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index e00f1659b..6f829e2c1 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -19,40 +19,42 @@ #include /* First, pull in core macros (panic, etc). */ -#include "mesosphere/kern_panic.hpp" +#include +#include /* Primitive types. */ -#include "mesosphere/kern_k_typed_address.hpp" -#include "mesosphere/kern_initial_process.hpp" +#include +#include /* Core pre-initialization includes. */ -#include "mesosphere/kern_select_cpu.hpp" -#include "mesosphere/kern_select_k_system_control.hpp" +#include +#include /* Initialization headers. */ -#include "mesosphere/init/kern_init_elf.hpp" -#include "mesosphere/init/kern_init_layout.hpp" -#include "mesosphere/init/kern_init_slab_setup.hpp" -#include "mesosphere/init/kern_init_page_table_select.hpp" -#include "mesosphere/init/kern_init_arguments_select.hpp" -#include "mesosphere/kern_k_memory_layout.hpp" +#include +#include +#include +#include +#include +#include /* Core functionality. 
*/ -#include "mesosphere/kern_select_interrupt_manager.hpp" -#include "mesosphere/kern_k_spin_lock.hpp" -#include "mesosphere/kern_k_page_heap.hpp" -#include "mesosphere/kern_k_memory_manager.hpp" -#include "mesosphere/kern_k_interrupt_task_manager.hpp" -#include "mesosphere/kern_k_core_local_region.hpp" -#include "mesosphere/kern_k_slab_heap.hpp" -#include "mesosphere/kern_k_light_lock.hpp" -#include "mesosphere/kern_kernel.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include /* Auto Objects. */ -#include "mesosphere/kern_k_auto_object.hpp" +#include +#include /* Supervisor Calls. */ -#include "mesosphere/kern_svc.hpp" +#include /* Main functionality. */ -#include "mesosphere/kern_main.hpp" +#include diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index e7bc3d758..7d5a0249c 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -15,7 +15,7 @@ */ #pragma once #include -#include +#include #include #include diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp index a1cf21ac2..b08bd3b64 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp @@ -15,7 +15,7 @@ */ #pragma once #include -#include "kern_cpu_system_registers.hpp" +#include namespace ams::kern::arm64::cpu { diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp index 9c8858b72..09360267f 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp +++ 
b/libraries/libmesosphere/include/mesosphere/init/kern_init_arguments_select.hpp @@ -17,7 +17,7 @@ #include #ifdef ATMOSPHERE_ARCH_ARM64 - #include "../arch/arm64/init/kern_k_init_arguments.hpp" + #include #else #error "Unknown architecture for KInitArguments" #endif diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp index 02aee4a5d..294bee9c6 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp @@ -17,23 +17,21 @@ #include #ifdef ATMOSPHERE_ARCH_ARM64 - #include "kern_init_elf64.hpp" + #include + + namespace ams::kern::init::Elf { + using namespace ams::kern::init::Elf::Elf64; + + enum RelocationType { + R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */ + }; + } #else #error "Unknown Architecture" #endif namespace ams::kern::init::Elf { - #ifdef ATMOSPHERE_ARCH_ARM64 - using namespace ams::kern::init::Elf::Elf64; - - enum RelocationType { - R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */ - }; - #else - #error "Unknown Architecture" - #endif - /* API to apply relocations or call init array. 
*/ void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic); void CallInitArrayFuncs(uintptr_t init_array_start, uintptr_t init_array_end); diff --git a/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp b/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp index d13c8e49c..c68b00d56 100644 --- a/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp +++ b/libraries/libmesosphere/include/mesosphere/init/kern_init_page_table_select.hpp @@ -16,7 +16,7 @@ #pragma once #ifdef ATMOSPHERE_ARCH_ARM64 - #include "../arch/arm64/init/kern_k_init_page_table.hpp" + #include #else #error "Unknown architecture for KInitialPageTable" #endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_common.hpp b/libraries/libmesosphere/include/mesosphere/kern_common.hpp index e69de29bb..7fa55dbdd 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_common.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_common.hpp @@ -0,0 +1,19 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include diff --git a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp index e4674aa1a..179eee497 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_initial_process.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include "kern_panic.hpp" +#include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp index ea38cc063..04198f0c7 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include +#include #include #include @@ -74,14 +73,14 @@ namespace ams::kern { public: static KAutoObject *Create(KAutoObject *ptr); public: - constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { /* ... */ } - virtual ~KAutoObject() { /* ... */ } + constexpr ALWAYS_INLINE explicit KAutoObject() : ref_count(0) { MESOSPHERE_ASSERT_THIS(); } + virtual ~KAutoObject() { MESOSPHERE_ASSERT_THIS(); } /* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */ - virtual void Destroy() { /* ... */ } + virtual void Destroy() { MESOSPHERE_ASSERT_THIS(); } /* Finalize is responsible for cleaning up resource, but does not destroy the object. */ - virtual void Finalize() { /* ... */ } + virtual void Finalize() { MESOSPHERE_ASSERT_THIS(); } virtual KProcess *GetOwner() const { return nullptr; } @@ -122,6 +121,8 @@ namespace ams::kern { } ALWAYS_INLINE bool Open() { + MESOSPHERE_ASSERT_THIS(); + /* Atomically increment the reference count, only if it's positive. 
*/ u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); do { @@ -135,6 +136,8 @@ namespace ams::kern { } ALWAYS_INLINE void Close() { + MESOSPHERE_ASSERT_THIS(); + /* Atomically decrement the reference count, not allowing it to become negative. */ u32 cur_ref_count = this->ref_count.load(std::memory_order_acquire); do { @@ -189,7 +192,11 @@ namespace ams::kern { } public: constexpr ALWAYS_INLINE KScopedAutoObject() : obj(nullptr) { /* ... */ } - constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) { /* ... */ } + constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : obj(o) { + if (this->obj != nullptr) { + this->obj->Open(); + } + } ALWAYS_INLINE ~KScopedAutoObject() { if (this->obj != nullptr) { this->obj->Close(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp index 52504f667..3d501bdf1 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_container.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include #include #include @@ -51,10 +51,10 @@ namespace ams::kern { KLightLock lock; ListType object_list; public: - constexpr KAutoObjectWithListContainer() : lock(), object_list() { /* ... */ } + constexpr KAutoObjectWithListContainer() : lock(), object_list() { MESOSPHERE_ASSERT_THIS(); } - void Initialize() { /* Nothing to do. */ } - void Finalize() { /* Nothing to do. 
*/ } + void Initialize() { MESOSPHERE_ASSERT_THIS(); } + void Finalize() { MESOSPHERE_ASSERT_THIS(); } Result Register(KAutoObjectWithList *obj); Result Unregister(KAutoObjectWithList *obj); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp index b1640f6a8..fbbfe0abf 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include +#include #include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp index 4422df408..1d37dd612 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include #include #include #include diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp new file mode 100644 index 000000000..cde564aa3 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp @@ -0,0 +1,283 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + constexpr ALWAYS_INLINE util::BitPack32 GetHandleBitPack(ams::svc::Handle handle) { + return util::BitPack32{handle}; + } + + class KHandleTable { + NON_COPYABLE(KHandleTable); + NON_MOVEABLE(KHandleTable); + public: + static constexpr size_t MaxTableSize = 1024; + private: + using HandleRawValue = util::BitPack32::Field<0, BITSIZEOF(u32), u32>; + using HandleEncoded = util::BitPack32::Field<0, BITSIZEOF(ams::svc::Handle), ams::svc::Handle>; + + using HandleIndex = util::BitPack32::Field<0, 15, u16>; + using HandleLinearId = util::BitPack32::Field; + using HandleReserved = util::BitPack32::Field; + + static constexpr u16 MinLinearId = 1; + static constexpr u16 MaxLinearId = util::BitPack32{std::numeric_limits::max()}.Get(); + + static constexpr ALWAYS_INLINE ams::svc::Handle EncodeHandle(u16 index, u16 linear_id) { + util::BitPack32 pack = {0}; + pack.Set(index); + pack.Set(linear_id); + pack.Set(0); + return pack.Get(); + } + + class Entry { + private: + union { + struct { + u16 linear_id; + u16 type; + } info; + Entry *next_free_entry; + } meta; + KAutoObject *object; + public: + constexpr Entry() : meta(), object(nullptr) { /* ... 
*/ } + + constexpr ALWAYS_INLINE void SetFree(Entry *next) { + this->object = nullptr; + this->meta.next_free_entry = next; + } + + constexpr ALWAYS_INLINE void SetUsed(KAutoObject *obj, u16 linear_id, u16 type) { + this->object = obj; + this->meta.info = { linear_id, type }; + } + + constexpr ALWAYS_INLINE KAutoObject *GetObject() const { return this->object; } + constexpr ALWAYS_INLINE Entry *GetNextFreeEntry() const { return this->meta.next_free_entry; } + constexpr ALWAYS_INLINE u16 GetLinearId() const { return this->meta.info.linear_id; } + constexpr ALWAYS_INLINE u16 GetType() const { return this->meta.info.type; } + }; + private: + mutable KSpinLock lock; + Entry *table; + Entry *free_head; + Entry entries[MaxTableSize]; + u16 table_size; + u16 max_count; + u16 next_linear_id; + u16 count; + public: + constexpr KHandleTable() : + lock(), table(nullptr), free_head(nullptr), entries(), table_size(0), max_count(0), next_linear_id(MinLinearId), count(0) + { MESOSPHERE_ASSERT_THIS(); } + + constexpr NOINLINE Result Initialize(s32 size) { + MESOSPHERE_ASSERT_THIS(); + + R_UNLESS(size <= static_cast(MaxTableSize), svc::ResultOutOfMemory()); + + /* Initialize all fields. */ + this->table = this->entries; + this->table_size = (size <= 0) ? MaxTableSize : table_size; + this->next_linear_id = MinLinearId; + this->count = 0; + this->max_count = 0; + + /* Free all entries. 
*/ + for (size_t i = 0; i < static_cast(this->table_size - 1); i++) { + this->entries[i].SetFree(std::addressof(this->entries[i + 1])); + } + this->entries[this->table_size - 1].SetFree(nullptr); + + this->free_head = std::addressof(this->entries[0]); + + return ResultSuccess(); + } + + constexpr ALWAYS_INLINE size_t GetTableSize() const { return this->table_size; } + constexpr ALWAYS_INLINE size_t GetCount() const { return this->count; } + constexpr ALWAYS_INLINE size_t GetMaxCount() const { return this->max_count; } + + NOINLINE Result Finalize(); + NOINLINE bool Remove(ams::svc::Handle handle); + + template + ALWAYS_INLINE KScopedAutoObject GetObject(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + if constexpr (std::is_same::value) { + return this->GetObjectImpl(handle); + } else { + return this->GetObjectImpl(handle)->DynamicCast(); + } + } + + template + ALWAYS_INLINE KScopedAutoObject GetObjectForIpc(ams::svc::Handle handle) const { + /* TODO: static_assert(!std::is_base_of::value); */ + + KAutoObject *obj = this->GetObjectImpl(handle); + if (false /* TODO: obj->DynamicCast() != nullptr */) { + return nullptr; + } + if constexpr (std::is_same::value) { + return obj; + } else { + return obj->DynamicCast(); + } + } + + ALWAYS_INLINE KScopedAutoObject GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + return this->GetObjectByIndexImpl(out_handle, index); + } + + NOINLINE Result Reserve(ams::svc::Handle *out_handle); + NOINLINE void Unreserve(ams::svc::Handle handle); + + template + ALWAYS_INLINE Result Add(ams::svc::Handle *out_handle, T *obj) { + static_assert(std::is_base_of::value); + return this->Add(out_handle, obj, obj->GetTypeObj().GetClassToken()); + } + + template + ALWAYS_INLINE void Register(ams::svc::Handle handle, T *obj) { + 
static_assert(std::is_base_of::value); + return this->Add(handle, obj, obj->GetTypeObj().GetClassToken()); + } + private: + NOINLINE Result Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type); + NOINLINE void Register(ams::svc::Handle handle, KAutoObject *obj, u16 type); + + constexpr ALWAYS_INLINE Entry *AllocateEntry() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->count < this->table_size); + + Entry *entry = this->free_head; + this->free_head = entry->GetNextFreeEntry(); + + this->count++; + this->max_count = std::max(this->max_count, this->count); + + return entry; + } + + constexpr ALWAYS_INLINE void FreeEntry(Entry *entry) { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->count > 0); + + entry->SetFree(this->free_head); + this->free_head = entry; + + this->count--; + } + + constexpr ALWAYS_INLINE u16 AllocateLinearId() { + const u16 id = this->next_linear_id++; + if (this->next_linear_id > MaxLinearId) { + this->next_linear_id = MinLinearId; + } + return id; + } + + constexpr ALWAYS_INLINE size_t GetEntryIndex(Entry *entry) { + const size_t index = entry - this->table; + MESOSPHERE_ASSERT(index < this->table_size); + return index; + } + + constexpr ALWAYS_INLINE Entry *FindEntry(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); + + /* Unpack the handle. */ + const auto handle_pack = GetHandleBitPack(handle); + const auto raw_value = handle_pack.Get(); + const auto index = handle_pack.Get(); + const auto linear_id = handle_pack.Get(); + const auto reserved = handle_pack.Get(); + MESOSPHERE_ASSERT(reserved == 0); + + /* Validate our indexing information. */ + if (raw_value == 0) { + return nullptr; + } + if (linear_id == 0) { + return nullptr; + } + if (index >= this->table_size) { + return nullptr; + } + + /* Get the entry, and ensure our serial id is correct. 
*/ + Entry *entry = std::addressof(this->table[index]); + if (entry->GetObject() == nullptr) { + return nullptr; + } + if (entry->GetLinearId() != linear_id) { + return nullptr; + } + + return entry; + } + + constexpr NOINLINE KAutoObject *GetObjectImpl(ams::svc::Handle handle) const { + MESOSPHERE_ASSERT_THIS(); + + /* Handles must not have reserved bits set. */ + if (GetHandleBitPack(handle).Get() != 0) { + return nullptr; + } + + if (Entry *entry = this->FindEntry(handle); entry != nullptr) { + return entry->GetObject(); + } else { + return nullptr; + } + } + + constexpr NOINLINE KAutoObject *GetObjectByIndexImpl(ams::svc::Handle *out_handle, size_t index) const { + MESOSPHERE_ASSERT_THIS(); + + /* Index must be in bounds. */ + if (index >= this->table_size || this->table == nullptr) { + return nullptr; + } + + /* Ensure entry has an object. */ + Entry *entry = std::addressof(this->table[index]); + if (entry->GetObject() == nullptr) { + return nullptr; + } + + *out_handle = EncodeHandle(index, entry->GetLinearId()); + return entry->GetObject(); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp index eb0cab5e1..d6097fd54 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include #include #include @@ -27,6 +27,8 @@ namespace ams::kern { constexpr KLightLock() : tag(0) { /* ... 
*/ } void Lock() { + MESOSPHERE_ASSERT_THIS(); + const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); while (true) { @@ -45,6 +47,8 @@ namespace ams::kern { } void Unlock() { + MESOSPHERE_ASSERT_THIS(); + const uintptr_t cur_thread = reinterpret_cast(GetCurrentThreadPointer()); uintptr_t expected = cur_thread; if (!this->tag.compare_exchange_weak(expected, 0, std::memory_order_release)) { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp index 80a5f463f..1084f2864 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_linked_list.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include #include #include @@ -24,9 +24,10 @@ namespace ams::kern { private: void *item; public: - constexpr KLinkedListNode() : util::IntrusiveListBaseNode(), item(nullptr) { /* ... */ } + constexpr KLinkedListNode() : util::IntrusiveListBaseNode(), item(nullptr) { MESOSPHERE_ASSERT_THIS(); } constexpr void Initialize(void *it) { + MESOSPHERE_ASSERT_THIS(); this->item = it; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index c72eee498..aee5f47af 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . 
*/ #pragma once -#include +#include #include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp index f958d4e34..00f296ec7 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp index 2a74efdc3..bc00103d6 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp index e504ef539..2d24df367 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_slab_heap.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include +#include #include namespace ams::kern { @@ -33,7 +32,7 @@ namespace ams::kern { std::atomic head; size_t obj_size; public: - constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { /* ... 
*/ } + constexpr KSlabHeapImpl() : head(nullptr), obj_size(0) { MESOSPHERE_ASSERT_THIS(); } void Initialize(size_t size) { MESOSPHERE_INIT_ABORT_UNLESS(this->head == nullptr); @@ -49,6 +48,8 @@ namespace ams::kern { } void *Allocate() { + MESOSPHERE_ASSERT_THIS(); + Node *ret = this->head.load(); do { @@ -61,6 +62,8 @@ namespace ams::kern { } void Free(void *obj) { + MESOSPHERE_ASSERT_THIS(); + Node *node = reinterpret_cast(obj); Node *cur_head = this->head.load(); @@ -90,13 +93,15 @@ namespace ams::kern { return std::addressof(this->impl); } public: - constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { /* ... */ } + constexpr KSlabHeapBase() : impl(), peak(0), start(0), end(0) { MESOSPHERE_ASSERT_THIS(); } ALWAYS_INLINE bool Contains(uintptr_t address) const { return this->start <= address && address < this->end; } void InitializeImpl(size_t obj_size, void *memory, size_t memory_size) { + MESOSPHERE_ASSERT_THIS(); + /* Ensure we don't initialize a slab using null memory. */ MESOSPHERE_ABORT_UNLESS(memory != nullptr); @@ -127,6 +132,8 @@ namespace ams::kern { } void *AllocateImpl() { + MESOSPHERE_ASSERT_THIS(); + void *obj = this->GetImpl()->Allocate(); /* TODO: under some debug define, track the peak for statistics, as N does? */ @@ -135,6 +142,8 @@ namespace ams::kern { } void FreeImpl(void *obj) { + MESOSPHERE_ASSERT_THIS(); + /* Don't allow freeing an object that wasn't allocated from this heap. */ MESOSPHERE_ABORT_UNLESS(this->Contains(reinterpret_cast(obj))); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp index 915c94fe9..c1088a3b8 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . 
*/ #pragma once -#include -#include "kern_panic.hpp" +#include #if defined(ATMOSPHERE_ARCH_ARM64) diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp index 929aaca66..9f7934cd9 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp @@ -30,10 +30,10 @@ namespace ams::kern { private: ThreadList thread_list; protected: - constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), thread_list() { /* ... */ } - virtual ~KSynchronizationObject() { /* ... */ } + constexpr ALWAYS_INLINE explicit KSynchronizationObject() : KAutoObjectWithList(), thread_list() { MESOSPHERE_ASSERT_THIS(); } + virtual ~KSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); } - virtual void OnFinalizeSynchronizationObject() { /* ... */ } + virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); } void NotifyAvailable(); void NotifyAbort(Result abort_reason); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index c1345698a..a34f4e800 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -36,4 +36,26 @@ namespace ams::kern { /* TODO: This is a placeholder definition. 
*/ }; + class KScopedDisableDispatch { + public: + explicit ALWAYS_INLINE KScopedDisableDispatch() { + /* TODO */ + } + + ALWAYS_INLINE ~KScopedDisableDispatch() { + /* TODO */ + } + }; + + class KScopedEnableDispatch { + public: + explicit ALWAYS_INLINE KScopedEnableDispatch() { + /* TODO */ + } + + ALWAYS_INLINE ~KScopedEnableDispatch() { + /* TODO */ + } + }; + } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp index d57899ce8..dba2e1c16 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp index 97af400ca..3e967624d 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_main.hpp b/libraries/libmesosphere/include/mesosphere/kern_main.hpp index d01cbbbfe..f1935bdde 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_main.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_main.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . 
*/ #pragma once -#include +#include namespace ams::kern { diff --git a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp index c245b3904..8a41955c6 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp @@ -14,7 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include +#include namespace ams::kern { @@ -37,12 +37,18 @@ namespace ams::kern { } \ }) #else -#define MESOSPHERE_ASSERT_IMPL(expr, ...) do { } while (0) +#define MESOSPHERE_ASSERT_IMPL(expr, ...) do { static_cast(expr); } while (0) #endif #define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s", #expr) #define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s", #expr) +#ifdef MESOSPHERE_ENABLE_THIS_ASSERT +#define MESOSPHERE_ASSERT_THIS() MESOSPHERE_ASSERT(this != nullptr) +#else +#define MESOSPHERE_ASSERT_THIS() +#endif + #define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()"); #define MESOSPHERE_INIT_ABORT() do { /* ... 
*/ } while (true) diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp index 93bc2434e..6c3ac104b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp @@ -16,7 +16,7 @@ #pragma once #ifdef ATMOSPHERE_ARCH_ARM64 - #include "arch/arm64/kern_cpu.hpp" + #include namespace ams::kern::cpu { diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp index db8d6e4f4..bf5fedd70 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_hardware_timer.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include "kern_panic.hpp" +#include #if defined(ATMOSPHERE_ARCH_ARM64) diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp index e9bbd2ff6..236c96bb5 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include "kern_panic.hpp" +#include #if defined(ATMOSPHERE_ARCH_ARM64) diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp index cab08dd0d..9fa9790fa 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . 
*/ #pragma once -#include -#include "kern_panic.hpp" +#include #if defined(ATMOSPHERE_ARCH_ARM64) diff --git a/libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp b/libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp index 066a317cc..ff53a5fe3 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_select_k_system_control.hpp @@ -16,7 +16,7 @@ #pragma once #ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH - #include "board/nintendo/switch/kern_k_system_control.hpp" + #include #else #error "Unknown board for KSystemControl" #endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp index 31cf1613a..a5155dea5 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp @@ -14,8 +14,7 @@ * along with this program. If not, see . */ #pragma once -#include -#include +#include #include #include #include diff --git a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp index 694b30165..56073530c 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_svc.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_svc.hpp @@ -14,6 +14,7 @@ * along with this program. If not, see . 
*/ #pragma once -#include "svc/kern_svc_k_user_pointer.hpp" -#include "svc/kern_svc_prototypes.hpp" -#include "svc/kern_svc_tables.hpp" +#include +#include +#include +#include diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp index 7ea3811f4..1c0968b8d 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_k_user_pointer.hpp @@ -15,6 +15,7 @@ */ #pragma once #include +#include namespace ams::kern::svc { diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp index c66e4f8ed..dd71d5722 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_prototypes.hpp @@ -15,7 +15,8 @@ */ #pragma once #include -#include "kern_svc_k_user_pointer.hpp" +#include +#include namespace ams::kern::svc { diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp new file mode 100644 index 000000000..29eb82690 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern::svc { + + /* 7 */ using ::ams::svc::ResultOutOfSessions; + + /* 14 */ using ::ams::svc::ResultInvalidArgument; + + /* 33 */ using ::ams::svc::ResultNotImplemented; + + /* 59 */ using ::ams::svc::ResultThreadTerminating; + + /* 70 */ using ::ams::svc::ResultNoEvent; + + /* 101 */ using ::ams::svc::ResultInvalidSize; + /* 102 */ using ::ams::svc::ResultInvalidAddress; + /* 103 */ using ::ams::svc::ResultOutOfResource; + /* 104 */ using ::ams::svc::ResultOutOfMemory; + /* 105 */ using ::ams::svc::ResultOutOfHandles; + /* 106 */ using ::ams::svc::ResultInvalidCurrentMemoryState; + + /* 108 */ using ::ams::svc::ResultInvalidNewMemoryPermissions; + + /* 110 */ using ::ams::svc::ResultInvalidMemoryRegion; + + /* 112 */ using ::ams::svc::ResultInvalidPriority; + /* 113 */ using ::ams::svc::ResultInvalidCoreId; + /* 114 */ using ::ams::svc::ResultInvalidHandle; + /* 115 */ using ::ams::svc::ResultInvalidPointer; + /* 116 */ using ::ams::svc::ResultInvalidCombination; + /* 117 */ using ::ams::svc::ResultTimedOut; + /* 118 */ using ::ams::svc::ResultCancelled; + /* 119 */ using ::ams::svc::ResultOutOfRange; + /* 120 */ using ::ams::svc::ResultInvalidEnumValue; + /* 121 */ using ::ams::svc::ResultNotFound; + /* 122 */ using ::ams::svc::ResultBusy; + /* 123 */ using ::ams::svc::ResultSessionClosed; + /* 124 */ using ::ams::svc::ResultNotHandled; + /* 125 */ using ::ams::svc::ResultInvalidState; + /* 126 */ using ::ams::svc::ResultReservedValue; + /* 127 */ using ::ams::svc::ResultNotSupported; + /* 128 */ using ::ams::svc::ResultDebug; + /* 129 */ using ::ams::svc::ResultThreadNotOwned; + + /* 131 */ using ::ams::svc::ResultPortClosed; + /* 132 */ using ::ams::svc::ResultLimitReached; + + /* 258 */ using ::ams::svc::ResultReceiveListBroken; + /* 259 */ using ::ams::svc::ResultOutOfAddressSpace; + /* 260 */ 
using ::ams::svc::ResultMessageTooLarge; + + /* 520 */ using ::ams::svc::ResultProcessTerminated; + +} diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp index 0afb1940d..c1ff646e2 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_tables.hpp @@ -15,6 +15,7 @@ */ #pragma once #include +#include namespace ams::kern::svc { diff --git a/libraries/libmesosphere/source/kern_k_auto_object_container.cpp b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp index f6cea8023..aebbc8ec8 100644 --- a/libraries/libmesosphere/source/kern_k_auto_object_container.cpp +++ b/libraries/libmesosphere/source/kern_k_auto_object_container.cpp @@ -19,6 +19,8 @@ namespace ams::kern { Result KAutoObjectWithListContainer::Register(KAutoObjectWithList *obj) { + MESOSPHERE_ASSERT_THIS(); + KScopedLightLock lk(this->lock); this->object_list.insert(*obj); @@ -27,6 +29,8 @@ namespace ams::kern { } Result KAutoObjectWithListContainer::Unregister(KAutoObjectWithList *obj) { + MESOSPHERE_ASSERT_THIS(); + KScopedLightLock lk(this->lock); this->object_list.erase(this->object_list.iterator_to(*obj)); @@ -35,6 +39,8 @@ namespace ams::kern { } size_t KAutoObjectWithListContainer::GetOwnedCount(KProcess *owner) { + MESOSPHERE_ASSERT_THIS(); + KScopedLightLock lk(this->lock); size_t count = 0; diff --git a/libraries/libmesosphere/source/kern_k_handle_table.cpp b/libraries/libmesosphere/source/kern_k_handle_table.cpp new file mode 100644 index 000000000..ca71003a8 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_handle_table.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + Result KHandleTable::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Get the table and clear our record of it. */ + Entry *saved_table = nullptr; + u16 saved_table_size = 0; + { + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + std::swap(this->table, saved_table); + std::swap(this->table_size, saved_table_size); + } + + /* Close and free all entries. */ + for (size_t i = 0; i < saved_table_size; i++) { + Entry *entry = std::addressof(saved_table[i]); + + if (KAutoObject *obj = entry->GetObject(); obj != nullptr) { + obj->Close(); + this->FreeEntry(entry); + } + } + + return ResultSuccess(); + } + + bool KHandleTable::Remove(ams::svc::Handle handle) { + MESOSPHERE_ASSERT_THIS(); + + /* Don't allow removal of a pseudo-handle. */ + if (ams::svc::IsPseudoHandle(handle)) { + return false; + } + + /* Handles must not have reserved bits set. */ + if (GetHandleBitPack(handle).Get() != 0) { + return false; + } + + /* Find the object and free the entry. */ + KAutoObject *obj = nullptr; + { + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + if (Entry *entry = this->FindEntry(handle); entry != nullptr) { + obj = entry->GetObject(); + this->FreeEntry(entry); + } else { + return false; + } + } + + /* Close the object. */ + obj->Close(); + return true; + } + + Result KHandleTable::Add(ams::svc::Handle *out_handle, KAutoObject *obj, u16 type) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Never exceed our capacity. 
*/ + R_UNLESS(this->count < this->table_size, svc::ResultOutOfHandles()); + + /* Allocate entry, set output handle. */ + { + const auto linear_id = this->AllocateLinearId(); + Entry *entry = this->AllocateEntry(); + entry->SetUsed(obj, linear_id, type); + obj->Open(); + *out_handle = EncodeHandle(this->GetEntryIndex(entry), linear_id); + } + + return ResultSuccess(); + } + + Result KHandleTable::Reserve(ams::svc::Handle *out_handle) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Never exceed our capacity. */ + R_UNLESS(this->count < this->table_size, svc::ResultOutOfHandles()); + + *out_handle = EncodeHandle(this->GetEntryIndex(this->AllocateEntry()), this->AllocateLinearId()); + return ResultSuccess(); + } + + void KHandleTable::Unreserve(ams::svc::Handle handle) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Unpack the handle. */ + const auto handle_pack = GetHandleBitPack(handle); + const auto index = handle_pack.Get(); + const auto linear_id = handle_pack.Get(); + const auto reserved = handle_pack.Get(); + MESOSPHERE_ASSERT(reserved == 0); + MESOSPHERE_ASSERT(linear_id != 0); + MESOSPHERE_ASSERT(index < this->table_size); + + /* Free the entry. */ + /* NOTE: This code does not check the linear id. */ + Entry *entry = std::addressof(this->table[index]); + MESOSPHERE_ASSERT(entry->GetObject() == nullptr); + + this->FreeEntry(entry); + } + + void KHandleTable::Register(ams::svc::Handle handle, KAutoObject *obj, u16 type) { + MESOSPHERE_ASSERT_THIS(); + KScopedDisableDispatch dd; + KScopedSpinLock lk(this->lock); + + /* Unpack the handle. */ + const auto handle_pack = GetHandleBitPack(handle); + const auto index = handle_pack.Get(); + const auto linear_id = handle_pack.Get(); + const auto reserved = handle_pack.Get(); + MESOSPHERE_ASSERT(reserved == 0); + MESOSPHERE_ASSERT(linear_id != 0); + MESOSPHERE_ASSERT(index < this->table_size); + + /* Set the entry. 
*/ + Entry *entry = std::addressof(this->table[index]); + MESOSPHERE_ASSERT(entry->GetObject() == nullptr); + + entry->SetUsed(obj, linear_id, type); + obj->Open(); + } + +} diff --git a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp index 942bf2759..54bd488b4 100644 --- a/libraries/libmesosphere/source/kern_k_synchronization_object.cpp +++ b/libraries/libmesosphere/source/kern_k_synchronization_object.cpp @@ -18,36 +18,53 @@ namespace ams::kern { void NotifyAvailable() { + MESOSPHERE_ASSERT_THIS(); + /* TODO: Implement this. */ MESOSPHERE_ABORT(); } void NotifyAbort(Result abort_reason) { + MESOSPHERE_ASSERT_THIS(); + + /* TODO: Implement this. */ MESOSPHERE_ABORT(); } void KSynchronizationObject::Finalize() { + MESOSPHERE_ASSERT_THIS(); + this->OnFinalizeSynchronizationObject(); KAutoObject::Finalize(); } void KSynchronizationObject::DebugWaiters() { + MESOSPHERE_ASSERT_THIS(); + /* TODO: Do useful debug operation here. 
*/ } KSynchronizationObject::iterator KSynchronizationObject::AddWaiterThread(KThread *thread) { + MESOSPHERE_ASSERT_THIS(); + return this->thread_list.insert(this->thread_list.end(), *thread); } KSynchronizationObject::iterator KSynchronizationObject::RemoveWaiterThread(KSynchronizationObject::iterator it) { + MESOSPHERE_ASSERT_THIS(); + return this->thread_list.erase(it); } KSynchronizationObject::iterator KSynchronizationObject::begin() { + MESOSPHERE_ASSERT_THIS(); + return this->thread_list.begin(); } KSynchronizationObject::iterator KSynchronizationObject::end() { + MESOSPHERE_ASSERT_THIS(); + return this->thread_list.end(); } diff --git a/libraries/libvapours/include/vapours/svc/svc_common.hpp b/libraries/libvapours/include/vapours/svc/svc_common.hpp index f60170667..938490191 100644 --- a/libraries/libvapours/include/vapours/svc/svc_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_common.hpp @@ -15,7 +15,7 @@ */ #pragma once -#include "../results.hpp" +#include namespace ams::svc { @@ -28,6 +28,31 @@ namespace ams::svc { #error "Unknown target for svc::Handle" #endif + enum class PseudoHandle : Handle { + CurrentThread = 0xFFFF8000, + CurrentProcess = 0xFFFF8001, + }; + + constexpr ALWAYS_INLINE bool operator==(const Handle &lhs, const PseudoHandle &rhs) { + return static_cast(lhs) == static_cast(rhs); + } + + constexpr ALWAYS_INLINE bool operator==(const PseudoHandle &lhs, const Handle &rhs) { + return static_cast(lhs) == static_cast(rhs); + } + + constexpr ALWAYS_INLINE bool operator!=(const Handle &lhs, const PseudoHandle &rhs) { + return !(lhs == rhs); + } + + constexpr ALWAYS_INLINE bool operator!=(const PseudoHandle &lhs, const Handle &rhs) { + return !(lhs == rhs); + } + + constexpr ALWAYS_INLINE bool IsPseudoHandle(const Handle &handle) { + return handle == PseudoHandle::CurrentProcess || handle == PseudoHandle::CurrentThread; + } + #ifdef ATMOSPHERE_ARCH_ARM64 From 059c706f192b43e714ecef02707d9fd27f42a01b Mon Sep 17 00:00:00 2001 From: 
Michael Scire Date: Thu, 30 Jan 2020 16:51:35 -0800 Subject: [PATCH 22/97] kern: implement KThreadLocalPage --- .../libmesosphere/include/mesosphere.hpp | 1 + .../arm64/init/kern_k_init_page_table.hpp | 7 +- .../include/mesosphere/kern_common.hpp | 6 ++ .../mesosphere/kern_k_core_local_region.hpp | 8 +- .../mesosphere/kern_k_memory_layout.hpp | 8 ++ .../mesosphere/kern_k_memory_manager.hpp | 2 - .../include/mesosphere/kern_k_page_buffer.hpp | 42 +++++++++ .../include/mesosphere/kern_k_process.hpp | 31 +++++++ .../mesosphere/kern_k_thread_local_page.hpp | 93 +++++++++++++++++++ .../mesosphere/kern_k_typed_address.hpp | 4 + .../include/mesosphere/kern_panic.hpp | 8 ++ .../libmesosphere/source/kern_k_page_heap.cpp | 2 +- .../source/kern_k_thread_local_page.cpp | 73 +++++++++++++++ .../vapours/results/results_common.hpp | 38 ++++---- .../include/vapours/svc/svc_types_common.hpp | 2 + 15 files changed, 295 insertions(+), 30 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_process.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp create mode 100644 libraries/libmesosphere/source/kern_k_thread_local_page.cpp diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index 6f829e2c1..1131b930d 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -52,6 +52,7 @@ /* Auto Objects. */ #include #include +#include /* Supervisor Calls. 
*/ #include diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp index 7d5a0249c..194950f3e 100644 --- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp @@ -21,11 +21,10 @@ namespace ams::kern::init { - constexpr size_t PageSize = 0x1000; - constexpr size_t L1BlockSize = 0x40000000; - constexpr size_t L2BlockSize = 0x200000; + constexpr size_t L1BlockSize = 1_GB; + constexpr size_t L2BlockSize = 2_MB; constexpr size_t L2ContiguousBlockSize = 0x10 * L2BlockSize; - constexpr size_t L3BlockSize = 0x1000; + constexpr size_t L3BlockSize = PageSize; constexpr size_t L3ContiguousBlockSize = 0x10 * L3BlockSize; class PageTableEntry { diff --git a/libraries/libmesosphere/include/mesosphere/kern_common.hpp b/libraries/libmesosphere/include/mesosphere/kern_common.hpp index 7fa55dbdd..8f12b78f7 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_common.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_common.hpp @@ -17,3 +17,9 @@ #include #include #include + +namespace ams::kern { + + constexpr size_t PageSize = 4_KB; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp index 1d37dd612..8b4b74a15 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_core_local_region.hpp @@ -44,18 +44,18 @@ namespace ams::kern { std::atomic num_specific_svc[0x80]; u32 perf_counters[6]; }; - static_assert(sizeof(KCoreLocalContext) < KMemoryManager::PageSize); + static_assert(sizeof(KCoreLocalContext) < PageSize); struct KCoreLocalPage { KCoreLocalContext context; - u8 padding[KMemoryManager::PageSize - 
sizeof(KCoreLocalContext)]; + u8 padding[PageSize - sizeof(KCoreLocalContext)]; }; - static_assert(sizeof(KCoreLocalPage) == KMemoryManager::PageSize); + static_assert(sizeof(KCoreLocalPage) == PageSize); struct KCoreLocalRegion { KCoreLocalPage current; KCoreLocalPage absolute[cpu::NumCores]; }; - static_assert(sizeof(KCoreLocalRegion) == KMemoryManager::PageSize * (1 + cpu::NumCores)); + static_assert(sizeof(KCoreLocalRegion) == PageSize * (1 + cpu::NumCores)); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp index aee5f47af..5eadbcacc 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp @@ -397,6 +397,14 @@ namespace ams::kern { static ALWAYS_INLINE KMemoryBlockTree &GetVirtualLinearMemoryBlockTree() { return s_virtual_linear_tree; } static ALWAYS_INLINE KMemoryBlockTree &GetPhysicalLinearMemoryBlockTree() { return s_physical_linear_tree; } + static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) { + return GetInteger(address) + s_linear_phys_to_virt_diff; + } + + static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) { + return GetInteger(address) + s_linear_virt_to_phys_diff; + } + static NOINLINE KVirtualAddress GetMainStackTopAddress(s32 core_id) { return GetVirtualMemoryBlockTree().FindFirstBlockByTypeAttr(KMemoryRegionType_KernelMiscMainStack, static_cast(core_id))->GetEndAddress(); } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp index 00f296ec7..ce9d4443f 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp @@ -19,8 +19,6 @@ namespace ams::kern { class KMemoryManager { - public: - 
static constexpr size_t PageSize = 0x1000; /* TODO: Elsewhere? */ private: class Impl { public: diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp new file mode 100644 index 000000000..b9a99c2c2 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + class KPageBuffer : public KSlabAllocated { + private: + alignas(PageSize) u8 buffer[PageSize]; + public: + KPageBuffer() { + std::memset(buffer, 0, sizeof(buffer)); + } + + static ALWAYS_INLINE KPageBuffer *FromPhysicalAddress(KPhysicalAddress phys_addr) { + const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(phys_addr); + + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize)); + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize)); + + return GetPointer(virt_addr); + } + }; + static_assert(sizeof(KPageBuffer) == PageSize); + static_assert(alignof(KPageBuffer) == PageSize); + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp new file mode 100644 index 000000000..2d3e2b21c --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include +#include +#include + +namespace ams::kern { + + class KProcess final : public KAutoObjectWithSlabHeapAndContainer { + MESOSPHERE_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); + /* TODO: This is a placeholder definition. 
*/ + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp new file mode 100644 index 000000000..9e8c3fe6e --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include +#include +#include +#include + +namespace ams::kern { + + class KThread; + class KProcess; + + class KThreadLocalPage : public util::IntrusiveRedBlackTreeBaseNode, public KSlabAllocated { + public: + static constexpr size_t RegionsPerPage = PageSize / ams::svc::ThreadLocalRegionSize; + static_assert(RegionsPerPage > 0); + private: + KProcessAddress virt_addr; + KProcess *owner; + bool is_region_free[RegionsPerPage]; + public: + constexpr explicit KThreadLocalPage(KProcessAddress addr) : virt_addr(addr), owner(nullptr), is_region_free() { + for (size_t i = 0; i < RegionsPerPage; i++) { + this->is_region_free[i] = true; + } + } + + constexpr explicit KThreadLocalPage() : KThreadLocalPage(Null) { /* ... 
*/ } + + constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return this->virt_addr; } + private: + constexpr ALWAYS_INLINE KProcessAddress GetRegionAddress(size_t i) { + return this->GetAddress() + i * ams::svc::ThreadLocalRegionSize; + } + + constexpr ALWAYS_INLINE bool Contains(KProcessAddress addr) { + return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize; + } + + constexpr ALWAYS_INLINE size_t GetRegionIndex(KProcessAddress addr) { + MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), ams::svc::ThreadLocalRegionSize)); + MESOSPHERE_ASSERT(this->Contains(addr)); + return (addr - this->GetAddress()) / ams::svc::ThreadLocalRegionSize; + } + public: + Result Initialize(KProcess *process); + Result Finalize(); + + KProcessAddress Reserve(); + void Release(KProcessAddress addr); + + bool IsAllUsed() const { + for (size_t i = 0; i < RegionsPerPage; i++) { + if (this->is_region_free[i]) { + return false; + } + } + return true; + } + + bool IsAllFree() const { + for (size_t i = 0; i < RegionsPerPage; i++) { + if (!this->is_region_free[i]) { + return false; + } + } + return true; + } + + bool IsAnyUsed() const { + return !this->IsAllFree(); + } + + bool IsAnyFree() const { + return !this->IsAllUsed(); + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp index 3e967624d..fc177de9b 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp @@ -50,6 +50,10 @@ namespace ams::kern { return this->address - rhs; } + constexpr ALWAYS_INLINE ptrdiff_t operator-(KTypedAddress rhs) const { + return this->address - rhs.address; + } + template constexpr ALWAYS_INLINE KTypedAddress operator+=(I rhs) { static_assert(std::is_integral::value); diff --git a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp 
b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp index 8a41955c6..63bef9b34 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_panic.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_panic.hpp @@ -65,3 +65,11 @@ namespace ams::kern { MESOSPHERE_INIT_ABORT(); \ } \ }) + +#define MESOSPHERE_R_ABORT_UNLESS(expr) \ + ({ \ + const ::ams::Result _tmp_meso_r_abort_res = static_cast<::ams::Result>(expr); \ + if (AMS_UNLIKELY((R_FAILED(_tmp_meso_r_abort_res))) { \ + MESOSPHERE_PANIC("Result Abort(): %s 2%03d-%04d", #expr, _tmp_meso_r_abort_res.GetModule(), _tmp_meso_r_abort_res.GetDescription()); \ + } \ + }) diff --git a/libraries/libmesosphere/source/kern_k_page_heap.cpp b/libraries/libmesosphere/source/kern_k_page_heap.cpp index 6dce64507..3e61d6a86 100644 --- a/libraries/libmesosphere/source/kern_k_page_heap.cpp +++ b/libraries/libmesosphere/source/kern_k_page_heap.cpp @@ -22,7 +22,7 @@ namespace ams::kern { for (size_t i = 0; i < num_block_shifts; i++) { overhead_size += KPageHeap::Block::CalculateMetadataOverheadSize(region_size, block_shifts[i], (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0); } - return util::AlignUp(overhead_size, KMemoryManager::PageSize); + return util::AlignUp(overhead_size, PageSize); } } diff --git a/libraries/libmesosphere/source/kern_k_thread_local_page.cpp b/libraries/libmesosphere/source/kern_k_thread_local_page.cpp new file mode 100644 index 000000000..efc5a3099 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_thread_local_page.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + Result KThreadLocalPage::Initialize(KProcess *process) { + MESOSPHERE_ASSERT_THIS(); + + /* Set that this process owns us. */ + this->owner = process; + + /* Allocate a new page. */ + KPageBuffer *page_buf = KPageBuffer::Allocate(); + R_UNLESS(page_buf != nullptr, svc::ResultOutOfMemory()); + auto page_buf_guard = SCOPE_GUARD { KPageBuffer::Free(page_buf); }; + + /* Map the address in. */ + /* TODO: R_TRY(this->owner->GetPageTable().Map(...)); */ + + /* We succeeded. */ + page_buf_guard.Cancel(); + return ResultSuccess(); + } + + Result KThreadLocalPage::Finalize() { + MESOSPHERE_ASSERT_THIS(); + + /* Get the physical address of the page. */ + KPhysicalAddress phys_addr = Null; + /* TODO: MESOSPHERE_ABORT_UNLESS(this->owner->GetPageTable().GetPhysicalAddress(&phys_addr, this->GetAddress())); */ + + /* Unmap the page. */ + /* TODO: R_TRY(this->owner->GetPageTable().Unmap(...); */ + + /* Free the page. 
*/ + KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(phys_addr)); + return ResultSuccess(); + } + + KProcessAddress KThreadLocalPage::Reserve() { + MESOSPHERE_ASSERT_THIS(); + + for (size_t i = 0; i < util::size(this->is_region_free); i++) { + if (this->is_region_free[i]) { + this->is_region_free[i] = false; + return this->GetRegionAddress(i); + } + } + + return Null; + } + + void KThreadLocalPage::Release(KProcessAddress addr) { + MESOSPHERE_ASSERT_THIS(); + + this->is_region_free[this->GetRegionIndex(addr)] = true; + } + +} diff --git a/libraries/libvapours/include/vapours/results/results_common.hpp b/libraries/libvapours/include/vapours/results/results_common.hpp index ed34f13d1..cb977ab99 100644 --- a/libraries/libvapours/include/vapours/results/results_common.hpp +++ b/libraries/libvapours/include/vapours/results/results_common.hpp @@ -57,8 +57,8 @@ namespace ams { using BaseType = typename ResultTraits::BaseType; static constexpr BaseType SuccessValue = ResultTraits::SuccessValue; public: - constexpr inline BaseType GetModule() const { return ResultTraits::GetModuleFromValue(static_cast(this)->GetValue()); } - constexpr inline BaseType GetDescription() const { return ResultTraits::GetDescriptionFromValue(static_cast(this)->GetValue()); } + constexpr ALWAYS_INLINE BaseType GetModule() const { return ResultTraits::GetModuleFromValue(static_cast(this)->GetValue()); } + constexpr ALWAYS_INLINE BaseType GetDescription() const { return ResultTraits::GetDescriptionFromValue(static_cast(this)->GetValue()); } }; class ResultConstructor; @@ -81,15 +81,15 @@ namespace ams { /* TODO: It sure would be nice to make this private. 
*/ constexpr Result(typename Base::BaseType v) : value(v) { static_assert(std::is_same::value); } - constexpr inline operator ResultSuccess() const; + constexpr ALWAYS_INLINE operator ResultSuccess() const; NX_CONSTEXPR bool CanAccept(Result result) { return true; } - constexpr inline bool IsSuccess() const { return this->GetValue() == Base::SuccessValue; } - constexpr inline bool IsFailure() const { return !this->IsSuccess(); } - constexpr inline typename Base::BaseType GetModule() const { return Base::GetModule(); } - constexpr inline typename Base::BaseType GetDescription() const { return Base::GetDescription(); } + constexpr ALWAYS_INLINE bool IsSuccess() const { return this->GetValue() == Base::SuccessValue; } + constexpr ALWAYS_INLINE bool IsFailure() const { return !this->IsSuccess(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetModule() const { return Base::GetModule(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetDescription() const { return Base::GetDescription(); } - constexpr inline typename Base::BaseType GetValue() const { return this->value; } + constexpr ALWAYS_INLINE typename Base::BaseType GetValue() const { return this->value; } }; static_assert(sizeof(Result) == sizeof(Result::Base::BaseType), "sizeof(Result) == sizeof(Result::Base::BaseType)"); static_assert(std::is_trivially_destructible::value, "std::is_trivially_destructible::value"); @@ -98,12 +98,12 @@ namespace ams { class ResultConstructor { public: - static constexpr inline Result MakeResult(ResultTraits::BaseType value) { + static constexpr ALWAYS_INLINE Result MakeResult(ResultTraits::BaseType value) { return Result(value); } }; - constexpr inline Result MakeResult(ResultTraits::BaseType value) { + constexpr ALWAYS_INLINE Result MakeResult(ResultTraits::BaseType value) { return ResultConstructor::MakeResult(value); } @@ -116,12 +116,12 @@ namespace ams { constexpr operator Result() const { return result::impl::MakeResult(Base::SuccessValue); } NX_CONSTEXPR bool 
CanAccept(Result result) { return result.IsSuccess(); } - constexpr inline bool IsSuccess() const { return true; } - constexpr inline bool IsFailure() const { return !this->IsSuccess(); } - constexpr inline typename Base::BaseType GetModule() const { return Base::GetModule(); } - constexpr inline typename Base::BaseType GetDescription() const { return Base::GetDescription(); } + constexpr ALWAYS_INLINE bool IsSuccess() const { return true; } + constexpr ALWAYS_INLINE bool IsFailure() const { return !this->IsSuccess(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetModule() const { return Base::GetModule(); } + constexpr ALWAYS_INLINE typename Base::BaseType GetDescription() const { return Base::GetDescription(); } - constexpr inline typename Base::BaseType GetValue() const { return Base::SuccessValue; } + constexpr ALWAYS_INLINE typename Base::BaseType GetValue() const { return Base::SuccessValue; } }; namespace result::impl { @@ -130,7 +130,7 @@ namespace ams { } - constexpr inline Result::operator ResultSuccess() const { + constexpr ALWAYS_INLINE Result::operator ResultSuccess() const { if (!ResultSuccess::CanAccept(*this)) { result::impl::OnResultAssertion(*this); } @@ -151,10 +151,10 @@ namespace ams { constexpr operator Result() const { return MakeResult(Value); } constexpr operator ResultSuccess() const { OnResultAssertion(Value); } - constexpr inline bool IsSuccess() const { return false; } - constexpr inline bool IsFailure() const { return !this->IsSuccess(); } + constexpr ALWAYS_INLINE bool IsSuccess() const { return false; } + constexpr ALWAYS_INLINE bool IsFailure() const { return !this->IsSuccess(); } - constexpr inline typename Base::BaseType GetValue() const { return Value; } + constexpr ALWAYS_INLINE typename Base::BaseType GetValue() const { return Value; } }; template diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 34dfc3b3d..3bd20e6c8 100644 
--- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -275,6 +275,8 @@ namespace ams::svc { ThreadActivity_Paused = 1, }; + constexpr size_t ThreadLocalRegionSize = 0x200; + /* Process types. */ enum ProcessInfoType : u32 { ProcessInfoType_ProcessState = 0, From e25a4ca8d74174b06291a5b079793acb1c791993 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 30 Jan 2020 17:07:08 -0800 Subject: [PATCH 23/97] kern: implement KAffinityMask --- .../mesosphere/kern_k_affinity_mask.hpp | 61 +++++++++++++++++++ .../include/mesosphere/kern_k_thread.hpp | 1 + 2 files changed, 62 insertions(+) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp new file mode 100644 index 000000000..a3a263fba --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + class KAffinityMask { + private: + static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1; + private: + u64 mask; + private: + static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) { + MESOSPHERE_ASSERT(0 <= core && core < static_cast(cpu::NumCores)); + return (1ul << core); + } + public: + constexpr ALWAYS_INLINE KAffinityMask() : mask(0) { MESOSPHERE_ASSERT_THIS(); } + + constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return this->mask; } + + constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) { + MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0); + this->mask = new_mask; + } + + constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const { + return this->mask & GetCoreBit(core); + } + + constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) { + MESOSPHERE_ASSERT(0 <= core && core < static_cast(cpu::NumCores)); + + if (set) { + this->mask |= GetCoreBit(core); + } else { + this->mask &= ~GetCoreBit(core); + } + } + + constexpr ALWAYS_INLINE void SetAll() { + this->mask = AllowedAffinityMask; + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index a34f4e800..64d8c1888 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -16,6 +16,7 @@ #pragma once #include #include +#include namespace ams::kern { From e1adbb6dbafee45fe01f5c47554d2ed19f918e84 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 30 Jan 2020 17:45:54 -0800 Subject: [PATCH 24/97] util: add BitSet --- libraries/libvapours/include/vapours/util.hpp | 1 + .../include/vapours/util/util_bitset.hpp | 91 +++++++++++++++++++ 2 files changed, 92 insertions(+) create mode 100644 libraries/libvapours/include/vapours/util/util_bitset.hpp diff --git a/libraries/libvapours/include/vapours/util.hpp 
b/libraries/libvapours/include/vapours/util.hpp index 53bf9eac3..5656d33e3 100644 --- a/libraries/libvapours/include/vapours/util.hpp +++ b/libraries/libvapours/include/vapours/util.hpp @@ -21,6 +21,7 @@ #include "util/util_size.hpp" #include "util/util_fourcc.hpp" #include "util/util_bitpack.hpp" +#include "util/util_bitset.hpp" #include "util/util_scope_guard.hpp" #include "util/util_typed_storage.hpp" #include "util/util_intrusive_list.hpp" diff --git a/libraries/libvapours/include/vapours/util/util_bitset.hpp b/libraries/libvapours/include/vapours/util/util_bitset.hpp new file mode 100644 index 000000000..e3f9ae732 --- /dev/null +++ b/libraries/libvapours/include/vapours/util/util_bitset.hpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#pragma once +#include "../defines.hpp" + +namespace ams::util { + + namespace impl { + + template + class BitSet { + private: + static_assert(std::is_integral::value); + static_assert(std::is_unsigned::value); + static_assert(sizeof(Storage) <= sizeof(u64)); + + static constexpr size_t FlagsPerWord = BITSIZEOF(Storage); + static constexpr size_t NumWords = util::AlignUp(N, FlagsPerWord) / FlagsPerWord; + + static constexpr ALWAYS_INLINE auto CountLeadingZeroImpl(Storage word) { + return __builtin_clzll(static_cast(word)) - (BITSIZEOF(unsigned long long) - FlagsPerWord); + } + + static constexpr ALWAYS_INLINE Storage GetBitMask(size_t bit) { + return Storage(1) << (FlagsPerWord - 1 - bit); + } + private: + Storage words[NumWords]; + public: + constexpr ALWAYS_INLINE BitSet() : words() { /* ... */ } + + constexpr ALWAYS_INLINE void SetBit(size_t i) { + this->words[i / FlagsPerWord] |= GetBitMask(i % FlagsPerWord); + } + + constexpr ALWAYS_INLINE void ClearBit(size_t i) { + this->words[i / FlagsPerWord] &= ~GetBitMask(i % FlagsPerWord); + } + + constexpr ALWAYS_INLINE size_t CountLeadingZero() const { + for (size_t i = 0; i < NumWords; i++) { + if (this->words[i]) { + return FlagsPerWord * i + CountLeadingZeroImpl(this->words[i]); + } + } + return FlagsPerWord * NumWords; + } + + constexpr ALWAYS_INLINE size_t GetNextSet(size_t n) const { + for (size_t i = (n + 1) / FlagsPerWord; i < NumWords; i++) { + Storage word = this->words[i]; + if (!util::IsAligned(n + 1, FlagsPerWord)) { + word &= GetBitMask(n % FlagsPerWord) - 1; + } + if (word) { + return FlagsPerWord * i + CountLeadingZeroImpl(word); + } + } + return FlagsPerWord * NumWords; + } + }; + + } + + template + using BitSet8 = impl::BitSet; + + template + using BitSet16 = impl::BitSet; + + template + using BitSet32 = impl::BitSet; + + template + using BitSet64 = impl::BitSet; + +} From d262ff92ccefe4299097c00e0d3f0fac8044d7b1 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 30 Jan 2020 20:56:24 
-0800 Subject: [PATCH 25/97] kern: implement KPriorityQueue --- .../mesosphere/kern_k_priority_queue.hpp | 424 ++++++++++++++++++ .../include/mesosphere/kern_k_scheduler.hpp | 7 + .../include/mesosphere/kern_k_thread.hpp | 23 + .../include/vapours/svc/svc_types_common.hpp | 3 + 4 files changed, 457 insertions(+) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp new file mode 100644 index 000000000..7da585920 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::kern { + + /* + TODO: C++20 + + template + concept KPriorityQueueAffinityMask = !std::is_reference::value && requires (T &t) { + { t.GetAffinityMask() } -> std::convertible_to; + { t.SetAffinityMask(std::declval()) }; + + { t.GetAffinity(std::declval()) } -> std::same_as; + { t.SetAffinity(std::declval(), std::declval()) }; + { t.SetAll() }; + }; + + template + concept KPriorityQueueMember = !std::is_reference::value && requires (T &t) { + { typename T::QueueEntry() }; + { (typename T::QueueEntry()).Initialize() }; + { (typename T::QueueEntry()).SetPrev(std::addressof(t)) }; + { (typename T::QueueEntry()).SetNext(std::addressof(t)) }; + { (typename T::QueueEntry()).GetNext() } -> std::same_as; + { (typename T::QueueEntry()).GetPrev() } -> std::same_as; + { t.GetPriorityQueueEntry(std::declval()) } -> std::same_as; + + { t.GetAffinityMask() }; + { typename std::remove_cvref::type() } -> KPriorityQueueAffinityMask; + + { t.GetActiveCore() } -> std::convertible_to; + { t.GetPriority() } -> std::convertible_to; + }; + */ + + + template /* TODO C++20: requires KPriorityQueueMember */ + class KPriorityQueue { + public: + using AffinityMaskType = typename std::remove_cv().GetAffinityMask())>::type>::type; + + static_assert(LowestPriority >= 0); + static_assert(HighestPriority >= 0); + static_assert(LowestPriority >= HighestPriority); + static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1; + static constexpr size_t NumCores = _NumCores; + + static constexpr ALWAYS_INLINE bool IsValidCore(s32 core) { + return 0 <= core && core < static_cast(NumCores); + } + + static constexpr ALWAYS_INLINE bool IsValidPriority(s32 priority) { + return HighestPriority <= priority && priority <= LowestPriority + 1; + } + private: + using Entry = typename Member::QueueEntry; + public: + class KPerCoreQueue { + private: + Entry root[NumCores]; + public: + constexpr ALWAYS_INLINE KPerCoreQueue() : root() { + for (size_t i = 0; 
i < NumCores; i++) { + this->root[i].Initialize(); + } + } + + constexpr ALWAYS_INLINE bool PushBack(s32 core, Member *member) { + /* Get the entry associated with the member. */ + Entry &member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the end of the queue. */ + Member *tail = this->root[core].GetPrev(); + Entry &tail_entry = (tail != nullptr) ? tail.GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(tail); + member_entry.SetNext(nullptr); + tail_entry.SetNext(member); + this->root[core].SetPrev(member); + + return (tail == nullptr); + } + + constexpr ALWAYS_INLINE bool PushFront(s32 core, Member *member) { + /* Get the entry associated with the member. */ + Entry &member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entry associated with the front of the queue. */ + Member *head = this->root[core].GetNext(); + Entry &head_entry = (head != nullptr) ? head.GetPriorityQueueEntry(core) : this->root[core]; + + /* Link the entries. */ + member_entry.SetPrev(nullptr); + member_entry.SetNext(head); + head.SetPrev(member); + this->root[core].SetNext(member); + + return (head == nullptr); + } + + constexpr ALWAYS_INLINE bool Remove(s32 core, Member *member) { + /* Get the entry associated with the member. */ + Entry &member_entry = member->GetPriorityQueueEntry(core); + + /* Get the entries associated with next and prev. */ + Member *prev = member_entry.GetPrev(); + Member *next = member_entry.GetNext(); + Entry &prev_entry = (prev != nullptr) ? prev.GetPriorityQueueEntry(core) : this->root[core]; + Entry &next_entry = (next != nullptr) ? next.GetPriorityQueueEntry(core) : this->root[core]; + + /* Unlink. 
*/ + prev_entry.SetNext(next); + next_entry.SetPrev(prev); + + return (this->root[core].next == nullptr); + } + + constexpr ALWAYS_INLINE Member *GetFront(s32 core) const { + return this->root[core].GetNext(); + } + }; + + class KPriorityQueueImpl { + private: + KPerCoreQueue queues[NumPriority]; + util::BitSet64 available_priorities[NumCores]; + public: + constexpr ALWAYS_INLINE KPriorityQueueImpl() : queues(), available_priorities() { /* ... */ } + + constexpr ALWAYS_INLINE void PushBack(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + if (this->queues[priority].PushBack(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr ALWAYS_INLINE void PushFront(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + if (this->queues[priority].PushFront(core, member)) { + this->available_priorities[core].SetBit(priority); + } + } + } + + constexpr ALWAYS_INLINE void Remove(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + if (this->queues[priority].Remove(core, member)) { + this->available_priorities.ClearBit(priority); + } + } + } + + constexpr ALWAYS_INLINE Member *GetFront(s32 core) const { + MESOSPHERE_ASSERT(IsValidCore(core)); + + const s32 priority = this->available_priorities[core].CountLeadingZero(); + if (AMS_LIKELY(priority <= LowestPriority)) { + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr ALWAYS_INLINE Member *GetFront(s32 priority, s32 core) const { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) 
{ + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + + constexpr ALWAYS_INLINE Member *GetNext(s32 core, const Member *member) const { + MESOSPHERE_ASSERT(IsValidCore(core)); + + Member *next = member->GetPriorityQueueEntry(core).GetNext(); + if (next == nullptr) { + const s32 priority = this->available_priorities[core].GetNextSet(member->GetPriority()); + if (AMS_LIKELY(priority <= LowestPriority)) { + next = this->queues[priority].GetFront(core); + } + } + return next; + } + + constexpr ALWAYS_INLINE void MoveToFront(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushFront(core, member); + } + } + + constexpr ALWAYS_INLINE Member *MoveToBack(s32 priority, s32 core, Member *member) { + MESOSPHERE_ASSERT(IsValidCore(core)); + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + if (AMS_LIKELY(priority <= LowestPriority)) { + this->queues[priority].Remove(core, member); + this->queues[priority].PushBack(core, member); + return this->queues[priority].GetFront(core); + } else { + return nullptr; + } + } + }; + private: + KPriorityQueueImpl scheduled_queue; + KPriorityQueueImpl suggested_queue; + private: + constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) { + affinity &= ~(u64(1ul) << core); + } + + constexpr ALWAYS_INLINE s32 GetNextCore(u64 &affinity) { + const s32 core = __builtin_ctzll(static_cast(affinity)); + ClearAffinityBit(core); + return core; + } + + constexpr ALWAYS_INLINE void PushBack(s32 priority, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. 
*/ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushBack(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr ALWAYS_INLINE void PushFront(s32 priority, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + /* Push onto the scheduled queue for its core, if we can. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.PushFront(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* And suggest the thread for all other cores. */ + /* Note: Nintendo pushes onto the back of the suggested queue, not the front. */ + while (affinity) { + this->suggested_queue.PushBack(priority, GetNextCore(affinity), member); + } + } + + constexpr ALWAYS_INLINE void Remove(s32 priority, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(priority)); + + /* Remove from the scheduled queue for its core. */ + u64 affinity = member->GetAffinityMask().GetAffinityMask(); + if (const s32 core = member->GetActiveCore(); core >= 0) { + this->scheduled_queue.Remove(priority, core, member); + ClearAffinityBit(affinity, core); + } + + /* Remove from the suggested queue for all other cores. */ + while (affinity) { + this->suggested_queue.Remove(priority, GetNextCore(affinity), member); + } + } + public: + constexpr ALWAYS_INLINE KPriorityQueue() : scheduled_queue(), suggested_queue() { /* ... */ } + + /* Getters. 
*/ + constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core) const { + return this->scheduled_queue.GetFront(core); + } + + constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core, s32 priority) const { + return this->scheduled_queue.GetFront(core, priority); + } + + constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core) const { + return this->suggested_queue.GetFront(core); + } + + constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core, s32 priority) const { + return this->suggested_queue.GetFront(core, priority); + } + + constexpr ALWAYS_INLINE Member *GetScheduledNext(s32 core, const Member *member) const { + return this->scheduled_queue.GetNext(core, member); + } + + constexpr ALWAYS_INLINE Member *GetSuggestedNext(s32 core, const Member *member) const { + return this->suggested_queue.GetNext(core, member); + } + + constexpr ALWAYS_INLINE Member *GetSamePriorityNext(s32 core, const Member *member) const { + return member->GetPriorityQueueEntry(core).GetNext(); + } + + /* Mutators. */ + constexpr ALWAYS_INLINE void PushBack(Member *member) { + this->PushBack(member, member->GetPriority()); + } + + constexpr ALWAYS_INLINE void Remove(Member *member) { + this->Remove(member, member->GetPriority()); + } + + constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) { + this->scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member); + } + + constexpr ALWAYS_INLINE void MoveToScheduledBack(Member *member) { + this->scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member); + } + + /* First class fancy operations. */ + constexpr ALWAYS_INLINE void ChangePriority(s32 prev_priority, bool is_running, Member *member) { + MESOSPHERE_ASSERT(IsValidPriority(prev_priority)); + + /* Remove the member from the queues. */ + const s32 new_priority = member->GetPriority(); + this->Remove(prev_priority, member); + + /* And enqueue. If the member is running, we want to keep it running. 
*/ + if (is_running) { + this->PushFront(new_priority, member); + } else { + this->PushBack(new_priority, member); + } + } + + constexpr ALWAYS_INLINE void ChangeAffinityMask(s32 prev_core, const AffinityMaskType &prev_affinity, Member *member) { + /* Get the new information. */ + const s32 priority = member->GetPriority(); + const AffinityMaskType &new_affinity = member->GetAffinityMask(); + const s32 new_core = member->GetActiveCore(); + + /* Remove the member from all queues it was in before. */ + for (s32 core = 0; core < static_cast(NumCores); core++) { + if (prev_affinity.GetAffinity(core)) { + if (core == prev_core) { + this->scheduled_queue.Remove(priority, core, member); + } else { + this->suggested_queue.Remove(priority, core, member); + } + } + } + + /* And add the member to all queues it should be in now. */ + for (s32 core = 0; core < static_cast(NumCores); core++) { + if (prev_affinity.GetAffinity(core)) { + if (core == new_core) { + this->scheduled_queue.PushBack(priority, core, member); + } else { + this->suggested_queue.PushBack(priority, core, member); + } + } + } + } + + constexpr ALWAYS_INLINE void ChangeCore(s32 prev_core, Member *member, bool to_front = false) { + /* Get the new information. */ + const s32 new_core = member->GetActiveCore(); + const s32 priority = member->GetPriority(); + + /* We don't need to do anything if the core is the same. */ + if (prev_core != new_core) { + /* Remove from the scheduled queue for the previous core. */ + if (prev_core >= 0) { + this->scheduled_queue.Remove(priority, prev_core, member); + } + + /* Remove from the suggested queue and add to the scheduled queue for the new core. */ + if (new_core >= 0) { + this->suggested_queue.Remove(priority, prev_core, member); + if (to_front) { + this->scheduled_queue.PushFront(priority, new_core, member); + } else { + this->scheduled_queue.PushBack(priority, new_core, member); + } + } + + /* Add to the suggested queue for the previous core. 
*/ + if (prev_core >= 0) { + this->suggested_queue.PushBack(priority, prev_core, member); + } + } + } + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp index 7476d1aea..620566b46 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp @@ -14,10 +14,17 @@ * along with this program. If not, see . */ #pragma once +#include #include +#include namespace ams::kern { + using KSchedulerPriorityQueue = KPriorityQueue; + static_assert(std::is_same::value); + static_assert(KSchedulerPriorityQueue::NumCores == cpu::NumCores); + static_assert(KSchedulerPriorityQueue::NumPriority == BITSIZEOF(u64)); + class KScheduler { NON_COPYABLE(KScheduler); NON_MOVEABLE(KScheduler); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 64d8c1888..49155b7ca 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -34,6 +34,29 @@ namespace ams::kern { void *context; /* TODO: KThreadContext * */ }; static_assert(alignof(StackParameters) == 0x10); + + struct QueueEntry { + private: + KThread *prev; + KThread *next; + public: + constexpr ALWAYS_INLINE QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ } + + constexpr ALWAYS_INLINE KThread *GetPrev() const { return this->prev; } + constexpr ALWAYS_INLINE KThread *GetNext() const { return this->next; } + constexpr ALWAYS_INLINE void SetPrev(KThread *t) { this->prev = t; } + constexpr ALWAYS_INLINE void SetNext(KThread *t) { this->next = t; } + }; + private: + /* TODO: Other members. These are placeholder to get KScheduler to compile. */ + KAffinityMask affinity_mask; + public: + constexpr KThread() : KAutoObjectWithSlabHeapAndContainer(), affinity_mask() { /* ... 
*/ } + + constexpr ALWAYS_INLINE const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } + public: + static void PostDestroy(uintptr_t arg); + /* TODO: This is a placeholder definition. */ }; diff --git a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp index 3bd20e6c8..d46f3ae8f 100644 --- a/libraries/libvapours/include/vapours/svc/svc_types_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_types_common.hpp @@ -277,6 +277,9 @@ namespace ams::svc { constexpr size_t ThreadLocalRegionSize = 0x200; + constexpr s32 LowestThreadPriority = 63; + constexpr s32 HighestThreadPriority = 0; + /* Process types. */ enum ProcessInfoType : u32 { ProcessInfoType_ProcessState = 0, From 08cb370a4568d1f902b938d24e8df899f92a96ea Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Thu, 30 Jan 2020 22:46:18 -0800 Subject: [PATCH 26/97] kern: implement KThreadContext --- .../libmesosphere/include/mesosphere.hpp | 1 + .../arch/arm64/kern_k_exception_context.hpp | 31 ++++ .../arch/arm64/kern_k_thread_context.hpp | 68 +++++++++ .../mesosphere/kern_k_exception_context.hpp | 26 ++++ .../include/mesosphere/kern_k_thread.hpp | 3 +- .../mesosphere/kern_k_thread_context.hpp | 26 ++++ .../arch/arm64/kern_k_thread_context.cpp | 131 ++++++++++++++++ .../arch/arm64/kern_k_thread_context_asm.s | 143 ++++++++++++++++++ 8 files changed, 428 insertions(+), 1 deletion(-) create mode 100644 libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp create mode 100644 libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp create mode 100644 
libraries/libmesosphere/source/arch/arm64/kern_k_thread_context_asm.s diff --git a/libraries/libmesosphere/include/mesosphere.hpp b/libraries/libmesosphere/include/mesosphere.hpp index 1131b930d..98a126a9f 100644 --- a/libraries/libmesosphere/include/mesosphere.hpp +++ b/libraries/libmesosphere/include/mesosphere.hpp @@ -25,6 +25,7 @@ /* Primitive types. */ #include #include +#include /* Core pre-initialization includes. */ #include diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp new file mode 100644 index 000000000..714741086 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include + +namespace ams::kern::arm64 { + + struct KExceptionContext { + u64 x[(30 - 0) + 1]; + u64 sp; + u64 pc; + u64 psr; + u64 tpidr; + u64 reserved; + }; + static_assert(sizeof(KExceptionContext) == 0x120); + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp new file mode 100644 index 000000000..9fb2b8ab5 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include + +namespace ams::kern { + + class KThread; + +} + +namespace ams::kern::arm64 { + + class KThreadContext { + public: + static constexpr size_t NumCalleeSavedRegisters = (29 - 19) + 1; + static constexpr size_t NumFpuRegisters = 32; + private: + union { + u64 registers[NumCalleeSavedRegisters]; + struct { + u64 x19; + u64 x20; + u64 x21; + u64 x22; + u64 x23; + u64 x24; + u64 x25; + u64 x26; + u64 x27; + u64 x28; + u64 x29; + }; + } callee_saved; + u64 lr; + u64 sp; + u64 cpacr; + u64 fpcr; + u64 fpsr; + alignas(0x10) u128 fpu_registers[NumFpuRegisters]; + bool locked; + private: + static void RestoreFpuRegisters64(const KThreadContext &); + static void RestoreFpuRegisters32(const KThreadContext &); + public: + constexpr explicit KThreadContext() : callee_saved(), lr(), sp(), cpacr(), fpcr(), fpsr(), fpu_registers(), locked() { /* ... */ } + + Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main); + Result Finalize(); + + /* TODO: More methods (especially FPU management) */ + }; + +} \ No newline at end of file diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp new file mode 100644 index 000000000..3edace7de --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once + +#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH + #include + + namespace ams::kern { + using ams::kern::arm64::KExceptionContext; + } +#else + #error "Unknown board for KExceptionContext" +#endif diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 49155b7ca..391019f87 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -17,6 +17,7 @@ #include #include #include +#include namespace ams::kern { @@ -31,7 +32,7 @@ namespace ams::kern { bool is_in_exception_handler; bool has_exception_svc_perms; s32 disable_count; - void *context; /* TODO: KThreadContext * */ + KThreadContext *context; }; static_assert(alignof(StackParameters) == 0x10); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp new file mode 100644 index 000000000..e6c695c1a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread_context.hpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once + +#ifdef ATMOSPHERE_BOARD_NINTENDO_SWITCH + #include + + namespace ams::kern { + using ams::kern::arm64::KThreadContext; + } +#else + #error "Unknown board for KThreadContext" +#endif diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp new file mode 100644 index 000000000..d9950f1e8 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern::arm64 { + + /* These are implemented elsewhere (asm). */ + void UserModeThreadStarter(); + void SupervisorModeThreadStarter(); + + void OnThreadStart() { + /* TODO: Implement this. */ + } + + namespace { + + uintptr_t SetupStackForUserModeThreadStarter(KVirtualAddress pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_64_bit) { + /* NOTE: Stack layout on entry looks like following: */ + /* SP */ + /* | */ + /* v */ + /* | KExceptionContext (size 0x120) | KThread::StackParameters (size 0x30) | */ + KExceptionContext *ctx = GetPointer(k_sp) - 1; + + /* Clear context. */ + std::memset(ctx, 0, sizeof(*ctx)); + + /* Set PC and argument. */ + ctx->pc = GetInteger(pc); + ctx->x[0] = arg; + + /* Set PSR. 
*/ + if (is_64_bit) { + ctx->psr = 0; + } else { + constexpr u64 PsrArmValue = 0x20; + constexpr u64 PsrThumbValue = 0x00; + ctx->psr = ((pc & 1) == 0 ? PsrArmValue : PsrThumbValue) | (0x10); + } + + /* Set stack pointer. */ + if (is_64_bit) { + ctx->sp = GetInteger(u_sp); + } else { + ctx->x[13] = GetInteger(u_sp); + } + + return reinterpret_cast(ctx); + } + + uintptr_t SetupStackForSupervisorModeThreadStarter(KVirtualAddress pc, KVirtualAddress sp, uintptr_t arg) { + /* NOTE: Stack layout on entry looks like following: */ + /* SP */ + /* | */ + /* v */ + /* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x30) | */ + static_assert(sizeof(KThread::StackParameters) == 0x30); + + u64 *stack = GetPointer(sp); + *(--stack) = GetInteger(pc); + *(--stack) = arg; + return reinterpret_cast(stack); + } + + } + + Result KThreadContext::Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main) { + MESOSPHERE_ASSERT(k_sp != Null); + + /* Ensure that the stack pointers are aligned. */ + k_sp = util::AlignDown(GetInteger(k_sp), 16); + u_sp = util::AlignDown(GetInteger(u_sp), 16); + + /* Determine LR and SP. */ + if (is_user) { + /* Usermode thread. */ + this->lr = reinterpret_cast(::ams::kern::arm64::UserModeThreadStarter); + this->sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit); + } else { + /* Kernel thread. */ + MESOSPHERE_ASSERT(is_64_bit); + + if (is_main) { + /* Main thread. */ + this->lr = GetInteger(u_pc); + this->sp = GetInteger(k_sp); + } else { + /* Generic Kernel thread. */ + this->lr = reinterpret_cast(::ams::kern::arm64::SupervisorModeThreadStarter); + this->sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg); + } + } + + /* Clear callee-saved registers. */ + for (size_t i = 0; i < util::size(this->callee_saved.registers); i++) { + this->callee_saved.registers[i] = 0; + } + + /* Clear FPU state. 
*/ + this->fpcr = 0; + this->fpsr = 0; + this->cpacr = 0; + for (size_t i = 0; i < util::size(this->fpu_registers); i++) { + this->fpu_registers[i] = 0; + } + + /* Lock the context, if we're a main thread. */ + this->locked = is_main; + + return ResultSuccess(); + } + + Result KThreadContext::Finalize() { + /* This doesn't actually do anything. */ + return ResultSuccess(); + } + +} diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context_asm.s b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context_asm.s new file mode 100644 index 000000000..730712bf1 --- /dev/null +++ b/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context_asm.s @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +/* ams::kern::arm64::UserModeThreadStarter() */ +.section .text._ZN3ams4kern5arm6421UserModeThreadStarterEv, "ax", %progbits +.global _ZN3ams4kern5arm6421UserModeThreadStarterEv +.type _ZN3ams4kern5arm6421UserModeThreadStarterEv, %function +_ZN3ams4kern5arm6421UserModeThreadStarterEv: + /* NOTE: Stack layout on entry looks like following: */ + /* SP */ + /* | */ + /* v */ + /* | KExceptionContext (size 0x120) | KThread::StackParameters (size 0x30) | */ + + /* Clear the disable count for this thread's stack parameters. 
*/ + str wzr, [sp, #(0x120 + 0x18)] + + /* Call ams::kern::arm64::OnThreadStart() */ + bl _ZN3ams4kern5arm6413OnThreadStartEv + + /* Restore thread state from the KExceptionContext on stack */ + ldp x30, x19, [sp, #(8 * 30)] /* x30 = lr, x19 = sp */ + ldp x20, x21, [sp, #(8 * 30 + 16)] /* x20 = pc, x21 = psr */ + ldr x22, [sp, #(8 * 30 + 32)] /* x22 = tpidr */ + + msr sp_el0, x19 + msr elr_el1, x20 + msr spsr_el1, x21 + msr tpidr_el1, x22 + + ldp x0, x1, [sp, #(8 * 0)] + ldp x2, x3, [sp, #(8 * 2)] + ldp x4, x5, [sp, #(8 * 4)] + ldp x6, x7, [sp, #(8 * 6)] + ldp x8, x9, [sp, #(8 * 8)] + ldp x10, x11, [sp, #(8 * 10)] + ldp x12, x13, [sp, #(8 * 12)] + ldp x14, x15, [sp, #(8 * 14)] + ldp x16, x17, [sp, #(8 * 16)] + ldp x18, x19, [sp, #(8 * 18)] + ldp x20, x21, [sp, #(8 * 20)] + ldp x22, x23, [sp, #(8 * 22)] + ldp x24, x25, [sp, #(8 * 24)] + ldp x26, x27, [sp, #(8 * 26)] + ldp x28, x29, [sp, #(8 * 28)] + + /* Increment stack pointer above the KExceptionContext */ + add sp, sp, #0x120 + + /* Return to EL0 */ + eret + +/* ams::kern::arm64::SupervisorModeThreadStarter() */ +.section .text._ZN3ams4kern5arm6427SupervisorModeThreadStarterEv, "ax", %progbits +.global _ZN3ams4kern5arm6427SupervisorModeThreadStarterEv +.type _ZN3ams4kern5arm6427SupervisorModeThreadStarterEv, %function +_ZN3ams4kern5arm6427SupervisorModeThreadStarterEv: + /* NOTE: Stack layout on entry looks like following: */ + /* SP */ + /* | */ + /* v */ + /* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x30) | */ + + /* Load the argument and entrypoint. */ + ldp x0, x1, [sp], #0x10 + + /* Clear the disable count for this thread's stack parameters. */ + str wzr, [sp, #(0x18)] + + /* Mask I bit in DAIF */ + msr daifclr, #2 + br x1 + + /* This should never execute, but Nintendo includes an ERET here. 
*/ + eret + + +/* ams::kern::arm64::KThreadContext::RestoreFpuRegisters64(const KThreadContext &) */ +.section .text._ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters64ERKS2_, "ax", %progbits +.global _ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters64ERKS2_ +.type _ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters64ERKS2_, %function +_ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters64ERKS2_: + /* Load and restore FPCR and FPSR from the context. */ + ldr x1, [x0, #0x70] + msr fpcr, x1 + ldr x1, [x0, #0x78] + msr fpsr, x1 + + /* Restore the FPU registers. */ + ldp q0, q1, [sp, #(16 * 0 + 0x80)] + ldp q2, q3, [sp, #(16 * 2 + 0x80)] + ldp q4, q5, [sp, #(16 * 4 + 0x80)] + ldp q6, q7, [sp, #(16 * 6 + 0x80)] + ldp q8, q9, [sp, #(16 * 8 + 0x80)] + ldp q10, q11, [sp, #(16 * 10 + 0x80)] + ldp q12, q13, [sp, #(16 * 12 + 0x80)] + ldp q14, q15, [sp, #(16 * 14 + 0x80)] + ldp q16, q17, [sp, #(16 * 16 + 0x80)] + ldp q18, q19, [sp, #(16 * 18 + 0x80)] + ldp q20, q21, [sp, #(16 * 20 + 0x80)] + ldp q22, q23, [sp, #(16 * 22 + 0x80)] + ldp q24, q25, [sp, #(16 * 24 + 0x80)] + ldp q26, q27, [sp, #(16 * 26 + 0x80)] + ldp q28, q29, [sp, #(16 * 28 + 0x80)] + ldp q30, q31, [sp, #(16 * 30 + 0x80)] + + ret + +/* ams::kern::arm64::KThreadContext::RestoreFpuRegisters32(const KThreadContext &) */ +.section .text._ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters32ERKS2_, "ax", %progbits +.global _ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters32ERKS2_ +.type _ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters32ERKS2_, %function +_ZN3ams4kern5arm6414KThreadContext21RestoreFpuRegisters32ERKS2_: + /* Load and restore FPCR and FPSR from the context. */ + ldr x1, [x0, #0x70] + msr fpcr, x1 + ldr x1, [x0, #0x78] + msr fpsr, x1 + + /* Restore the FPU registers. 
*/ + ldp q0, q1, [sp, #(16 * 0 + 0x80)] + ldp q2, q3, [sp, #(16 * 2 + 0x80)] + ldp q4, q5, [sp, #(16 * 4 + 0x80)] + ldp q6, q7, [sp, #(16 * 6 + 0x80)] + ldp q8, q9, [sp, #(16 * 8 + 0x80)] + ldp q10, q11, [sp, #(16 * 10 + 0x80)] + ldp q12, q13, [sp, #(16 * 12 + 0x80)] + ldp q14, q15, [sp, #(16 * 14 + 0x80)] + + ret From d9db723bc821e5ffdd6ec9205e227513c043cf87 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 00:07:06 -0800 Subject: [PATCH 27/97] kern: add all kthread members --- .../include/mesosphere/kern_k_thread.hpp | 150 ++++++++++++++++-- .../include/vapours/svc/svc_common.hpp | 2 + .../vapours/util/util_intrusive_list.hpp | 32 ++++ .../vapours/util/util_parent_of_member.hpp | 18 ++- 4 files changed, 187 insertions(+), 15 deletions(-) diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 391019f87..02a8d60ba 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -18,12 +18,45 @@ #include #include #include +#include namespace ams::kern { class KThread final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject); public: + enum ThreadType : u32 { + ThreadType_Main = 0, + ThreadType_Kernel = 1, + ThreadType_HighPriority = 2, + ThreadType_User = 3, + }; + + enum SuspendType : u32 { + SuspendType_Process = 0, + SuspendType_Thread = 1, + SuspendType_Debug = 2, + }; + + enum ThreadState : u16 { + ThreadState_Initialized = 0, + ThreadState_Waiting = 1, + ThreadState_Runnable = 2, + ThreadState_Terminated = 3, + + ThreadState_SuspendShift = 4, + ThreadState_Mask = (1 << ThreadState_SuspendShift) - 1, + + ThreadState_ProcessSuspended = (1 << (SuspendType_Process + ThreadState_SuspendShift)), + ThreadState_ThreadSuspended = (1 << (SuspendType_Thread + ThreadState_SuspendShift)), + ThreadState_DebugSuspended = (1 << 
(SuspendType_Debug + ThreadState_SuspendShift)), + }; + + enum DpcFlag : u32 { + DpcFlag_Terminating = 0, + DpcFlag_Terminated = 1, + }; + struct StackParameters { alignas(0x10) u8 svc_permission[0x10]; std::atomic dpc_flags; @@ -49,37 +82,136 @@ namespace ams::kern { constexpr ALWAYS_INLINE void SetNext(KThread *t) { this->next = t; } }; private: - /* TODO: Other members. These are placeholder to get KScheduler to compile. */ - KAffinityMask affinity_mask; - public: - constexpr KThread() : KAutoObjectWithSlabHeapAndContainer(), affinity_mask() { /* ... */ } + static constexpr size_t PriorityInheritanceCountMax = 10; + union SyncObjectBuffer { + KSynchronizationObject *sync_objects[ams::svc::MaxWaitSynchronizationHandleCount]; + ams::svc::Handle handles[ams::svc::MaxWaitSynchronizationHandleCount * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))]; - constexpr ALWAYS_INLINE const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } + constexpr SyncObjectBuffer() : sync_objects() { /* ... 
*/ } + }; + static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); + private: + alignas(16) KThreadContext thread_context; + KAffinityMask affinity_mask; + u64 thread_id; + std::atomic cpu_time; + KSynchronizationObject *synced_object; + KLightLock *waiting_lock; + uintptr_t condvar_key; + uintptr_t entrypoint; + KProcessAddress arbiter_key; + KProcess *parent; + void *kernel_stack_top; + u32 *light_ipc_data; + KProcessAddress tls_address; + void *tls_heap_address; + KLightLock activity_pause_lock; + SyncObjectBuffer sync_object_buffer; + s64 schedule_count; + s64 last_scheduled_tick; + QueueEntry per_core_priority_queue_entry[cpu::NumCores]; + QueueEntry sleeping_queue_entry; + void /* TODO KThreadQueue */ *sleeping_queue; + util::IntrusiveListNode waiter_list_node; + util::IntrusiveRedBlackTreeNode condvar_arbiter_tree_node; + util::IntrusiveListNode process_list_node; + + using WaiterListTraits = util::IntrusiveListMemberTraitsDeferredAssert<&KThread::waiter_list_node>; + using WaiterList = WaiterListTraits::ListType; + + WaiterList waiter_list; + WaiterList paused_waiter_list; + KThread *lock_owner; + void /* TODO KCondVar*/ *cond_var_tree; + uintptr_t debug_params[3]; + u32 arbiter_value; + u32 suspend_request_flags; + u32 suspend_allowed_flags; + Result wait_result; + Result debug_exception_result; + s32 priority; + s32 core_id; + s32 base_priority; + s32 ideal_core_id; + s32 num_kernel_waiters; + KAffinityMask original_affinity_mask; + s32 original_ideal_core_id; + s32 num_core_migration_disables; + ThreadState thread_state; + std::atomic termination_requested; + bool ipc_cancelled; + bool wait_cancelled; + bool cancelable; + bool registered; + bool signaled; + bool initialized; + bool debug_attached; + s8 priority_inheritance_count; + bool resource_limit_release_hint; public: static void PostDestroy(uintptr_t arg); + public: + explicit KThread() /* TODO: : ? 
*/ { MESOSPHERE_ASSERT_THIS(); } + /* TODO: Is a constexpr KThread() possible? */ + private: + StackParameters &GetStackParameters() { + return *(reinterpret_cast(this->kernel_stack_top) - 1); + } + + const StackParameters &GetStackParameters() const { + return *(reinterpret_cast(this->kernel_stack_top) - 1); + } + public: + ALWAYS_INLINE s32 GetDisableDispatchCount() const { + MESOSPHERE_ASSERT_THIS(); + return GetStackParameters().disable_count; + } + + ALWAYS_INLINE void DisableDispatch() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0); + GetStackParameters().disable_count++; + } + + ALWAYS_INLINE void EnableDispatch() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() > 0); + GetStackParameters().disable_count--; + } + + public: + constexpr ALWAYS_INLINE const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } + + /* TODO: This is a placeholder definition. */ + public: + static constexpr bool IsWaiterListValid() { + return WaiterListTraits::IsValid(); + } }; + static_assert(alignof(KThread) == 0x10); + static_assert(KThread::IsWaiterListValid()); class KScopedDisableDispatch { public: explicit ALWAYS_INLINE KScopedDisableDispatch() { - /* TODO */ + GetCurrentThread().DisableDispatch(); } ALWAYS_INLINE ~KScopedDisableDispatch() { - /* TODO */ + GetCurrentThread().EnableDispatch(); } }; class KScopedEnableDispatch { public: explicit ALWAYS_INLINE KScopedEnableDispatch() { - /* TODO */ + GetCurrentThread().EnableDispatch(); } ALWAYS_INLINE ~KScopedEnableDispatch() { - /* TODO */ + GetCurrentThread().DisableDispatch(); } }; diff --git a/libraries/libvapours/include/vapours/svc/svc_common.hpp b/libraries/libvapours/include/vapours/svc/svc_common.hpp index 938490191..659bfc261 100644 --- a/libraries/libvapours/include/vapours/svc/svc_common.hpp +++ b/libraries/libvapours/include/vapours/svc/svc_common.hpp @@ -28,6 +28,8 @@ namespace ams::svc { #error 
"Unknown target for svc::Handle" #endif + static constexpr size_t MaxWaitSynchronizationHandleCount = 0x40; + enum class PseudoHandle : Handle { CurrentThread = 0xFFFF8000, CurrentProcess = 0xFFFF8001, diff --git a/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp b/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp index 3a88f6f03..c7d52e7c5 100644 --- a/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp +++ b/libraries/libvapours/include/vapours/util/util_intrusive_list.hpp @@ -566,6 +566,38 @@ namespace ams::util { static_assert(std::addressof(GetParent(GetNode(GetReference(DerivedStorage)))) == GetPointer(DerivedStorage)); }; + template> + class IntrusiveListMemberTraitsDeferredAssert; + + template + class IntrusiveListMemberTraitsDeferredAssert { + public: + using ListType = IntrusiveList; + + static constexpr bool IsValid() { + TYPED_STORAGE(Derived) DerivedStorage = {}; + return std::addressof(GetParent(GetNode(GetReference(DerivedStorage)))) == GetPointer(DerivedStorage); + } + private: + friend class IntrusiveList; + + static constexpr IntrusiveListNode &GetNode(Derived &parent) { + return parent.*Member; + } + + static constexpr IntrusiveListNode const &GetNode(Derived const &parent) { + return parent.*Member; + } + + static constexpr Derived &GetParent(IntrusiveListNode &node) { + return util::GetParentReference(&node); + } + + static constexpr Derived const &GetParent(IntrusiveListNode const &node) { + return util::GetParentReference(&node); + } + }; + template class IntrusiveListBaseNode : public IntrusiveListNode{}; diff --git a/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp b/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp index 7f7445d30..df2e0c57c 100644 --- a/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp +++ b/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp @@ -26,13 +26,15 @@ namespace ams::util { struct 
OffsetOfUnionHolder { template union UnionImpl { - using PaddingMember = std::array; + using PaddingMember = char; static constexpr size_t GetOffset() { return Offset; } + #pragma pack(push, 1) struct { PaddingMember padding[Offset]; MemberType members[(sizeof(ParentType) / sizeof(MemberType)) + 1]; } data; + #pragma pack(pop) UnionImpl next_union; }; @@ -47,12 +49,12 @@ namespace ams::util { }; template - union UnionImpl { /* Empty */ }; + union UnionImpl { /* Empty ... */ }; }; template struct OffsetOfCalculator { - using UnionHolder = typename OffsetOfUnionHolder::template UnionImpl; + using UnionHolder = typename OffsetOfUnionHolder::template UnionImpl; union Union { char c; UnionHolder first_union; @@ -81,15 +83,19 @@ namespace ams::util { const auto start = std::addressof(cur_union.data.members[0]); const auto next = GetNextAddress(start, target); + if constexpr (Offset > 0x10) { + __builtin_unreachable(); + } + if (next != target) { - if constexpr (Offset < sizeof(MemberType) / alignof(MemberType)) { + if constexpr (Offset < sizeof(MemberType) - 1) { return OffsetOfImpl(member, cur_union.next_union); } else { - std::abort(); + __builtin_unreachable(); } } - return (next - start) * sizeof(MemberType) + Offset * alignof(MemberType); + return (next - start) * sizeof(MemberType) + Offset; } From b2b1129cc035ea2321e0dec4136d22d07e54b930 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 01:53:30 -0800 Subject: [PATCH 28/97] kern: mostly implement KThread::Initialize --- .../include/mesosphere/kern_k_process.hpp | 5 + .../include/mesosphere/kern_k_thread.hpp | 58 ++++- .../include/mesosphere/kern_k_worker_task.hpp | 33 +++ .../include/mesosphere/kern_kernel.hpp | 17 +- .../mesosphere/svc/kern_svc_results.hpp | 2 + .../libmesosphere/source/kern_k_thread.cpp | 205 ++++++++++++++++++ .../libmesosphere/source/kern_kernel.cpp | 21 +- libraries/libmesosphere/source/kern_main.cpp | 4 +- .../libmesosphere/source/libc/kern_cxx.c | 27 +++ 
.../libmesosphere/source/libc/kern_new.cpp | 24 ++ .../include/vapours/results/svc_results.hpp | 2 + 11 files changed, 383 insertions(+), 15 deletions(-) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp create mode 100644 libraries/libmesosphere/source/kern_k_thread.cpp create mode 100644 libraries/libmesosphere/source/libc/kern_cxx.c create mode 100644 libraries/libmesosphere/source/libc/kern_new.cpp diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp index 2d3e2b21c..9c93e55c2 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp @@ -26,6 +26,11 @@ namespace ams::kern { class KProcess final : public KAutoObjectWithSlabHeapAndContainer { MESOSPHERE_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject); /* TODO: This is a placeholder definition. */ + public: + constexpr ALWAYS_INLINE u64 GetCoreMask() const { /* TODO */ return 0; } + constexpr ALWAYS_INLINE u64 GetPriorityMask() const { /* TODO */ return 0; } + + constexpr ALWAYS_INLINE bool Is64Bit() const { /* TODO */ return true; } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp index 02a8d60ba..3f1c58ab1 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp @@ -19,12 +19,19 @@ #include #include #include +#include +#include namespace ams::kern { - class KThread final : public KAutoObjectWithSlabHeapAndContainer { + using KThreadFunction = void (*)(uintptr_t); + + class KThread final : public KAutoObjectWithSlabHeapAndContainer, public KTimerTask, public KWorkerTask { MESOSPHERE_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject); public: + static constexpr s32 MainThreadPriority = 1; + static constexpr s32 IdleThreadPriority = 
64; + enum ThreadType : u32 { ThreadType_Main = 0, ThreadType_Kernel = 1, @@ -36,6 +43,10 @@ namespace ams::kern { SuspendType_Process = 0, SuspendType_Thread = 1, SuspendType_Debug = 2, + SuspendType_Unk3 = 3, + SuspendType_Unk4 = 4, + + SuspendType_Count, }; enum ThreadState : u16 { @@ -50,6 +61,10 @@ namespace ams::kern { ThreadState_ProcessSuspended = (1 << (SuspendType_Process + ThreadState_SuspendShift)), ThreadState_ThreadSuspended = (1 << (SuspendType_Thread + ThreadState_SuspendShift)), ThreadState_DebugSuspended = (1 << (SuspendType_Debug + ThreadState_SuspendShift)), + ThreadState_Unk3Suspended = (1 << (SuspendType_Unk3 + ThreadState_SuspendShift)), + ThreadState_Unk4Suspended = (1 << (SuspendType_Unk4 + ThreadState_SuspendShift)), + + ThreadState_SuspendFlagMask = ((1 << SuspendType_Count) - 1) << ThreadState_SuspendShift, }; enum DpcFlag : u32 { @@ -76,6 +91,11 @@ namespace ams::kern { public: constexpr ALWAYS_INLINE QueueEntry() : prev(nullptr), next(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE void Initialize() { + this->prev = nullptr; + this->next = nullptr; + } + constexpr ALWAYS_INLINE KThread *GetPrev() const { return this->prev; } constexpr ALWAYS_INLINE KThread *GetNext() const { return this->next; } constexpr ALWAYS_INLINE void SetPrev(KThread *t) { this->prev = t; } @@ -90,6 +110,8 @@ namespace ams::kern { constexpr SyncObjectBuffer() : sync_objects() { /* ... 
*/ } }; static_assert(sizeof(SyncObjectBuffer::sync_objects) == sizeof(SyncObjectBuffer::handles)); + private: + static inline std::atomic s_next_thread_id = 0; private: alignas(16) KThreadContext thread_context; KAffinityMask affinity_mask; @@ -141,25 +163,27 @@ namespace ams::kern { std::atomic termination_requested; bool ipc_cancelled; bool wait_cancelled; - bool cancelable; + bool cancellable; bool registered; bool signaled; bool initialized; bool debug_attached; s8 priority_inheritance_count; bool resource_limit_release_hint; - public: - static void PostDestroy(uintptr_t arg); public: explicit KThread() /* TODO: : ? */ { MESOSPHERE_ASSERT_THIS(); } + virtual ~KThread() { /* ... */ } /* TODO: Is a constexpr KThread() possible? */ + + Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type); + private: StackParameters &GetStackParameters() { return *(reinterpret_cast(this->kernel_stack_top) - 1); } const StackParameters &GetStackParameters() const { - return *(reinterpret_cast(this->kernel_stack_top) - 1); + return *(reinterpret_cast(this->kernel_stack_top) - 1); } public: ALWAYS_INLINE s32 GetDisableDispatchCount() const { @@ -178,13 +202,33 @@ namespace ams::kern { MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() > 0); GetStackParameters().disable_count--; } - public: constexpr ALWAYS_INLINE const KAffinityMask &GetAffinityMask() const { return this->affinity_mask; } + ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast(this->kernel_stack_top) - 1; } + ALWAYS_INLINE void *GetKernelStackTop() const { return this->kernel_stack_top; } + /* TODO: This is kind of a placeholder definition. */ - /* TODO: This is a placeholder definition. 
*/ + ALWAYS_INLINE bool IsInExceptionHandler() const { + return GetStackParameters().is_in_exception_handler; + } + + ALWAYS_INLINE void SetInExceptionHandler() { + GetStackParameters().is_in_exception_handler = true; + } + + public: + /* Overridden parent functions. */ + virtual bool IsInitialized() const override { return this->initialized; } + virtual uintptr_t GetPostDestroyArgument() const override { return reinterpret_cast(this->parent) | (this->resource_limit_release_hint ? 1 : 0); } + + static void PostDestroy(uintptr_t arg); + + virtual void Finalize() override; + virtual bool IsSignaled() const override; + virtual void OnTimer() override; + virtual void DoWorkerTask() override; public: static constexpr bool IsWaiterListValid() { return WaiterListTraits::IsValid(); diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp new file mode 100644 index 000000000..7620adb6a --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#pragma once +#include + +namespace ams::kern { + + class KWorkerTask { + private: + KWorkerTask *next_task; + public: + constexpr ALWAYS_INLINE KWorkerTask() : next_task(nullptr) { /* ... 
*/ } + + constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return this->next_task; } + constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { this->next_task = task; } + + virtual void DoWorkerTask() = 0; + }; + +} diff --git a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp index c1b95e15a..600f142e0 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp @@ -14,10 +14,13 @@ * along with this program. If not, see . */ #pragma once +#include #include +#include #include #include #include +#include namespace ams::kern { @@ -30,12 +33,22 @@ namespace ams::kern { }; private: static inline State s_state = State::Invalid; + static inline KThread s_main_threads[cpu::NumCores]; + static inline KThread s_idle_threads[cpu::NumCores]; public: - static NOINLINE void Initialize(s32 core_id); - static NOINLINE void InitializeCoreThreads(s32 core_id); + static NOINLINE void InitializeCoreLocalRegion(s32 core_id); + static NOINLINE void InitializeMainAndIdleThreads(s32 core_id); static ALWAYS_INLINE State GetState() { return s_state; } static ALWAYS_INLINE void SetState(State state) { s_state = state; } + + static ALWAYS_INLINE KThread &GetMainThread(s32 core_id) { + return s_main_threads[core_id]; + } + + static ALWAYS_INLINE KThread &GetIdleThread(s32 core_id) { + return s_idle_threads[core_id]; + } }; } diff --git a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp index 29eb82690..6ff150a7d 100644 --- a/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp +++ b/libraries/libmesosphere/include/mesosphere/svc/kern_svc_results.hpp @@ -24,6 +24,8 @@ namespace ams::kern::svc { /* 33 */ using ::ams::svc::ResultNotImplemented; + /* 57 */ using ::ams::svc::ResultNoSynchronizationObject; + /* 59 */ using 
::ams::svc::ResultThreadTerminating; /* 70 */ using ::ams::svc::ResultNoEvent; diff --git a/libraries/libmesosphere/source/kern_k_thread.cpp b/libraries/libmesosphere/source/kern_k_thread.cpp new file mode 100644 index 000000000..41c03f839 --- /dev/null +++ b/libraries/libmesosphere/source/kern_k_thread.cpp @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +namespace ams::kern { + + Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) { + /* Assert parameters are valid. */ + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(kern_stack_top != nullptr); + MESOSPHERE_ASSERT((type == ThreadType_Main) || (ams::svc::HighestThreadPriority <= prio && prio <= ams::svc::LowestThreadPriority)); + MESOSPHERE_ASSERT((owner != nullptr) || (type != ThreadType_User)); + MESOSPHERE_ASSERT(0 <= core && core < static_cast(cpu::NumCores)); + + /* First, clear the TLS address. */ + this->tls_address = Null; + + const uintptr_t kern_stack_top_address = reinterpret_cast(kern_stack_top); + + /* Next, assert things based on the type. 
*/ + switch (type) { + case ThreadType_Main: + { + MESOSPHERE_ASSERT(arg == 0); + } + [[fallthrough]]; + case ThreadType_HighPriority: + { + MESOSPHERE_ASSERT(core == GetCurrentCoreId()); + } + [[fallthrough]]; + case ThreadType_Kernel: + { + MESOSPHERE_ASSERT(user_stack_top == 0); + MESOSPHERE_ASSERT(util::IsAligned(kern_stack_top_address, PageSize)); + } + [[fallthrough]]; + case ThreadType_User: + { + MESOSPHERE_ASSERT((owner == nullptr) || (owner->GetCoreMask() | (1ul << core)) == owner->GetCoreMask()); + MESOSPHERE_ASSERT((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio)) == owner->GetPriorityMask()); + } + break; + default: + MESOSPHERE_PANIC("KThread::Initialize: Unknown ThreadType %u", static_cast(type)); + break; + } + + /* Set the ideal core ID and affinity mask. */ + this->ideal_core_id = core; + this->affinity_mask.SetAffinity(core, true); + + /* Set the thread state. */ + this->thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized; + + /* Set TLS address and TLS heap address. */ + /* NOTE: Nintendo wrote TLS address above already, but official code really does write tls address twice. */ + this->tls_address = 0; + this->tls_heap_address = 0; + + /* Set parent and condvar tree. */ + this->parent = nullptr; + this->cond_var_tree = nullptr; + + /* Set sync booleans. */ + this->signaled = false; + this->ipc_cancelled = false; + this->termination_requested = false; + this->wait_cancelled = false; + this->cancellable = false; + + /* Set core ID and wait result. */ + this->core_id = this->ideal_core_id; + this->wait_result = svc::ResultNoSynchronizationObject(); + + /* Set the stack top. */ + this->kernel_stack_top = kern_stack_top; + + /* Set priorities. */ + this->priority = prio; + this->base_priority = prio; + + /* Set sync object and waiting lock to null. */ + this->synced_object = nullptr; + this->waiting_lock = nullptr; + + /* Initialize sleeping queue. 
*/ + this->sleeping_queue_entry.Initialize(); + this->sleeping_queue = nullptr; + + /* Set suspend flags. */ + this->suspend_request_flags = 0; + this->suspend_allowed_flags = ThreadState_SuspendFlagMask; + + /* We're neither debug attached, nor are we nesting our priority inheritance. */ + this->debug_attached = false; + this->priority_inheritance_count = 0; + + /* We haven't been scheduled, and we have done no light IPC. */ + this->schedule_count = -1; + this->last_scheduled_tick = 0; + this->light_ipc_data = nullptr; + + /* We're not waiting for a lock, and we haven't disabled migration. */ + this->lock_owner = nullptr; + this->num_core_migration_disables = 0; + + /* We have no waiters, but we do have an entrypoint. */ + this->num_kernel_waiters = 0; + this->entrypoint = reinterpret_cast(func); + + /* We don't need a release (probably), and we've spent no time on the cpu. */ + this->resource_limit_release_hint = 0; + this->cpu_time = 0; + + /* Clear our stack parameters. */ + std::memset(static_cast(std::addressof(this->GetStackParameters())), 0, sizeof(StackParameters)); + + /* Setup the TLS, if needed. */ + if (type == ThreadType_User) { + /* TODO: R_TRY(owner->CreateThreadLocalRegion(&this->tls_address)); */ + /* TODO: this->tls_heap_address = owner->GetThreadLocalRegionAddress(this->tls_address); */ + std::memset(this->tls_heap_address, 0, ams::svc::ThreadLocalRegionSize); + } + + /* Set parent, if relevant. */ + if (owner != nullptr) { + this->parent = owner; + this->parent->Open(); + /* TODO: this->parent->IncrementThreadCount(); */ + } + + /* Initialize thread context. */ + constexpr bool IsDefault64Bit = sizeof(uintptr_t) == sizeof(u64); + const bool is_64_bit = this->parent ? 
this->parent->Is64Bit() : IsDefault64Bit; + const bool is_user = (type == ThreadType_User); + const bool is_main = (type == ThreadType_Main); + this->thread_context.Initialize(this->entrypoint, reinterpret_cast(this->GetStackTop()), GetInteger(user_stack_top), arg, is_user, is_64_bit, is_main); + + /* Setup the stack parameters. */ + StackParameters &sp = this->GetStackParameters(); + if (this->parent != nullptr) { + /* TODO: this->parent->CopySvcPermissionTo(pos.svc_permission); */ + } + sp.context = std::addressof(this->thread_context); + sp.disable_count = 1; + this->SetInExceptionHandler(); + + /* Set thread ID. */ + this->thread_id = s_next_thread_id++; + + /* We initialized! */ + this->initialized = true; + + /* Register ourselves with our parent process. */ + if (this->parent != nullptr) { + /* TODO: this->parent->RegisterThread(this); */ + /* TODO: if (this->parent->IsSuspended()) { this->RequestSuspend(SuspendType_Process); } */ + } + + return ResultSuccess(); + } + + void KThread::PostDestroy(uintptr_t arg) { + KProcess *owner = reinterpret_cast(arg & ~1ul); + const bool resource_limit_release_hint = (arg & 1); + if (owner != nullptr) { + /* TODO: Release from owner resource limit. */ + (void)(resource_limit_release_hint); + owner->Close(); + } else { + /* TODO: Release from system resource limit. */ + } + } + + void KThread::Finalize() { + /* TODO */ + } + + bool KThread::IsSignaled() const { + return this->signaled; + } + + void KThread::OnTimer() { + /* TODO */ + } + + void KThread::DoWorkerTask() { + /* TODO */ + } + +} diff --git a/libraries/libmesosphere/source/kern_kernel.cpp b/libraries/libmesosphere/source/kern_kernel.cpp index ea5b5a4f5..7c7c65503 100644 --- a/libraries/libmesosphere/source/kern_kernel.cpp +++ b/libraries/libmesosphere/source/kern_kernel.cpp @@ -17,7 +17,7 @@ namespace ams::kern { - void Kernel::Initialize(s32 core_id) { + void Kernel::InitializeCoreLocalRegion(s32 core_id) { /* Construct the core local region object in place. 
*/ KCoreLocalContext *clc = GetPointer(KMemoryLayout::GetCoreLocalRegionAddress()); new (clc) KCoreLocalContext; @@ -46,9 +46,22 @@ namespace ams::kern { } } - void Kernel::InitializeCoreThreads(s32 core_id) { - /* TODO: This function wants to setup the main thread and the idle thread. */ - /* It also wants to initialize the scheduler/interrupt manager/hardware timer. */ + void Kernel::InitializeMainAndIdleThreads(s32 core_id) { + /* This function wants to setup the main thread and the idle thread. */ + KThread *main_thread = std::addressof(Kernel::GetMainThread(core_id)); + void *main_thread_stack = GetVoidPointer(KMemoryLayout::GetMainStackTopAddress(core_id)); + KThread *idle_thread = std::addressof(Kernel::GetIdleThread(core_id)); + void *idle_thread_stack = GetVoidPointer(KMemoryLayout::GetIdleStackTopAddress(core_id)); + KAutoObject::Create(main_thread); + KAutoObject::Create(idle_thread); + main_thread->Initialize(nullptr, 0, main_thread_stack, 0, KThread::MainThreadPriority, core_id, nullptr, KThread::ThreadType_Main); + idle_thread->Initialize(nullptr, 0, idle_thread_stack, 0, KThread::IdleThreadPriority, core_id, nullptr, KThread::ThreadType_Main); + + /* Set the current thread to be the main thread, and we have no processes running yet. */ + SetCurrentThread(main_thread); + SetCurrentProcess(nullptr); + + /* TODO: We also want to initialize the scheduler/interrupt manager/hardware timer. */ } } diff --git a/libraries/libmesosphere/source/kern_main.cpp b/libraries/libmesosphere/source/kern_main.cpp index bad569bfc..0a773e47a 100644 --- a/libraries/libmesosphere/source/kern_main.cpp +++ b/libraries/libmesosphere/source/kern_main.cpp @@ -19,7 +19,7 @@ namespace ams::kern { NORETURN void HorizonKernelMain(s32 core_id) { /* Setup the Core Local Region, and note that we're initializing. 
*/ - Kernel::Initialize(core_id); + Kernel::InitializeCoreLocalRegion(core_id); Kernel::SetState(Kernel::State::Initializing); /* Ensure that all cores get to this point before proceeding. */ @@ -29,7 +29,7 @@ namespace ams::kern { /* Synchronize after each init to ensure the cores go in order. */ for (size_t i = 0; i < cpu::NumCores; i++) { if (static_cast(i) == core_id) { - Kernel::InitializeCoreThreads(core_id); + Kernel::InitializeMainAndIdleThreads(core_id); } cpu::SynchronizeAllCores(); } diff --git a/libraries/libmesosphere/source/libc/kern_cxx.c b/libraries/libmesosphere/source/libc/kern_cxx.c new file mode 100644 index 000000000..9d3dbd0de --- /dev/null +++ b/libraries/libmesosphere/source/libc/kern_cxx.c @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + + +#ifdef __cplusplus +extern "C" { +#endif + +void __dso_handle() { /* ... */ } +void __cxa_atexit() { /* ... 
*/ } + +#ifdef __cplusplus +} /* extern "C" */ +#endif diff --git a/libraries/libmesosphere/source/libc/kern_new.cpp b/libraries/libmesosphere/source/libc/kern_new.cpp new file mode 100644 index 000000000..8ce1e726f --- /dev/null +++ b/libraries/libmesosphere/source/libc/kern_new.cpp @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +void operator delete (void *deleted) throw() { + MESOSPHERE_PANIC("operator delete(void *) was called: %p", deleted); +} + +void operator delete (void *deleted, size_t size) throw() { + MESOSPHERE_PANIC("operator delete(void *, size_t) was called: %p %zu", deleted, size); +} \ No newline at end of file diff --git a/libraries/libvapours/include/vapours/results/svc_results.hpp b/libraries/libvapours/include/vapours/results/svc_results.hpp index 6dfbe9b6c..ecdcffefe 100644 --- a/libraries/libvapours/include/vapours/results/svc_results.hpp +++ b/libraries/libvapours/include/vapours/results/svc_results.hpp @@ -27,6 +27,8 @@ namespace ams::svc { R_DEFINE_ERROR_RESULT(NotImplemented, 33); + R_DEFINE_ERROR_RESULT(NoSynchronizationObject, 57); + R_DEFINE_ERROR_RESULT(ThreadTerminating, 59); R_DEFINE_ERROR_RESULT(NoEvent, 70); From 00b093ec6251f9f0976eccfe28d7b55d1d49b85d Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 02:03:34 -0800 Subject: [PATCH 29/97] util: remove spurious error in parent offset calc --- 
.../libvapours/include/vapours/util/util_parent_of_member.hpp | 4 ---- 1 file changed, 4 deletions(-) diff --git a/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp b/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp index df2e0c57c..66f9b1b99 100644 --- a/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp +++ b/libraries/libvapours/include/vapours/util/util_parent_of_member.hpp @@ -83,10 +83,6 @@ namespace ams::util { const auto start = std::addressof(cur_union.data.members[0]); const auto next = GetNextAddress(start, target); - if constexpr (Offset > 0x10) { - __builtin_unreachable(); - } - if (next != target) { if constexpr (Offset < sizeof(MemberType) - 1) { return OffsetOfImpl(member, cur_union.next_union); From 86eea62cd867ee3d7c9680834dcc2abbbbdbe362 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 03:37:49 -0800 Subject: [PATCH 30/97] kernel: use .rel.ro --- libraries/config/templates/mesosphere.mk | 2 +- mesosphere/kernel/kernel.ld | 66 +++++++++++++----------- 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/libraries/config/templates/mesosphere.mk b/libraries/config/templates/mesosphere.mk index af0503858..542d6e970 100644 --- a/libraries/config/templates/mesosphere.mk +++ b/libraries/config/templates/mesosphere.mk @@ -12,7 +12,7 @@ export CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) export ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) -export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) +export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now export CXXWRAPS := -Wl,--wrap,__cxa_pure_virtual \ -Wl,--wrap,__cxa_throw \ diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld index 2d6497b27..91d0d8c9f 100644 --- 
a/mesosphere/kernel/kernel.ld +++ b/mesosphere/kernel/kernel.ld @@ -71,6 +71,42 @@ SECTIONS . = ALIGN(8); } :rodata + .data.rel.ro : + { + *(.data.rel.ro .data.rel.ro.*) + . = ALIGN(8); + } :rodata + + .preinit_array ALIGN(8) : + { + PROVIDE (__preinit_array_start = .); + KEEP (*(.preinit_array)) + PROVIDE (__preinit_array_end = .); + } :rodata + + .init_array ALIGN(8) : + { + PROVIDE (__init_array_start = .); + KEEP (*(SORT(.init_array.*))) + KEEP (*(.init_array)) + PROVIDE (__init_array_end = .); + } :rodata + + .fini_array ALIGN(8) : + { + PROVIDE (__fini_array_start = .); + KEEP (*(.fini_array)) + KEEP (*(SORT(.fini_array.*))) + PROVIDE (__fini_array_end = .); + } :rodata + + __got_start__ = .; + + .got : { *(.got) *(.igot) } :rodata + .got.plt : { *(.got.plt) *(.igot.plt) } :rodata + + __got_end__ = .; + .eh_frame_hdr : { __eh_frame_hdr_start = .; *(.eh_frame_hdr) *(.eh_frame_entry .eh_frame_entry.*) __eh_frame_hdr_end = .; } :rodata .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) *(.eh_frame.*) } :rodata .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } :rodata @@ -96,29 +132,6 @@ SECTIONS .gnu_extab : ONLY_IF_RW { *(.gnu_extab*) } : data .exception_ranges : ONLY_IF_RW { *(.exception_ranges .exception_ranges*) } :data - .preinit_array ALIGN(8) : - { - PROVIDE (__preinit_array_start = .); - KEEP (*(.preinit_array)) - PROVIDE (__preinit_array_end = .); - } :data - - .init_array ALIGN(8) : - { - PROVIDE (__init_array_start = .); - KEEP (*(SORT(.init_array.*))) - KEEP (*(.init_array)) - PROVIDE (__init_array_end = .); - } :data - - .fini_array ALIGN(8) : - { - PROVIDE (__fini_array_start = .); - KEEP (*(.fini_array)) - KEEP (*(SORT(.fini_array.*))) - PROVIDE (__fini_array_end = .); - } :data - .ctors ALIGN(8) : { KEEP (*crtbegin.o(.ctors)) /* MUST be first -- GCC requires it */ @@ -135,13 +148,6 @@ SECTIONS KEEP (*(.dtors)) } :data - __got_start__ = .; - - .got : { *(.got) *(.igot) } :data - .got.plt : { *(.got.plt) *(.igot.plt) } :data - - 
__got_end__ = .; - .data ALIGN(8) : { *(.data .data.* .gnu.linkonce.d.*) From 9accbcf8e1d9c865ee194afeea4cbfc39ec9e74f Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 04:47:08 -0800 Subject: [PATCH 31/97] meso: use -fno-non-call-exceptions, -fno-use-cxa-atexit --- libraries/config/templates/mesosphere.mk | 4 ++-- libraries/libmesosphere/Makefile | 4 ++-- .../libmesosphere/source/libc/{kern_cxx.c => kern_cxx.cpp} | 3 +-- 3 files changed, 5 insertions(+), 6 deletions(-) rename libraries/libmesosphere/source/libc/{kern_cxx.c => kern_cxx.cpp} (91%) diff --git a/libraries/config/templates/mesosphere.mk b/libraries/config/templates/mesosphere.mk index 542d6e970..a8a36c9e6 100644 --- a/libraries/config/templates/mesosphere.mk +++ b/libraries/config/templates/mesosphere.mk @@ -7,9 +7,9 @@ include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../common.mk # options for code generation #--------------------------------------------------------------------------------- export DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE -export SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror +export SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror -fno-non-call-exceptions export CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) -export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) +export CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit export ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) $(DEFINES) export LDFLAGS = -specs=$(TOPDIR)/$(notdir $(TOPDIR)).specs -nostdlib -nostartfiles -g $(SETTINGS) -Wl,-Map,$(notdir $*.map) -Wl,-z,relro,-z,now diff --git a/libraries/libmesosphere/Makefile b/libraries/libmesosphere/Makefile index e3047967b..d8799a192 100644 --- a/libraries/libmesosphere/Makefile +++ b/libraries/libmesosphere/Makefile @@ -7,9 +7,9 @@ include $(dir $(abspath $(lastword $(MAKEFILE_LIST))))/../config/common.mk # options for code generation 
#--------------------------------------------------------------------------------- DEFINES := $(ATMOSPHERE_DEFINES) -DATMOSPHERE_IS_MESOSPHERE -SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror +SETTINGS := $(ATMOSPHERE_SETTINGS) -O2 -mgeneral-regs-only -ffixed-x18 -Werror -fno-non-call-exceptions CFLAGS := $(ATMOSPHERE_CFLAGS) $(SETTINGS) $(DEFINES) $(INCLUDE) -CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -flto +CXXFLAGS := $(CFLAGS) $(ATMOSPHERE_CXXFLAGS) -fno-use-cxa-atexit -flto ASFLAGS := $(ATMOSPHERE_ASFLAGS) $(SETTINGS) LIBS := diff --git a/libraries/libmesosphere/source/libc/kern_cxx.c b/libraries/libmesosphere/source/libc/kern_cxx.cpp similarity index 91% rename from libraries/libmesosphere/source/libc/kern_cxx.c rename to libraries/libmesosphere/source/libc/kern_cxx.cpp index 9d3dbd0de..9262db6f8 100644 --- a/libraries/libmesosphere/source/libc/kern_cxx.c +++ b/libraries/libmesosphere/source/libc/kern_cxx.cpp @@ -19,8 +19,7 @@ extern "C" { #endif -void __dso_handle() { /* ... */ } -void __cxa_atexit() { /* ... */ } +/* cxx implementation details to be stubbed here, as needed. 
*/ #ifdef __cplusplus } /* extern "C" */ From 797c04d19f635ceea71b9f53f239ba1b757f1190 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 04:57:28 -0800 Subject: [PATCH 32/97] kern .fini array: one weird trick --- mesosphere/kernel/kernel.ld | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/mesosphere/kernel/kernel.ld b/mesosphere/kernel/kernel.ld index 91d0d8c9f..a42eb1b33 100644 --- a/mesosphere/kernel/kernel.ld +++ b/mesosphere/kernel/kernel.ld @@ -92,14 +92,6 @@ SECTIONS PROVIDE (__init_array_end = .); } :rodata - .fini_array ALIGN(8) : - { - PROVIDE (__fini_array_start = .); - KEEP (*(.fini_array)) - KEEP (*(SORT(.fini_array.*))) - PROVIDE (__fini_array_end = .); - } :rodata - __got_start__ = .; .got : { *(.got) *(.igot) } :rodata @@ -174,7 +166,7 @@ SECTIONS ================== */ /* Discard sections that difficult post-processing */ - /DISCARD/ : { *(.group .comment .note .interp) } + /DISCARD/ : { *(.group .comment .note .interp .fini_array .fini_array.*) } /* Stabs debugging sections. 
*/ .stab 0 : { *(.stab) } From 57222e8301546deea64542929b4fd8bd8d68149e Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 31 Jan 2020 16:25:17 -0800 Subject: [PATCH 33/97] kern: implement KSchedulerLock --- .../include/mesosphere/kern_k_scheduler.hpp | 20 ++++ .../mesosphere/kern_k_scheduler_lock.hpp | 111 ++++++++++++++++++ libraries/libvapours/include/vapours/util.hpp | 1 + .../vapours/util/util_specialization_of.hpp | 28 +++++ 4 files changed, 160 insertions(+) create mode 100644 libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp create mode 100644 libraries/libvapours/include/vapours/util/util_specialization_of.hpp diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp index 620566b46..19639f326 100644 --- a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp @@ -17,6 +17,7 @@ #include #include #include +#include namespace ams::kern { @@ -25,10 +26,14 @@ namespace ams::kern { static_assert(KSchedulerPriorityQueue::NumCores == cpu::NumCores); static_assert(KSchedulerPriorityQueue::NumPriority == BITSIZEOF(u64)); + class KScopedSchedulerLock; + class KScheduler { NON_COPYABLE(KScheduler); NON_MOVEABLE(KScheduler); public: + using LockType = KAbstractSchedulerLock; + struct SchedulingState { std::atomic needs_scheduling; bool interrupt_task_thread_runnable; @@ -37,6 +42,9 @@ namespace ams::kern { KThread *highest_priority_thread; void *idle_thread_stack; }; + private: + friend class KScopedSchedulerLock; + static inline LockType s_scheduler_lock; private: SchedulingState state; bool is_active; @@ -47,6 +55,18 @@ namespace ams::kern { public: KScheduler(); /* TODO: Actually implement KScheduler. This is a placeholder. 
*/ + public: + /* API used by KSchedulerLock */ + static void DisableScheduling(); + static void EnableScheduling(); + static u64 UpdateHighestPriorityThreads(); + static void EnableSchedulingAndSchedule(u64 cores_needing_scheduling); + }; + + class KScopedSchedulerLock { + public: + ALWAYS_INLINE KScopedSchedulerLock() { KScheduler::s_scheduler_lock.Lock(); } + ALWAYS_INLINE ~KScopedSchedulerLock() { KScheduler::s_scheduler_lock.Unlock(); } }; } diff --git a/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp new file mode 100644 index 000000000..5c951a438 --- /dev/null +++ b/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#pragma once +#include +#include +#include + +namespace ams::kern { + + class KThread; + + /* + TODO: C++20 + + template + concept KSchedulerLockable = !std::is_reference::value && requires { + { T::DisableScheduling() } -> std::same_as; + { T::EnableScheduling() } -> std::same_as; + { T::UpdateHighestPriorityThreads() } -> std::convertible_to; + { T::EnableSchedulingAndSchedule(std::declval()) } -> std::same_as; + }; + + */ + + template /* TODO C++20: requires KSchedulerLockable */ + class KAbstractSchedulerLock { + private: + KAlignedSpinLock spin_lock; + s32 lock_count; + KThread *owner_thread; + public: + constexpr ALWAYS_INLINE KAbstractSchedulerLock() : spin_lock(), lock_count(0), owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); } + + ALWAYS_INLINE bool IsLockedByCurrentThread() const { + MESOSPHERE_ASSERT_THIS(); + + return this->owner_thread == GetCurrentThreadPointer(); + } + + ALWAYS_INLINE void Lock() { + MESOSPHERE_ASSERT_THIS(); + + if (this->IsLockedByCurrentThread()) { + /* If we already own the lock, we can just increment the count. */ + MESOSPHERE_ASSERT(this->lock_count > 0); + this->lock_count++; + } else { + /* Otherwise, we want to disable scheduling and acquire the spinlock. */ + SchedulerType::DisableScheduling(); + this->spin_lock.Lock(); + + /* For debug, ensure that our state is valid. */ + MESOSPHERE_ASSERT(this->lock_count == 0); + MESOSPHERE_ASSERT(this->owner_thread == nullptr); + + /* Increment count, take ownership. */ + this->lock_count = 1; + this->owner_thread = GetCurrentThreadPointer(); + } + } + + ALWAYS_INLINE void Unlock() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(this->lock_count > 0); + + /* Release an instance of the lock. */ + if ((--this->lock_count) == 0) { + /* We're no longer going to hold the lock. Take note of what cores need scheduling. 
*/ + const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads(); + + /* Note that we no longer hold the lock, and unlock the spinlock. */ + this->owner_thread = nullptr; + this->spin_lock.Unlock(); + + /* Enable scheduling, and perform a rescheduling operation. */ + SchedulerType::EnableSchedulingAndSchedule(cores_needing_scheduling); + } + } + + ALWAYS_INLINE void UnlockWithoutRescheduling() { + MESOSPHERE_ASSERT_THIS(); + MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); + MESOSPHERE_ASSERT(this->lock_count > 0); + + /* Release an instance of the lock. */ + if ((--this->lock_count) == 0) { + /* Note that we no longer hold the lock, and unlock the spinlock. */ + this->owner_thread = nullptr; + this->spin_lock.Unlock(); + + /* Enable scheduling, and perform a rescheduling operation. */ + SchedulerType::EnableScheduling(); + } + } + }; + +} diff --git a/libraries/libvapours/include/vapours/util.hpp b/libraries/libvapours/include/vapours/util.hpp index 5656d33e3..97d6c0cba 100644 --- a/libraries/libvapours/include/vapours/util.hpp +++ b/libraries/libvapours/include/vapours/util.hpp @@ -23,6 +23,7 @@ #include "util/util_bitpack.hpp" #include "util/util_bitset.hpp" #include "util/util_scope_guard.hpp" +#include "util/util_specialization_of.hpp" #include "util/util_typed_storage.hpp" #include "util/util_intrusive_list.hpp" #include "util/util_intrusive_red_black_tree.hpp" diff --git a/libraries/libvapours/include/vapours/util/util_specialization_of.hpp b/libraries/libvapours/include/vapours/util/util_specialization_of.hpp new file mode 100644 index 000000000..ce3454def --- /dev/null +++ b/libraries/libvapours/include/vapours/util/util_specialization_of.hpp @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2018-2020 Atmosphère-NX + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#pragma once +#include "../defines.hpp" + +namespace ams::util { + + template class Template> + struct is_specialization_of : std::false_type{}; + + template