From 2913096bc4c167285c80721c499a14f146f7c8b5 Mon Sep 17 00:00:00 2001 From: Michael Scire Date: Fri, 17 Sep 2021 16:12:01 -0700 Subject: [PATCH] kern: kill the interrupt task manager thread --- .../arch/arm64/kern_assembly_offsets.h | 10 +-- .../kern_k_interrupt_task_manager.hpp | 19 +++--- .../include/mesosphere/kern_k_scheduler.hpp | 66 +++++++++++-------- .../mesosphere/kern_k_scheduler_lock.hpp | 4 +- .../source/kern_k_interrupt_task_manager.cpp | 64 ++++++------------ libmesosphere/source/kern_k_scheduler.cpp | 31 +++------ libmesosphere/source/kern_main.cpp | 1 - libmesosphere/source/svc/kern_svc_info.cpp | 2 +- 8 files changed, 87 insertions(+), 110 deletions(-) diff --git a/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h b/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h index e25b8aeb..a49564e9 100644 --- a/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h +++ b/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h @@ -156,7 +156,9 @@ /* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */ /* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */ -#define KSCHEDULER_NEEDS_SCHEDULING 0x00 -#define KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE 0x01 -#define KSCHEDULER_HIGHEST_PRIORITY_THREAD 0x10 -#define KSCHEDULER_IDLE_THREAD_STACK 0x18 +#define KSCHEDULER_NEEDS_SCHEDULING 0x00 +#define KSCHEDULER_INTERRUPT_TASK_RUNNABLE 0x01 +#define KSCHEDULER_HIGHEST_PRIORITY_THREAD 0x10 +#define KSCHEDULER_IDLE_THREAD_STACK 0x18 +#define KSCHEDULER_PREVIOUS_THREAD 0x20 +#define KSCHEDULER_INTERRUPT_TASK_MANAGER 0x28 diff --git a/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp b/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp index dd1d95c4..ca902b74 100644 --- a/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp +++ b/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp @@ -27,28 +27,25 @@ namespace ams::kern { KInterruptTask *m_head; KInterruptTask *m_tail; public: - constexpr TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ } + constexpr ALWAYS_INLINE TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ } - constexpr KInterruptTask *GetHead() { return m_head; } - constexpr bool IsEmpty() const { return m_head == nullptr; } - constexpr void Clear() { m_head = nullptr; m_tail = nullptr; } + constexpr ALWAYS_INLINE KInterruptTask *GetHead() { return m_head; } + constexpr ALWAYS_INLINE bool IsEmpty() const { return m_head == nullptr; } + constexpr ALWAYS_INLINE void Clear() { m_head = nullptr; m_tail = nullptr; } void Enqueue(KInterruptTask *task); void Dequeue(); }; private: TaskQueue m_task_queue; - KThread *m_thread; - private: - static void ThreadFunction(uintptr_t arg); - void ThreadFunctionImpl(); + s64 m_cpu_time; public: - constexpr KInterruptTaskManager() : m_task_queue(), m_thread(nullptr) { /* ... */ } + constexpr KInterruptTaskManager() : m_task_queue(), m_cpu_time(0) { /* ... 
*/ }

-            constexpr KThread *GetThread() const { return m_thread; }
+            constexpr ALWAYS_INLINE s64 GetCpuTime() const { return m_cpu_time; }

-            NOINLINE void Initialize();
             void EnqueueTask(KInterruptTask *task);
+            void DoTasks();
     };

 }
diff --git a/libmesosphere/include/mesosphere/kern_k_scheduler.hpp b/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
index 942365e2..87ffc48a 100644
--- a/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
+++ b/libmesosphere/include/mesosphere/kern_k_scheduler.hpp
@@ -17,6 +17,7 @@
 #include <mesosphere/kern_common.hpp>
 #include <mesosphere/kern_k_thread.hpp>
 #include <mesosphere/kern_k_priority_queue.hpp>
+#include <mesosphere/kern_k_interrupt_task_manager.hpp>
 #include <mesosphere/kern_k_scheduler_lock.hpp>

 namespace ams::kern {
@@ -39,11 +40,13 @@
             struct SchedulingState {
                 std::atomic<bool> needs_scheduling;
-                bool interrupt_task_thread_runnable;
+                bool interrupt_task_runnable;
                 bool should_count_idle;
                 u64 idle_count;
                 KThread *highest_priority_thread;
                 void *idle_thread_stack;
+                KThread *prev_thread;
+                KInterruptTaskManager *interrupt_task_manager;
             };
         private:
             friend class KScopedSchedulerLock;
@@ -53,28 +56,29 @@
             SchedulingState m_state;
             bool m_is_active;
             s32 m_core_id;
-            KThread *m_prev_thread;
             s64 m_last_context_switch_time;
             KThread *m_idle_thread;
             std::atomic<KThread *> m_current_thread;
         public:
             constexpr KScheduler()
-                : m_state(), m_is_active(false), m_core_id(0), m_prev_thread(nullptr), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
+                : m_state(), m_is_active(false), m_core_id(0), m_last_context_switch_time(0), m_idle_thread(nullptr), m_current_thread(nullptr)
             {
-                m_state.needs_scheduling = true;
-                m_state.interrupt_task_thread_runnable = false;
-                m_state.should_count_idle = false;
-                m_state.idle_count = 0;
-                m_state.idle_thread_stack = nullptr;
+                m_state.needs_scheduling        = true;
+                m_state.interrupt_task_runnable = false;
+                m_state.should_count_idle       = false;
+                m_state.idle_count              = 0;
+                m_state.idle_thread_stack       = nullptr;
                 m_state.highest_priority_thread = nullptr;
+                m_state.prev_thread             = nullptr;
+                m_state.interrupt_task_manager  = nullptr;
             }

             NOINLINE void Initialize(KThread *idle_thread);
             NOINLINE void Activate();

             ALWAYS_INLINE void SetInterruptTaskRunnable() {
-                m_state.interrupt_task_thread_runnable = true;
-                m_state.needs_scheduling = true;
+                m_state.interrupt_task_runnable = true;
+                m_state.needs_scheduling        = true;
             }

             ALWAYS_INLINE void RequestScheduleOnInterrupt() {
@@ -94,7 +98,7 @@
             }

             ALWAYS_INLINE KThread *GetPreviousThread() const {
-                return m_prev_thread;
+                return m_state.prev_thread;
             }

             ALWAYS_INLINE KThread *GetSchedulerCurrentThread() const {
@@ -108,8 +112,6 @@
             /* Static private API. */
             static ALWAYS_INLINE KSchedulerPriorityQueue &GetPriorityQueue() { return s_priority_queue; }
             static NOINLINE u64 UpdateHighestPriorityThreadsImpl();
-
-            static NOINLINE void InterruptTaskThreadToRunnable();
        public:
            /* Static public API. 
*/ static ALWAYS_INLINE bool CanSchedule() { return GetCurrentThread().GetDisableDispatchCount() == 0; } @@ -124,13 +126,14 @@ namespace ams::kern { GetCurrentThread().DisableDispatch(); } - static NOINLINE void EnableScheduling(u64 cores_needing_scheduling) { + static ALWAYS_INLINE void EnableScheduling(u64 cores_needing_scheduling) { MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 1); + GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling); + if (GetCurrentThread().GetDisableDispatchCount() > 1) { GetCurrentThread().EnableDispatch(); } else { - GetCurrentScheduler().RescheduleOtherCores(cores_needing_scheduling); GetCurrentScheduler().RescheduleCurrentCore(); } } @@ -176,14 +179,23 @@ namespace ams::kern { ALWAYS_INLINE void RescheduleCurrentCore() { MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1); - { - /* Disable interrupts, and then context switch. */ - KScopedInterruptDisable intr_disable; - ON_SCOPE_EXIT { GetCurrentThread().EnableDispatch(); }; - if (m_state.needs_scheduling.load()) { - Schedule(); - } + GetCurrentThread().EnableDispatch(); + + if (m_state.needs_scheduling.load()) { + /* Disable interrupts, and then check again if rescheduling is needed. */ + KScopedInterruptDisable intr_disable; + + GetCurrentScheduler().RescheduleCurrentCoreImpl(); + } + } + + ALWAYS_INLINE void RescheduleCurrentCoreImpl() { + /* Check that scheduling is needed. */ + if (AMS_LIKELY(m_state.needs_scheduling.load())) { + GetCurrentThread().DisableDispatch(); + this->Schedule(); + GetCurrentThread().EnableDispatch(); } } @@ -199,10 +211,12 @@ namespace ams::kern { }; consteval bool KScheduler::ValidateAssemblyOffsets() { - static_assert(__builtin_offsetof(KScheduler, m_state.needs_scheduling) == KSCHEDULER_NEEDS_SCHEDULING); - static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_thread_runnable) == KSCHEDULER_INTERRUPT_TASK_THREAD_RUNNABLE); - static_assert(__builtin_offsetof(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD); - static_assert(__builtin_offsetof(KScheduler, m_state.idle_thread_stack) == KSCHEDULER_IDLE_THREAD_STACK); + static_assert(__builtin_offsetof(KScheduler, m_state.needs_scheduling) == KSCHEDULER_NEEDS_SCHEDULING); + static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_runnable) == KSCHEDULER_INTERRUPT_TASK_RUNNABLE); + static_assert(__builtin_offsetof(KScheduler, m_state.highest_priority_thread) == KSCHEDULER_HIGHEST_PRIORITY_THREAD); + static_assert(__builtin_offsetof(KScheduler, m_state.idle_thread_stack) == KSCHEDULER_IDLE_THREAD_STACK); + static_assert(__builtin_offsetof(KScheduler, m_state.prev_thread) == KSCHEDULER_PREVIOUS_THREAD); + static_assert(__builtin_offsetof(KScheduler, m_state.interrupt_task_manager) == KSCHEDULER_INTERRUPT_TASK_MANAGER); return true; } diff --git a/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp b/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp index 0f9e27f9..e8097d36 100644 --- a/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp +++ b/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp @@ -45,7 +45,7 @@ namespace ams::kern { return m_owner_thread == GetCurrentThreadPointer(); } - void Lock() { + NOINLINE void Lock() { MESOSPHERE_ASSERT_THIS(); if (this->IsLockedByCurrentThread()) { @@ -67,7 +67,7 @@ namespace ams::kern { } } - void Unlock() { + NOINLINE void Unlock() { MESOSPHERE_ASSERT_THIS(); MESOSPHERE_ASSERT(this->IsLockedByCurrentThread()); MESOSPHERE_ASSERT(m_lock_count > 0); diff 
--git a/libmesosphere/source/kern_k_interrupt_task_manager.cpp b/libmesosphere/source/kern_k_interrupt_task_manager.cpp
index 1fe3a649..d370b2f5 100644
--- a/libmesosphere/source/kern_k_interrupt_task_manager.cpp
+++ b/libmesosphere/source/kern_k_interrupt_task_manager.cpp
@@ -59,50 +59,6 @@
 #endif
     }

-    void KInterruptTaskManager::ThreadFunction(uintptr_t arg) {
-        reinterpret_cast<KInterruptTaskManager *>(arg)->ThreadFunctionImpl();
-    }
-
-    void KInterruptTaskManager::ThreadFunctionImpl() {
-        MESOSPHERE_ASSERT_THIS();
-
-        while (true) {
-            /* Get a task. */
-            KInterruptTask *task = nullptr;
-            {
-                KScopedInterruptDisable di;
-
-                task = m_task_queue.GetHead();
-                if (task == nullptr) {
-                    m_thread->SetState(KThread::ThreadState_Waiting);
-                    continue;
-                }
-
-                m_task_queue.Dequeue();
-            }
-
-            /* Do the task. */
-            task->DoTask();
-
-            /* Destroy any objects we may need to close. */
-            m_thread->DestroyClosedObjects();
-        }
-    }
-
-    void KInterruptTaskManager::Initialize() {
-        /* Reserve a thread from the system limit. */
-        MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
-
-        /* Create and initialize the thread. */
-        m_thread = KThread::Create();
-        MESOSPHERE_ABORT_UNLESS(m_thread != nullptr);
-        MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeHighPriorityThread(m_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this)));
-        KThread::Register(m_thread);
-
-        /* Run the thread. */
-        m_thread->Run();
-    }
-
     void KInterruptTaskManager::EnqueueTask(KInterruptTask *task) {
         MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());

@@ -111,4 +67,24 @@
         Kernel::GetScheduler().SetInterruptTaskRunnable();
     }

+    void KInterruptTaskManager::DoTasks() {
+        /* Execute pending tasks. */
+        const s64 start_time = KHardwareTimer::GetTick();
+        for (KInterruptTask *task = m_task_queue.GetHead(); task != nullptr; task = m_task_queue.GetHead()) {
+            /* Dequeue the task. */
+            m_task_queue.Dequeue();
+
+            /* Do the task with interrupts temporarily enabled. */
+            {
+                KScopedInterruptEnable ei;
+
+                task->DoTask();
+            }
+        }
+        const s64 end_time = KHardwareTimer::GetTick();
+
+        /* Increment the time we've spent executing. */
+        m_cpu_time += end_time - start_time;
+    }
+
 }
diff --git a/libmesosphere/source/kern_k_scheduler.cpp b/libmesosphere/source/kern_k_scheduler.cpp
index cce622c9..560bf7b4 100644
--- a/libmesosphere/source/kern_k_scheduler.cpp
+++ b/libmesosphere/source/kern_k_scheduler.cpp
@@ -55,10 +55,11 @@
     }

     void KScheduler::Initialize(KThread *idle_thread) {
-        /* Set core ID and idle thread. */
-        m_core_id = GetCurrentCoreId();
-        m_idle_thread = idle_thread;
-        m_state.idle_thread_stack = m_idle_thread->GetStackTop();
+        /* Set core ID/idle thread/interrupt task manager. */
+        m_core_id                      = GetCurrentCoreId();
+        m_idle_thread                  = idle_thread;
+        m_state.idle_thread_stack      = m_idle_thread->GetStackTop();
+        m_state.interrupt_task_manager = std::addressof(Kernel::GetInterruptTaskManager());

         /* Insert the main thread into the priority queue. 
*/
         {
@@ -212,19 +213,9 @@
         return cores_needing_scheduling;
     }

-    void KScheduler::InterruptTaskThreadToRunnable() {
-        MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
-
-        KThread *task_thread = Kernel::GetInterruptTaskManager().GetThread();
-        {
-            KScopedSchedulerLock sl;
-            task_thread->SetState(KThread::ThreadState_Runnable);
-        }
-    }
-
     void KScheduler::SwitchThread(KThread *next_thread) {
-        KProcess *cur_process = GetCurrentProcessPointer();
-        KThread *cur_thread = GetCurrentThreadPointer();
+        KProcess * const cur_process = GetCurrentProcessPointer();
+        KThread * const cur_thread   = GetCurrentThreadPointer();

         /* We never want to schedule a null thread, so use the idle thread if we don't have a next. */
         if (next_thread == nullptr) {
@@ -257,12 +248,10 @@
             if (cur_process != nullptr) {
                 /* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
                 if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) {
-                    m_prev_thread = cur_thread;
+                    m_state.prev_thread = cur_thread;
                 } else {
-                    m_prev_thread = nullptr;
+                    m_state.prev_thread = nullptr;
                 }
-            } else if (cur_thread == m_idle_thread) {
-                m_prev_thread = nullptr;
             }

             MESOSPHERE_KTRACE_THREAD_SWITCH(next_thread);
@@ -284,7 +273,7 @@
         MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
         for (size_t i = 0; i < cpu::NumCores; ++i) {
             /* Get an atomic reference to the core scheduler's previous thread. */
-            std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_prev_thread);
+            std::atomic_ref<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread);
             static_assert(std::atomic_ref<KThread *>::is_always_lock_free);

             /* Atomically clear the previous thread if it's our target. */
diff --git a/libmesosphere/source/kern_main.cpp b/libmesosphere/source/kern_main.cpp
index dc351c70..6450b0ca 100644
--- a/libmesosphere/source/kern_main.cpp
+++ b/libmesosphere/source/kern_main.cpp
@@ -99,7 +99,6 @@
         DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
             KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
             KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
-            Kernel::GetInterruptTaskManager().Initialize();
         });

         /* Activate the scheduler and enable interrupts. */
diff --git a/libmesosphere/source/svc/kern_svc_info.cpp b/libmesosphere/source/svc/kern_svc_info.cpp
index c5424095..598ee229 100644
--- a/libmesosphere/source/svc/kern_svc_info.cpp
+++ b/libmesosphere/source/svc/kern_svc_info.cpp
@@ -189,7 +189,7 @@
                     R_UNLESS(core_valid, svc::ResultInvalidCombination());

                     /* Get the idle tick count. */
-                    *out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime();
+                    *out = Kernel::GetScheduler().GetIdleThread()->GetCpuTime() - Kernel::GetInterruptTaskManager().GetCpuTime();
                 } break;
             case ams::svc::InfoType_RandomEntropy:
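
Note: the patch removes KInterruptTaskManager's dedicated high-priority thread; pending interrupt tasks are instead drained in place by the new DoTasks(), which the scheduler can now reach through the KInterruptTaskManager pointer stored in SchedulingState (hence the new KSCHEDULER_INTERRUPT_TASK_MANAGER assembly offset). The following is a minimal, self-contained sketch of that dispatch model, not kernel code: MockTask, TaskQueue, TaskManager, and GetTick are hypothetical stand-ins for KInterruptTask, KInterruptTaskManager::TaskQueue, KInterruptTaskManager, and KHardwareTimer::GetTick, and the per-task KScopedInterruptEnable region is reduced to a comment.

    /* Minimal illustration of the post-patch dispatch model (hypothetical names). */
    #include <cstdint>
    #include <cstdio>

    struct MockTask {
        MockTask *next = nullptr;
        virtual void DoTask() = 0;
        virtual ~MockTask() = default;
    };

    /* Intrusive singly-linked queue, mirroring KInterruptTaskManager::TaskQueue. */
    class TaskQueue {
        private:
            MockTask *m_head = nullptr;
            MockTask *m_tail = nullptr;
        public:
            MockTask *GetHead() const { return m_head; }

            void Enqueue(MockTask *task) {
                if (m_tail != nullptr) {
                    m_tail->next = task;
                } else {
                    m_head = task;
                }
                m_tail = task;
            }

            void Dequeue() {
                m_head = m_head->next;
                if (m_head == nullptr) {
                    m_tail = nullptr;
                }
            }
    };

    /* Hypothetical stand-in for KHardwareTimer::GetTick(). */
    static int64_t GetTick() {
        static int64_t tick = 0;
        return tick += 10;
    }

    class TaskManager {
        private:
            TaskQueue m_task_queue{};
            int64_t m_cpu_time = 0;
        public:
            void EnqueueTask(MockTask *task) { m_task_queue.Enqueue(task); }

            /* Mirrors the new DoTasks(): drain the queue in place and time the pass.
               In the kernel, each DoTask() runs under KScopedInterruptEnable. */
            void DoTasks() {
                const int64_t start_time = GetTick();
                for (MockTask *task = m_task_queue.GetHead(); task != nullptr; task = m_task_queue.GetHead()) {
                    m_task_queue.Dequeue();
                    task->DoTask();
                }
                m_cpu_time += GetTick() - start_time;
            }

            int64_t GetCpuTime() const { return m_cpu_time; }
    };

    struct PrintTask : public MockTask {
        void DoTask() override { std::puts("interrupt task executed"); }
    };

    int main() {
        TaskManager manager;
        PrintTask a, b;
        manager.EnqueueTask(&a);
        manager.EnqueueTask(&b);
        manager.DoTasks();  /* runs both tasks in the caller's context */
        std::printf("cpu time spent on tasks: %lld\n", static_cast<long long>(manager.GetCpuTime()));
        return 0;
    }

The property the sketch preserves is that tasks run to completion in the caller's context, so the thread creation, resource-limit reservation, and ThreadState_Waiting bookkeeping that Initialize()/ThreadFunctionImpl() required can all be deleted.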
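
Note: EnableScheduling() and RescheduleCurrentCore() also reorder their dispatch-count handling: other cores are now signaled before the local dispatch-count branch, and the current core re-enables dispatch before testing needs_scheduling, re-checking the flag only once interrupts are disabled. A single-threaded mock of that local ordering, with a hypothetical MockCore and a plain integer standing in for the current thread's disable-dispatch counter (the kernel's KScopedInterruptDisable region is marked by a comment):

    #include <atomic>
    #include <cassert>
    #include <cstdio>

    struct MockCore {
        std::atomic<bool> needs_scheduling{true};
        int disable_dispatch_count = 1;  /* callers enter holding exactly one disable */

        void Schedule() {
            std::puts("switching threads");
            needs_scheduling = false;
        }

        void RescheduleCurrentCoreImpl() {
            /* Authoritative re-check, taken with "interrupts disabled". */
            if (needs_scheduling.load()) {
                ++disable_dispatch_count;   /* DisableDispatch() */
                Schedule();
                --disable_dispatch_count;   /* EnableDispatch() */
            }
        }

        void RescheduleCurrentCore() {
            assert(disable_dispatch_count == 1);
            --disable_dispatch_count;       /* EnableDispatch() precedes the cheap check */
            if (needs_scheduling.load()) {
                /* KScopedInterruptDisable would guard this region in the kernel. */
                RescheduleCurrentCoreImpl();
            }
        }
    };

    int main() {
        MockCore core;
        core.RescheduleCurrentCore();   /* prints "switching threads" once */
        return 0;
    }

The double check is the point: the first load is a cheap fast-path filter taken with interrupts enabled, while the second, taken with interrupts off, is the authoritative one.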
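
Note: the kern_svc_info.cpp hunk is the accounting counterpart of DoTasks(). Interrupt tasks now consume CPU time that is charged to the idle thread's tick count (as the subtraction implies), so the idle-tick query reports GetIdleThread()->GetCpuTime() minus the manager's accumulated m_cpu_time, i.e. reported_idle_ticks = idle_thread_cpu_time - interrupt_task_cpu_time; without the subtraction, time spent executing interrupt tasks would be misreported as idle time.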