diff --git a/nx/include/switch/kernel/mutex.h b/nx/include/switch/kernel/mutex.h
index fe6ca3a3..99d4020f 100644
--- a/nx/include/switch/kernel/mutex.h
+++ b/nx/include/switch/kernel/mutex.h
@@ -20,7 +20,7 @@ typedef _LOCK_RECURSIVE_T RMutex;
  */
 static inline void mutexInit(Mutex* m)
 {
-    *m = 0;
+    *m = INVALID_HANDLE;
 }
 
@@ -42,6 +42,13 @@ bool mutexTryLock(Mutex* m);
  */
 void mutexUnlock(Mutex* m);
 
+/**
+ * @brief Gets whether the current thread owns the mutex.
+ * @param m Mutex object.
+ * @return 1 if the mutex is locked by the current thread, and 0 otherwise.
+ */
+bool mutexIsLockedByCurrentThread(const Mutex* m);
+
 /**
  * @brief Initializes a recursive mutex.
  * @param m Recursive mutex object.
diff --git a/nx/source/kernel/mutex.c b/nx/source/kernel/mutex.c
index f7fd8c59..c90957da 100644
--- a/nx/source/kernel/mutex.c
+++ b/nx/source/kernel/mutex.c
@@ -1,69 +1,141 @@
-// Copyright 2017 plutoo
+// Copyright 2017 plutoo, 2020 SciresM
+#include "result.h"
 #include "kernel/svc.h"
 #include "kernel/mutex.h"
 #include "../internal.h"
 
-#define HAS_LISTENERS 0x40000000
+#define HANDLE_WAIT_MASK 0x40000000u
 
-static u32 _GetTag(void) {
+#define LIKELY(expr)   (__builtin_expect_with_probability(!!(expr), 1, 1.0))
+#define UNLIKELY(expr) (__builtin_expect_with_probability(!!(expr), 0, 1.0))
+
+NX_INLINE u32 _GetTag(void) {
     return getThreadVars()->handle;
 }
 
+NX_INLINE u32 _LoadExclusive(u32 *ptr) {
+    u32 value;
+    __asm__ __volatile__("ldaxr %w[value], %[ptr]" : [value]"=&r"(value) : [ptr]"Q"(*ptr) : "memory");
+    return value;
+}
+
+NX_INLINE int _StoreExclusive(u32 *ptr, u32 value) {
+    int result;
+    __asm__ __volatile__("stlxr %w[result], %w[value], %[ptr]" : [result]"=&r"(result) : [value]"r"(value), [ptr]"Q"(*ptr) : "memory");
+    return result;
+}
+
+NX_INLINE void _ClearExclusive(void) {
+    __asm__ __volatile__("clrex" ::: "memory");
+}
+
 void mutexLock(Mutex* m) {
-    u32 self = _GetTag();
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
 
-    while (1) {
-        u32 cur = __sync_val_compare_and_swap((u32*)m, 0, self);
-
-        if (cur == 0) {
-            // We won the race!
-            return;
+    u32 value = _LoadExclusive(m);
+    while (true) {
+        // If the mutex isn't owned, try to take it.
+        if (LIKELY(value == INVALID_HANDLE)) {
+            // If we fail, try again.
+            if (UNLIKELY(_StoreExclusive(m, cur_handle) != 0)) {
+                value = _LoadExclusive(m);
+                continue;
+            }
+            break;
         }
 
-        if ((cur &~ HAS_LISTENERS) == self) {
-            // Kernel assigned it to us!
-            return;
-        }
-
-        if (cur & HAS_LISTENERS) {
-            // The flag is already set, we can use the syscall.
-            svcArbitrateLock(cur &~ HAS_LISTENERS, (u32*)m, self);
-        }
-        else {
-            // The flag is not set, we need to set it.
-            u32 old = __sync_val_compare_and_swap((u32*)m, cur, cur | HAS_LISTENERS);
-
-            if (old == cur) {
-                // Flag was set successfully.
-                svcArbitrateLock(cur, (u32*)m, self);
+        // If the mutex doesn't have any waiters, try to register ourselves as the first waiter.
+        if (LIKELY((value & HANDLE_WAIT_MASK) == 0)) {
+            // If we fail, try again.
+            if (UNLIKELY(_StoreExclusive(m, value | HANDLE_WAIT_MASK) != 0)) {
+                value = _LoadExclusive(m);
+                continue;
             }
         }
+
+        // Ask the kernel to arbitrate the lock for us.
+        if (UNLIKELY(R_FAILED(svcArbitrateLock(value & ~HANDLE_WAIT_MASK, m, cur_handle)))) {
+            // This should be impossible under normal circumstances.
+            svcBreak(0, 0, 0);
+        }
+
+        // Reload the value, and check if we got the lock.
+        value = _LoadExclusive(m);
+        if (LIKELY((value & ~HANDLE_WAIT_MASK) == cur_handle)) {
+            _ClearExclusive();
+            break;
+        }
     }
+
+    // __dmb(); // Done only in aarch32 mode.
 }
 
 bool mutexTryLock(Mutex* m) {
-    u32 self = _GetTag();
-    u32 cur = __sync_val_compare_and_swap((u32*)m, 0, self);
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
 
-    if (cur == 0) {
-        // We won the race!
-        return true;
+    while (true) {
+        // Check that the mutex is not owned.
+        u32 value = _LoadExclusive(m);
+        if (UNLIKELY(value != INVALID_HANDLE)) {
+            break;
+        }
+
+        // __dmb(); // Done only in aarch32 mode.
+
+        if (LIKELY(_StoreExclusive(m, cur_handle) == 0)) {
+            return true;
+        }
     }
 
-    if ((cur &~ HAS_LISTENERS) == self) {
-        // Kernel assigned it to us!
-        return true;
-    }
-
-    return 0;
+    // Release our exclusive hold.
+    _ClearExclusive();
+
+    // __dmb(); // Done only in aarch32 mode.
+
+    return false;
 }
 
 void mutexUnlock(Mutex* m) {
-    u32 old = __sync_val_compare_and_swap((u32*)m, _GetTag(), 0);
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
 
-    if (old & HAS_LISTENERS) {
-        svcArbitrateUnlock((u32*)m);
+    u32 value = _LoadExclusive(m);
+    while (true) {
+        // If we have any listeners, we need to ask the kernel to arbitrate.
+        if (UNLIKELY(value != cur_handle)) {
+            _ClearExclusive();
+            break;
+        }
+
+        // __dmb(); // Done only in aarch32 mode.
+
+        // Try to release the lock.
+        if (LIKELY(_StoreExclusive(m, INVALID_HANDLE) == 0)) {
+            break;
+        }
+
+        // Reload the value and try again.
+        value = _LoadExclusive(m);
     }
+
+    // __dmb(); // Done only in aarch32 mode.
+
+    if (value & HANDLE_WAIT_MASK) {
+        // Ask the kernel to arbitrate unlock for us.
+        if (UNLIKELY(R_FAILED(svcArbitrateUnlock(m)))) {
+            // This should be impossible under normal circumstances.
+            svcBreak(0, 0, 0);
+        }
+    }
+}
+
+bool mutexIsLockedByCurrentThread(const Mutex* m) {
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
+
+    return (*m & ~HANDLE_WAIT_MASK) == cur_handle;
 }
 
 void rmutexLock(RMutex* m) {
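
Usage note (not part of the diff above): a minimal sketch of how the new mutexIsLockedByCurrentThread() API might be used as an ownership assertion. The g_lock/g_counter globals, the incrementLocked() and example() helpers, and the <switch.h> include are hypothetical illustrations; only the mutex functions themselves come from this change.

    #include <switch.h> // libnx umbrella header; assumed build setup

    static Mutex g_lock;    // initialized via mutexInit() below
    static int   g_counter; // hypothetical state guarded by g_lock

    // Hypothetical helper that must only be called with g_lock held.
    static void incrementLocked(void) {
        // The new API compares the stored tag (minus the wait bit)
        // against the calling thread's kernel handle.
        if (!mutexIsLockedByCurrentThread(&g_lock))
            svcBreak(0, 0, 0); // mirrors the diff's "impossible" failure handling
        g_counter++;
    }

    void example(void) {
        mutexInit(&g_lock);
        mutexLock(&g_lock);
        incrementLocked();
        mutexUnlock(&g_lock);
    }

Because the lock word stores the owner's thread handle, the check is a single masked load with no extra per-mutex bookkeeping.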