Mirror of https://github.com/switchbrew/libnx.git
mutex: rewrite to optimize
commit 3d726ed78c
parent 3c08ce6936
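In short: the lock fast path no longer goes through the GCC `__sync_val_compare_and_swap` builtins; it is rewritten around raw ARMv8-A exclusive load-acquire/store-release pairs (`ldaxr`/`stlxr`), the `HAS_LISTENERS` flag becomes `HANDLE_WAIT_MASK`, and a new `mutexIsLockedByCurrentThread()` query is added. The calling convention is otherwise unchanged; a minimal usage sketch follows (the `g_counter` example is illustrative, not part of the commit):

#include <switch.h>

static Mutex g_lock;    // illustrative global lock
static int   g_counter; // illustrative shared state

void counterInit(void) {
    mutexInit(&g_lock); // writes INVALID_HANDLE (0) after this commit
}

void counterIncrement(void) {
    mutexLock(&g_lock); // kernel arbitration happens only under contention
    g_counter++;
    mutexUnlock(&g_lock);
}

bool counterTryIncrement(void) {
    if (!mutexTryLock(&g_lock)) // never blocks; fails if the lock is owned
        return false;
    // New in this commit: ownership can be asserted for debugging.
    if (!mutexIsLockedByCurrentThread(&g_lock))
        svcBreak(0, 0, 0);
    g_counter++;
    mutexUnlock(&g_lock);
    return true;
}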
@@ -20,7 +20,7 @@ typedef _LOCK_RECURSIVE_T RMutex;
  */
 static inline void mutexInit(Mutex* m)
 {
-    *m = 0;
+    *m = INVALID_HANDLE;
 }
 
 /**
@@ -42,6 +42,13 @@ bool mutexTryLock(Mutex* m);
  */
 void mutexUnlock(Mutex* m);
 
+/**
+ * @brief Gets whether the current thread owns the mutex.
+ * @param m Mutex object.
+ * @return 1 if the mutex is locked by the current thread, and 0 otherwise.
+ */
+bool mutexIsLockedByCurrentThread(const Mutex* m);
+
 /**
  * @brief Initializes a recursive mutex.
  * @param m Recursive mutex object.
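Before the mutex.c diff below, it helps to know how the mutex word is packed: a `Mutex` is a single `u32` holding the owning thread's kernel handle, with bit 30 (`HANDLE_WAIT_MASK`) set once another thread is waiting on it; Horizon thread handles appear never to use that bit, so the owner and the waiter flag can share one word. Illustrative decode helpers (assumed, not part of the commit):

#include <switch.h> // for u32/bool; HANDLE_WAIT_MASK is private to mutex.c, redefined here

#define HANDLE_WAIT_MASK 0x40000000u

static inline u32  mutexOwnerHandle(u32 raw) { return raw & ~HANDLE_WAIT_MASK; }     // owning thread's handle
static inline bool mutexHasWaiters(u32 raw)  { return (raw & HANDLE_WAIT_MASK) != 0; } // contended?
static inline bool mutexIsFree(u32 raw)      { return raw == INVALID_HANDLE; }        // unlocked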
@@ -1,69 +1,141 @@
-// Copyright 2017 plutoo
+// Copyright 2017 plutoo, 2020 SciresM
+#include "result.h"
 #include "kernel/svc.h"
 #include "kernel/mutex.h"
 #include "../internal.h"
 
-#define HAS_LISTENERS 0x40000000
+#define HANDLE_WAIT_MASK 0x40000000u
 
-static u32 _GetTag(void) {
+#define LIKELY(expr)   (__builtin_expect_with_probability(!!(expr), 1, 1.0))
+#define UNLIKELY(expr) (__builtin_expect_with_probability(!!(expr), 0, 1.0))
+
+NX_INLINE u32 _GetTag(void) {
     return getThreadVars()->handle;
 }
 
+NX_INLINE u32 _LoadExclusive(u32 *ptr) {
+    u32 value;
+    __asm__ __volatile__("ldaxr %w[value], %[ptr]" : [value]"=&r"(value) : [ptr]"Q"(*ptr) : "memory");
+    return value;
+}
+
+NX_INLINE int _StoreExclusive(u32 *ptr, u32 value) {
+    int result;
+    __asm__ __volatile__("stlxr %w[result], %w[value], %[ptr]" : [result]"=&r"(result) : [value]"r"(value), [ptr]"Q"(*ptr) : "memory");
+    return result;
+}
+
+NX_INLINE void _ClearExclusive(void) {
+    __asm__ __volatile__("clrex" ::: "memory");
+}
+
 void mutexLock(Mutex* m) {
-    u32 self = _GetTag();
-
-    while (1) {
-        u32 cur = __sync_val_compare_and_swap((u32*)m, 0, self);
-
-        if (cur == 0) {
-            // We won the race!
-            return;
-        }
-
-        if ((cur &~ HAS_LISTENERS) == self) {
-            // Kernel assigned it to us!
-            return;
-        }
-
-        if (cur & HAS_LISTENERS) {
-            // The flag is already set, we can use the syscall.
-            svcArbitrateLock(cur &~ HAS_LISTENERS, (u32*)m, self);
-        }
-        else {
-            // The flag is not set, we need to set it.
-            u32 old = __sync_val_compare_and_swap((u32*)m, cur, cur | HAS_LISTENERS);
-
-            if (old == cur) {
-                // Flag was set successfully.
-                svcArbitrateLock(cur, (u32*)m, self);
-            }
-        }
-    }
-}
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
+
+    u32 value = _LoadExclusive(m);
+    while (true) {
+        // If the mutex isn't owned, try to take it.
+        if (LIKELY(value == INVALID_HANDLE)) {
+            // If we fail, try again.
+            if (UNLIKELY(_StoreExclusive(m, cur_handle) != 0)) {
+                value = _LoadExclusive(m);
+                continue;
+            }
+            break;
+        }
+
+        // If the mutex doesn't have any waiters, try to register ourselves as the first waiter.
+        if (LIKELY((value & HANDLE_WAIT_MASK) == 0)) {
+            // If we fail, try again.
+            if (UNLIKELY(_StoreExclusive(m, value | HANDLE_WAIT_MASK) != 0)) {
+                value = _LoadExclusive(m);
+                continue;
+            }
+        }
+
+        // Ask the kernel to arbitrate the lock for us.
+        if (UNLIKELY(R_FAILED(svcArbitrateLock(value & ~HANDLE_WAIT_MASK, m, cur_handle)))) {
+            // This should be impossible under normal circumstances.
+            svcBreak(0, 0, 0);
+        }
+
+        // Reload the value, and check if we got the lock.
+        value = _LoadExclusive(m);
+        if (LIKELY((value & ~HANDLE_WAIT_MASK) == cur_handle)) {
+            _ClearExclusive();
+            break;
+        }
+    }
+
+    // __dmb(); // Done only in aarch32 mode.
+}
 
 bool mutexTryLock(Mutex* m) {
-    u32 self = _GetTag();
-    u32 cur = __sync_val_compare_and_swap((u32*)m, 0, self);
-
-    if (cur == 0) {
-        // We won the race!
-        return true;
-    }
-
-    if ((cur &~ HAS_LISTENERS) == self) {
-        // Kernel assigned it to us!
-        return true;
-    }
-
-    return 0;
-}
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
+
+    while (true) {
+        // Check that the mutex is not owned.
+        u32 value = _LoadExclusive(m);
+        if (UNLIKELY(value != INVALID_HANDLE)) {
+            break;
+        }
+
+        // __dmb(); // Done only in aarch32 mode.
+
+        if (LIKELY(_StoreExclusive(m, cur_handle) == 0)) {
+            return true;
+        }
+    }
+
+    // Release our exclusive hold.
+    _ClearExclusive();
+
+    // __dmb(); // Done only in aarch32 mode.
+
+    return false;
+}
 
 void mutexUnlock(Mutex* m) {
-    u32 old = __sync_val_compare_and_swap((u32*)m, _GetTag(), 0);
-
-    if (old & HAS_LISTENERS) {
-        svcArbitrateUnlock((u32*)m);
-    }
-}
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
+
+    u32 value = _LoadExclusive(m);
+    while (true) {
+        // If we have any listeners, we need to ask the kernel to arbitrate.
+        if (UNLIKELY(value != cur_handle)) {
+            _ClearExclusive();
+            break;
+        }
+
+        // __dmb(); // Done only in aarch32 mode.
+
+        // Try to release the lock.
+        if (LIKELY(_StoreExclusive(m, INVALID_HANDLE) == 0)) {
+            break;
+        }
+
+        // Reload the value and try again.
+        value = _LoadExclusive(m);
+    }
+
+    // __dmb(); // Done only in aarch32 mode.
+
+    if (value & HANDLE_WAIT_MASK) {
+        // Ask the kernel to arbitrate unlock for us.
+        if (UNLIKELY(R_FAILED(svcArbitrateUnlock(m)))) {
+            // This should be impossible under normal circumstances.
+            svcBreak(0, 0, 0);
+        }
+    }
+}
+
+bool mutexIsLockedByCurrentThread(const Mutex* m) {
+    // Get the current thread handle.
+    const u32 cur_handle = _GetTag();
+
+    return (*m & ~HANDLE_WAIT_MASK) == cur_handle;
+}
 
 void rmutexLock(RMutex* m) {
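The `_LoadExclusive`/`_StoreExclusive` pair above is the generic ARMv8-A atomic-update idiom: `ldaxr` opens an exclusive monitor with acquire semantics, `stlxr` closes it with release semantics and returns nonzero if the monitor was lost, and `clrex` abandons a sequence that will not store. As a sketch of the same idiom, a weak compare-and-swap could be built from these helpers (illustrative, assuming the file-local helpers defined in the diff above):

// Weak CAS from the same primitives: may fail spuriously if the exclusive
// monitor is lost between the load and the store, so callers retry in a loop.
static bool _CompareAndSwap(u32 *ptr, u32 expected, u32 desired) {
    u32 value = _LoadExclusive(ptr);   // ldaxr: load-acquire, opens the monitor
    if (value != expected) {
        _ClearExclusive();             // clrex: abandon without storing
        return false;
    }
    return _StoreExclusive(ptr, desired) == 0; // stlxr: 0 means the store landed
}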