mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 12:44:11 +08:00
locking/ww_mutex: Implement rtmutex based ww_mutex API functions
Add the actual ww_mutex API functions which replace the mutex based variant on RT enabled kernels. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@kernel.org> Link: https://lore.kernel.org/r/20210815211305.024057938@linutronix.de
This commit is contained in:
parent
add461325e
commit
f8635d509d
@ -25,7 +25,7 @@ obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
|
||||
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
|
||||
obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
|
||||
obj-$(CONFIG_RT_MUTEXES) += rtmutex_api.o
|
||||
obj-$(CONFIG_PREEMPT_RT) += spinlock_rt.o
|
||||
obj-$(CONFIG_PREEMPT_RT) += spinlock_rt.o ww_rt_mutex.o
|
||||
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
|
||||
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
|
||||
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
|
||||
|
76
kernel/locking/ww_rt_mutex.c
Normal file
76
kernel/locking/ww_rt_mutex.c
Normal file
@ -0,0 +1,76 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 *
 * ww_mutex API functions for PREEMPT_RT kernels, built on top of rtmutex.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

/*
 * Textually include the rtmutex core with RT_MUTEX_BUILD_MUTEX and WW_RT
 * defined, so this translation unit gets the ww_mutex-aware variants of the
 * rtmutex helpers (e.g. rt_mutex_slowlock() taking a ww_acquire_ctx) used
 * by the API functions below.
 */
#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"
||||
/*
 * __ww_rt_mutex_lock - common lock path for the rtmutex based ww_mutex
 * @lock:	the wound/wait mutex to acquire
 * @ww_ctx:	optional acquire context; NULL for a context-less acquisition
 * @state:	task state for blocking (TASK_UNINTERRUPTIBLE / TASK_INTERRUPTIBLE)
 * @ip:		caller instruction pointer for lockdep annotations
 *
 * Returns 0 on success, -EALREADY if @ww_ctx already holds @lock, or the
 * error code from rt_mutex_slowlock() (e.g. -EDEADLK / -EINTR).
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		/* Recursive acquisition in the same context is a caller bug. */
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/* Nest under the acquire context's lockdep map. */
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/* Fast path: uncontended cmpxchg of the owner from NULL to current. */
	if (likely(rt_mutex_cmpxchg_acquire(&rtm->rtmutex, NULL, current))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* On failure, undo the lockdep acquire annotation from above. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}
|
||||
|
||||
int __sched
|
||||
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
|
||||
{
|
||||
return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
|
||||
}
|
||||
EXPORT_SYMBOL(ww_mutex_lock);
|
||||
|
||||
int __sched
|
||||
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
|
||||
{
|
||||
return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
|
||||
}
|
||||
EXPORT_SYMBOL(ww_mutex_lock_interruptible);
|
||||
|
||||
/*
 * ww_mutex_unlock - release the w/w mutex
 * @lock:	the mutex to release
 *
 * Ordering matters here: the ww state is torn down first, then the lockdep
 * annotation is released, and only then is the underlying rtmutex unlocked
 * (which may hand the lock to a waiter).
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	/* Clear the ww acquire-context association for this lock. */
	__ww_mutex_unlock(lock);

	/* Release the lockdep annotation before the actual unlock. */
	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);
|
Loading…
Reference in New Issue
Block a user