// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>
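
/*
 * For reference (values as defined in include/asm-generic/qrwlock.h;
 * see that header for the authoritative definitions), the lock count
 * word is laid out as:
 *
 *	bits  0-7 : writer locked byte (_QW_LOCKED)
 *	bit     8 : a writer is waiting (_QW_WAITING)
 *	bits  9-31: reader count, in units of _QR_BIAS
 *
 * The reader fast path adds _QR_BIAS and succeeds if neither writer
 * bit is set; both slowpaths below queue up on lock->wait_lock, which
 * provides FIFO ordering among contending readers and writers.
 */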

/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
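	/*
	 * The fast path already added _QR_BIAS; take it back out before
	 * queueing behind any earlier waiters.
	 */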
	atomic_sub(_QR_BIAS, &lock->cnts);

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
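	/*
	 * Holding wait_lock makes this reader the queue head: a later
	 * writer must take wait_lock before it can set _QW_WAITING, so
	 * the spin below only has to wait for any current lock holder.
	 */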
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
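
/*
 * Illustrative caller-side sketch (not part of this file): the read
 * slowpath above is entered from the queued_read_lock() fast path in
 * include/asm-generic/qrwlock.h once a writer holds or is waiting for
 * the lock. example_lock and example_reader() are hypothetical names.
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	static void example_reader(void)
 *	{
 *		read_lock(&example_lock);	// fast path, else slowpath
 *		// ... read-side critical section ...
 *		read_unlock(&example_lock);
 *	}
 */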

/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
|
2014-02-03 20:18:49 +08:00
|
|
|
{
|
2021-04-16 01:27:11 +08:00
|
|
|
int cnts;
|
|
|
|
|
2022-03-23 02:57:09 +08:00
|
|
|
trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);
|
|
|
|
|
2014-02-03 20:18:49 +08:00
|
|
|
/* Put the writer into the wait queue */
|
2015-09-14 15:37:22 +08:00
|
|
|
arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);

	/* When no more readers or writers, set the locked flag */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
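
/*
 * Illustrative caller-side sketch (not part of this file): the write
 * slowpath above is entered from queued_write_lock() when its initial
 * cmpxchg of 0 -> _QW_LOCKED fails, i.e. readers or another writer
 * are present. example_lock and example_writer() are hypothetical
 * names.
 *
 *	static DEFINE_RWLOCK(example_lock);
 *
 *	static void example_writer(void)
 *	{
 *		write_lock(&example_lock);	// fast path, else slowpath
 *		// ... write-side critical section ...
 *		write_unlock(&example_lock);
 *	}
 */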