locking/percpu-rwsem: Convert to bool
Use bool where possible.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Waiman Long <longman@redhat.com>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200131151539.984626569@infradead.org
commit 206c98ffbe
parent 1751060e25
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
@@ -41,7 +41,7 @@ is_static struct percpu_rw_semaphore name = {	\
 #define DEFINE_STATIC_PERCPU_RWSEM(name)	\
 	__DEFINE_PERCPU_RWSEM(name, static)
 
-extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
 static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
@@ -69,9 +69,9 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	preempt_enable();
 }
 
-static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
-	int ret = 1;
+	bool ret = true;
 
 	preempt_disable();
 	/*

diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
@@ -45,7 +45,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *sem)
 }
 EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
-int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
+bool __percpu_down_read(struct percpu_rw_semaphore *sem, bool try)
 {
 	/*
 	 * Due to having preemption disabled the decrement happens on
@@ -69,7 +69,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	 * release in percpu_up_write().
 	 */
 	if (likely(!smp_load_acquire(&sem->readers_block)))
-		return 1;
+		return true;
 
 	/*
 	 * Per the above comment; we still have preemption disabled and
@@ -78,7 +78,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	__percpu_up_read(sem);
 
 	if (try)
-		return 0;
+		return false;
 
 	/*
 	 * We either call schedule() in the wait, or we'll fall through
@@ -94,7 +94,7 @@ int __percpu_down_read(struct percpu_rw_semaphore *sem, int try)
 	__up_read(&sem->rw_sem);
 
 	preempt_disable();
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL_GPL(__percpu_down_read);
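For callers, the only visible change is the return type of the trylock path. Below is a minimal sketch of how that bool return is typically consumed; the demo_rwsem instance and demo_reader() function are hypothetical, added here only for illustration, while the percpu-rwsem API itself is as declared in the hunks above.

#include <linux/percpu-rwsem.h>

/* Hypothetical semaphore instance, declared with the macro shown above. */
static DEFINE_STATIC_PERCPU_RWSEM(demo_rwsem);

static void demo_reader(void)
{
	/*
	 * After this patch percpu_down_read_trylock() returns bool:
	 * true if the read lock was taken, false otherwise.
	 */
	if (!percpu_down_read_trylock(&demo_rwsem))
		return;		/* contended: back off instead of blocking */

	/* ... read-side critical section ... */

	percpu_up_read(&demo_rwsem);
}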