
sched: introduce this_rq_lock_irq()

do_sched_yield() disables IRQs, looks up this_rq() and locks it.  The next
patch is adding another site with the same pattern, so provide a
convenience function for it.

Link: http://lkml.kernel.org/r/20180828172258.3185-8-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Daniel Drake <drake@endlessm.com>
Cc: Christopher Lameter <cl@linux.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <jweiner@fb.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Enderborg <peter.enderborg@sony.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Johannes Weiner, 2018-10-26 15:06:23 -07:00 (committed by Linus Torvalds)
Commit: 246b3b3342, parent: 1f351d7f75
2 changed files with 13 additions and 3 deletions

kernel/sched/core.c

@@ -4933,9 +4933,7 @@ static void do_sched_yield(void)
 	struct rq_flags rf;
 	struct rq *rq;
 
-	local_irq_disable();
-	rq = this_rq();
-	rq_lock(rq, &rf);
+	rq = this_rq_lock_irq(&rf);
 
 	schedstat_inc(rq->yld_count);
 	current->sched_class->yield_task(rq);

kernel/sched/sched.h

@@ -1157,6 +1157,18 @@ rq_unlock(struct rq *rq, struct rq_flags *rf)
 	raw_spin_unlock(&rq->lock);
 }
 
+static inline struct rq *
+this_rq_lock_irq(struct rq_flags *rf)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_disable();
+	rq = this_rq();
+	rq_lock(rq, rf);
+	return rq;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,