
sched/balancing: Rename newidle_balance() => sched_balance_newidle()

Standardize scheduler load-balancing function names on the
sched_balance_() prefix.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Link: https://lore.kernel.org/r/20240308111819.1101550-11-mingo@kernel.org
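For illustration only, here is a minimal, self-contained C sketch of the naming pattern this series standardizes on; the struct and the body are simplified stand-ins, not kernel code (the real sched_balance_newidle() lives in kernel/sched/fair.c and is shown in the hunks below):

```c
/*
 * Userspace sketch of the sched_balance_() naming convention.
 * All types and logic here are simplified assumptions; only the
 * function name and its "> 0 on success" contract mirror the kernel.
 */
#include <stdio.h>

struct rq { int nr_running; };	/* stand-in for the kernel's struct rq */

/* Renamed from newidle_balance(); returns > 0 when tasks were pulled,
 * 0 when none were found. */
static int sched_balance_newidle(struct rq *this_rq)
{
	return this_rq->nr_running;
}

int main(void)
{
	struct rq rq = { .nr_running = 2 };
	printf("new tasks: %d\n", sched_balance_newidle(&rq));
	return 0;
}
```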
Author: Ingo Molnar <mingo@kernel.org>
Date:   2024-03-08 12:18:16 +01:00
commit 7d058285cd
parent 391b7a5335

kernel/sched/fair.c

@@ -4816,7 +4816,7 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
 	return cfs_rq->avg.load_avg;
 }
 
-static int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
+static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf);
 
 static inline unsigned long task_util(struct task_struct *p)
 {
@@ -5136,7 +5136,7 @@ attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 
-static inline int newidle_balance(struct rq *rq, struct rq_flags *rf)
+static inline int sched_balance_newidle(struct rq *rq, struct rq_flags *rf)
 {
 	return 0;
 }
@@ -8253,7 +8253,7 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	if (rq->nr_running)
 		return 1;
 
-	return newidle_balance(rq, rf) != 0;
+	return sched_balance_newidle(rq, rf) != 0;
 }
 
 #endif /* CONFIG_SMP */
@@ -8505,10 +8505,10 @@ idle:
 	if (!rf)
 		return NULL;
 
-	new_tasks = newidle_balance(rq, rf);
+	new_tasks = sched_balance_newidle(rq, rf);
 
 	/*
-	 * Because newidle_balance() releases (and re-acquires) rq->lock, it is
+	 * Because sched_balance_newidle() releases (and re-acquires) rq->lock, it is
 	 * possible for any higher priority task to appear. In that case we
 	 * must re-start the pick_next_entity() loop.
 	 */
@@ -11493,7 +11493,7 @@ out_one_pinned:
 	ld_moved = 0;
 
 	/*
-	 * newidle_balance() disregards balance intervals, so we could
+	 * sched_balance_newidle() disregards balance intervals, so we could
 	 * repeatedly reach this code, which would lead to balance_interval
 	 * skyrocketing in a short amount of time. Skip the balance_interval
 	 * increase logic to avoid that.
@@ -12277,7 +12277,7 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
- * newidle_balance is called by schedule() if this_cpu is about to become
+ * sched_balance_newidle is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  *
  * Returns:
@@ -12285,7 +12285,7 @@ static inline void nohz_newidle_balance(struct rq *this_rq) { }
  *   0 - failed, no new tasks
  *   > 0 - success, new (fair) tasks present
  */
-static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
 {
 	unsigned long next_balance = jiffies + HZ;
 	int this_cpu = this_rq->cpu;