commit 90d3ac15e5

Merge commit '317f394160e9beb97d19a84c39b7e5eb3d7815a8'

Conflicts:
	arch/sparc/kernel/smp_32.c

With merge conflict help from Daniel Hellstrom.

Signed-off-by: David S. Miller <davem@davemloft.net>
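Most of the merged change is mechanical: each architecture's reschedule-IPI handler now calls scheduler_ipi() instead of relying only on the interrupt-return path to notice need_resched. A minimal sketch of that recurring pattern (example_resched_ipi is an illustrative name, not one of the handlers in the hunks below; each architecture wires the call into its own IPI dispatch):

#include <linux/interrupt.h>
#include <linux/sched.h>	/* declares scheduler_ipi() under CONFIG_SMP */

/*
 * Sketch of the pattern this merge applies across architectures:
 * the reschedule IPI explicitly invokes scheduler_ipi() so queued
 * remote wakeups can be processed, rather than doing nothing and
 * letting the interrupt-return path handle rescheduling alone.
 */
static irqreturn_t example_resched_ipi(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}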
@@ -585,8 +585,7 @@ handle_ipi(struct pt_regs *regs)
 
 	switch (which) {
 	case IPI_RESCHEDULE:
-		/* Reschedule callback.  Everything to be done
-		   is done by the interrupt return path.  */
+		scheduler_ipi();
 		break;
 
 	case IPI_CALL_FUNC:

@@ -560,10 +560,7 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 		break;
 
 	case IPI_RESCHEDULE:
-		/*
-		 * nothing more to do - eveything is
-		 * done on the interrupt return path
-		 */
+		scheduler_ipi();
 		break;
 
 	case IPI_CALL_FUNC:

@@ -177,6 +177,9 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 	while (msg_queue->count) {
 		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
+		case BFIN_IPI_RESCHEDULE:
+			scheduler_ipi();
+			break;
 		case BFIN_IPI_CALL_FUNC:
 			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_call_function(cpu, msg);
@@ -342,15 +342,18 @@ irqreturn_t crisv32_ipi_interrupt(int irq, void *dev_id)
 
 	ipi = REG_RD(intr_vect, irq_regs[smp_processor_id()], rw_ipi);
 
+	if (ipi.vector & IPI_SCHEDULE) {
+		scheduler_ipi();
+	}
 	if (ipi.vector & IPI_CALL) {
-		func(info);
+		func(info);
 	}
 	if (ipi.vector & IPI_FLUSH_TLB) {
-		if (flush_mm == FLUSH_ALL)
-			__flush_tlb_all();
-		else if (flush_vma == FLUSH_ALL)
+		if (flush_mm == FLUSH_ALL)
+			__flush_tlb_all();
+		else if (flush_vma == FLUSH_ALL)
 			__flush_tlb_mm(flush_mm);
-		else
+		else
 			__flush_tlb_page(flush_vma, flush_addr);
 	}
 
@@ -31,6 +31,7 @@
 #include <linux/irq.h>
 #include <linux/ratelimit.h>
 #include <linux/acpi.h>
+#include <linux/sched.h>
 
 #include <asm/delay.h>
 #include <asm/intrinsics.h>

@@ -496,6 +497,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
 			smp_local_flush_tlb();
 			kstat_incr_irqs_this_cpu(irq, desc);
 		} else if (unlikely(IS_RESCHEDULE(vector))) {
+			scheduler_ipi();
 			kstat_incr_irqs_this_cpu(irq, desc);
 		} else {
 			ia64_setreg(_IA64_REG_CR_TPR, vector);

@@ -92,6 +92,8 @@ static unsigned short saved_irq_cnt;
 static int xen_slab_ready;
 
 #ifdef CONFIG_SMP
+#include <linux/sched.h>
+
 /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
  * it ends up to issue several memory accesses upon percpu data and
  * thus adds unnecessary traffic to other paths.
@@ -99,7 +101,13 @@ static int xen_slab_ready;
 static irqreturn_t
 xen_dummy_handler(int irq, void *dev_id)
 {
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+xen_resched_handler(int irq, void *dev_id)
+{
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 

@@ -110,7 +118,7 @@ static struct irqaction xen_ipi_irqaction = {
 };
 
 static struct irqaction xen_resched_irqaction = {
-	.handler =	xen_dummy_handler,
+	.handler =	xen_resched_handler,
 	.flags =	IRQF_DISABLED,
 	.name =		"resched"
 };
@@ -122,8 +122,6 @@ void smp_send_reschedule(int cpu_id)
  *
  * Description:  This routine executes on CPU which received
  *               'RESCHEDULE_IPI'.
- *               Rescheduling is processed at the exit of interrupt
- *               operation.
  *
  * Born on Date: 2002.02.05
  *

@@ -138,7 +136,7 @@ void smp_send_reschedule(int cpu_id)
  *==========================================================================*/
 void smp_reschedule_interrupt(void)
 {
-	/* nothing to do */
+	scheduler_ipi();
 }
 
 /*==========================================================================*
@@ -44,6 +44,8 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
 
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 
 	/* Check if we've been told to flush the icache */
 	if (action & SMP_ICACHE_FLUSH)

@@ -929,7 +929,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
 
 static void ipi_resched_interrupt(void)
 {
-	/* Return from interrupt should be enough to cause scheduler check */
+	scheduler_ipi();
 }
 
 static void ipi_call_interrupt(void)

@@ -309,6 +309,8 @@ static void ipi_call_dispatch(void)
 
 static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
 {
+	scheduler_ipi();
+
 	return IRQ_HANDLED;
 }
 

@@ -55,6 +55,8 @@ void titan_mailbox_irq(void)
 
 		if (status & 0x2)
 			smp_call_function_interrupt();
+		if (status & 0x4)
+			scheduler_ipi();
 		break;
 
 	case 1:

@@ -63,6 +65,8 @@ void titan_mailbox_irq(void)
 
 		if (status & 0x2)
 			smp_call_function_interrupt();
+		if (status & 0x4)
+			scheduler_ipi();
 		break;
 	}
 }

@@ -147,8 +147,10 @@ static void ip27_do_irq_mask0(void)
 #ifdef CONFIG_SMP
 	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
+		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
+		scheduler_ipi();
 	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
 		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
 		smp_call_function_interrupt();
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 
 #include <asm/mmu_context.h>
 #include <asm/io.h>

@@ -189,10 +190,8 @@ void bcm1480_mailbox_interrupt(void)
 	/* Clear the mailbox to clear the interrupt */
 	__raw_writeq(((u64)action)<<48, mailbox_0_clear_regs[cpu]);
 
-	/*
-	 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-	 * interrupt will do the reschedule for us
-	 */
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();

@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 #include <linux/kernel_stat.h>
+#include <linux/sched.h>
 
 #include <asm/mmu_context.h>
 #include <asm/io.h>

@@ -177,10 +178,8 @@ void sb1250_mailbox_interrupt(void)
 	/* Clear the mailbox to clear the interrupt */
 	____raw_writeq(((u64)action) << 48, mailbox_clear_regs[cpu]);
 
-	/*
-	 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
-	 * interrupt will do the reschedule for us
-	 */
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		scheduler_ipi();
 
 	if (action & SMP_CALL_FUNCTION)
 		smp_call_function_interrupt();
@@ -494,14 +494,11 @@ void smp_send_stop(void)
  * @irq: The interrupt number.
  * @dev_id: The device ID.
  *
- * We need do nothing here, since the scheduling will be effected on our way
- * back through entry.S.
- *
 * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
 */
 static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
 {
-	/* do nothing */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
@@ -155,10 +155,7 @@ ipi_interrupt(int irq, void *dev_id)
 
 			case IPI_RESCHEDULE:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
-				/*
-				 * Reschedule callback.  Everything to be
-				 * done is done by the interrupt return path.
-				 */
+				scheduler_ipi();
 				break;
 
 			case IPI_CALL_FUNC:

@@ -116,7 +116,7 @@ void smp_message_recv(int msg)
 		generic_smp_call_function_interrupt();
 		break;
 	case PPC_MSG_RESCHEDULE:
-		/* we notice need_resched on exit */
+		scheduler_ipi();
 		break;
 	case PPC_MSG_CALL_FUNC_SINGLE:
 		generic_smp_call_function_single_interrupt();

@@ -146,7 +146,7 @@ static irqreturn_t call_function_action(int irq, void *data)
 
 static irqreturn_t reschedule_action(int irq, void *data)
 {
-	/* we just need the return path side effect of checking need_resched */
+	scheduler_ipi();
 	return IRQ_HANDLED;
 }
 
@@ -165,12 +165,12 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 	kstat_cpu(smp_processor_id()).irqs[EXTINT_IPI]++;
 	/*
 	 * handle bit signal external calls
-	 *
-	 * For the ec_schedule signal we have to do nothing. All the work
-	 * is done automatically when we return from the interrupt.
 	 */
 	bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
+	if (test_bit(ec_schedule, &bits))
+		scheduler_ipi();
+
 	if (test_bit(ec_call_function, &bits))
 		generic_smp_call_function_interrupt();
 

@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/atomic.h>
 #include <asm/processor.h>
 #include <asm/system.h>

@@ -323,6 +324,7 @@ void smp_message_recv(unsigned int msg)
 		generic_smp_call_function_interrupt();
 		break;
 	case SMP_MSG_RESCHEDULE:
+		scheduler_ipi();
 		break;
 	case SMP_MSG_FUNCTION_SINGLE:
 		generic_smp_call_function_single_interrupt();
@@ -156,11 +156,11 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void smp_resched_interrupt(void)
 {
 	irq_enter();
+	scheduler_ipi();
 	local_cpu_data().irq_resched_count++;
-	/*
-	 * do nothing, since it all was about calling re-schedule
-	 * routine called by interrupt return code.
-	 */
 	irq_exit();
+	/* re-schedule routine called by interrupt return code. */
 }
 
 void smp_call_function_single_interrupt(void)
@@ -1368,6 +1368,7 @@ void smp_send_reschedule(int cpu)
 
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
 {
 	clear_softint(1 << irq);
+	scheduler_ipi();
 }
 
 /* This is a nop because we capture all other cpus

@@ -189,12 +189,8 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-	/*
-	 * Nothing to do here; when we return from interrupt, the
-	 * rescheduling will occur there. But do bump the interrupt
-	 * profiler count in the meantime.
-	 */
 	__get_cpu_var(irq_stat).irq_resched_count++;
+	scheduler_ipi();
 
 	return IRQ_HANDLED;
 }

@@ -173,7 +173,7 @@ void IPI_handler(int cpu)
 			break;
 
 		case 'R':
-			set_tsk_need_resched(current);
+			scheduler_ipi();
 			break;
 
 		case 'S':
@@ -194,14 +194,13 @@ static void native_stop_other_cpus(int wait)
 }
 
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
 */
 void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */

@@ -46,13 +46,12 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
 static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
 
 /*
- * Reschedule call back. Nothing to do,
- * all the work is done automatically when
- * we return from the interrupt.
+ * Reschedule call back.
 */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
 	inc_irq_stat(irq_resched_count);
+	scheduler_ipi();
 
 	return IRQ_HANDLED;
 }
@@ -51,7 +51,7 @@ struct mutex {
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
-	struct thread_info	*owner;
+	struct task_struct	*owner;
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
 	const char		*name;

@@ -360,7 +360,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -1048,8 +1048,12 @@ struct sched_domain;
 #define WF_FORK		0x02		/* child wakeup after fork */
 
 #define ENQUEUE_WAKEUP		1
-#define ENQUEUE_WAKING		2
-#define ENQUEUE_HEAD		4
+#define ENQUEUE_HEAD		2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING		0
+#endif
 
 #define DEQUEUE_SLEEP		1
 

@@ -1067,12 +1071,11 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			       int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,

@@ -1200,10 +1203,10 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	struct task_struct *wake_entry;
+	int on_cpu;
 #endif
+	int on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;

@@ -1274,6 +1277,7 @@ struct task_struct {
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
 
 	pid_t pid;
 	pid_t tgid;

@@ -2192,8 +2196,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void scheduler_ipi(void) { }
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
@@ -827,6 +827,11 @@ config SCHED_AUTOGROUP
 	  desktop applications.  Task group autogeneration is currently based
 	  upon task session.
 
+config SCHED_TTWU_QUEUE
+	bool
+	depends on !SPARC32
+	default y
+
 config MM_OWNER
 	bool
 
@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
 		return;
 
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	DEBUG_LOCKS_WARN_ON(lock->owner != current);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	mutex_clear_owner(lock);
 }

@@ -29,7 +29,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)

@@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 */
 
 	for (;;) {
-		struct thread_info *owner;
+		struct task_struct *owner;
 
 		/*
 		 * If we own the BKL, then don't spin. The owner of

@@ -19,7 +19,7 @@
 #ifdef CONFIG_SMP
 static inline void mutex_set_owner(struct mutex *lock)
 {
-	lock->owner = current_thread_info();
+	lock->owner = current;
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
kernel/sched.c: 639 changes (file diff suppressed because it is too large)
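The suppressed kernel/sched.c diff carries the substance of the scheduler side: with the new TTWU_QUEUE feature, a CPU waking a task that last ran elsewhere no longer grabs the remote runqueue lock itself; it queues the task on the target CPU (via the task_struct::wake_entry field added above) and sends the reschedule IPI, and scheduler_ipi() on the target finishes the wakeups. A rough sketch of that flow, using placeholder names (wake_list_sketch, ttwu_queue_remote_sketch, scheduler_ipi_sketch) rather than the real kernel/sched.c internals:

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

/* Placeholder per-CPU list head; the real code keeps its list in struct rq. */
static DEFINE_PER_CPU(struct task_struct *, wake_list_sketch);

/* Waker side: push @p onto @cpu's pending-wakeup list, then kick that CPU. */
static void ttwu_queue_remote_sketch(struct task_struct *p, int cpu)
{
	struct task_struct **head = &per_cpu(wake_list_sketch, cpu);
	struct task_struct *old;

	do {
		old = *head;
		p->wake_entry = old;	/* wake_entry is the field added above */
	} while (cmpxchg(head, old, p) != old);

	smp_send_reschedule(cpu);	/* arrives via the IPI handlers patched above */
}

/* Target side: drain the list from the reschedule IPI. */
static void scheduler_ipi_sketch(void)
{
	struct task_struct *p = xchg(this_cpu_ptr(&wake_list_sketch), NULL);

	while (p) {
		struct task_struct *next = p->wake_entry;

		/* here the real code activates p on this CPU's runqueue */
		p = next;
	}
}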
@@ -152,7 +152,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	read_lock_irqsave(&tasklist_lock, flags);
 
 	do_each_thread(g, p) {
-		if (!p->se.on_rq || task_cpu(p) != rq_cpu)
+		if (!p->on_rq || task_cpu(p) != rq_cpu)
 			continue;
 
 		print_task(m, rq, p);

@@ -358,6 +358,10 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 	}
 
 	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
+#ifndef CONFIG_64BIT
+	smp_wmb();
+	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
+#endif
 }
 
 /*
@@ -1372,12 +1376,25 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 #ifdef CONFIG_SMP
 
-static void task_waking_fair(struct rq *rq, struct task_struct *p)
+static void task_waking_fair(struct task_struct *p)
 {
 	struct sched_entity *se = &p->se;
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	u64 min_vruntime;
 
-	se->vruntime -= cfs_rq->min_vruntime;
+#ifndef CONFIG_64BIT
+	u64 min_vruntime_copy;
+
+	do {
+		min_vruntime_copy = cfs_rq->min_vruntime_copy;
+		smp_rmb();
+		min_vruntime = cfs_rq->min_vruntime;
+	} while (min_vruntime != min_vruntime_copy);
+#else
+	min_vruntime = cfs_rq->min_vruntime;
+#endif
+
+	se->vruntime -= min_vruntime;
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED

@@ -1657,7 +1674,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
 * preempt must be disabled.
 */
 static int
-select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
 	int cpu = smp_processor_id();

@@ -1789,10 +1806,7 @@ wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
 	 * This is especially important for buddies when the leftmost
 	 * task is higher priority than the buddy.
 	 */
-	if (unlikely(se->load.weight != NICE_0_LOAD))
-		gran = calc_delta_fair(gran, se);
-
-	return gran;
+	return calc_delta_fair(gran, se);
 }
 
 /*
@@ -64,3 +64,9 @@ SCHED_FEAT(OWNER_SPIN, 1)
 * Decrement CPU power based on irq activity
 */
 SCHED_FEAT(NONIRQ_POWER, 1)
+
+/*
+ * Queue remote wakeups on the target CPU and process them
+ * using the scheduler IPI. Reduces rq->lock contention/bounces.
+ */
+SCHED_FEAT(TTWU_QUEUE, 1)

@@ -7,7 +7,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
+
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.

@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 	 * lock?
 	 *
 	 * For equal prio tasks, we just let the scheduler sort it out.
-	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
-	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
-
-		return (cpu == -1) ? task_cpu(p) : cpu;
-	}
-
-	/*
+	 *
 	 * Otherwise, just let it ride on the affined RQ and the
 	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
 	 */
-	return task_cpu(p);
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
+	    (p->rt.nr_cpus_allowed > 1)) {
+		int target = find_lowest_rq(p);
+
+		if (target != -1)
+			cpu = target;
+	}
+	rcu_read_unlock();
+
+	return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
@@ -1136,7 +1150,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 

@@ -1287,7 +1301,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
-				     !task->se.on_rq)) {
+				     !task->on_rq)) {
 
 				raw_spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;

@@ -1321,7 +1335,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->se.on_rq);
+	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
 
 	return p;

@@ -1467,7 +1481,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->se.on_rq);
+			WARN_ON(!p->on_rq);
 
 			/*
 			 * There's a chance that p is higher in priority

@@ -1538,7 +1552,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Update the migration status of the RQ if we have an RT task
 	 * which is running AND changing its weight value.
 	 */
-	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 
 		if (!task_current(rq, p)) {

@@ -1608,7 +1622,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->se.on_rq && !rq->rt.rt_nr_running)
+	if (p->on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 

@@ -1638,7 +1652,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->se.on_rq && rq->curr != p) {
+	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */

@@ -1657,7 +1671,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		return;
 
 	if (rq->curr == p) {
@@ -9,8 +9,7 @@
 
 #ifdef CONFIG_SMP
 static int
-select_task_rq_stop(struct rq *rq, struct task_struct *p,
-		    int sd_flag, int flags)
+select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }

@@ -26,7 +25,7 @@ static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->se.on_rq)
+	if (stop && stop->on_rq)
 		return stop;
 
 	return NULL;