mirror of https://github.com/edk2-porting/linux-next.git
commit e761b77252
This is based on Linus' idea of creating cpu_active_map, which prevents the scheduler load balancer from migrating tasks to a cpu that is going down. It allows us to simplify the domain management code and avoid unnecessary domain rebuilds during cpu hotplug event handling.

Please ignore the cpusets part for now. It needs some more work in order to avoid crazy lock nesting. I did, however, simplify and unify the domain reinitialization logic: we now simply call partition_sched_domains() in all cases. This means that we're using exactly the same code paths as in the cpusets case, and hence the test below covers cpusets too. The cpuset changes that make rebuild_sched_domains() callable from various contexts are in a separate patch (right after this one).

This not only boots but also easily handles

	while true; do make clean; make -j 8; done

and

	while true; do on-off-cpu 1; done

at the same time (on-off-cpu 1 simply does the echo 0/1 > /sys/.../cpu1/online thing).

Surprisingly, the box (dual-core Core2) is quite usable. In fact I'm typing this on it right now in gnome-terminal, and things are moving along just fine. This is also running with most of the debug features enabled (lockdep, mutex debugging, etc.); no BUG_ONs or lockdep complaints so far.

I believe I addressed all of Dmitry's comments on the original Linus version. I changed both the fair and rt balancers to mask out non-active cpus, and replaced cpu_is_offline() with !cpu_active() in the main scheduler code where it made sense (to me).

Signed-off-by: Max Krasnyanskiy <maxk@qualcomm.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Gregory Haskins <ghaskins@novell.com>
Cc: dmitry.adamushko@gmail.com
Cc: pj@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
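To illustrate the balancer change described above, here is a minimal sketch of the masking idea; pick_dest_cpu() is a hypothetical helper, not the actual fair/rt balancer code, written against the 2.6-era cpumask API:

	/*
	 * Pick a destination cpu for a task from the set of cpus that
	 * are both allowed for the task and still active. cpu_active_map
	 * is cleared for a cpu early in its offlining, so the balancer
	 * stops migrating tasks onto it before it actually goes away.
	 */
	static int pick_dest_cpu(struct task_struct *p)
	{
		cpumask_t candidates;

		cpus_and(candidates, p->cpus_allowed, cpu_active_map);
		if (cpus_empty(candidates))
			return -1;	/* no usable cpu; caller must cope */
		return first_cpu(candidates);
	}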
169 lines | 4.1 KiB | C
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init_early(void);
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, cpumask_t *mask);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
void cpuset_update_task_memory_state(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_softwall(z, gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_zone_allowed_hardwall(z, gfp_mask);
}
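/*
 * (Editorial note, not in the original header) In both wrappers above,
 * number_of_cpusets <= 1 means only the top cpuset exists, so every
 * zone is allowed and the out-of-line __cpuset_zone_allowed_*() call
 * is skipped. The common no-cpusets case therefore costs one global
 * load and compare on the allocator's hot path.
 */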

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
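/*
 * (Editorial note) cpuset_memory_pressure_bump() is a macro so that,
 * when the global cpuset_memory_pressure_enabled flag is off, noting
 * a reclaim event costs a single flag test and no function call.
 */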

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern void cpuset_lock(void);
extern void cpuset_unlock(void);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern void cpuset_track_online_nodes(void);

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init_early(void) { return 0; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_cpus_allowed(struct task_struct *p, cpumask_t *mask)
{
	*mask = cpu_possible_map;
}
static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      cpumask_t *mask)
{
	*mask = cpu_possible_map;
}
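/*
 * (Editorial note) With CONFIG_CPUSETS off, the stubs in this branch
 * give the most permissive answers: all possible cpus and nodes, and
 * "allowed" for every check, so core callers need no #ifdefs.
 */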

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}
static inline void cpuset_update_task_memory_state(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline void cpuset_lock(void) {}
static inline void cpuset_unlock(void) {}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline void cpuset_track_online_nodes(void) {}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(0, NULL, NULL);
}
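/*
 * (Editorial note) Without cpusets there is only the default domain
 * partition, so a rebuild degenerates to one partition_sched_domains()
 * call; this is the single code path the changelog above refers to.
 */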

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */
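As a usage sketch for the softwall check above, here is a hypothetical allocation-path fragment (not code from this commit; zonelist handling is simplified, and gfp_mask is assumed to be the caller's allocation flags):

	struct zone *z;

	for_each_zone(z) {
		/* Skip zones the current task's cpuset does not permit.
		 * With no cpusets configured (number_of_cpusets <= 1) the
		 * inline wrapper short-circuits without an out-of-line call. */
		if (!cpuset_zone_allowed_softwall(z, gfp_mask))
			continue;
		/* ... try to allocate a page from z ... */
	}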