Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-28 06:34:12 +08:00)
rcuscale: Measure grace-period kthread CPU time
This commit adds the ability to output the CPU time consumed by the grace-period kthread for the RCU variant under test. The CPU time is whatever is in the designated task's current->stime field, and thus is controlled by whatever CPU-time accounting scheme is in effect. This output appears in microseconds as follows on the console:

rcu_scale: Grace-period kthread CPU time: 42367.037

[ paulmck: Apply feedback from Stephen Rothwell and kernel test robot. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Tested-by: Yujie Liu <yujie.liu@intel.com>
parent bb7bad3dae
commit 5f8e320269
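As the commit message above describes, the measurement is just a delta of the kthread's cumulative system time: snapshot task_struct->stime when the test starts, subtract that snapshot at cleanup, and split the difference into whole microseconds plus a three-digit nanosecond remainder with div_u64_rem(). The sketch below illustrates only that pattern; the helper names and the static variable are illustrative rather than taken from the patch, and ->stime is treated as nanoseconds, as the patch does.

#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/printk.h>

/* Illustrative sketch only; the names below are not from the patch. */
static u64 gp_kthread_stime_snap;

/* At test start: remember the kthread's cumulative system CPU time. */
static void gp_cputime_start(struct task_struct *tp)
{
        gp_kthread_stime_snap = tp->stime;
}

/* At cleanup: report how much system CPU time was consumed since then. */
static void gp_cputime_report(struct task_struct *tp)
{
        u64 delta = tp->stime - gp_kthread_stime_snap;
        u32 ns;
        u64 us;

        /* Treat the delta as nanoseconds; split into whole us and 0-999 ns. */
        us = div_u64_rem(delta, 1000, &ns);
        pr_info("GP kthread CPU time: %llu.%03u us\n", us, ns);
}

In the patch itself, rcu_scale_init() takes the snapshot through the new rso_gp_kthread() hook and rcu_scale_cleanup() prints the delta, as the hunks below show.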
include/linux/rcupdate_trace.h
@@ -87,6 +87,7 @@ static inline void rcu_read_unlock_trace(void)
 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
 void synchronize_rcu_tasks_trace(void);
 void rcu_barrier_tasks_trace(void);
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void);
 #else
 /*
  * The BPF JIT forms these addresses even when it doesn't call these
kernel/rcu/rcuscale.c
@@ -141,6 +141,7 @@ struct rcu_scale_ops {
        void (*gp_barrier)(void);
        void (*sync)(void);
        void (*exp_sync)(void);
+       struct task_struct *(*rso_gp_kthread)(void);
        const char *name;
 };
 
@@ -336,6 +337,7 @@ static struct rcu_scale_ops tasks_tracing_ops = {
        .gp_barrier = rcu_barrier_tasks_trace,
        .sync = synchronize_rcu_tasks_trace,
        .exp_sync = synchronize_rcu_tasks_trace,
+       .rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
        .name = "tasks-tracing"
 };
 
@@ -563,6 +565,8 @@ static struct task_struct **kfree_reader_tasks;
 static int kfree_nrealthreads;
 static atomic_t n_kfree_scale_thread_started;
 static atomic_t n_kfree_scale_thread_ended;
+static struct task_struct *kthread_tp;
+static u64 kthread_stime;
 
 struct kfree_obj {
        char kfree_obj[8];
@@ -808,6 +812,18 @@ rcu_scale_cleanup(void)
        if (gp_exp && gp_async)
                SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");
 
+       // If built-in, just report all of the GP kthread's CPU time.
+       if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
+               kthread_tp = cur_ops->rso_gp_kthread();
+       if (kthread_tp) {
+               u32 ns;
+               u64 us;
+
+               kthread_stime = kthread_tp->stime - kthread_stime;
+               us = div_u64_rem(kthread_stime, 1000, &ns);
+               pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
+               show_rcu_gp_kthreads();
+       }
        if (kfree_rcu_test) {
                kfree_scale_cleanup();
                return;
@@ -921,6 +937,11 @@ rcu_scale_init(void)
        if (cur_ops->init)
                cur_ops->init();
 
+       if (cur_ops->rso_gp_kthread) {
+               kthread_tp = cur_ops->rso_gp_kthread();
+               if (kthread_tp)
+                       kthread_stime = kthread_tp->stime;
+       }
        if (kfree_rcu_test)
                return kfree_scale_init();
 
kernel/rcu/tasks.h
@@ -1830,6 +1830,12 @@ void show_rcu_tasks_trace_gp_kthread(void)
 EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
 #endif // !defined(CONFIG_TINY_RCU)
 
+struct task_struct *get_rcu_tasks_trace_gp_kthread(void)
+{
+       return rcu_tasks_trace.kthread_ptr;
+}
+EXPORT_SYMBOL_GPL(get_rcu_tasks_trace_gp_kthread);
+
 #else /* #ifdef CONFIG_TASKS_TRACE_RCU */
 static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
 #endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */