mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-04 01:24:12 +08:00
perf sched: Move start_work_mutex and work_done_wait_mutex initialization to perf_sched__replay()
The start_work_mutex and work_done_wait_mutex are used only for the 'perf sched replay'. Put their initialization in perf_sched__replay () to reduce unnecessary actions in other commands. Simple functional testing: # perf sched record perf bench sched messaging # Running 'sched/messaging' benchmark: # 20 sender and receiver processes per group # 10 groups == 400 processes run Total time: 0.197 [sec] [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 14.952 MB perf.data (134165 samples) ] # perf sched replay run measurement overhead: 108 nsecs sleep measurement overhead: 65658 nsecs the run test took 999991 nsecs the sleep test took 1079324 nsecs nr_run_events: 42378 nr_sleep_events: 43102 nr_wakeup_events: 31852 target-less wakeups: 17 multi-target wakeups: 712 task 0 ( swapper: 0), nr_events: 10451 task 1 ( swapper: 1), nr_events: 3 task 2 ( swapper: 2), nr_events: 1 <SNIP> task 717 ( sched-messaging: 74483), nr_events: 152 task 718 ( sched-messaging: 74484), nr_events: 1944 task 719 ( sched-messaging: 74485), nr_events: 73 task 720 ( sched-messaging: 74486), nr_events: 163 task 721 ( sched-messaging: 74487), nr_events: 942 task 722 ( sched-messaging: 74488), nr_events: 78 task 723 ( sched-messaging: 74489), nr_events: 1090 ------------------------------------------------------------ #1 : 1366.507, ravg: 1366.51, cpu: 7682.70 / 7682.70 #2 : 1410.072, ravg: 1370.86, cpu: 7723.88 / 7686.82 #3 : 1396.296, ravg: 1373.41, cpu: 7568.20 / 7674.96 #4 : 1381.019, ravg: 1374.17, cpu: 7531.81 / 7660.64 #5 : 1393.826, ravg: 1376.13, cpu: 7725.25 / 7667.11 #6 : 1401.581, ravg: 1378.68, cpu: 7594.82 / 7659.88 #7 : 1381.337, ravg: 1378.94, cpu: 7371.22 / 7631.01 #8 : 1373.842, ravg: 1378.43, cpu: 7894.92 / 7657.40 #9 : 1364.697, ravg: 1377.06, cpu: 7324.91 / 7624.15 #10 : 1363.613, ravg: 1375.72, cpu: 7209.55 / 7582.69 # echo $? 
0 Signed-off-by: Yang Jihong <yangjihong1@huawei.com> Signed-off-by: Namhyung Kim <namhyung@kernel.org> Link: https://lore.kernel.org/r/20240206083228.172607-2-yangjihong1@huawei.com
This commit is contained in:
parent
5f70c6c559
commit
c690786351
@@ -3285,15 +3285,20 @@ static int perf_sched__map(struct perf_sched *sched)
|
||||
|
||||
static int perf_sched__replay(struct perf_sched *sched)
|
||||
{
|
||||
int ret;
|
||||
unsigned long i;
|
||||
|
||||
mutex_init(&sched->start_work_mutex);
|
||||
mutex_init(&sched->work_done_wait_mutex);
|
||||
|
||||
calibrate_run_measurement_overhead(sched);
|
||||
calibrate_sleep_measurement_overhead(sched);
|
||||
|
||||
test_calibrations(sched);
|
||||
|
||||
if (perf_sched__read_events(sched))
|
||||
return -1;
|
||||
ret = perf_sched__read_events(sched);
|
||||
if (ret)
|
||||
goto out_mutex_destroy;
|
||||
|
||||
printf("nr_run_events: %ld\n", sched->nr_run_events);
|
||||
printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
|
||||
@@ -3318,7 +3323,11 @@ static int perf_sched__replay(struct perf_sched *sched)
|
||||
|
||||
sched->thread_funcs_exit = true;
|
||||
destroy_tasks(sched);
|
||||
return 0;
|
||||
|
||||
out_mutex_destroy:
|
||||
mutex_destroy(&sched->start_work_mutex);
|
||||
mutex_destroy(&sched->work_done_wait_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void setup_sorting(struct perf_sched *sched, const struct option *options,
|
||||
@@ -3556,8 +3565,6 @@ int cmd_sched(int argc, const char **argv)
|
||||
unsigned int i;
|
||||
int ret = 0;
|
||||
|
||||
mutex_init(&sched.start_work_mutex);
|
||||
mutex_init(&sched.work_done_wait_mutex);
|
||||
sched.curr_thread = calloc(MAX_CPUS, sizeof(*sched.curr_thread));
|
||||
if (!sched.curr_thread) {
|
||||
ret = -ENOMEM;
|
||||
@@ -3645,8 +3652,6 @@ out:
|
||||
free(sched.curr_pid);
|
||||
free(sched.cpu_last_switched);
|
||||
free(sched.curr_thread);
|
||||
mutex_destroy(&sched.start_work_mutex);
|
||||
mutex_destroy(&sched.work_done_wait_mutex);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user