linux/tools/perf/util/affinity.c
Arnaldo Carvalho de Melo 1855b796f2 perf affinity: Allow passing a NULL arg to affinity__cleanup()
Just like with free(), NULL is checked to avoid having all callers do
it.

It's convenient when not using affinity setup/cleanup with dummy CPU
maps, i.e. CPU maps for pid targets.
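
E.g., a hypothetical caller tearing down an optional affinity no longer
needs its own NULL check:

	-	if (affinity)
	-		affinity__cleanup(affinity);
	+	affinity__cleanup(affinity);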

Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20220117160931.1191712-2-acme@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-01-18 09:24:58 -03:00


// SPDX-License-Identifier: GPL-2.0
/* Manage affinity to optimize IPIs inside the kernel perf API. */
#define _GNU_SOURCE 1
#include <sched.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include <linux/zalloc.h>
#include "perf.h"
#include "cpumap.h"
#include "affinity.h"

static int get_cpu_set_size(void)
{
	/* One bit per possible CPU, rounded up to whole bytes below. */
	int sz = cpu__max_cpu().cpu + 8 - 1;

	/*
	 * sched_getaffinity() fails with EINVAL for masks smaller than the
	 * kernel's internal cpumask, so use at least 4096 bits.
	 * Hopefully that's big enough.
	 */
	if (sz < 4096)
		sz = 4096;
	return sz / 8;	/* bytes */
}
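
/*
 * Record the calling thread's current CPU affinity and allocate a
 * scratch mask for affinity__set() to use; returns -1 if allocation
 * fails.
 */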
int affinity__setup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	a->orig_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->orig_cpus)
		return -1;
	/* Remember the original mask so that cleanup can restore it. */
	sched_getaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	a->sched_cpus = bitmap_zalloc(cpu_set_size * 8);
	if (!a->sched_cpus) {
		zfree(&a->orig_cpus);
		return -1;
	}
	/* Start from an empty scratch mask; bitmap sizes are in bits. */
	bitmap_zero((unsigned long *)a->sched_cpus, cpu_set_size * 8);
	a->changed = false;
	return 0;
}

/*
 * perf_event_open does an IPI internally to the target CPU.
 * It is more efficient to change perf's affinity to the target
 * CPU and then set up all events on that CPU, so we amortize
 * CPU communication.
 */
void affinity__set(struct affinity *a, int cpu)
{
	int cpu_set_size = get_cpu_set_size();

	if (cpu == -1)
		return;
	a->changed = true;
	set_bit(cpu, a->sched_cpus);
	/*
	 * We ignore errors because affinity is just an optimization.
	 * This could happen for example with isolated CPUs or cpusets.
	 * In this case the IPIs inside the kernel's perf API still work.
	 */
	sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->sched_cpus);
	clear_bit(cpu, a->sched_cpus);
}
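
/* Restore the original affinity mask and free both bitmaps. */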
static void __affinity__cleanup(struct affinity *a)
{
	int cpu_set_size = get_cpu_set_size();

	if (a->changed)
		sched_setaffinity(0, cpu_set_size, (cpu_set_t *)a->orig_cpus);
	zfree(&a->sched_cpus);
	zfree(&a->orig_cpus);
}
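
/* Like free(), accepts NULL so that callers don't have to check. */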
void affinity__cleanup(struct affinity *a)
{
	if (a != NULL)
		__affinity__cleanup(a);
}