x86/events/amd/iommu: Enable support for multiple IOMMUs
Add support for multiple IOMMUs to perf by exposing an AMD IOMMU PMU
for each IOMMU found in the system via:

  /bus/event_source/devices/amd_iommu_x

where x is the IOMMU index. This allows users to specify different
events to be programmed into the performance counters of each IOMMU.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
[ Improve readability, shorten names. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Jörg Rödel <joro@8bytes.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/1490166162-10002-11-git-send-email-Suravee.Suthikulpanit@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent cf25f904ef
commit 25df39f2cf
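For illustration only (not part of the commit): a minimal user-space sketch of how one of the per-IOMMU PMUs registered below could be used once it appears under the standard sysfs location /sys/bus/event_source/devices/. The name amd_iommu_0 follows the "amd_iommu_%u" naming in init_one_iommu(); the csource value 0x01 is a placeholder, real event encodings come from the PMU's events/ directory.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        struct perf_event_attr attr;
        long long count;
        FILE *f;
        int type, fd;

        /* Each IOMMU gets its own dynamic PMU type after this patch. */
        f = fopen("/sys/bus/event_source/devices/amd_iommu_0/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1)
            return 1;
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size     = sizeof(attr);
        attr.type     = type;   /* target IOMMU #0's PMU */
        attr.config   = 0x01;   /* placeholder csource (config:0-7) */
        attr.disabled = 1;      /* start stopped, enable explicitly below */

        /* IOMMU counters are system-wide: pid == -1, pin to one CPU. */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0)
            return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        sleep(1);
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
            printf("amd_iommu_0: %lld events\n", count);

        close(fd);
        return 0;
    }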
@@ -34,16 +34,21 @@
 #define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL)
 #define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL)
 
-static struct perf_amd_iommu __perf_iommu;
+#define IOMMU_NAME_SIZE 16
 
 struct perf_amd_iommu {
+	struct list_head list;
 	struct pmu pmu;
+	struct amd_iommu *iommu;
+	char name[IOMMU_NAME_SIZE];
 	u8 max_banks;
 	u8 max_counters;
 	u64 cntr_assign_mask;
 	raw_spinlock_t lock;
 };
 
+static LIST_HEAD(perf_amd_iommu_list);
+
 /*---------------------------------------------
  * sysfs format attributes
  *---------------------------------------------*/
@@ -233,9 +238,14 @@ static int perf_iommu_event_init(struct perf_event *event)
 	return 0;
 }
 
+static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
+{
+	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
+}
+
 static void perf_iommu_enable_event(struct perf_event *ev)
 {
-	struct amd_iommu *iommu = get_amd_iommu(0);
+	struct amd_iommu *iommu = perf_event_2_iommu(ev);
 	struct hw_perf_event *hwc = &ev->hw;
 	u8 bank = hwc->iommu_bank;
 	u8 cntr = hwc->iommu_cntr;
@@ -265,7 +275,7 @@ static void perf_iommu_enable_event(struct perf_event *ev)
 
 static void perf_iommu_disable_event(struct perf_event *event)
 {
-	struct amd_iommu *iommu = get_amd_iommu(0);
+	struct amd_iommu *iommu = perf_event_2_iommu(event);
 	struct hw_perf_event *hwc = &event->hw;
 	u64 reg = 0ULL;
 
@@ -285,7 +295,7 @@ static void perf_iommu_start(struct perf_event *event, int flags)
 
 	if (flags & PERF_EF_RELOAD) {
 		u64 prev_raw_count = local64_read(&hwc->prev_count);
-		struct amd_iommu *iommu = get_amd_iommu(0);
+		struct amd_iommu *iommu = perf_event_2_iommu(event);
 
 		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
 				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
@@ -300,7 +310,7 @@ static void perf_iommu_read(struct perf_event *event)
 {
 	u64 count, prev, delta;
 	struct hw_perf_event *hwc = &event->hw;
-	struct amd_iommu *iommu = get_amd_iommu(0);
+	struct amd_iommu *iommu = perf_event_2_iommu(event);
 
 	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
 				 IOMMU_PC_COUNTER_REG, &count))
@@ -388,11 +398,6 @@ static __init int _init_events_attrs(void)
 	return 0;
 }
 
-static __init void amd_iommu_pc_exit(void)
-{
-	kfree(amd_iommu_events_group.attrs);
-}
-
 const struct attribute_group *amd_iommu_attr_groups[] = {
 	&amd_iommu_format_group,
 	&amd_iommu_cpumask_group,
@@ -400,46 +405,57 @@ const struct attribute_group *amd_iommu_attr_groups[] = {
 	NULL,
 };
 
-static __init int
-_init_perf_amd_iommu(struct perf_amd_iommu *perf_iommu, char *name)
+static struct pmu iommu_pmu = {
+	.event_init  = perf_iommu_event_init,
+	.add         = perf_iommu_add,
+	.del         = perf_iommu_del,
+	.start       = perf_iommu_start,
+	.stop        = perf_iommu_stop,
+	.read        = perf_iommu_read,
+	.task_ctx_nr = perf_invalid_context,
+	.attr_groups = amd_iommu_attr_groups,
+};
+
+static __init int init_one_iommu(unsigned int idx)
 {
+	struct perf_amd_iommu *perf_iommu;
 	int ret;
 
+	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
+	if (!perf_iommu)
+		return -ENOMEM;
+
 	raw_spin_lock_init(&perf_iommu->lock);
 
-	/* Init cpumask attributes to only core 0 */
-	cpumask_set_cpu(0, &iommu_cpumask);
+	perf_iommu->pmu          = iommu_pmu;
+	perf_iommu->iommu        = get_amd_iommu(idx);
+	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
+	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);
 
-	perf_iommu->max_banks = amd_iommu_pc_get_max_banks(0);
-	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(0);
-	if (!perf_iommu->max_banks || !perf_iommu->max_counters)
+	if (!perf_iommu->iommu ||
+	    !perf_iommu->max_banks ||
+	    !perf_iommu->max_counters) {
+		kfree(perf_iommu);
 		return -EINVAL;
+	}
 
-	perf_iommu->pmu.attr_groups = amd_iommu_attr_groups;
-	ret = perf_pmu_register(&perf_iommu->pmu, name, -1);
-	if (ret)
-		pr_err("Error initializing AMD IOMMU perf counters.\n");
-	else
-		pr_info("Detected AMD IOMMU (%d banks, %d counters/bank).\n",
-			amd_iommu_pc_get_max_banks(0),
-			amd_iommu_pc_get_max_counters(0));
+	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);
+
+	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
+	if (!ret) {
+		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
+			idx, perf_iommu->max_banks, perf_iommu->max_counters);
+		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
+	} else {
+		pr_warn("Error initializing IOMMU %d.\n", idx);
+		kfree(perf_iommu);
+	}
 
 	return ret;
 }
 
-static struct perf_amd_iommu __perf_iommu = {
-	.pmu = {
-		.task_ctx_nr = perf_invalid_context,
-		.event_init  = perf_iommu_event_init,
-		.add         = perf_iommu_add,
-		.del         = perf_iommu_del,
-		.start       = perf_iommu_start,
-		.stop        = perf_iommu_stop,
-		.read        = perf_iommu_read,
-	},
-};
-
 static __init int amd_iommu_pc_init(void)
 {
+	unsigned int i, cnt = 0;
 	int ret;
 
 	/* Make sure the IOMMU PC resource is available */
@@ -450,11 +466,25 @@ static __init int amd_iommu_pc_init(void)
 	if (ret)
 		return ret;
 
-	ret = _init_perf_amd_iommu(&__perf_iommu, "amd_iommu");
-	if (ret)
-		amd_iommu_pc_exit();
+	/*
+	 * An IOMMU PMU is specific to an IOMMU, and can function independently.
+	 * So we go through all IOMMUs and ignore the one that fails init
+	 * unless all IOMMU are failing.
+	 */
+	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
+		ret = init_one_iommu(i);
+		if (!ret)
+			cnt++;
+	}
 
-	return ret;
+	if (!cnt) {
+		kfree(amd_iommu_events_group.attrs);
+		return -ENODEV;
+	}
+
+	/* Init cpumask attributes to only core 0 */
+	cpumask_set_cpu(0, &iommu_cpumask);
+	return 0;
 }
 
 device_initcall(amd_iommu_pc_init);