commit 150aae354b
Merge tag 'perf_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Borislav Petkov:

 - Pass only an initialized perf event attribute to the LSM hook

 - Fix a use-after-free on the perf syscall's error path

 - A potential integer overflow fix in amd_core_pmu_init()

 - Fix the cgroup events tracking after the context handling rewrite

 - Return the proper value from the inherit_event() function on error

* tag 'perf_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Call LSM hook after copying perf_event_attr
  perf: Fix use-after-free in error path
  perf/x86/amd: fix potential integer overflow on shift of a int
  perf/core: Fix cgroup events tracking
  perf core: Return error pointer if inherit_event() fails to find pmu_ctx
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
 		 * numbered counter following it.
 		 */
 		for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
-			even_ctr_mask |= 1 << i;
+			even_ctr_mask |= BIT_ULL(i);
 
 		pair_constraint = (struct event_constraint)
 			__EVENT_CONSTRAINT(0, even_ctr_mask, 0,
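A note on the BIT_ULL() change above: even_ctr_mask is a 64-bit mask, but "1 << i" is evaluated in plain int, so a shift count of 31 or more would be undefined behavior and could never set the high bits. The loop bound here is small in practice, hence "potential" overflow in the commit title. A minimal standalone sketch of the difference, with BIT_ULL() re-created outside the kernel for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the kernel macro; redefined here so this compiles
     * as a standalone program. */
    #define BIT_ULL(nr) (1ULL << (nr))

    int main(void)
    {
            uint64_t mask = 0;
            int i;

            for (i = 0; i < 40; i += 2) {
                    /* "mask |= 1 << i;" would be int arithmetic: undefined
                     * behavior once i >= 31, and bits 32..63 unreachable. */
                    mask |= BIT_ULL(i); /* 64-bit arithmetic throughout */
            }
            printf("mask = %#llx\n", (unsigned long long)mask);
            return 0;
    }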
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -380,7 +380,6 @@ enum event_type_t {
 
 /*
  * perf_sched_events : >0 events exist
- * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
 
 static void perf_sched_delayed(struct work_struct *work);
@@ -389,7 +388,6 @@ static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
 static DEFINE_MUTEX(perf_sched_mutex);
 static atomic_t perf_sched_count;
 
-static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -844,9 +842,16 @@ static void perf_cgroup_switch(struct task_struct *task)
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
 	struct perf_cgroup *cgrp;
 
-	cgrp = perf_cgroup_from_task(task, NULL);
+	/*
+	 * cpuctx->cgrp is set when the first cgroup event enabled,
+	 * and is cleared when the last cgroup event disabled.
+	 */
+	if (READ_ONCE(cpuctx->cgrp) == NULL)
+		return;
 
 	WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
+	cgrp = perf_cgroup_from_task(task, NULL);
 	if (READ_ONCE(cpuctx->cgrp) == cgrp)
 		return;
 
@@ -3631,8 +3636,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_switch(next);
+	perf_cgroup_switch(next);
 }
 
 static bool perf_less_group_idx(const void *l, const void *r)
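The two hunks above, together with the account_event_cpu()/unaccount_event_cpu() removals below, make up the "cgroup events tracking" fix: rather than having every call site gate perf_cgroup_switch() on a separately maintained per-cpu perf_cgroup_events counter, callers now invoke it unconditionally and the function itself bails out when cpuctx->cgrp is NULL, the one piece of state that is authoritative (set with the first cgroup event, cleared with the last). A reduced, compilable sketch of that early-exit shape, using hypothetical stand-in types rather than the kernel's:

    #include <stdio.h>

    struct cgroup { const char *name; };

    /* Stand-in for cpuctx->cgrp: set when the first cgroup event is
     * enabled on this CPU, cleared when the last one is disabled. */
    static struct cgroup *cpu_cgrp;

    static void cgroup_switch(struct cgroup *next)
    {
            if (cpu_cgrp == NULL)
                    return; /* no cgroup events here: cheap early exit */
            if (cpu_cgrp == next)
                    return; /* already in the right cgroup */
            printf("switching perf cgroup state to %s\n", next->name);
            cpu_cgrp = next;
    }

    int main(void)
    {
            struct cgroup a = { "a" }, b = { "b" };

            cgroup_switch(&b); /* no-op: no cgroup events exist yet */
            cpu_cgrp = &a;     /* first cgroup event enabled */
            cgroup_switch(&b); /* performs the switch */
            return 0;
    }

One authoritative check in the callee is harder to break than a counter that every accounting path must keep in sync.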
@@ -4974,15 +4978,6 @@ static void unaccount_pmu_sb_event(struct perf_event *event)
 	detach_sb_event(event);
 }
 
-static void unaccount_event_cpu(struct perf_event *event, int cpu)
-{
-	if (event->parent)
-		return;
-
-	if (is_cgroup_event(event))
-		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 static DEFINE_SPINLOCK(nr_freq_lock);
 #endif
@@ -5048,8 +5043,6 @@ static void unaccount_event(struct perf_event *event)
 		schedule_delayed_work(&perf_sched_work, HZ);
 	}
 
-	unaccount_event_cpu(event, event->cpu);
-
 	unaccount_pmu_sb_event(event);
 }
 
@@ -11679,15 +11672,6 @@ static void account_pmu_sb_event(struct perf_event *event)
 	attach_sb_event(event);
 }
 
-static void account_event_cpu(struct perf_event *event, int cpu)
-{
-	if (event->parent)
-		return;
-
-	if (is_cgroup_event(event))
-		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-}
-
 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
 static void account_freq_event_nohz(void)
 {
@@ -11775,8 +11759,6 @@ static void account_event(struct perf_event *event)
 	}
 enabled:
 
-	account_event_cpu(event, event->cpu);
-
 	account_pmu_sb_event(event);
 }
 
@@ -12339,12 +12321,12 @@ SYSCALL_DEFINE5(perf_event_open,
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
-	/* Do we allow access to perf_event_open(2) ? */
-	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
 
-	err = perf_copy_attr(attr_uptr, &attr);
+	/* Do we allow access to perf_event_open(2) ? */
+	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
 	if (err)
 		return err;
 
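The reordering above is the entire LSM fix: the old code passed attr to security_perf_event_open() before perf_copy_attr() had filled it in, so security modules made their decision from uninitialized stack memory. A compilable sketch of the hazard, using hypothetical stand-ins (copy_attr_from_user(), security_check()) rather than the kernel API:

    #include <stdio.h>
    #include <string.h>

    struct attr { int type; long config; };

    static int copy_attr_from_user(struct attr *dst, const struct attr *src)
    {
            memcpy(dst, src, sizeof(*dst)); /* the real hook also validates */
            return 0;
    }

    static int security_check(const struct attr *a)
    {
            /* A security module may branch on a->type: if the struct is
             * uninitialized, the decision is made on garbage. */
            return a->type >= 0 ? 0 : -1;
    }

    static int open_event(const struct attr *uattr)
    {
            struct attr attr; /* uninitialized stack memory */
            int err;

            /* Old (buggy) order: security_check(&attr) here would read
             * indeterminate values. Fixed order: initialize, then ask. */
            err = copy_attr_from_user(&attr, uattr);
            if (err)
                    return err;
            return security_check(&attr);
    }

    int main(void)
    {
            struct attr user_attr = { .type = 1, .config = 0 };

            printf("open_event: %d\n", open_event(&user_attr));
            return 0;
    }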
@@ -12689,7 +12671,8 @@ SYSCALL_DEFINE5(perf_event_open,
 	return event_fd;
 
 err_context:
-	/* event->pmu_ctx freed by free_event() */
+	put_pmu_ctx(event->pmu_ctx);
+	event->pmu_ctx = NULL; /* _free_event() */
 err_locked:
 	mutex_unlock(&ctx->mutex);
 	perf_unpin_context(ctx);
@@ -12802,6 +12785,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
 err_pmu_ctx:
 	put_pmu_ctx(pmu_ctx);
+	event->pmu_ctx = NULL; /* _free_event() */
 err_unlock:
 	mutex_unlock(&ctx->mutex);
 	perf_unpin_context(ctx);
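Both error-path hunks above apply the same discipline: after the path drops its own reference with put_pmu_ctx(), it clears event->pmu_ctx so that the generic teardown in _free_event() cannot drop the reference a second time through a dangling pointer. A reduced sketch with a hypothetical refcounted object in place of the kernel's pmu_ctx:

    #include <stdio.h>
    #include <stdlib.h>

    struct ref_ctx { int refcount; };

    static void put_ctx(struct ref_ctx *c)
    {
            if (c && --c->refcount == 0)
                    free(c);
    }

    struct event { struct ref_ctx *ctx; };

    /* Plays the role of _free_event(): drops the reference if still set. */
    static void free_event_like(struct event *e)
    {
            put_ctx(e->ctx); /* harmless no-op when e->ctx == NULL */
    }

    int main(void)
    {
            struct event e;
            struct ref_ctx *c = malloc(sizeof(*c));

            c->refcount = 1;
            e.ctx = c;

            /* Error path: drop our reference, then clear the pointer so the
             * generic teardown cannot put (and free) it again. */
            put_ctx(e.ctx);
            e.ctx = NULL; /* without this line: double-put / use-after-free */

            free_event_like(&e);
            return 0;
    }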
@@ -12822,13 +12806,11 @@ static void __perf_pmu_remove(struct perf_event_context *ctx,
 
 	perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
 		perf_remove_from_context(event, 0);
-		unaccount_event_cpu(event, cpu);
 		put_pmu_ctx(event->pmu_ctx);
 		list_add(&event->migrate_entry, events);
 
 		for_each_sibling_event(sibling, event) {
 			perf_remove_from_context(sibling, 0);
-			unaccount_event_cpu(sibling, cpu);
 			put_pmu_ctx(sibling->pmu_ctx);
 			list_add(&sibling->migrate_entry, events);
 		}
@@ -12847,7 +12829,6 @@ static void __perf_pmu_install_event(struct pmu *pmu,
 
 	if (event->state >= PERF_EVENT_STATE_OFF)
 		event->state = PERF_EVENT_STATE_INACTIVE;
-	account_event_cpu(event, cpu);
 	perf_install_in_context(ctx, event, cpu);
 }
 
@@ -13231,7 +13212,7 @@ inherit_event(struct perf_event *parent_event,
 	pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
 	if (IS_ERR(pmu_ctx)) {
 		free_event(child_event);
-		return NULL;
+		return ERR_CAST(pmu_ctx);
 	}
 	child_event->pmu_ctx = pmu_ctx;
 
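Context for the return-value change: inherit_event()'s callers detect failure with IS_ERR(), so returning bare NULL after find_get_pmu_context() failed let the error slip past that check instead of aborting with the real errno. ERR_CAST() re-types an encoded error pointer without losing it. A standalone sketch of the ERR_PTR convention, with the helpers re-implemented in user space for illustration:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* User-space re-creations of the kernel helpers, for illustration. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }
    #define ERR_CAST(p) ((void *)(p)) /* retype, keep the encoded errno */

    static void *find_ctx(void)
    {
            return ERR_PTR(-ENOMEM); /* simulated allocation failure */
    }

    static void *inherit_event_like(void)
    {
            void *ctx = find_ctx();

            if (IS_ERR(ctx))
                    return ERR_CAST(ctx); /* was "return NULL": error lost */
            return ctx;
    }

    int main(void)
    {
            void *ev = inherit_event_like();

            if (IS_ERR(ev)) /* the caller now sees -ENOMEM */
                    printf("inherit failed: %ld\n", PTR_ERR(ev));
            return 0;
    }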
@@ -13742,8 +13723,7 @@ static int __perf_cgroup_move(void *info)
 	struct task_struct *task = info;
 
 	preempt_disable();
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_switch(task);
+	perf_cgroup_switch(task);
 	preempt_enable();
 
 	return 0;