
Merge branch 'perf/urgent' into perf/core

Merge in two hw_breakpoint fixes before applying another five.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2013-06-20 17:57:08 +02:00
commit f070a4dba9
2 changed files with 13 additions and 7 deletions


@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+	int ret;
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	__copy_instruction(p->ainsn.insn, p->addr);
+	ret = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!ret)
+		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)
 
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
+
+	return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
 		return -ENOMEM;
-	arch_copy_kprobe(p);
-	return 0;
+
+	return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
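
The hunks above (from the x86 kprobes implementation) change arch_copy_kprobe() from void to int: __copy_instruction() returns the copied instruction's length, or 0 on failure, and that failure now propagates as -EINVAL up through arch_prepare_kprobe() instead of being silently dropped. Below is a minimal user-space sketch of this error-propagation pattern; fake_copy_instruction() and prepare_probe() are hypothetical stand-ins, not kernel APIs.

/*
 * Sketch of the fix's pattern: the copy helper reports failure
 * (length 0), and the caller returns an error instead of pretending
 * the copy succeeded. All names here are illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Returns the number of bytes copied, or 0 if decoding failed. */
static int fake_copy_instruction(unsigned char *dest, const unsigned char *src)
{
	if (src[0] == 0x0f && src[1] == 0x0b)	/* pretend UD2 can't be copied */
		return 0;
	memcpy(dest, src, 2);
	return 2;
}

/* Mirrors the fixed arch_copy_kprobe(): propagate the failure upward. */
static int prepare_probe(unsigned char *slot, const unsigned char *addr)
{
	int ret = fake_copy_instruction(slot, addr);

	if (!ret)
		return -EINVAL;	/* before the fix, this error was dropped */
	return 0;
}

int main(void)
{
	unsigned char slot[16];
	const unsigned char good[] = { 0x31, 0xc0 };	/* xor %eax,%eax */
	const unsigned char bad[]  = { 0x0f, 0x0b };	/* ud2 */

	printf("good insn: %d\n", prepare_probe(slot, good));	/* prints 0 */
	printf("bad insn:  %d\n", prepare_probe(slot, bad));	/* prints -EINVAL */
	return 0;
}

Returning the helper's status directly, as the last hunk does with "return arch_copy_kprobe(p);", keeps the failure path in one place.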


@@ -120,7 +120,7 @@ static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
 		if (iter->hw.bp_target == tsk &&
 		    find_slot_idx(iter) == type &&
-		    cpu == iter->cpu)
+		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -149,7 +149,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		return;
 	}
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	if (cpu >= 0) {
 		toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	} else {
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
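
The hw_breakpoint hunks (kernel/events/hw_breakpoint.c) fix the slot accounting in two ways: task_bp_pinned() now also counts breakpoints that are not bound to a single CPU (iter->cpu < 0), and the slot walks use for_each_possible_cpu() instead of for_each_online_cpu(), so reservations on a hot-unplugged CPU are not lost. A minimal user-space sketch of that accounting rule, with illustrative struct and function names rather than the kernel's:

/*
 * A slot reservation is either pinned to one CPU (cpu >= 0) or spans
 * every CPU (cpu == -1). Matching rule from the first hunk: the
 * reservation counts against CPU c if it spans all CPUs or is pinned
 * exactly to c.
 */
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4

struct reservation {
	int cpu;	/* -1: all CPUs, >= 0: that CPU only */
	int weight;
};

/* Weight charged against @cpu, mirroring the fixed task_bp_pinned() test. */
static int pinned_weight(const struct reservation *rs, int n, int cpu)
{
	int i, count = 0;

	for (i = 0; i < n; i++)
		if (rs[i].cpu < 0 || rs[i].cpu == cpu)
			count += rs[i].weight;
	return count;
}

int main(void)
{
	struct reservation rs[] = {
		{ .cpu = -1, .weight = 1 },	/* breakpoint on every CPU */
		{ .cpu = 2,  .weight = 1 },	/* pinned to CPU 2 only */
	};
	int cpu;

	/* Walk every possible CPU, not just the online ones: CPU 2's
	 * reservation must stay accounted for even while that CPU is
	 * hot-unplugged. */
	for (cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		printf("cpu %d: %d slot(s) pinned\n",
		       cpu, pinned_weight(rs, 2, cpu));
	return 0;
}

Walking the possible mask trades a slightly pessimistic count for correctness across CPU hotplug, which is the design choice both for_each_possible_cpu() hunks make.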