s390: support KPROBES_ON_FTRACE
Instead of using our own kprobes-on-ftrace handling, convert the code to support KPROBES_ON_FTRACE.

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
parent 5f490a520b
commit 657480d9c0
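
With the generic KPROBES_ON_FTRACE infrastructure, a kprobe that lands exactly on an ftrace location is armed through ftrace rather than through a breakpoint, and no s390-specific user handling is needed. As a rough illustration of what the conversion enables (not part of this commit; the probed symbol and message are made up), an ordinary kprobes module works unchanged:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Illustrative pre-handler: with CONFIG_KPROBES_ON_FTRACE, this is
 * reached via kprobe_ftrace_handler() when the probed address is an
 * ftrace location. */
static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s\n", p->symbol_name);
	return 0;	/* do not change the execution path */
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_sys_open",	/* any ftrace-traceable function */
	.pre_handler	= demo_pre,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");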
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -24,7 +24,7 @@
     |     parisc: |  ok  |
     |    powerpc: |  ok  |
     |      riscv: | TODO |
-    |       s390: | TODO |
+    |       s390: |  ok  |
     |         sh: | TODO |
     |      sparc: | TODO |
     |         um: | TODO |
arch/s390/Kconfig
@@ -156,6 +156,7 @@ config S390
 	select HAVE_KERNEL_UNCOMPRESSED
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_KVM
 	select HAVE_LIVEPATCH
arch/s390/include/asm/kprobes.h
@@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
-	unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
arch/s390/kernel/ftrace.c
@@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
 #endif
 }
 
-static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
-	if (insn->opc == BREAKPOINT_INSTRUCTION)
-		return 1;
-#endif
-	return 0;
-}
-
 static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_KPROBES
@@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		/* Initial code replacement */
 		ftrace_generate_orig_insn(&orig);
 		ftrace_generate_nop_insn(&new);
-	} else if (is_kprobe_on_ftrace(&old)) {
-		/*
-		 * If we find a breakpoint instruction, a kprobe has been
-		 * placed at the beginning of the function. We write the
-		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
-		 * bytes of the original instruction so that the kprobes
-		 * handler can execute a nop, if it reaches this breakpoint.
-		 */
-		ftrace_generate_kprobe_call_insn(&orig);
-		ftrace_generate_kprobe_nop_insn(&new);
 	} else {
 		/* Replace ftrace call with a nop. */
 		ftrace_generate_call_insn(&orig, rec->ip);
@@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	if (is_kprobe_on_ftrace(&old)) {
-		/*
-		 * If we find a breakpoint instruction, a kprobe has been
-		 * placed at the beginning of the function. We write the
-		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
-		 * bytes of the original instruction so that the kprobes
-		 * handler can execute a brasl if it reaches this breakpoint.
-		 */
-		ftrace_generate_kprobe_nop_insn(&orig);
-		ftrace_generate_kprobe_call_insn(&new);
-	} else {
-		/* Replace nop with an ftrace call. */
-		ftrace_generate_nop_insn(&orig);
-		ftrace_generate_call_insn(&new, rec->ip);
-	}
+	/* Replace nop with an ftrace call. */
+	ftrace_generate_nop_insn(&orig);
+	ftrace_generate_call_insn(&new, rec->ip);
+
 	/* Verify that the to be replaced code matches what we expect. */
 	if (memcmp(&orig, &old, sizeof(old)))
 		return -EINVAL;
@@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void)
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+		struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb;
+	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+
+	if (unlikely(!p) || kprobe_disabled(p))
+		return;
+
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+		return;
+	}
+
+	__this_cpu_write(current_kprobe, p);
+
+	kcb = get_kprobe_ctlblk();
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+	instruction_pointer_set(regs, ip);
+
+	if (!p->pre_handler || !p->pre_handler(p, regs)) {
+
+		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
+
+		if (unlikely(p->post_handler)) {
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			p->post_handler(p, regs, 0);
+		}
+	}
+	__this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	return 0;
+}
+#endif
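
The new kprobe_ftrace_handler() is invoked by the generic kprobes core through an ftrace_ops registered with register saving and instruction-pointer modification enabled. For context, this is roughly how the generic side of that era wires it up (kernel/kprobes.c, sketched here for reference, not part of this diff):

/* Generic kprobes core: the ftrace_ops that routes ftrace events
 * into the architecture's kprobe_ftrace_handler(). */
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func	= kprobe_ftrace_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

Arming a probe then amounts to ftrace_set_filter_ip() on the probed address plus register_ftrace_function(), which is why the architecture only has to provide kprobe_ftrace_handler() and arch_prepare_kprobe_ftrace().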
arch/s390/kernel/kprobes.c
@@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
 
 static void copy_instruction(struct kprobe *p)
 {
-	unsigned long ip = (unsigned long) p->addr;
 	s64 disp, new_disp;
 	u64 addr, new_addr;
 
-	if (ftrace_location(ip) == ip) {
-		/*
-		 * If kprobes patches the instruction that is morphed by
-		 * ftrace make sure that kprobes always sees the branch
-		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
-		 * in case of hotpatch.
-		 */
-		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
-		p->ainsn.is_ftrace_insn = 1;
-	} else
-		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+	memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
 	p->opcode = p->ainsn.insn[0];
 	if (!probe_is_insn_relative_long(p->ainsn.insn))
 		return;
@@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
-int arch_check_ftrace_location(struct kprobe *p)
-{
-	return 0;
-}
-
 struct swap_insn_args {
 	struct kprobe *p;
 	unsigned int arm_kprobe : 1;
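
Dropping the s390 override of arch_check_ftrace_location(), which unconditionally returned 0 and thereby bypassed the generic check, lets the generic weak implementation take effect. That implementation (a close paraphrase of kernel/kprobes.c from this era, shown for context, not part of the diff) rejects probes that fall inside but not exactly on an ftrace instruction, and flags the rest as ftrace-based:

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}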
@@ -149,28 +133,11 @@ struct swap_insn_args {
 static int swap_instruction(void *data)
 {
 	struct swap_insn_args *args = data;
-	struct ftrace_insn new_insn, *insn;
 	struct kprobe *p = args->p;
-	size_t len;
+	u16 opc;
 
-	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
-	len = sizeof(new_insn.opc);
-	if (!p->ainsn.is_ftrace_insn)
-		goto skip_ftrace;
-	len = sizeof(new_insn);
-	insn = (struct ftrace_insn *) p->addr;
-	if (args->arm_kprobe) {
-		if (is_ftrace_nop(insn))
-			new_insn.disp = KPROBE_ON_FTRACE_NOP;
-		else
-			new_insn.disp = KPROBE_ON_FTRACE_CALL;
-	} else {
-		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
-		if (insn->disp == KPROBE_ON_FTRACE_NOP)
-			ftrace_generate_nop_insn(&new_insn);
-	}
-skip_ftrace:
-	s390_kernel_write(p->addr, &new_insn, len);
+	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+	s390_kernel_write(p->addr, &opc, sizeof(opc));
 	return 0;
 }
 NOKPROBE_SYMBOL(swap_instruction);
@@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 	unsigned long ip = regs->psw.addr;
 	int fixup = probe_get_fixup_type(p->ainsn.insn);
 
-	/* Check if the kprobes location is an enabled ftrace caller */
-	if (p->ainsn.is_ftrace_insn) {
-		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
-		struct ftrace_insn call_insn;
-
-		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
-		/*
-		 * A kprobe on an enabled ftrace call site actually single
-		 * stepped an unconditional branch (ftrace nop equivalent).
-		 * Now we need to fixup things and pretend that a brasl r0,...
-		 * was executed instead.
-		 */
-		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
-			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
-			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
-		}
-	}
-
 	if (fixup & FIXUP_PSW_NORMAL)
 		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
arch/s390/kernel/mcount.S
@@ -42,6 +42,9 @@ ENTRY(ftrace_caller)
 	.globl	ftrace_regs_caller
 	.set	ftrace_regs_caller,ftrace_caller
 	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller
+	lghi	%r14,0				# save condition code
+	ipm	%r14				# don't put any instructions
+	sllg	%r14,%r14,16			# clobbering CC before this point
 	lgr	%r1,%r15
 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
 	aghi	%r0,MCOUNT_RETURN_FIXUP
@@ -54,6 +57,9 @@ ENTRY(ftrace_caller)
 	# allocate pt_regs and stack frame for ftrace_trace_function
 	aghi	%r15,-STACK_FRAME_SIZE
 	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+	stg	%r14,(STACK_PTREGS_PSW)(%r15)
 	lg	%r14,(__SF_GPRS+8*8)(%r1)	# restore original return address
-	stosm	(STACK_PTREGS_PSW)(%r15),0
 	aghi	%r1,-TRACED_FUNC_FRAME_SIZE
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)