Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-25 03:55:09 +08:00)
c0f7ac3a9e
Introduce x86 arch-specific optimization code, which supports both x86-32 and x86-64.

This code also supports safety checking, which decodes the whole of the function in which a probe is inserted and checks the following conditions before optimization:
 - The optimized instructions which will be replaced by a jump instruction don't straddle the function boundary.
 - There is no indirect jump instruction, because it may jump into the address range that is replaced by the jump operand.
 - There is no jump/loop instruction that jumps into the address range that is replaced by the jump operand.
 - Don't optimize a kprobe if it is in a function into which fixup code will jump.

This uses text_poke_multibyte(), which doesn't support modifying code in NMI/MCE handlers. However, since kprobes itself doesn't support probing NMI/MCE code, this is not a problem.

Changes in v9:
 - Use *_text_reserved() for checking whether the probe can be optimized.
 - Verify that the jump address range is within 2G when preparing the slot.
 - Back up the original code when switching to the optimized buffer, instead of when preparing the buffer, because there can be int3s of other probes in the preparing phase.
 - Check that the kprobe is disabled in arch_check_optimized_kprobe().
 - Strictly check indirect jump opcodes (ff /4, ff /5).

Changes in v6:
 - Split the stop_machine-based jump patching code.
 - Update comments and coding style.

Changes in v5:
 - Introduce stop_machine-based jump replacing.

Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
LKML-Reference: <20100225133446.6725.78994.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
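The optimization described above replaces the probed bytes with a 5-byte relative jump (opcode 0xe9 followed by a 4-byte signed displacement), which only works if the detour buffer lies within +/-2GB of the probe address. Below is a minimal user-space sketch of that encoding and range check, built on the constants defined in the header shown further down; the helper names jump_in_range() and make_reljump() are illustrative, not kernel APIs, and the real patching is done on live kernel text as the changelog describes.

#include <stdint.h>
#include <string.h>

#define RELATIVEJUMP_OPCODE	0xe9
#define RELATIVEJUMP_SIZE	5
#define RELATIVE_ADDR_SIZE	4

/* Can a "jmp rel32" placed at 'from' reach 'to'?  This mirrors the
 * "2G range" check mentioned in the v9 changes above. */
static int jump_in_range(unsigned long from, unsigned long to)
{
	int64_t rel = (int64_t)to - (int64_t)(from + RELATIVEJUMP_SIZE);

	return rel >= INT32_MIN && rel <= INT32_MAX;
}

/* Build the 5-byte "jmp rel32" that replaces the breakpoint: one opcode
 * byte followed by the 32-bit displacement, relative to the end of the
 * jump instruction.  'buf' must hold RELATIVEJUMP_SIZE bytes. */
static void make_reljump(uint8_t buf[RELATIVEJUMP_SIZE],
			 unsigned long from, unsigned long to)
{
	int32_t rel = (int32_t)(to - (from + RELATIVEJUMP_SIZE));

	buf[0] = RELATIVEJUMP_OPCODE;
	memcpy(&buf[1], &rel, RELATIVE_ADDR_SIZE);	/* x86 is little-endian */
}

In the kernel itself the replacement is applied under the stop_machine-based jump patching referenced in the v5/v6 changes; this sketch only shows the byte-level encoding and the reachability test.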
118 lines
3.7 KiB
C
#ifndef _ASM_X86_KPROBES_H
#define _ASM_X86_KPROBES_H
/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * See arch/x86/kernel/kprobes.c for x86 kprobes history.
 */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT

struct pt_regs;
struct kprobe;

typedef u8 kprobe_opcode_t;
#define BREAKPOINT_INSTRUCTION	0xcc
#define RELATIVEJUMP_OPCODE	0xe9
#define RELATIVEJUMP_SIZE	5
#define RELATIVECALL_OPCODE	0xe8
#define RELATIVE_ADDR_SIZE	4
#define MAX_INSN_SIZE		16
#define MAX_STACK_SIZE		64
#define MIN_STACK_SIZE(ADDR)						\
	(((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) +	\
			      THREAD_SIZE - (unsigned long)(ADDR)))	\
	 ? (MAX_STACK_SIZE)						\
	 : (((unsigned long)current_thread_info()) +			\
	    THREAD_SIZE - (unsigned long)(ADDR)))

#define flush_insn_slot(p)	do { } while (0)

/* optinsn template addresses */
extern kprobe_opcode_t optprobe_template_entry;
extern kprobe_opcode_t optprobe_template_val;
extern kprobe_opcode_t optprobe_template_call;
extern kprobe_opcode_t optprobe_template_end;
#define MAX_OPTIMIZED_LENGTH	(MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE				\
	(((unsigned long)&optprobe_template_end -	\
	  (unsigned long)&optprobe_template_entry) +	\
	 MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)

extern const int kretprobe_blacklist_size;

void arch_remove_kprobe(struct kprobe *p);
void kretprobe_trampoline(void);

/* Architecture specific copy of original instruction */
struct arch_specific_insn {
	/* copy of the original instruction */
	kprobe_opcode_t *insn;
	/*
	 * boostable = -1: This instruction type is not boostable.
	 * boostable = 0: This instruction type is boostable.
	 * boostable = 1: This instruction has been boosted: we have
	 * added a relative jump after the instruction copy in insn,
	 * so no single-step and fixup are needed (unless there's
	 * a post_handler or break_handler).
	 */
	int boostable;
};

struct arch_optimized_insn {
	/* copy of the original instructions */
	kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
	/* detour code buffer */
	kprobe_opcode_t *insn;
	/* the size of instructions copied to detour code buffer */
	size_t size;
};

/* Return true (!0) if optinsn is prepared for optimization. */
static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
{
	return optinsn->size;
}

struct prev_kprobe {
	struct kprobe *kp;
	unsigned long status;
	unsigned long old_flags;
	unsigned long saved_flags;
};

/* per-cpu kprobe control block */
struct kprobe_ctlblk {
	unsigned long kprobe_status;
	unsigned long kprobe_old_flags;
	unsigned long kprobe_saved_flags;
	unsigned long *jprobe_saved_sp;
	struct pt_regs jprobe_saved_regs;
	kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
	struct prev_kprobe prev_kprobe;
};

extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data);
#endif /* _ASM_X86_KPROBES_H */