
Merge branch 'tip/x86/ftrace' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/ftrace

Conflicts:
	include/linux/ftrace.h
	kernel/trace/ftrace.c
Ingo Molnar 2009-02-22 18:12:01 +01:00
commit c478f87869
8 changed files with 130 additions and 20 deletions

View File

@@ -104,6 +104,11 @@ void clflush_cache_range(void *addr, unsigned int size);
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
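As an aside, a minimal sketch of how a text-patching path is expected to pair these helpers; patch_kernel_text() and its arguments are hypothetical, while probe_kernel_write() is the same helper used in the ftrace_mod_code() hunk further down. When CONFIG_DEBUG_RODATA is off, the static inline stubs above turn both calls into no-ops.

/* Illustration only, not part of this commit. */
static int patch_kernel_text(void *ip, const void *new_insn, size_t len)
{
	int ret;

	set_kernel_text_rw();		/* make the .text pages writable */
	ret = probe_kernel_write(ip, new_insn, len);
	set_kernel_text_ro();		/* restore write protection */

	return ret;
}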

View File

@@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
#include <linux/ftrace.h>
#include <asm/nops.h>
@@ -26,6 +27,18 @@
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_kernel_text_ro();
	return 0;
}

union ftrace_code_union {
	char code[MCOUNT_INSN_SIZE];
	struct {
@@ -111,6 +124,10 @@ static void ftrace_mod_code(void)
	 */
	mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
					     MCOUNT_INSN_SIZE);

	/* if we fail, then kill any new writers */
	if (mod_code_status)
		mod_code_write = 0;
}

void ftrace_nmi_enter(void)

View File

@@ -283,10 +283,13 @@ void __devinit vmi_time_ap_init(void)
#endif

/** vmi clocksource */
static struct clocksource clocksource_vmi;

static cycle_t read_real_cycles(void)
{
	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
	cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
	return ret >= clocksource_vmi.cycle_last ?
		ret : clocksource_vmi.cycle_last;
}

static struct clocksource clocksource_vmi = {
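The change above only clamps the raw counter value so the clocksource never appears to run backwards relative to the last value it handed out (clocksource_vmi.cycle_last). A standalone sketch of that clamp, with an illustrative name and plain integer types rather than cycle_t:

/* Illustration only: never report a value older than 'last'. */
static unsigned long long clamp_monotonic(unsigned long long raw,
					  unsigned long long last)
{
	return raw >= last ? raw : last;
}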

View File

@@ -1155,17 +1155,47 @@ static noinline int do_test_wp_bit(void)
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

static int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
@@ -1174,7 +1204,6 @@ void mark_rodata_ro(void)
	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
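The shifts in these hunks are plain unit conversions: with the x86 PAGE_SHIFT of 12, size >> PAGE_SHIFT is the number of 4 KiB pages spanned by the region and size >> 10 is the same range in KiB for the printk. A worked example with an illustrative 8 MiB text section:

/* Illustration only. */
unsigned long size  = 8UL << 20;	/* 8 MiB = 8388608 bytes */
unsigned long pages = size >> 12;	/* 2048 pages of 4 KiB   */
unsigned long kib   = size >> 10;	/* 8192 KiB              */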

View File

@@ -986,21 +986,48 @@ void free_initmem(void)
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

static int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_stext);
	unsigned long end = PFN_ALIGN(__start_rodata);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext);
	unsigned long end = PFN_ALIGN(__start_rodata);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;

#ifdef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	start = rodata_start;
#endif

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.

View File

@@ -508,18 +508,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
#endif

	/*
	 * Install the new, split up pagetable. Important details here:
	 * Install the new, split up pagetable.
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 * We use the standard kernel pagetable protections for the new
	 * pagetable protections, the actual ptes set above control the
	 * primary protection behavior:
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
	base = NULL;

out_unlock:

View File

@@ -106,6 +106,9 @@ struct ftrace_func_command {
/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
#include <asm/ftrace.h>

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct seq_file;

struct ftrace_probe_ops {

View File

@@ -557,8 +557,11 @@ static void ftrace_replace_code(int enable)
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}
@@ -580,6 +583,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;
@@ -602,7 +623,17 @@ static int __ftrace_modify_code(void *data)
static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}

static ftrace_func_t saved_ftrace_func;
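The __weak definitions above give every architecture a working default; an architecture that needs extra work (as the x86 hunk earlier in this diff does with set_kernel_text_rw()/set_kernel_text_ro()) simply provides a strong definition of the same symbol, which the linker then prefers. A minimal standalone sketch of that mechanism; the file names, the symbol name, and the spelled-out gcc attribute are illustrative, not taken from the patch:

/* generic.c: weak default, analogous to the kernel/trace hunk above. */
int __attribute__((weak)) arch_modify_prepare(void)
{
	return 0;			/* nothing to do by default */
}

/* arch.c: strong override, analogous to the x86 hunk above; when this
 * object file is linked in, every caller resolves to this definition. */
int arch_modify_prepare(void)
{
	/* arch-specific preparation would go here */
	return 0;
}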