
Merge branch 'sh/kgdb' into sh-latest

Paul Mundt 2012-04-17 16:22:04 +09:00
commit 9d773d378d
2 changed files with 96 additions and 39 deletions

arch/sh/include/asm/kgdb.h

@@ -4,18 +4,6 @@
 #include <asm/cacheflush.h>
 #include <asm/ptrace.h>
-/* Same as pt_regs but has vbr in place of syscall_nr */
-struct kgdb_regs {
-	unsigned long regs[16];
-	unsigned long pc;
-	unsigned long pr;
-	unsigned long sr;
-	unsigned long gbr;
-	unsigned long mach;
-	unsigned long macl;
-	unsigned long vbr;
-};
 enum regnames {
 	GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7,
 	GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15,
@ -23,17 +11,27 @@ enum regnames {
GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR,
};
#define NUMREGBYTES ((GDB_VBR + 1) * 4)
#define _GP_REGS 16
#define _EXTRA_REGS 7
#define GDB_SIZEOF_REG sizeof(u32)
#define DBG_MAX_REG_NUM (_GP_REGS + _EXTRA_REGS)
#define NUMREGBYTES (DBG_MAX_REG_NUM * sizeof(GDB_SIZEOF_REG))
static inline void arch_kgdb_breakpoint(void)
{
__asm__ __volatile__ ("trapa #0x3c\n");
}
#define BUFMAX 2048
#define CACHE_FLUSH_IS_SAFE 1
#define BREAK_INSTR_SIZE 2
#define BUFMAX 2048
#ifdef CONFIG_SMP
# define CACHE_FLUSH_IS_SAFE 0
#else
# define CACHE_FLUSH_IS_SAFE 1
#endif
#define GDB_ADJUSTS_BREAK_OFFSET
#endif /* __ASM_SH_KGDB_H */
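
Note on the new defines above: DBG_MAX_REG_NUM and GDB_SIZEOF_REG feed the generic kgdb per-register interface, which describes each register with a struct dbg_reg_def_t entry (name, size, offset into struct pt_regs). A minimal sketch of that descriptor, roughly as include/linux/kgdb.h of this era declares it; the exact field layout here is recalled rather than quoted, so treat it as an assumption:

/* Sketch of the generic descriptor the new defines are sized against;
 * see include/linux/kgdb.h -- field order and types assumed, not verified here. */
struct dbg_reg_def_t {
	char *name;	/* register name reported to gdb, e.g. "r0", "pc" */
	int size;	/* width in bytes, GDB_SIZEOF_REG for every SH register */
	int offset;	/* offset into struct pt_regs, or -1 if not held there */
};

NUMREGBYTES then becomes the register count times the per-register width, which is what the new definition spells out.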

arch/sh/kernel/kgdb.c

@@ -1,7 +1,7 @@
 /*
  * SuperH KGDB support
  *
- * Copyright (C) 2008 - 2009 Paul Mundt
+ * Copyright (C) 2008 - 2012 Paul Mundt
  *
  * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
  *
@@ -164,42 +164,89 @@ static void undo_single_step(struct pt_regs *linux_regs)
 	stepped_opcode = 0;
 }
-void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
+	{ "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
+	{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
+	{ "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
+	{ "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
+	{ "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
+	{ "vbr", GDB_SIZEOF_REG, -1 },
+};
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
 {
-	int i;
+	if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+		return -EINVAL;
-	for (i = 0; i < 16; i++)
-		gdb_regs[GDB_R0 + i] = regs->regs[i];
+	if (dbg_reg_def[regno].offset != -1)
+		memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+		       dbg_reg_def[regno].size);
-	gdb_regs[GDB_PC] = regs->pc;
-	gdb_regs[GDB_PR] = regs->pr;
-	gdb_regs[GDB_SR] = regs->sr;
-	gdb_regs[GDB_GBR] = regs->gbr;
-	gdb_regs[GDB_MACH] = regs->mach;
-	gdb_regs[GDB_MACL] = regs->macl;
-	__asm__ __volatile__ ("stc vbr, %0" : "=r" (gdb_regs[GDB_VBR]));
+	return 0;
 }
-void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
 {
-	int i;
+	if (regno >= DBG_MAX_REG_NUM || regno < 0)
+		return NULL;
-	for (i = 0; i < 16; i++)
-		regs->regs[GDB_R0 + i] = gdb_regs[GDB_R0 + i];
+	if (dbg_reg_def[regno].size != -1)
+		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+		       dbg_reg_def[regno].size);
-	regs->pc = gdb_regs[GDB_PC];
-	regs->pr = gdb_regs[GDB_PR];
-	regs->sr = gdb_regs[GDB_SR];
-	regs->gbr = gdb_regs[GDB_GBR];
-	regs->mach = gdb_regs[GDB_MACH];
-	regs->macl = gdb_regs[GDB_MACL];
+	switch (regno) {
+	case GDB_VBR:
+		__asm__ __volatile__ ("stc vbr, %0" : "=r" (mem));
+		break;
+	}
+	return dbg_reg_def[regno].name;
 }
 void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
 {
+	struct pt_regs *thread_regs = task_pt_regs(p);
+	int reg;
+	/* Initialize to zero */
+	for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
+		gdb_regs[reg] = 0;
+	/*
+	 * Copy out GP regs 8 to 14.
+	 *
+	 * switch_to() relies on SR.RB toggling, so regs 0->7 are banked
+	 * and need privileged instructions to get to. The r15 value we
+	 * fetch from the thread info directly.
+	 */
+	for (reg = GDB_R8; reg < GDB_R15; reg++)
+		gdb_regs[reg] = thread_regs->regs[reg];
 	gdb_regs[GDB_R15] = p->thread.sp;
 	gdb_regs[GDB_PC] = p->thread.pc;
+	/*
+	 * Additional registers we have context for
+	 */
+	gdb_regs[GDB_PR] = thread_regs->pr;
+	gdb_regs[GDB_GBR] = thread_regs->gbr;
 }
 int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
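
Why pt_regs_to_gdb_regs() and gdb_regs_to_pt_regs() can simply be dropped above: once an architecture defines DBG_MAX_REG_NUM and fills in dbg_reg_def[], the generic stub in kernel/debug/gdbstub.c assembles the gdb register block itself by walking the table and calling dbg_get_reg()/dbg_set_reg() one register at a time. A simplified sketch of that consumer loop, paraphrased from memory rather than copied from the generic code:

/* Simplified sketch of how the generic gdbstub walks dbg_reg_def[]
 * once the arch provides dbg_get_reg(); paraphrased, not verbatim. */
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
	int i, idx = 0;
	char *ptr = (char *)gdb_regs;

	for (i = 0; i < DBG_MAX_REG_NUM; i++) {
		dbg_get_reg(i, ptr + idx, regs);	/* arch hook copies one register */
		idx += dbg_reg_def[i].size;		/* advance by that register's width */
	}
}

dbg_set_reg() is used symmetrically when gdb writes registers back, which is why the SH code now only has to describe its register layout instead of hand-packing the buffer.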
@@ -264,6 +311,18 @@ BUILD_TRAP_HANDLER(singlestep)
 	local_irq_restore(flags);
 }
+static void kgdb_call_nmi_hook(void *ignored)
+{
+	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+void kgdb_roundup_cpus(unsigned long flags)
+{
+	local_irq_enable();
+	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+	local_irq_disable();
+}
 static int __kgdb_notify(struct die_args *args, unsigned long cmd)
 {
 	int ret;