Use a dedicated function to request exit from execution loop
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6762 c046a42c-6fe2-441c-8c8c-71466251a162
commit 3098dba01c
parent 9e995645b5
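Every hunk below follows one pattern: code that used to request an exit from the execution loop by raising the CPU_INTERRUPT_EXIT pseudo-interrupt now calls a dedicated helper. A minimal before/after sketch of the calling convention (env stands for whichever CPUState the caller already holds; taken from the hunks below, shown here without diff markers):

    /* before: abuse the interrupt mask to ask the CPU to leave the loop */
    cpu_interrupt(env, CPU_INTERRUPT_EXIT);

    /* after: dedicated request that sets env->exit_request and unlinks
       the currently executing translation block */
    cpu_exit(env);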
@@ -760,7 +760,6 @@ extern CPUState *cpu_single_env;
 extern int64_t qemu_icount;
 extern int use_icount;
 
-#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
 #define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
 #define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
 #define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
@@ -774,6 +773,8 @@ extern int use_icount;
 void cpu_interrupt(CPUState *s, int mask);
 void cpu_reset_interrupt(CPUState *env, int mask);
 
+void cpu_exit(CPUState *s);
+
 /* Breakpoint/watchpoint flags */
 #define BP_MEM_READ 0x01
 #define BP_MEM_WRITE 0x02
@@ -215,7 +215,7 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
 #endif
     if (queue_signal(sig, &tinfo) == 1) {
         /* interrupt the virtual CPU as soon as possible */
-        cpu_interrupt(global_env, CPU_INTERRUPT_EXIT);
+        cpu_exit(global_env);
     }
 }
exec.c (58 changed lines)
@@ -523,7 +523,9 @@ static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
 
     qemu_get_be32s(f, &env->halted);
     qemu_get_be32s(f, &env->interrupt_request);
-    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
+    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
+       version_id is increased. */
+    env->interrupt_request &= ~0x01;
     tlb_flush(env, 1);
 
     return 0;
@@ -1499,28 +1501,36 @@ void cpu_set_log_filename(const char *filename)
     cpu_set_log(loglevel);
 }
 
-/* mask must never be zero, except for A20 change call */
-void cpu_interrupt(CPUState *env, int mask)
+static void cpu_unlink_tb(CPUState *env)
 {
-#if !defined(USE_NPTL)
-    TranslationBlock *tb;
-    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
-#endif
-    int old_mask;
-
-    if (mask & CPU_INTERRUPT_EXIT) {
-        env->exit_request = 1;
-        mask &= ~CPU_INTERRUPT_EXIT;
-    }
-
-    old_mask = env->interrupt_request;
-    env->interrupt_request |= mask;
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
        problem and hope the cpu will stop of its own accord. For userspace
        emulation this often isn't actually as bad as it sounds. Often
        signals are used primarily to interrupt blocking syscalls. */
 #else
+    TranslationBlock *tb;
+    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+
+    tb = env->current_tb;
+    /* if the cpu is currently executing code, we must unlink it and
+       all the potentially executing TB */
+    if (tb && !testandset(&interrupt_lock)) {
+        env->current_tb = NULL;
+        tb_reset_jump_recursive(tb);
+        resetlock(&interrupt_lock);
+    }
+#endif
+}
+
+/* mask must never be zero, except for A20 change call */
+void cpu_interrupt(CPUState *env, int mask)
+{
+    int old_mask;
+
+    old_mask = env->interrupt_request;
+    env->interrupt_request |= mask;
+
     if (use_icount) {
         env->icount_decr.u16.high = 0xffff;
 #ifndef CONFIG_USER_ONLY
@@ -1530,16 +1540,8 @@ void cpu_interrupt(CPUState *env, int mask)
         }
 #endif
     } else {
-        tb = env->current_tb;
-        /* if the cpu is currently executing code, we must unlink it and
-           all the potentially executing TB */
-        if (tb && !testandset(&interrupt_lock)) {
-            env->current_tb = NULL;
-            tb_reset_jump_recursive(tb);
-            resetlock(&interrupt_lock);
-        }
+        cpu_unlink_tb(env);
     }
-#endif
 }
 
 void cpu_reset_interrupt(CPUState *env, int mask)
@@ -1547,6 +1549,12 @@ void cpu_reset_interrupt(CPUState *env, int mask)
     env->interrupt_request &= ~mask;
 }
 
+void cpu_exit(CPUState *env)
+{
+    env->exit_request = 1;
+    cpu_unlink_tb(env);
+}
+
 const CPULogItem cpu_log_items[] = {
     { CPU_LOG_TB_OUT_ASM, "out_asm",
       "show generated host assembly code for each compiled TB" },
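Pieced together from the exec.c hunks above: cpu_interrupt() now only records the interrupt mask (and, outside icount mode, unlinks the running TB via cpu_unlink_tb()), while the exit request itself moves into the new helper. As a reading aid, the added function without diff markers (the comments here are explanatory and not part of the patch):

    void cpu_exit(CPUState *env)
    {
        env->exit_request = 1;   /* polled by the execution loop */
        cpu_unlink_tb(env);      /* break TB chaining so the loop regains control quickly */
    }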
@@ -2012,7 +2012,7 @@ void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
 #ifdef CONFIG_USER_ONLY
     gdb_handlesig(s->c_cpu, 0);
 #else
-    cpu_interrupt(s->c_cpu, CPU_INTERRUPT_EXIT);
+    cpu_exit(s->c_cpu);
 #endif
 }
hw/dma.c (2 changed lines)
@@ -449,7 +449,7 @@ void DMA_schedule(int nchan)
 {
     CPUState *env = cpu_single_env;
     if (env)
-        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+        cpu_exit(env);
 }
 
 static void dma_reset(void *opaque)
@@ -653,7 +653,7 @@ void DBDMA_schedule(void)
 {
     CPUState *env = cpu_single_env;
     if (env)
-        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+        cpu_exit(env);
 }
 
 static void
@@ -200,7 +200,7 @@ static inline void start_exclusive(void)
     for (other = first_cpu; other; other = other->next_cpu) {
         if (other->running) {
             pending_cpus++;
-            cpu_interrupt(other, CPU_INTERRUPT_EXIT);
+            cpu_exit(other);
         }
     }
     if (pending_cpus > 1) {
@@ -460,7 +460,7 @@ static void host_signal_handler(int host_signum, siginfo_t *info,
     host_to_target_siginfo_noswap(&tinfo, info);
     if (queue_signal(thread_env, sig, &tinfo) == 1) {
         /* interrupt the virtual CPU as soon as possible */
-        cpu_interrupt(thread_env, CPU_INTERRUPT_EXIT);
+        cpu_exit(thread_env);
    }
 }
vl.c (14 changed lines)
@@ -1181,7 +1181,7 @@ void qemu_mod_timer(QEMUTimer *ts, int64_t expire_time)
         }
         /* Interrupt execution to force deadline recalculation. */
         if (use_icount && cpu_single_env) {
-            cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
+            cpu_exit(cpu_single_env);
         }
     }
 }
@@ -1348,7 +1348,7 @@ static void host_alarm_handler(int host_signum)
 
         if (env) {
             /* stop the currently executing cpu because a timer occured */
-            cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+            cpu_exit(env);
 #ifdef USE_KQEMU
             if (env->kqemu_enabled) {
                 kqemu_cpu_interrupt(env);
@@ -3326,7 +3326,7 @@ void qemu_service_io(void)
 {
     CPUState *env = cpu_single_env;
     if (env) {
-        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+        cpu_exit(env);
 #ifdef USE_KQEMU
         if (env->kqemu_enabled) {
             kqemu_cpu_interrupt(env);
@@ -3407,7 +3407,7 @@ void qemu_bh_schedule(QEMUBH *bh)
     bh->idle = 0;
     /* stop the currently executing CPU to execute the BH ASAP */
     if (env) {
-        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
+        cpu_exit(env);
     }
 }
@@ -3618,21 +3618,21 @@ void qemu_system_reset_request(void)
         reset_requested = 1;
     }
     if (cpu_single_env)
-        cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
+        cpu_exit(cpu_single_env);
 }
 
 void qemu_system_shutdown_request(void)
 {
     shutdown_requested = 1;
     if (cpu_single_env)
-        cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
+        cpu_exit(cpu_single_env);
 }
 
 void qemu_system_powerdown_request(void)
 {
     powerdown_requested = 1;
     if (cpu_single_env)
-        cpu_interrupt(cpu_single_env, CPU_INTERRUPT_EXIT);
+        cpu_exit(cpu_single_env);
 }
 
 #ifdef _WIN32
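The vl.c hunks all share one shape: if a CPU is currently executing, kick it out of translated code so the main loop can service the pending timer, bottom half, or system request. A sketch of that recurring idiom as a standalone helper (the function name is invented for illustration; cpu_single_env and cpu_exit() come from the patch):

    /* Ask the currently executing vCPU, if any, to return to the main loop
       as soon as possible so the pending event can be handled. */
    static void kick_current_cpu(void)
    {
        CPUState *env = cpu_single_env;

        if (env) {
            cpu_exit(env);
        }
    }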