tcg: Rework tb_invalidated_flag
'tb_invalidated_flag' was meant to catch two events:
 * some TB has been invalidated by tb_phys_invalidate();
 * the whole translation buffer has been flushed by tb_flush().

Then it was checked:
 * in cpu_exec() to ensure that the last executed TB can be safely linked
   to directly call the next one;
 * in cpu_exec_nocache() to decide if the original TB should be provided
   for further possible invalidation along with the temporarily generated TB.

It is always safe to patch an invalidated TB since it is not going to be
used anyway. It is also safe to call tb_phys_invalidate() for an already
invalidated TB. Thus, setting this flag in tb_phys_invalidate() is simply
unnecessary. Moreover, it can prevent proper linking of TBs whenever any
arbitrary TB has been invalidated. So just don't touch it in
tb_phys_invalidate().

Since the flag is then only used to catch whether tb_flush() has been
called, rename it to 'tb_flushed'. Declare it as 'bool' and stick to using
only 'true' and 'false' to set its value. Also, instead of setting it in
tb_gen_code() just after tb_flush() has been called, set it right inside
tb_flush().

In cpu_exec(), this flag is used to track whether tb_flush() has been
called and has made 'next_tb' (a reference to the last executed TB) invalid
for linking it to directly call the next TB. tb_flush() can be called
during the CPU execution loop from tb_gen_code(), during TB execution, or
by another thread while 'tb_lock' is released. Catch translation buffer
flushes reliably by resetting this flag once before the first TB lookup and
each time we find it set before trying to add a direct jump. Don't touch it
in tb_find_physical().

Each vCPU has its own execution loop in multithreaded mode and thus should
have its own copy of the flag, so that it can reset it together with its
own 'next_tb' without affecting any other vCPU's execution thread. So make
this flag per-vCPU and move it to CPUState.

In cpu_exec_nocache(), we only need to check whether tb_flush() has been
called from the tb_gen_code() invoked by cpu_exec_nocache() itself. To do
this reliably, preserve the old value of the flag, reset it before calling
tb_gen_code(), check it afterwards, and OR the saved value back into the
flag.

This patch is based on the patch "tcg: move tb_invalidated_flag to
CPUState" from Paolo Bonzini <pbonzini@redhat.com>.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
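The save/reset/check/combine protocol described above for cpu_exec_nocache()
is easiest to see in isolation. Below is a minimal, standalone C sketch of
that idea; it is not QEMU code. The type VCPU and the helpers generate_tb()
and exec_nocache() are hypothetical stand-ins for CPUState, tb_gen_code()
and cpu_exec_nocache(); the real changes are in the diff that follows.

/* Standalone sketch of the per-vCPU 'tb_flushed' protocol (illustrative
 * names, not QEMU APIs): a flush sets the flag, the execution loop clears
 * it before the first lookup and whenever it drops a pending direct jump,
 * and exec_nocache() saves/restores it around code generation so that it
 * only observes flushes triggered by its own tb_gen_code() call. */
#include <stdbool.h>
#include <stdio.h>

typedef struct VCPU {
    bool tb_flushed;            /* per-vCPU: translation buffer was flushed */
} VCPU;

/* Stand-in for tb_gen_code(): may flush the buffer when it is full. */
static int generate_tb(VCPU *cpu, bool buffer_full)
{
    if (buffer_full) {
        cpu->tb_flushed = true; /* what tb_flush() would do for every vCPU */
    }
    return 42;                  /* pretend this is the new TB */
}

/* Mirrors the cpu_exec_nocache() pattern: detect only *our own* flush. */
static void exec_nocache(VCPU *cpu, bool buffer_full)
{
    bool old_tb_flushed = cpu->tb_flushed;  /* preserve pending state */
    cpu->tb_flushed = false;                /* reset before generating */
    int tb = generate_tb(cpu, buffer_full);
    /* If *this* generation flushed the buffer, the original TB is gone. */
    printf("tb=%d, keep orig_tb: %s\n", tb, cpu->tb_flushed ? "no" : "yes");
    cpu->tb_flushed |= old_tb_flushed;      /* combine the saved value back */
}

int main(void)
{
    VCPU cpu = { .tb_flushed = false };

    cpu.tb_flushed = false;         /* reset before the first TB lookup */
    exec_nocache(&cpu, false);      /* no flush: original TB still valid */
    exec_nocache(&cpu, true);       /* flush: drop reference to orig_tb */

    if (cpu.tb_flushed) {
        /* execution loop: forget the last TB, don't patch direct jumps */
        cpu.tb_flushed = false;
    }
    return 0;
}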
This commit is contained in:
parent 819af24b9c
commit 6f789be56d
cpu-exec.c | 21
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -202,16 +202,20 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                              TranslationBlock *orig_tb, bool ignore_icount)
 {
     TranslationBlock *tb;
+    bool old_tb_flushed;
 
     /* Should never happen.
        We only end up here when an existing TB is too long. */
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
 
+    old_tb_flushed = cpu->tb_flushed;
+    cpu->tb_flushed = false;
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles | CF_NOCACHE
                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
-    tb->orig_tb = tcg_ctx.tb_ctx.tb_invalidated_flag ? NULL : orig_tb;
+    tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
+    cpu->tb_flushed |= old_tb_flushed;
     cpu->current_tb = tb;
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
@@ -232,8 +236,6 @@ static TranslationBlock *tb_find_physical(CPUState *cpu,
     unsigned int h;
     tb_page_addr_t phys_pc, phys_page1;
 
-    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
-
     /* find translated block using physical mappings */
     phys_pc = get_page_addr_code(env, pc);
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
@@ -446,6 +448,7 @@ int cpu_exec(CPUState *cpu)
             }
 
             last_tb = NULL; /* forget the last executed TB after exception */
+            cpu->tb_flushed = false; /* reset before first TB lookup */
             for(;;) {
                 interrupt_request = cpu->interrupt_request;
                 if (unlikely(interrupt_request)) {
@@ -510,14 +513,12 @@ int cpu_exec(CPUState *cpu)
                 }
                 tb_lock();
                 tb = tb_find_fast(cpu);
-                /* Note: we do it here to avoid a gcc bug on Mac OS X when
-                   doing it in tb_find_slow */
-                if (tcg_ctx.tb_ctx.tb_invalidated_flag) {
-                    /* as some TB could have been invalidated because
-                       of memory exceptions while generating the code, we
-                       must recompute the hash index here */
+                if (cpu->tb_flushed) {
+                    /* Ensure that no TB jump will be modified as the
+                     * translation buffer has been flushed.
+                     */
                     last_tb = NULL;
-                    tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
+                    cpu->tb_flushed = false;
                 }
                 /* See if we can patch the calling TB. */
                 if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -304,8 +304,6 @@ struct TBContext {
     /* statistics */
     int tb_flush_count;
     int tb_phys_invalidate_count;
-
-    int tb_invalidated_flag;
 };
 
 void tb_free(TranslationBlock *tb);
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -238,6 +238,7 @@ struct kvm_run;
  * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
  * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
  *           CPU and return to its top level loop.
+ * @tb_flushed: Indicates the translation buffer has been flushed.
  * @singlestep_enabled: Flags for single-stepping.
  * @icount_extra: Instructions until next timer event.
  * @icount_decr: Number of cycles left, with interrupt flag in high bit.
@@ -289,6 +290,7 @@ struct CPUState {
     bool stopped;
     bool crash_occurred;
     bool exit_request;
+    bool tb_flushed;
     uint32_t interrupt_request;
     int singlestep_enabled;
     int64_t icount_extra;
--- a/translate-all.c
+++ b/translate-all.c
@@ -843,6 +843,7 @@ void tb_flush(CPUState *cpu)
 
     CPU_FOREACH(cpu) {
         memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+        cpu->tb_flushed = true;
     }
 
     memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
@@ -1011,8 +1012,6 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
         invalidate_page_bitmap(p);
     }
 
-    tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
-
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
     CPU_FOREACH(cpu) {
@@ -1178,8 +1177,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         /* cannot fail at this point */
         tb = tb_alloc(pc);
         assert(tb != NULL);
-        /* Don't forget to invalidate previous TB info. */
-        tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
     }
 
     gen_code_buf = tcg_ctx.code_gen_ptr;