mirror of https://github.com/qemu/qemu.git
tcg: Make tb_flush() thread safe
Use async_safe_run_on_cpu() to make tb_flush() thread safe. This is
possible now that code generation does not happen in the middle of
execution.

It can happen that multiple threads schedule a safe work to flush the
translation buffer. To keep statistics and debugging output sane, always
check if the translation buffer has already been flushed.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
[AJB: minor re-base fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-13-git-send-email-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent 53f5ed9506
commit 3359baad36
cpu-exec.c | 12 ++----------
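The heart of the change is a flush-generation handshake: tb_flush() snapshots
tcg_ctx.tb_ctx.tb_flush_count and passes it to the deferred work item, and
do_tb_flush() re-checks the counter under tb_lock so that concurrent requests
collapse into a single flush. Below is a minimal standalone sketch of that
pattern, using pthreads and illustrative names (flush_generation,
request_flush, do_flush) rather than QEMU's actual API:

/* A standalone sketch of the deduplication pattern this commit applies to
 * tb_flush(): every requester snapshots the current flush generation, and
 * the deferred worker only flushes if no other request has advanced the
 * generation in the meantime.  All names here are illustrative, not QEMU's. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned flush_generation;     /* bumped once per real flush */

/* Deferred work item: flush only if the generation still matches what the
 * requester saw; otherwise another request already did the work. */
static void do_flush(uintptr_t requested_gen)
{
    pthread_mutex_lock(&lock);
    if (flush_generation == (unsigned)requested_gen) {
        printf("flushing, generation %u\n", flush_generation);
        flush_generation++;           /* invalidates all other snapshots */
    }
    pthread_mutex_unlock(&lock);
}

/* Requester side: snapshot the generation, then hand it to the deferred
 * work.  In QEMU the hand-off is async_safe_run_on_cpu(). */
static void *request_flush(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    uintptr_t gen = flush_generation;
    pthread_mutex_unlock(&lock);
    do_flush(gen);
    return NULL;
}

int main(void)
{
    pthread_t threads[4];
    for (int i = 0; i < 4; i++) {
        pthread_create(&threads[i], NULL, request_flush, NULL);
    }
    for (int i = 0; i < 4; i++) {
        pthread_join(threads[i], NULL);
    }
    /* Concurrent requests may collapse into fewer than four flushes. */
    printf("total flushes: %u\n", flush_generation);
    return 0;
}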
cpu-exec.c

@@ -204,20 +204,16 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                              TranslationBlock *orig_tb, bool ignore_icount)
 {
     TranslationBlock *tb;
-    bool old_tb_flushed;
 
     /* Should never happen.
        We only end up here when an existing TB is too long.  */
     if (max_cycles > CF_COUNT_MASK)
         max_cycles = CF_COUNT_MASK;
 
-    old_tb_flushed = cpu->tb_flushed;
-    cpu->tb_flushed = false;
     tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                      max_cycles | CF_NOCACHE
                          | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
-    tb->orig_tb = cpu->tb_flushed ? NULL : orig_tb;
-    cpu->tb_flushed |= old_tb_flushed;
+    tb->orig_tb = orig_tb;
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
     cpu_tb_exec(cpu, tb);
@@ -338,10 +334,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu,
             tb_lock();
             have_tb_lock = true;
         }
-        /* Check if translation buffer has been flushed */
-        if (cpu->tb_flushed) {
-            cpu->tb_flushed = false;
-        } else if (!tb->invalid) {
+        if (!tb->invalid) {
             tb_add_jump(last_tb, tb_exit, tb);
         }
     }
@@ -606,7 +599,6 @@ int cpu_exec(CPUState *cpu)
             break;
         }
 
-        atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
        for(;;) {
            cpu_handle_interrupt(cpu, &last_tb);
            tb = tb_find(cpu, last_tb, tb_exit);
include/exec/tb-context.h

@@ -38,7 +38,7 @@ struct TBContext {
     QemuMutex tb_lock;
 
     /* statistics */
-    int tb_flush_count;
+    unsigned tb_flush_count;
     int tb_phys_invalidate_count;
 };
 
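With the generation counter doing the bookkeeping, the per-CPU tb_flushed
flag (removed from cpu.h below) becomes redundant. The counter itself is
widened to unsigned and only ever accessed via atomic_mb_read() and
atomic_mb_set() or under tb_lock. In portable C11 terms those QEMU macros
behave roughly like sequentially consistent loads and stores; the sketch
below is my mapping onto stdatomic, not QEMU's own definitions:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned tb_flush_count;

/* Roughly the semantics of QEMU's atomic_mb_read(): a load with
 * full-barrier (sequentially consistent) ordering. */
static unsigned flush_count_read(void)
{
    return atomic_load_explicit(&tb_flush_count, memory_order_seq_cst);
}

/* Roughly atomic_mb_set() as used in do_tb_flush(): the increment itself
 * still happens under tb_lock, so a plain read followed by a store with
 * full-barrier ordering is enough. */
static void flush_count_bump(void)
{
    unsigned cur = atomic_load_explicit(&tb_flush_count, memory_order_relaxed);
    atomic_store_explicit(&tb_flush_count, cur + 1, memory_order_seq_cst);
}

int main(void)
{
    unsigned before = flush_count_read();
    flush_count_bump();
    printf("%u -> %u\n", before, flush_count_read());
    return 0;
}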
include/qom/cpu.h

@@ -253,7 +253,6 @@ struct qemu_work_item;
  * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU
  * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
  *           CPU and return to its top level loop.
- * @tb_flushed: Indicates the translation buffer has been flushed.
  * @singlestep_enabled: Flags for single-stepping.
  * @icount_extra: Instructions until next timer event.
  * @icount_decr: Number of cycles left, with interrupt flag in high bit.
@@ -306,7 +305,6 @@ struct CPUState {
     bool unplug;
     bool crash_occurred;
     bool exit_request;
-    bool tb_flushed;
     uint32_t interrupt_request;
     int singlestep_enabled;
     int64_t icount_extra;
translate-all.c

@@ -834,12 +834,19 @@ static void page_flush_tb(void)
 }
 
 /* flush all the translation blocks */
-/* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *cpu)
+static void do_tb_flush(CPUState *cpu, void *data)
 {
-    if (!tcg_enabled()) {
-        return;
+    unsigned tb_flush_req = (unsigned) (uintptr_t) data;
+
+    tb_lock();
+
+    /* If it's already been done on request of another CPU,
+     * just retry.
+     */
+    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
+        goto done;
     }
+
 #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
            (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
@@ -858,7 +865,6 @@ void tb_flush(CPUState *cpu)
         for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
             atomic_set(&cpu->tb_jmp_cache[i], NULL);
         }
-        atomic_mb_set(&cpu->tb_flushed, true);
     }
 
     tcg_ctx.tb_ctx.nb_tbs = 0;
@@ -868,7 +874,19 @@ void tb_flush(CPUState *cpu)
     tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
     /* XXX: flush processor icache at this point if cache flush is
        expensive */
-    tcg_ctx.tb_ctx.tb_flush_count++;
+    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
+                  tcg_ctx.tb_ctx.tb_flush_count + 1);
+
+done:
+    tb_unlock();
+}
+
+void tb_flush(CPUState *cpu)
+{
+    if (tcg_enabled()) {
+        uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+        async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
+    }
+}
 
 #ifdef DEBUG_TB_CHECK
@@ -1175,9 +1193,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  buffer_overflow:
         /* flush must be done */
         tb_flush(cpu);
-        /* cannot fail at this point */
-        tb = tb_alloc(pc);
-        assert(tb != NULL);
+        mmap_unlock();
+        cpu_loop_exit(cpu);
     }
 
     gen_code_buf = tcg_ctx.code_gen_ptr;
@@ -1775,7 +1792,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     qht_statistics_destroy(&hst);
 
     cpu_fprintf(f, "\nStatistics:\n");
-    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
+    cpu_fprintf(f, "TB flush count      %u\n",
+                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
     cpu_fprintf(f, "TB invalidate count %d\n",
                 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
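One knock-on effect shows up in tb_gen_code() above: because tb_flush() now
merely schedules the flush as safe work, the translator can no longer flush
and retry tb_alloc() inline on a code-buffer overflow. Instead it unlocks and
unwinds to the top-level execution loop via cpu_loop_exit(), letting the
pending flush run before translation is retried. Here is a minimal sketch of
that schedule-and-unwind shape, with illustrative names (schedule_flush,
translate_block, top_level) standing in for the QEMU machinery:

#include <setjmp.h>
#include <stdbool.h>
#include <stdio.h>

static jmp_buf top_level;              /* stands in for the cpu_exec() loop */
static bool flush_pending;
static bool buffer_full = true;        /* pretend the first attempt overflows */

static void schedule_flush(void)
{
    flush_pending = true;              /* stands in for async_safe_run_on_cpu() */
}

static void translate_block(void)
{
    if (buffer_full) {
        /* Can't flush inline any more: schedule it and unwind, as the new
         * buffer_overflow path does with tb_flush() + cpu_loop_exit(). */
        schedule_flush();
        longjmp(top_level, 1);
    }
    printf("translated a block\n");
}

int main(void)
{
    if (setjmp(top_level)) {
        /* Back at the top of the loop: run pending safe work first. */
        if (flush_pending) {
            printf("running deferred flush\n");
            buffer_full = false;
            flush_pending = false;
        }
    }
    translate_block();                 /* succeeds after the deferred flush */
    return 0;
}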