vcpu_dirty: share the same field in CPUState for all accelerators

This patch replaces the separate boolean fields in CPUState that kvm, hax
(and the upcoming hvf) use to track vcpu dirtiness with a single shared
field.

Signed-off-by: Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>
Message-Id: <20170618191101.3457-1-Sergio.G.DelReal@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 99f318322e (parent 0c7a8b9baa)
Author:    Sergio Andres Gomez Del Real <Sergio.G.DelReal@gmail.com>
Date:      2017-06-18 14:11:01 -05:00
Committer: Paolo Bonzini
4 changed files with 20 additions and 19 deletions
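
For orientation before the diff: the dirty flag records whether QEMU's cached copy of the vcpu registers differs from the hypervisor's and must be written back before the vcpu runs again. The sketch below is a minimal, self-contained illustration of that pattern, not QEMU code; CPUState is reduced to a toy struct and accel_get_registers()/accel_put_registers() are hypothetical stand-ins for the per-accelerator kvm_arch_*/hax_* calls, but the flag logic mirrors what the patch consolidates into the single cpu->vcpu_dirty field.

/* Simplified sketch of the shared dirty-flag pattern (not actual QEMU code). */
#include <stdbool.h>
#include <stdio.h>

typedef struct CPUState {
    /* shared by kvm, hax and hvf: true when QEMU's cached register state
     * differs from the hypervisor's copy and must be pushed back */
    bool vcpu_dirty;
    int regs;   /* stand-in for the real cached register state */
} CPUState;

/* Hypothetical stand-ins for kvm_arch_get_registers()/kvm_arch_put_registers()
 * and the hax equivalents. */
static void accel_get_registers(CPUState *cpu) { cpu->regs = 42; }
static void accel_put_registers(CPUState *cpu) { (void)cpu; }

/* Pull state from the hypervisor into QEMU's cache once, then mark it dirty
 * so it is written back before the vcpu runs again. */
static void cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        accel_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

/* Before entering the vcpu, flush QEMU's cached state back and clear the flag. */
static void cpu_pre_run(CPUState *cpu)
{
    if (cpu->vcpu_dirty) {
        accel_put_registers(cpu);
        cpu->vcpu_dirty = false;
    }
}

int main(void)
{
    CPUState cpu = { .vcpu_dirty = true };   /* freshly created vcpus start dirty */
    cpu_pre_run(&cpu);                       /* pushes state, clears the flag */
    cpu_synchronize_state(&cpu);             /* pulls state, sets the flag */
    printf("dirty=%d regs=%d\n", cpu.vcpu_dirty, cpu.regs);
    return 0;
}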

File 1 of 4

@@ -318,7 +318,7 @@ int kvm_init_vcpu(CPUState *cpu)
 
     cpu->kvm_fd = ret;
     cpu->kvm_state = s;
-    cpu->kvm_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 
     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
     if (mmap_size < 0) {
@@ -1864,15 +1864,15 @@ void kvm_flush_coalesced_mmio_buffer(void)
 
 static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
-    if (!cpu->kvm_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         kvm_arch_get_registers(cpu);
-        cpu->kvm_vcpu_dirty = true;
+        cpu->vcpu_dirty = true;
     }
 }
 
 void kvm_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->kvm_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -1880,7 +1880,7 @@ void kvm_cpu_synchronize_state(CPUState *cpu)
 static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
-    cpu->kvm_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
@@ -1891,7 +1891,7 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu)
 static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
 {
     kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
-    cpu->kvm_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void kvm_cpu_synchronize_post_init(CPUState *cpu)
@@ -1901,7 +1901,7 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu)
 
 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 {
-    cpu->kvm_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
@@ -1982,9 +1982,9 @@ int kvm_cpu_exec(CPUState *cpu)
     do {
         MemTxAttrs attrs;
 
-        if (cpu->kvm_vcpu_dirty) {
+        if (cpu->vcpu_dirty) {
             kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
-            cpu->kvm_vcpu_dirty = false;
+            cpu->vcpu_dirty = false;
         }
 
         kvm_arch_pre_run(cpu, run);

File 2 of 4

@@ -369,7 +369,6 @@ struct CPUState {
     vaddr mem_io_vaddr;
 
     int kvm_fd;
-    bool kvm_vcpu_dirty;
     struct KVMState *kvm_state;
     struct kvm_run *kvm_run;
 
@@ -386,6 +385,9 @@ struct CPUState {
     uint32_t can_do_io;
     int32_t exception_index; /* used by m68k TCG */
 
+    /* shared by kvm, hax and hvf */
+    bool vcpu_dirty;
+
     /* Used to keep track of an outstanding cpu throttle thread for migration
      * autoconverge
      */
@@ -400,7 +402,6 @@ struct CPUState {
         icount_decr_u16 u16;
     } icount_decr;
 
-    bool hax_vcpu_dirty;
     struct hax_vcpu_state *hax_vcpu;
 
     /* The pending_tlb_flush flag is set and cleared atomically to

File 3 of 4

@@ -232,7 +232,7 @@ int hax_init_vcpu(CPUState *cpu)
     }
 
     cpu->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
     qemu_register_reset(hax_reset_vcpu_state, (CPUArchState *) (cpu->env_ptr));
 
     return ret;
@@ -599,12 +599,12 @@ static void do_hax_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
     CPUArchState *env = cpu->env_ptr;
 
     hax_arch_get_registers(env);
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void hax_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->hax_vcpu_dirty) {
+    if (!cpu->vcpu_dirty) {
         run_on_cpu(cpu, do_hax_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -615,7 +615,7 @@ static void do_hax_cpu_synchronize_post_reset(CPUState *cpu,
     CPUArchState *env = cpu->env_ptr;
 
     hax_vcpu_sync_state(env, 1);
-    cpu->hax_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void hax_cpu_synchronize_post_reset(CPUState *cpu)
@@ -628,7 +628,7 @@ static void do_hax_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
     CPUArchState *env = cpu->env_ptr;
 
     hax_vcpu_sync_state(env, 1);
-    cpu->hax_vcpu_dirty = false;
+    cpu->vcpu_dirty = false;
 }
 
 void hax_cpu_synchronize_post_init(CPUState *cpu)
@@ -638,7 +638,7 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
 
 static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
 {
-    cpu->hax_vcpu_dirty = true;
+    cpu->vcpu_dirty = true;
 }
 
 void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)

File 4 of 4

@@ -523,7 +523,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
      * already saved and can be restored when it is synced back to KVM.
      */
     if (!running) {
-        if (!cs->kvm_vcpu_dirty) {
+        if (!cs->vcpu_dirty) {
             ret = kvm_mips_save_count(cs);
             if (ret < 0) {
                 fprintf(stderr, "Failed saving count\n");
@@ -539,7 +539,7 @@ static void kvm_mips_update_state(void *opaque, int running, RunState state)
             return;
         }
 
-        if (!cs->kvm_vcpu_dirty) {
+        if (!cs->vcpu_dirty) {
             ret = kvm_mips_restore_count(cs);
             if (ret < 0) {
                 fprintf(stderr, "Failed restoring count\n");