KVM: PPC: Always use the GPR accessors

Always use the GPR accessor functions. This will be important later for
Nested APIv2 support, which requires additional functionality for
accessing and modifying VCPU state.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-2-jniethe5@gmail.com
Author: Jordan Niethe, 2023-09-14 13:05:50 +10:00, committed by Michael Ellerman
parent 0bb80ecc33
commit 0e85b7df9c
6 files changed, 21 insertions(+), 13 deletions(-)
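
Before the per-file diffs, it may help to recall what the accessors being
adopted here look like. At this point in the series, kvmppc_get_gpr() and
kvmppc_set_gpr() are plain static inlines wrapping the vcpu register array
(assumed here to match their upstream definitions in
arch/powerpc/include/asm/kvm_ppc.h); a minimal sketch of the pattern:

	/*
	 * Sketch of the GPR accessor pattern this patch standardises on.
	 * Today the helpers only touch vcpu->arch.regs.gpr[]; funnelling
	 * every access through them gives the later Nested APIv2 work a
	 * single place to hook in extra behaviour (e.g. marking guest
	 * state dirty, or refreshing it from the L0 hypervisor).
	 */
	static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
	{
		vcpu->arch.regs.gpr[num] = val;
	}

	static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
	{
		return vcpu->arch.regs.gpr[num];
	}

With every call site converted below, follow-up patches only need to change
these two helpers rather than revisiting each H-call handler.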

arch/powerpc/kvm/book3s_64_vio.c

@@ -786,12 +786,12 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	idx = (ioba >> stt->page_shift) - stt->offset;
 	page = stt->pages[idx / TCES_PER_PAGE];
 	if (!page) {
-		vcpu->arch.regs.gpr[4] = 0;
+		kvmppc_set_gpr(vcpu, 4, 0);
 		return H_SUCCESS;
 	}
 	tbl = (u64 *)page_address(page);
-	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
+	kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);
 	return H_SUCCESS;
 }

arch/powerpc/kvm/book3s_hv.c

@@ -1267,10 +1267,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 			return RESUME_HOST;
 		break;
 #endif
-	case H_RANDOM:
-		if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
+	case H_RANDOM: {
+		unsigned long rand;
+
+		if (!arch_get_random_seed_longs(&rand, 1))
 			ret = H_HARDWARE;
+		kvmppc_set_gpr(vcpu, 4, rand);
 		break;
+	}
 	case H_RPT_INVALIDATE:
 		ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
 					      kvmppc_get_gpr(vcpu, 5),

arch/powerpc/kvm/book3s_hv_builtin.c

@@ -182,9 +182,13 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
 long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
 {
+	unsigned long rand;
+
 	if (ppc_md.get_random_seed &&
-	    ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
+	    ppc_md.get_random_seed(&rand)) {
+		kvmppc_set_gpr(vcpu, 4, rand);
 		return H_SUCCESS;
+	}
 	return H_HARDWARE;
 }

arch/powerpc/kvm/book3s_hv_rm_mmu.c

@@ -776,8 +776,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
 			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
 			r &= ~HPTE_GR_RESERVED;
 		}
-		vcpu->arch.regs.gpr[4 + i * 2] = v;
-		vcpu->arch.regs.gpr[5 + i * 2] = r;
+		kvmppc_set_gpr(vcpu, 4 + i * 2, v);
+		kvmppc_set_gpr(vcpu, 5 + i * 2, r);
 	}
 	return H_SUCCESS;
 }
@@ -824,7 +824,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
 			}
 		}
 	}
-	vcpu->arch.regs.gpr[4] = gr;
+	kvmppc_set_gpr(vcpu, 4, gr);
 	ret = H_SUCCESS;
  out:
 	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
@@ -872,7 +872,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 			kvmppc_set_dirty_from_hpte(kvm, v, gr);
 		}
 	}
-	vcpu->arch.regs.gpr[4] = gr;
+	kvmppc_set_gpr(vcpu, 4, gr);
 	ret = H_SUCCESS;
  out:
 	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);

arch/powerpc/kvm/book3s_hv_rm_xics.c

@@ -481,7 +481,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.regs.gpr[5] = get_tb();
+	kvmppc_set_gpr(vcpu, 5, get_tb());
 	return xics_rm_h_xirr(vcpu);
 }
@@ -518,7 +518,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
 	} while (!icp_rm_try_update(icp, old_state, new_state));
 	/* Return the result in GPR4 */
-	vcpu->arch.regs.gpr[4] = xirr;
+	kvmppc_set_gpr(vcpu, 4, xirr);
 	return check_too_hard(xics, icp);
 }

arch/powerpc/kvm/book3s_xive.c

@@ -328,7 +328,7 @@ static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
 	 */
 	/* Return interrupt and old CPPR in GPR4 */
-	vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
+	kvmppc_set_gpr(vcpu, 4, hirq | (old_cppr << 24));
 	return H_SUCCESS;
 }
@@ -364,7 +364,7 @@ static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server
 	hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
 	/* Return interrupt and old CPPR in GPR4 */
-	vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
+	kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24));
 	return H_SUCCESS;
 }