KVM: PPC: e500: Implement TLB1-in-TLB0 mapping
When a host mapping fault happens in a guest TLB1 entry today, we map the translated guest entry into the host's TLB1.

This isn't particularly clever when the guest is mapped by normal 4k pages, since these would be a lot better to put into TLB0 instead.

This patch adds the required logic to map 4k TLB1 shadow maps into the host's TLB0.

Signed-off-by: Alexander Graf <agraf@suse.de>
commit c015c62b13
parent b71c9e2fb7
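The whole patch hinges on one placement decision: after the shadow entry has been built, its resulting page size decides which host TLB receives it. A 4k-sized shadow mapping goes into the set-associative host TLB0; anything larger still takes a host TLB1 slot. Below is a minimal standalone C sketch of that decision, not kernel code: the struct, helper, and constant value are invented stand-ins, and only the 4k-versus-larger split itself comes from the patch.

#include <stdio.h>

#define PAGESZ_4K 2   /* stand-in for BOOK3E_PAGESZ_4K; value illustrative */

struct shadow_tlbe {
        int tsize;    /* stand-in for the shadow entry's page-size field */
};

/* Core decision of the patch: a guest TLB1 entry whose shadow mapping could
 * only be built from 4k pages should go into the set-associative host TLB0
 * instead of burning one of the few host TLB1 slots. */
static int pick_host_tlb(const struct shadow_tlbe *stlbe)
{
        return stlbe->tsize == PAGESZ_4K ? 0 : 1;
}

int main(void)
{
        struct shadow_tlbe small = { .tsize = PAGESZ_4K };
        struct shadow_tlbe big = { .tsize = 7 };   /* some larger size code */

        printf("4k shadow map  -> host TLB%d\n", pick_host_tlb(&small));
        printf("big shadow map -> host TLB%d\n", pick_host_tlb(&big));
        return 0;
}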
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -28,6 +28,7 @@
 
 #define E500_TLB_VALID 1
 #define E500_TLB_BITMAP 2
+#define E500_TLB_TLB0		(1 << 2)
 
 struct tlbe_ref {
 	pfn_t pfn;
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -216,10 +216,21 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 		vcpu_e500->g2h_tlb1_map[esel] = 0;
 		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
 		local_irq_restore(flags);
+	}
 
-		return;
+	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
+		/*
+		 * TLB1 entry is backed by 4k pages. This should happen
+		 * rarely and is not worth optimizing. Invalidate everything.
+		 */
+		kvmppc_e500_tlbil_all(vcpu_e500);
+		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
 	}
 
+	/* Already invalidated in between */
+	if (!(ref->flags & E500_TLB_VALID))
+		return;
+
 	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
 	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
 }
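The invalidation side above deserves a gloss: a TLB0-backed guest TLB1 entry is not tracked per host slot, so the patch flushes the whole host TLB in that (rare) case and clears E500_TLB_VALID, which then short-circuits the single-entry invalidate at the end. A simplified toy model of that dispatch follows; the flag values are the real ones from e500.h, everything else is an invented stand-in for illustration.

#include <stdio.h>

#define E500_TLB_VALID  1
#define E500_TLB_BITMAP 2
#define E500_TLB_TLB0   (1 << 2)

/* Toy model of the dispatch in inval_gtlbe_on_host(); locking and the
 * bitmap walk of the real function are omitted. */
static void inval_shadow(unsigned int *flags, int tlbsel)
{
        if (tlbsel == 1 && (*flags & E500_TLB_BITMAP)) {
                puts("invalidate each host TLB1 slot recorded in the bitmap");
                *flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
        }

        if (tlbsel == 1 && (*flags & E500_TLB_TLB0)) {
                /* 4k-backed TLB1 entries are not tracked per host slot,
                 * so the patch simply flushes everything (rare case). */
                puts("invalidate all host TLB entries");
                *flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        if (!(*flags & E500_TLB_VALID))
                return;   /* already invalidated above */

        puts("invalidate the single backing host TLB entry");
}

int main(void)
{
        unsigned int flags = E500_TLB_VALID | E500_TLB_TLB0;
        inval_shadow(&flags, 1);   /* takes the flush-everything path */
        return 0;
}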
@@ -487,29 +498,16 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
 	return 0;
 }
 
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-/* XXX for both one-one and one-to-many , for now use TLB1 */
-static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
+static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
+				     struct tlbe_ref *ref,
+				     int esel)
 {
-	struct tlbe_ref *ref;
-	unsigned int sesel;
-	int r;
-	int stlbsel = 1;
-
-	sesel = vcpu_e500->host_tlb1_nv++;
+	unsigned int sesel = vcpu_e500->host_tlb1_nv++;
 
 	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
 		vcpu_e500->host_tlb1_nv = 0;
 
-	ref = &vcpu_e500->tlb_refs[1][sesel];
-	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   ref);
-	if (r)
-		return r;
-
+	vcpu_e500->tlb_refs[1][sesel] = *ref;
 	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
 	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
 	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
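This hunk is mostly refactoring: the host-TLB1 slot allocation and bookkeeping move into a new helper, kvmppc_e500_tlb1_map_tlb1(), preserving the existing round-robin next-victim policy in which host_tlb1_nv names the next slot to reuse and wraps at tlb1_max_shadow_size(). A standalone sketch of that policy, with an invented size constant standing in for tlb1_max_shadow_size():

#include <stdio.h>

#define TLB1_MAX_SHADOW 16   /* stand-in for tlb1_max_shadow_size() */

static unsigned int host_tlb1_nv;   /* next-victim cursor, as in the patch */

/* Round-robin allocator: hand out the current slot, advance the cursor,
 * wrap when the shadow region of host TLB1 is exhausted. */
static unsigned int alloc_tlb1_slot(void)
{
        unsigned int sesel = host_tlb1_nv++;

        if (host_tlb1_nv >= TLB1_MAX_SHADOW)
                host_tlb1_nv = 0;
        return sesel;
}

int main(void)
{
        for (int i = 0; i < 20; i++)
                printf("%u ", alloc_tlb1_slot());   /* 0..15, then wraps */
        printf("\n");
        return 0;
}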
@@ -518,7 +516,36 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
 
-	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+	return sesel;
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+/* For both one-one and one-to-many */
+static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
+		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
+		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
+{
+	struct tlbe_ref ref;
+	int sesel;
+	int r;
+
+	ref.flags = 0;
+	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
+				   &ref);
+	if (r)
+		return r;
+
+	/* Use TLB0 when we can only map a page with 4k */
+	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
+		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
+		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
+		return 0;
+	}
+
+	/* Otherwise map into TLB1 */
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
 }
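The context lines in the last two hunks also show the bookkeeping that round-robin reuse relies on: g2h_tlb1_map[esel] is a 64-bit bitmap of the host TLB1 slots backing guest entry esel (one guest entry can fan out to several host slots, at most one per shadow pid), and h2g_tlb1_rmap[sesel] is the reverse map consulted when a slot is evicted. A toy version of that forward/reverse pairing, with invented array sizes:

#include <stdio.h>
#include <stdint.h>

/* Names mirror g2h_tlb1_map / h2g_tlb1_rmap; the code is illustrative. */
#define GUEST_TLB1_ENTRIES 16
#define HOST_TLB1_SLOTS    64

static uint64_t g2h[GUEST_TLB1_ENTRIES];   /* guest entry -> host-slot bitmap */
static int h2g[HOST_TLB1_SLOTS];           /* host slot -> owning guest entry */

static void record_mapping(int esel, unsigned int sesel)
{
        g2h[esel] |= (uint64_t)1 << sesel;  /* guest entry gains a host slot */
        h2g[sesel] = esel;                  /* reverse map for eviction */
}

int main(void)
{
        record_mapping(3, 5);
        record_mapping(3, 9);   /* one guest entry, two host slots */

        printf("g2h[3] = %#llx\n", (unsigned long long)g2h[3]);   /* 0x220 */
        printf("h2g[9] = %d\n", h2g[9]);                          /* 3 */
        return 0;
}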