KVM: PPC: e500: Implement TLB1-in-TLB0 mapping
Today, when a host mapping fault happens in a guest TLB1 entry, we
map the translated guest entry into the host's TLB1.

This isn't particularly clever when the guest is mapped by normal 4k
pages, since those would be far better placed in TLB0 instead.

This patch adds the required logic to map 4k TLB1 shadow maps into
the host's TLB0.

Signed-off-by: Alexander Graf <[email protected]>
agraf committed Jan 24, 2013
1 parent b71c9e2, commit c015c62
Showing 2 changed files with 47 additions and 19 deletions.
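The patch's core decision happens at shadow-map time: if the host mapping backing a guest TLB1 entry had to be degraded to 4k pages, the shadow entry goes into the host's set-associative TLB0, and only larger mappings still occupy one of the few host TLB1 slots. Below is a minimal standalone C sketch of that decision, not the kernel code: map_guest_tlb1_entry and write_shadow are hypothetical stand-ins for illustration, and the numeric TSIZE value for a 4k page is assumed from the Book3E encoding.

#include <stdio.h>

/* Assumed Book3E TSIZE encoding for a 4k page. */
#define BOOK3E_PAGESZ_4K 2

/* Hypothetical stand-in for the kernel's write_stlbe(). */
static void write_shadow(int tlbsel, int sesel, int tsize)
{
	printf("shadow entry (tsize=%d) -> host TLB%d, slot %d\n",
	       tsize, tlbsel, sesel);
}

/* Models the choice kvmppc_e500_tlb1_map() makes after this patch. */
static void map_guest_tlb1_entry(int shadow_tsize, int next_tlb1_slot)
{
	if (shadow_tsize == BOOK3E_PAGESZ_4K) {
		/* 4k-backed: TLB0 is set-associative and indexed by the
		 * address, so no victim slot needs to be tracked. */
		write_shadow(0, 0, shadow_tsize);
	} else {
		/* Larger page: consume one of the scarce TLB1 slots. */
		write_shadow(1, next_tlb1_slot, shadow_tsize);
	}
}

int main(void)
{
	map_guest_tlb1_entry(BOOK3E_PAGESZ_4K, 3);	/* lands in TLB0 */
	map_guest_tlb1_entry(BOOK3E_PAGESZ_4K + 5, 3);	/* larger size, stays in TLB1 */
	return 0;
}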
arch/powerpc/kvm/e500.h (1 addition, 0 deletions)
@@ -28,6 +28,7 @@
 
 #define E500_TLB_VALID 1
 #define E500_TLB_BITMAP 2
+#define E500_TLB_TLB0 (1 << 2)
 
 struct tlbe_ref {
 	pfn_t pfn;
arch/powerpc/kvm/e500_mmu_host.c (46 additions, 19 deletions)
@@ -216,10 +216,21 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
 		vcpu_e500->g2h_tlb1_map[esel] = 0;
 		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
 		local_irq_restore(flags);
+	}
 
-		return;
+	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
+		/*
+		 * TLB1 entry is backed by 4k pages. This should happen
+		 * rarely and is not worth optimizing. Invalidate everything.
+		 */
+		kvmppc_e500_tlbil_all(vcpu_e500);
+		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
 	}
 
+	/* Already invalidated in between */
+	if (!(ref->flags & E500_TLB_VALID))
+		return;
+
 	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
 	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
 
@@ -487,38 +498,54 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
 	return 0;
 }
 
+static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
+				     struct tlbe_ref *ref,
+				     int esel)
+{
+	unsigned int sesel = vcpu_e500->host_tlb1_nv++;
+
+	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
+		vcpu_e500->host_tlb1_nv = 0;
+
+	vcpu_e500->tlb_refs[1][sesel] = *ref;
+	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
+		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
+		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
+	}
+	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
+
+	return sesel;
+}
+
 /* Caller must ensure that the specified guest TLB entry is safe to insert into
  * the shadow TLB. */
-/* XXX for both one-one and one-to-many , for now use TLB1 */
+/* For both one-one and one-to-many */
 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
 		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
-	struct tlbe_ref *ref;
-	unsigned int sesel;
+	struct tlbe_ref ref;
+	int sesel;
 	int r;
-	int stlbsel = 1;
-
-	sesel = vcpu_e500->host_tlb1_nv++;
-
-	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
-		vcpu_e500->host_tlb1_nv = 0;
 
-	ref = &vcpu_e500->tlb_refs[1][sesel];
+	ref.flags = 0;
 	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
-				   ref);
+				   &ref);
 	if (r)
 		return r;
 
-	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
-	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
-	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
-		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
-		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
+	/* Use TLB0 when we can only map a page with 4k */
+	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
+		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
+		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
+		return 0;
 	}
-	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
 
-	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
+	/* Otherwise map into TLB1 */
+	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
+	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
 
 	return 0;
 }
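
The cost of the TLB0 placement shows up on the invalidation side (first hunk above): a TLB1 entry scattered into host TLB0 has no single host entry to shoot down, so the patch flushes everything and then uses the E500_TLB_VALID check to skip redundant work. The following standalone sketch reduces that flag bookkeeping to plain C; the invalidate helper is illustrative, not the kernel's inval_gtlbe_on_host, while the flag values match e500.h above.

#include <stdio.h>

/* Flag values as defined in arch/powerpc/kvm/e500.h after this patch. */
#define E500_TLB_VALID	1
#define E500_TLB_BITMAP	2
#define E500_TLB_TLB0	(1 << 2)

/* Illustrative reduction of the invalidation logic in the first hunk. */
static void invalidate(unsigned int *flags)
{
	if (*flags & E500_TLB_TLB0) {
		/* Backed by 4k TLB0 entries: rare, so just flush it all. */
		puts("kvmppc_e500_tlbil_all(): invalidate everything");
		*flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/* Already invalidated in between */
	if (!(*flags & E500_TLB_VALID))
		return;

	puts("kvmppc_e500_tlbil_one(): targeted invalidation");
	*flags &= ~E500_TLB_VALID;
}

int main(void)
{
	unsigned int flags = E500_TLB_VALID | E500_TLB_TLB0;

	invalidate(&flags);	/* takes the invalidate-everything path */
	invalidate(&flags);	/* no longer valid: returns early */
	return 0;
}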