
Commit c17b98c

paulusmack authored and agraf committed
KVM: PPC: Book3S HV: Remove code for PPC970 processors
This removes the code that was added to enable HV KVM to work on PPC970 processors. The PPC970 is an old CPU that doesn't support virtualizing guest memory.

Removing PPC970 support also lets us remove the code for allocating and managing contiguous real-mode areas, the code for the !kvm->arch.using_mmu_notifiers case, the code for pinning pages of guest memory when first accessed and keeping track of which pages have been pinned, and the code for handling H_ENTER hypercalls in virtual mode.

Book3S HV KVM is now supported only on POWER7 and POWER8 processors. The KVM_CAP_PPC_RMA capability now always returns 0.

Signed-off-by: Paul Mackerras <[email protected]>
Signed-off-by: Alexander Graf <[email protected]>
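Userspace VMMs have historically probed KVM_CAP_PPC_RMA to decide whether they had to allocate a real-mode area before starting a guest; after this change the capability always reports 0, meaning no RMA is ever required (POWER7/POWER8 guests start in the VRMA instead). A minimal sketch of such a probe via the standard KVM_CHECK_EXTENSION ioctl — illustrative only, not part of this commit:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	if (kvm_fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}
	/*
	 * With this commit applied, KVM_CAP_PPC_RMA reports 0:
	 * POWER7/POWER8 guests run in the virtual real-mode area
	 * (VRMA), so userspace never needs to allocate an RMA.
	 */
	int rma = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_RMA);
	printf("KVM_CAP_PPC_RMA = %d\n", rma);
	return 0;
}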
1 parent 3c78f78 · commit c17b98c

13 files changed: +70 -955 lines

arch/powerpc/include/asm/kvm_book3s.h (-2)
@@ -170,8 +170,6 @@ extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
 			unsigned long *nb_ret);
 extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
 			unsigned long gpa, bool dirty);
-extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			long pte_index, unsigned long pteh, unsigned long ptel);
 extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 			long pte_index, unsigned long pteh, unsigned long ptel,
 			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);

arch/powerpc/include/asm/kvm_book3s_64.h (-1)
@@ -37,7 +37,6 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
-extern unsigned long kvm_rma_pages;
 #endif
 
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */

arch/powerpc/include/asm/kvm_host.h (-14)
@@ -180,11 +180,6 @@ struct kvmppc_spapr_tce_table {
 	struct page *pages[0];
 };
 
-struct kvm_rma_info {
-	atomic_t use_count;
-	unsigned long base_pfn;
-};
-
 /* XICS components, defined in book3s_xics.c */
 struct kvmppc_xics;
 struct kvmppc_icp;
@@ -214,16 +209,9 @@ struct revmap_entry {
 #define KVMPPC_RMAP_PRESENT	0x100000000ul
 #define KVMPPC_RMAP_INDEX	0xfffffffful
 
-/* Low-order bits in memslot->arch.slot_phys[] */
-#define KVMPPC_PAGE_ORDER_MASK	0x1f
-#define KVMPPC_PAGE_NO_CACHE	HPTE_R_I	/* 0x20 */
-#define KVMPPC_PAGE_WRITETHRU	HPTE_R_W	/* 0x40 */
-#define KVMPPC_GOT_PAGE		0x80
-
 struct kvm_arch_memory_slot {
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
-	unsigned long *slot_phys;
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
@@ -242,14 +230,12 @@ struct kvm_arch {
 	struct kvm_rma_info *rma;
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
-	int using_mmu_notifiers;
 	u32 hpt_order;
 	atomic_t vcpus_running;
 	u32 online_vcores;
 	unsigned long hpt_npte;
 	unsigned long hpt_mask;
 	atomic_t hpte_mod_interest;
-	spinlock_t slot_phys_lock;
 	cpumask_t need_tlb_flush;
 	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

arch/powerpc/include/asm/kvm_ppc.h (-2)
@@ -170,8 +170,6 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
 extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba);
-extern struct kvm_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);

arch/powerpc/kernel/asm-offsets.c (-1)
@@ -489,7 +489,6 @@ int main(void)
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
 	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
-	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
 	DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
 	DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
 	DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));

arch/powerpc/kvm/book3s_64_mmu_hv.c (+14 -186)
@@ -39,9 +39,6 @@
 
 #include "trace_hv.h"
 
-/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
-#define MAX_LPID_970	63
-
 /* Power architecture requires HPT is at least 256kB */
 #define PPC_MIN_HPT_ORDER	18
 
@@ -231,14 +228,9 @@ int kvmppc_mmu_hv_init(void)
 	if (!cpu_has_feature(CPU_FTR_HVMODE))
 		return -EINVAL;
 
-	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
-	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
-		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
-		rsvd_lpid = LPID_RSVD;
-	} else {
-		host_lpid = 0;			/* PPC970 */
-		rsvd_lpid = MAX_LPID_970;
-	}
+	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
+	host_lpid = mfspr(SPRN_LPID);
+	rsvd_lpid = LPID_RSVD;
 
 	kvmppc_init_lpid(rsvd_lpid + 1);
 
@@ -261,130 +253,12 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, msr);
 }
 
-/*
- * This is called to get a reference to a guest page if there isn't
- * one already in the memslot->arch.slot_phys[] array.
- */
-static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
-				  struct kvm_memory_slot *memslot,
-				  unsigned long psize)
-{
-	unsigned long start;
-	long np, err;
-	struct page *page, *hpage, *pages[1];
-	unsigned long s, pgsize;
-	unsigned long *physp;
-	unsigned int is_io, got, pgorder;
-	struct vm_area_struct *vma;
-	unsigned long pfn, i, npages;
-
-	physp = memslot->arch.slot_phys;
-	if (!physp)
-		return -EINVAL;
-	if (physp[gfn - memslot->base_gfn])
-		return 0;
-
-	is_io = 0;
-	got = 0;
-	page = NULL;
-	pgsize = psize;
-	err = -EINVAL;
-	start = gfn_to_hva_memslot(memslot, gfn);
-
-	/* Instantiate and get the page we want access to */
-	np = get_user_pages_fast(start, 1, 1, pages);
-	if (np != 1) {
-		/* Look up the vma for the page */
-		down_read(&current->mm->mmap_sem);
-		vma = find_vma(current->mm, start);
-		if (!vma || vma->vm_start > start ||
-		    start + psize > vma->vm_end ||
-		    !(vma->vm_flags & VM_PFNMAP))
-			goto up_err;
-		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
-		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
-		/* check alignment of pfn vs. requested page size */
-		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
-			goto up_err;
-		up_read(&current->mm->mmap_sem);
-
-	} else {
-		page = pages[0];
-		got = KVMPPC_GOT_PAGE;
-
-		/* See if this is a large page */
-		s = PAGE_SIZE;
-		if (PageHuge(page)) {
-			hpage = compound_head(page);
-			s <<= compound_order(hpage);
-			/* Get the whole large page if slot alignment is ok */
-			if (s > psize && slot_is_aligned(memslot, s) &&
-			    !(memslot->userspace_addr & (s - 1))) {
-				start &= ~(s - 1);
-				pgsize = s;
-				get_page(hpage);
-				put_page(page);
-				page = hpage;
-			}
-		}
-		if (s < psize)
-			goto out;
-		pfn = page_to_pfn(page);
-	}
-
-	npages = pgsize >> PAGE_SHIFT;
-	pgorder = __ilog2(npages);
-	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
-	spin_lock(&kvm->arch.slot_phys_lock);
-	for (i = 0; i < npages; ++i) {
-		if (!physp[i]) {
-			physp[i] = ((pfn + i) << PAGE_SHIFT) +
-				got + is_io + pgorder;
-			got = 0;
-		}
-	}
-	spin_unlock(&kvm->arch.slot_phys_lock);
-	err = 0;
-
- out:
-	if (got)
-		put_page(page);
-	return err;
-
- up_err:
-	up_read(&current->mm->mmap_sem);
-	return err;
-}
-
 long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 				long pte_index, unsigned long pteh,
 				unsigned long ptel, unsigned long *pte_idx_ret)
 {
-	unsigned long psize, gpa, gfn;
-	struct kvm_memory_slot *memslot;
 	long ret;
 
-	if (kvm->arch.using_mmu_notifiers)
-		goto do_insert;
-
-	psize = hpte_page_size(pteh, ptel);
-	if (!psize)
-		return H_PARAMETER;
-
-	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
-	/* Find the memslot (if any) for this address */
-	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
-	gfn = gpa >> PAGE_SHIFT;
-	memslot = gfn_to_memslot(kvm, gfn);
-	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
-		if (!slot_is_aligned(memslot, psize))
-			return H_PARAMETER;
-		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
-			return H_PARAMETER;
-	}
-
- do_insert:
 	/* Protect linux PTE lookup from page table destruction */
 	rcu_read_lock_sched();	/* this disables preemption too */
 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
@@ -399,19 +273,6 @@ long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 }
 
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			     long pte_index, unsigned long pteh,
-			     unsigned long ptel)
-{
-	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
-					  pteh, ptel, &vcpu->arch.gpr[4]);
-}
-
 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
 							 gva_t eaddr)
 {
@@ -496,7 +357,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
 
 	/* Storage key permission check for POWER7 */
-	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+	if (data && virtmode) {
 		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
 		if (amrfield & 1)
 			gpte->may_read = 0;
@@ -631,9 +492,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
 					      dsisr & DSISR_ISSTORE);
 
-	if (!kvm->arch.using_mmu_notifiers)
-		return -EFAULT;		/* should never get here */
-
 	/*
 	 * This should never happen, because of the slot_is_aligned()
 	 * check in kvmppc_do_h_enter().
@@ -902,8 +760,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
 		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
 		    hpte_rpn(ptel, psize) == gfn) {
-			if (kvm->arch.using_mmu_notifiers)
-				hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+			hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 			kvmppc_invalidate_hpte(kvm, hptep, i);
 			/* Harvest R and C */
 			rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
@@ -921,15 +778,13 @@
 
 int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	if (kvm->arch.using_mmu_notifiers)
-		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 	return 0;
 }
 
 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (kvm->arch.using_mmu_notifiers)
-		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+	kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
 	return 0;
 }
 
@@ -1011,8 +866,6 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
 	return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
 }
 
@@ -1049,15 +902,11 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
 	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
 }
 
 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return;
 	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 }
 
@@ -1216,35 +1065,17 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	struct page *page, *pages[1];
 	int npages;
 	unsigned long hva, offset;
-	unsigned long pa;
-	unsigned long *physp;
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	memslot = gfn_to_memslot(kvm, gfn);
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
-	if (!kvm->arch.using_mmu_notifiers) {
-		physp = memslot->arch.slot_phys;
-		if (!physp)
-			goto err;
-		physp += gfn - memslot->base_gfn;
-		pa = *physp;
-		if (!pa) {
-			if (kvmppc_get_guest_page(kvm, gfn, memslot,
-						  PAGE_SIZE) < 0)
-				goto err;
-			pa = *physp;
-		}
-		page = pfn_to_page(pa >> PAGE_SHIFT);
-		get_page(page);
-	} else {
-		hva = gfn_to_hva_memslot(memslot, gfn);
-		npages = get_user_pages_fast(hva, 1, 1, pages);
-		if (npages < 1)
-			goto err;
-		page = pages[0];
-	}
+	hva = gfn_to_hva_memslot(memslot, gfn);
+	npages = get_user_pages_fast(hva, 1, 1, pages);
+	if (npages < 1)
+		goto err;
+	page = pages[0];
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
 	offset = gpa & (PAGE_SIZE - 1);
12681099

12691100
put_page(page);
12701101

1271-
if (!dirty || !kvm->arch.using_mmu_notifiers)
1102+
if (!dirty)
12721103
return;
12731104

12741105
/* We need to mark this page dirty in the rmap chain */
@@ -1668,10 +1499,7 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
-		vcpu->arch.slb_nr = 32;		/* POWER7 */
-	else
-		vcpu->arch.slb_nr = 64;
+	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */
 
 	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
 	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
