 #include "trace_hv.h"
 
-/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
-#define MAX_LPID_970	63
-
 /* Power architecture requires HPT is at least 256kB */
 #define PPC_MIN_HPT_ORDER	18
@@ -231,14 +228,9 @@ int kvmppc_mmu_hv_init(void)
 	if (!cpu_has_feature(CPU_FTR_HVMODE))
 		return -EINVAL;
 
-	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
-	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
-		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
-		rsvd_lpid = LPID_RSVD;
-	} else {
-		host_lpid = 0;			/* PPC970 */
-		rsvd_lpid = MAX_LPID_970;
-	}
+	/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
+	host_lpid = mfspr(SPRN_LPID);
+	rsvd_lpid = LPID_RSVD;
 
 	kvmppc_init_lpid(rsvd_lpid + 1);
 
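The kvmppc_init_lpid(rsvd_lpid + 1) call above seeds KVM's global LPID allocator with the number of usable IDs below the reserved one. A minimal sketch of how such a bitmap-based allocator can work follows; the names lpid_inuse and nr_lpids and the 12-bit width are illustrative assumptions, not necessarily the kernel's exact implementation:

/* Sketch: bitmap-based LPID allocator seeded by the init call above. */
#define EXAMPLE_NR_LPIDS	(1UL << 12)	/* assume 12-bit LPIDs (POWER8) */

static DECLARE_BITMAP(lpid_inuse, EXAMPLE_NR_LPIDS);
static unsigned long nr_lpids;

/* Seed the allocator: LPIDs 0..nr-1 are usable; LPID 0 is the host's. */
void example_init_lpid(unsigned long nr)
{
	nr_lpids = min(nr, EXAMPLE_NR_LPIDS);
	set_bit(0, lpid_inuse);
}

/* Claim the lowest free LPID, retrying if another CPU races us to it. */
long example_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, nr_lpids);
		if (lpid >= nr_lpids)
			return -ENOMEM;
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}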
@@ -261,130 +253,12 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 	kvmppc_set_msr(vcpu, msr);
 }
 
-/*
- * This is called to get a reference to a guest page if there isn't
- * one already in the memslot->arch.slot_phys[] array.
- */
-static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
-				  struct kvm_memory_slot *memslot,
-				  unsigned long psize)
-{
-	unsigned long start;
-	long np, err;
-	struct page *page, *hpage, *pages[1];
-	unsigned long s, pgsize;
-	unsigned long *physp;
-	unsigned int is_io, got, pgorder;
-	struct vm_area_struct *vma;
-	unsigned long pfn, i, npages;
-
-	physp = memslot->arch.slot_phys;
-	if (!physp)
-		return -EINVAL;
-	if (physp[gfn - memslot->base_gfn])
-		return 0;
-
-	is_io = 0;
-	got = 0;
-	page = NULL;
-	pgsize = psize;
-	err = -EINVAL;
-	start = gfn_to_hva_memslot(memslot, gfn);
-
-	/* Instantiate and get the page we want access to */
-	np = get_user_pages_fast(start, 1, 1, pages);
-	if (np != 1) {
-		/* Look up the vma for the page */
-		down_read(&current->mm->mmap_sem);
-		vma = find_vma(current->mm, start);
-		if (!vma || vma->vm_start > start ||
-		    start + psize > vma->vm_end ||
-		    !(vma->vm_flags & VM_PFNMAP))
-			goto up_err;
-		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
-		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
-		/* check alignment of pfn vs. requested page size */
-		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
-			goto up_err;
-		up_read(&current->mm->mmap_sem);
-
-	} else {
-		page = pages[0];
-		got = KVMPPC_GOT_PAGE;
-
-		/* See if this is a large page */
-		s = PAGE_SIZE;
-		if (PageHuge(page)) {
-			hpage = compound_head(page);
-			s <<= compound_order(hpage);
-			/* Get the whole large page if slot alignment is ok */
-			if (s > psize && slot_is_aligned(memslot, s) &&
-			    !(memslot->userspace_addr & (s - 1))) {
-				start &= ~(s - 1);
-				pgsize = s;
-				get_page(hpage);
-				put_page(page);
-				page = hpage;
-			}
-		}
-		if (s < psize)
-			goto out;
-		pfn = page_to_pfn(page);
-	}
-
-	npages = pgsize >> PAGE_SHIFT;
-	pgorder = __ilog2(npages);
-	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
-	spin_lock(&kvm->arch.slot_phys_lock);
-	for (i = 0; i < npages; ++i) {
-		if (!physp[i]) {
-			physp[i] = ((pfn + i) << PAGE_SHIFT) +
-				got + is_io + pgorder;
-			got = 0;
-		}
-	}
-	spin_unlock(&kvm->arch.slot_phys_lock);
-	err = 0;
-
- out:
-	if (got)
-		put_page(page);
-	return err;
-
- up_err:
-	up_read(&current->mm->mmap_sem);
-	return err;
-}
-
 long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 				long pte_index, unsigned long pteh,
 				unsigned long ptel, unsigned long *pte_idx_ret)
 {
-	unsigned long psize, gpa, gfn;
-	struct kvm_memory_slot *memslot;
 	long ret;
 
-	if (kvm->arch.using_mmu_notifiers)
-		goto do_insert;
-
-	psize = hpte_page_size(pteh, ptel);
-	if (!psize)
-		return H_PARAMETER;
-
-	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
-	/* Find the memslot (if any) for this address */
-	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
-	gfn = gpa >> PAGE_SHIFT;
-	memslot = gfn_to_memslot(kvm, gfn);
-	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
-		if (!slot_is_aligned(memslot, psize))
-			return H_PARAMETER;
-		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
-			return H_PARAMETER;
-	}
-
- do_insert:
 	/* Protect linux PTE lookup from page table destruction */
 	rcu_read_lock_sched();	/* this disables preemption too */
 	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
@@ -399,19 +273,6 @@ long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 
 }
 
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
-			     long pte_index, unsigned long pteh,
-			     unsigned long ptel)
-{
-	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
-					  pteh, ptel, &vcpu->arch.gpr[4]);
-}
-
 static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
 							 gva_t eaddr)
 {
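With the pinning path gone, every virtual-mode H_ENTER goes straight into kvmppc_do_h_enter(), relying on the MMU notifiers to fault pages in and out. The rcu_read_lock_sched() around the insert is what makes the Linux PTE lookup safe: page-table teardown waits for an RCU-sched grace period, and holding the sched read lock (which also disables preemption) holds that grace period off. A minimal sketch of the pattern, with lookup_linux_pte() standing in as an illustrative name for the real lookup:

/*
 * Sketch: walking the host page tables safely from virtual mode.
 * rcu_read_lock_sched() disables preemption, so a concurrent
 * free_pgtables() cannot complete its RCU-sched grace period and
 * free the page tables out from under the walk.
 */
rcu_read_lock_sched();
pte = lookup_linux_pte(pgdir, hva, writing, &pte_size);	/* illustrative */
/* ... use pte to build and insert the HPTE ... */
rcu_read_unlock_sched();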
@@ -496,7 +357,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
 
 	/* Storage key permission check for POWER7 */
-	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+	if (data && virtmode) {
 		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
 		if (amrfield & 1)
 			gpte->may_read = 0;
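The storage-key check now runs unconditionally because every supported CPU is at least POWER7. hpte_get_skey_perm() folds the HPTE's storage key together with the AMR; below is a sketch of what such a helper plausibly does, where the exact shift constants are assumptions based on the Power ISA layout:

/*
 * Sketch: recover the 5-bit storage key scattered across the HPTE
 * second doubleword, then pull the matching 2-bit field out of the AMR.
 */
static inline int example_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned int skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
			    ((hpte_r & HPTE_R_KEY_LO) >> 9);

	/*
	 * Key 0 lives in the two most-significant AMR bits, key 31 in the
	 * two least-significant; bit 0 of the field blocks reads, bit 1
	 * blocks writes (matching the amrfield tests above).
	 */
	return (amr >> (62 - 2 * skey)) & 3;
}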
@@ -631,9 +492,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
 					      dsisr & DSISR_ISSTORE);
 
-	if (!kvm->arch.using_mmu_notifiers)
-		return -EFAULT;		/* should never get here */
-
 	/*
 	 * This should never happen, because of the slot_is_aligned()
 	 * check in kvmppc_do_h_enter().
@@ -902,8 +760,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 		psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
 		if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
 		    hpte_rpn(ptel, psize) == gfn) {
-			if (kvm->arch.using_mmu_notifiers)
-				hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+			hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
 			kvmppc_invalidate_hpte(kvm, hptep, i);
 			/* Harvest R and C */
 			rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
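Setting HPTE_V_ABSENT before invalidating is what lets the host unmap a page without the guest noticing: the entry stops being a valid hardware translation, but hcalls like H_READ still see it as present, and the next guest access faults back into kvmppc_book3s_hv_page_fault() to re-establish the mapping. Roughly, as a sketch (the tlbie plumbing inside kvmppc_invalidate_hpte() is elided):

/* Sketch of the invalidate sequence used above. */
hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);	/* still "present" to the guest */
hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);	/* no longer valid to hardware */
/* ... tlbie with this entry's RB value to flush cached translations ... */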
@@ -921,15 +778,13 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	if (kvm->arch.using_mmu_notifiers)
-		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 	return 0;
 }
 
 int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (kvm->arch.using_mmu_notifiers)
-		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+	kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
 	return 0;
 }
 
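Both helpers above are thin wrappers: kvm_handle_hva_range() clips the host-address range against each memslot, converts it to guest frame numbers, and runs the per-page handler over the matching rmap chains. A simplified sketch, assuming era-appropriate helpers such as hva_to_gfn_memslot() and the PPC per-slot rmap array (details are assumptions):

/* Sketch of the iteration performed by kvm_handle_hva_range(). */
static int example_handle_hva_range(struct kvm *kvm,
				    unsigned long start, unsigned long end,
				    int (*handler)(struct kvm *kvm,
						   unsigned long *rmapp,
						   unsigned long gfn))
{
	struct kvm_memory_slot *memslot;
	int ret = 0;

	kvm_for_each_memslot(memslot, kvm_memslots(kvm)) {
		/* Clip [start, end) against this slot's userspace range. */
		unsigned long hva_start = max(start, memslot->userspace_addr);
		unsigned long hva_end = min(end, memslot->userspace_addr +
					    (memslot->npages << PAGE_SHIFT));
		unsigned long gfn, gfn_end;

		if (hva_start >= hva_end)
			continue;
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		for (; gfn < gfn_end; ++gfn)
			ret |= handler(kvm,
				       &memslot->arch.rmap[gfn - memslot->base_gfn],
				       gfn);
	}
	return ret;
}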
@@ -1011,8 +866,6 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
 	return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
 }
 
@@ -1049,15 +902,11 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return 0;
 	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
 }
 
 void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
-	if (!kvm->arch.using_mmu_notifiers)
-		return;
 	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
 }
 
@@ -1216,35 +1065,17 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	struct page *page, *pages[1];
 	int npages;
 	unsigned long hva, offset;
-	unsigned long pa;
-	unsigned long *physp;
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	memslot = gfn_to_memslot(kvm, gfn);
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
-	if (!kvm->arch.using_mmu_notifiers) {
-		physp = memslot->arch.slot_phys;
-		if (!physp)
-			goto err;
-		physp += gfn - memslot->base_gfn;
-		pa = *physp;
-		if (!pa) {
-			if (kvmppc_get_guest_page(kvm, gfn, memslot,
-						  PAGE_SIZE) < 0)
-				goto err;
-			pa = *physp;
-		}
-		page = pfn_to_page(pa >> PAGE_SHIFT);
-		get_page(page);
-	} else {
-		hva = gfn_to_hva_memslot(memslot, gfn);
-		npages = get_user_pages_fast(hva, 1, 1, pages);
-		if (npages < 1)
-			goto err;
-		page = pages[0];
-	}
+	hva = gfn_to_hva_memslot(memslot, gfn);
+	npages = get_user_pages_fast(hva, 1, 1, pages);
+	if (npages < 1)
+		goto err;
+	page = pages[0];
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 
 	offset = gpa & (PAGE_SIZE - 1);
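With the slot_phys path gone, kvmppc_pin_guest_page() always takes its reference through get_user_pages_fast(), so the pin is always an ordinary page refcount. A hedged usage sketch of the pin/unpin pairing (the surrounding variables buf and len are illustrative):

unsigned long nb;
void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);	/* kernel mapping of the page holding gpa */

if (va) {
	memcpy(va, buf, min(len, nb));			/* host writes into guest memory */
	kvmppc_unpin_guest_page(kvm, va, gpa, true);	/* dirty=true: mark the page in the rmap chain */
}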
@@ -1268,7 +1099,7 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
 
 	put_page(page);
 
-	if (!dirty || !kvm->arch.using_mmu_notifiers)
+	if (!dirty)
 		return;
 
 	/* We need to mark this page dirty in the rmap chain */
@@ -1668,10 +1499,7 @@ void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_206))
-		vcpu->arch.slb_nr = 32;		/* POWER7 */
-	else
-		vcpu->arch.slb_nr = 64;
+	vcpu->arch.slb_nr = 32;		/* POWER7/POWER8 */
 
 	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
 	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;