KVM: MMU: remove KVM host pv mmu support
The host-side pv mmu support was marked for feature removal in January
2011. It's not in use, is slower than shadow or hardware-assisted
paging, and is a maintenance burden. It's November 2011, time to
remove it.

Signed-off-by: Chris Wright <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
chriswright authored and avikivity committed Dec 27, 2011
1 parent 5202397 commit fb92045
Showing 4 changed files with 0 additions and 169 deletions.
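
For orientation before the hunks: KVM_HC_MMU_OP let a paravirtualized guest
batch page-table updates into a buffer and hand them to the host in one
hypercall, instead of taking a write-protection fault per PTE write. On the
guest side the feature was probed via the KVM CPUID features leaf; a minimal
sketch of that probe, assuming the standard kvm_para helpers (setup_pv_mmu()
is a hypothetical hook, not a real function):

	/* Guest-side probe, roughly as the old arch/x86/kernel/kvm.c did it;
	 * by the time of this commit the host no longer advertised
	 * KVM_FEATURE_MMU_OP, so this always took the fallback path. */
	if (kvm_para_available() && kvm_para_has_feature(KVM_FEATURE_MMU_OP))
		setup_pv_mmu();		/* hypothetical setup hook */
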
9 changes: 0 additions & 9 deletions Documentation/feature-removal-schedule.txt
@@ -362,15 +362,6 @@ Who: anybody or Florian Mickler <[email protected]>

----------------------------

What:	KVM paravirt mmu host support
When:	January 2011
Why:	The paravirt mmu host support is slower than non-paravirt mmu, both
	on newer and older hardware.  It is already not exposed to the guest,
	and kept only for live migration purposes.
Who:	Avi Kivity <[email protected]>

----------------------------

What:	iwlwifi 50XX module parameters
When:	3.0
Why:	The "..50" modules parameters were used to configure 5000 series and
13 changes: 0 additions & 13 deletions arch/x86/include/asm/kvm_host.h
@@ -244,13 +244,6 @@ struct kvm_mmu_page {
	struct rcu_head rcu;
};

struct kvm_pv_mmu_op_buffer {
	void *ptr;
	unsigned len;
	unsigned processed;
	char buf[512] __aligned(sizeof(long));
};

struct kvm_pio_request {
	unsigned long count;
	int in;
@@ -347,10 +340,6 @@ struct kvm_vcpu_arch {
	 */
	struct kvm_mmu *walk_mmu;

	/* only needed in kvm_pv_mmu_op() path, but it's hot so
	 * put it here to avoid allocation */
	struct kvm_pv_mmu_op_buffer mmu_op_buffer;

	struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
	struct kvm_mmu_memory_cache mmu_page_cache;
	struct kvm_mmu_memory_cache mmu_page_header_cache;
@@ -667,8 +656,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
			const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;
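
For context when reading the mmu.c hunk below: the operation stream that
kvm_pv_mmu_op_one() decodes was defined in arch/x86/include/asm/kvm_para.h,
which this commit does not touch. A reconstruction from memory follows; treat
the exact layout as approximate, it is not part of this diff:

	/* Operations understood by KVM_HC_MMU_OP; each record in the guest's
	 * buffer is a kvm_mmu_op_header followed by op-specific payload. */
	#define KVM_MMU_OP_WRITE_PTE	1
	#define KVM_MMU_OP_FLUSH_TLB	2
	#define KVM_MMU_OP_RELEASE_PT	3

	struct kvm_mmu_op_header {
		__u32 op;	/* one of KVM_MMU_OP_* */
		__u32 pad;
	};

	struct kvm_mmu_op_write_pte {
		struct kvm_mmu_op_header header;
		__u64 pte_phys;	/* guest-physical address of the pte */
		__u64 pte_val;	/* value to install */
	};

	struct kvm_mmu_op_flush_tlb {
		struct kvm_mmu_op_header header;
	};

	struct kvm_mmu_op_release_pt {
		struct kvm_mmu_op_header header;
		__u64 pt_phys;	/* guest-physical address of the page table */
	};
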
135 changes: 0 additions & 135 deletions arch/x86/kvm/mmu.c
@@ -2028,20 +2028,6 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: zap %llx %x\n",
			 __func__, gfn, sp->role.word);
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn);
@@ -4004,127 +3990,6 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
	return nr_mmu_pages;
}

static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			    gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
	if (r)
		goto out;

	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer->processed;
	return r;
}

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
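
The hunk above is the host half of the protocol: kvm_pv_mmu_op() copies at
most 512 bytes of the guest's buffer, processes whole records until one fails
or the buffer runs out, and reports the number of bytes consumed through
*ret. The guest half had already been removed from arch/x86/kernel/kvm.c by
an earlier commit; a rough reconstruction of its submit loop, shown here only
to make the *ret contract concrete, not part of this diff:

	/* Sketch of the old guest-side submit loop: keep resubmitting the
	 * unconsumed tail until the host has processed the whole batch. */
	static void kvm_mmu_op(void *buffer, unsigned len)
	{
		int r;
		unsigned long a1, a2;

		do {
			a1 = __pa(buffer);
			a2 = 0;		/* low-memory buffers sit below 4G on i386 */
			r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
			buffer += r;	/* r = bytes the host processed */
			len -= r;
		} while (len);
	}
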
12 changes: 0 additions & 12 deletions arch/x86/kvm/x86.c
@@ -5273,15 +5273,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);

static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
			   unsigned long a1)
{
	if (is_long_mode(vcpu))
		return a0;
	else
		return a0 | ((gpa_t)a1 << 32);
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	u64 param, ingpa, outgpa, ret;
@@ -5377,9 +5368,6 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
	case KVM_HC_VAPIC_POLL_IRQ:
		ret = 0;
		break;
	case KVM_HC_MMU_OP:
		r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
		break;
	default:
		ret = -KVM_ENOSYS;
		break;
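
Reading the two x86.c hunks together: for KVM_HC_MMU_OP the guest passed
(bytes, gpa_low, gpa_high) in a0/a1/a2, and hc_gpa() reassembled the 64-bit
buffer address for guests not in long mode. A hypothetical worked example,
with invented values for illustration:

	/* A 32-bit PAE guest with its op buffer at guest-physical
	 * 0x123456000 cannot pass that address in one 32-bit register,
	 * so it splits it across a1/a2:
	 *
	 *	kvm_hypercall3(KVM_HC_MMU_OP, 24, 0x23456000, 0x1);
	 *
	 * On the host, hc_gpa(vcpu, a1, a2) rebuilds
	 *	0x23456000 | ((gpa_t)0x1 << 32) == 0x123456000,
	 * while a 64-bit guest passes the full address in a1 and hc_gpa()
	 * returns it unchanged. */
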
