Skip to content

Commit

Permalink
Merge branch 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
Browse files Browse the repository at this point in the history

* 'kvm-updates-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: ppc: fix invalidation of large guest pages
  KVM: s390: Fix possible host kernel bug on lctl(g) handling
  KVM: s390: Fix instruction naming for lctlg
  KVM: s390: Fix program check on interrupt delivery handling
  KVM: s390: Change guestaddr type in gaccess
  KVM: s390: Fix guest kconfig
  KVM: s390: Advertise KVM_CAP_USER_MEMORY
  KVM: ia64: Fix irq disabling leak in error handling code
  KVM: VMX: Fix undefined behaviour of EPT after reload kvm-intel.ko
  KVM: VMX: Fix bypass_guest_pf enabling when disable EPT in module parameter
  KVM: task switch: translate guest segment limit to virt-extension byte granular field
  KVM: Avoid instruction emulation when event delivery is pending
  KVM: task switch: use seg regs provided by subarch instead of reading from GDT
  KVM: task switch: segment base is linear address
  KVM: SVM: allow enabling/disabling NPT by reloading only the architecture module
  • Loading branch information
torvalds committed Jul 27, 2008
2 parents 6948385 + cc04454 commit b0d8aa0
Show file tree
Hide file tree
Showing 16 changed files with 143 additions and 138 deletions.
5 changes: 3 additions & 2 deletions arch/ia64/kvm/kvm-ia64.c
Original file line number Diff line number Diff line change
Expand Up @@ -125,9 +125,9 @@ void kvm_arch_hardware_enable(void *garbage)
PAGE_KERNEL));
local_irq_save(saved_psr);
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
return;
local_irq_restore(saved_psr);

spin_lock(&vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
Expand Down Expand Up @@ -160,9 +160,9 @@ void kvm_arch_hardware_disable(void *garbage)

local_irq_save(saved_psr);
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
return;
local_irq_restore(saved_psr);

status = ia64_pal_vp_exit_env(host_iva);
if (status)
Expand Down Expand Up @@ -1253,6 +1253,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
uninit:
kvm_vcpu_uninit(vcpu);
fail:
local_irq_restore(psr);
return r;
}

Expand Down
5 changes: 3 additions & 2 deletions arch/powerpc/kvm/44x_tlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
vcpu->arch.msr & MSR_PR);
}

void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
gva_t eend, u32 asid)
{
unsigned int pid = asid & 0xff;
int i;
Expand All @@ -191,7 +192,7 @@ void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
if (!get_tlb_v(stlbe))
continue;

if (eaddr < get_tlb_eaddr(stlbe))
if (eend < get_tlb_eaddr(stlbe))
continue;

if (eaddr > get_tlb_end(stlbe))
Expand Down
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/emulate.c
Original file line number Diff line number Diff line change
Expand Up @@ -137,7 +137,7 @@ static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
if (tlbe->word0 & PPC44x_TLB_VALID) {
eaddr = get_tlb_eaddr(tlbe);
asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
kvmppc_mmu_invalidate(vcpu, eaddr, asid);
kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
}

switch (ws) {
Expand Down
62 changes: 33 additions & 29 deletions arch/s390/kvm/gaccess.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,11 +18,11 @@
#include <asm/uaccess.h>

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
u64 guestaddr)
unsigned long guestaddr)
{
u64 prefix = vcpu->arch.sie_block->prefix;
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;

if (guestaddr < 2 * PAGE_SIZE)
guestaddr += prefix;
Expand All @@ -37,7 +37,7 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
return (void __user *) guestaddr;
}

static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u64 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -47,10 +47,10 @@ static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);

return get_user(*result, (u64 __user *) uptr);
return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u32 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -63,7 +63,7 @@ static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u16 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -76,7 +76,7 @@ static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u8 *result)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -87,7 +87,7 @@ static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u64 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -100,7 +100,7 @@ static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u32 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -113,7 +113,7 @@ static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u16 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -126,7 +126,7 @@ static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
u8 value)
{
void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
Expand All @@ -138,7 +138,8 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
}


static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
{
int rc;
Expand All @@ -153,12 +154,12 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
return 0;
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
const void *from, unsigned long n)
{
u64 prefix = vcpu->arch.sie_block->prefix;
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;

if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
Expand Down Expand Up @@ -189,7 +190,8 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
}

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
u64 guestsrc, unsigned long n)
unsigned long guestsrc,
unsigned long n)
{
int rc;
unsigned long i;
Expand All @@ -204,11 +206,11 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
u64 guestsrc, unsigned long n)
unsigned long guestsrc, unsigned long n)
{
u64 prefix = vcpu->arch.sie_block->prefix;
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;

if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
goto slowpath;
Expand Down Expand Up @@ -238,11 +240,12 @@ static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
{
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;

if (guestdest + n > memsize)
return -EFAULT;
Expand All @@ -256,10 +259,11 @@ static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
u64 guestsrc, unsigned long n)
unsigned long guestsrc,
unsigned long n)
{
u64 origin = vcpu->kvm->arch.guest_origin;
u64 memsize = vcpu->kvm->arch.guest_memsize;
unsigned long origin = vcpu->kvm->arch.guest_origin;
unsigned long memsize = vcpu->kvm->arch.guest_memsize;

if (guestsrc + n > memsize)
return -EFAULT;
Expand Down
14 changes: 10 additions & 4 deletions arch/s390/kvm/intercept.c
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
#include "kvm-s390.h"
#include "gaccess.h"

static int handle_lctg(struct kvm_vcpu *vcpu)
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
Expand All @@ -30,17 +30,20 @@ static int handle_lctg(struct kvm_vcpu *vcpu)
u64 useraddr;
int reg, rc;

vcpu->stat.instruction_lctg++;
vcpu->stat.instruction_lctlg++;
if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
return -ENOTSUPP;

useraddr = disp2;
if (base2)
useraddr += vcpu->arch.guest_gprs[base2];

if (useraddr & 7)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

reg = reg1;

VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
disp2);

do {
Expand Down Expand Up @@ -74,6 +77,9 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
if (base2)
useraddr += vcpu->arch.guest_gprs[base2];

if (useraddr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
disp2);

Expand All @@ -99,7 +105,7 @@ static intercept_handler_t instruction_handlers[256] = {
[0xae] = kvm_s390_handle_sigp,
[0xb2] = kvm_s390_handle_priv,
[0xb7] = handle_lctl,
[0xeb] = handle_lctg,
[0xeb] = handle_lctlg,
};

static int handle_noop(struct kvm_vcpu *vcpu)
Expand Down
21 changes: 7 additions & 14 deletions arch/s390/kvm/interrupt.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
#include <asm/lowcore.h>
#include <asm/uaccess.h>
#include <linux/kvm_host.h>
#include <linux/signal.h>
#include "kvm-s390.h"
#include "gaccess.h"

Expand Down Expand Up @@ -246,15 +247,10 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
default:
BUG();
}

if (exception) {
VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
" interrupt");
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
if (inti->type == KVM_S390_PROGRAM_INT) {
printk(KERN_WARNING "kvm: recursive program check\n");
BUG();
}
printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n");
do_exit(SIGKILL);
}
}

Expand All @@ -277,14 +273,11 @@ static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
__LC_EXT_NEW_PSW, sizeof(psw_t));
if (rc == -EFAULT)
exception = 1;

if (exception) {
VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
" ckc interrupt");
kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
return 0;
printk("kvm: The guest lowcore is not mapped during interrupt "
"delivery, killing userspace\n");
do_exit(SIGKILL);
}

return 1;
}

Expand Down
9 changes: 7 additions & 2 deletions arch/s390/kvm/kvm-s390.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exit_instruction", VCPU_STAT(exit_instruction) },
{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
Expand Down Expand Up @@ -112,7 +112,12 @@ long kvm_arch_dev_ioctl(struct file *filp,

int kvm_dev_ioctl_check_extension(long ext)
{
return 0;
switch (ext) {
case KVM_CAP_USER_MEMORY:
return 1;
default:
return 0;
}
}

/* Section: vm related */
Expand Down
5 changes: 3 additions & 2 deletions arch/s390/kvm/sigp.c
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,8 @@
#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL


static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
unsigned long *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
int rc;
Expand Down Expand Up @@ -167,7 +168,7 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
}

static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
u64 *reg)
unsigned long *reg)
{
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
Expand Down
7 changes: 7 additions & 0 deletions arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -1814,6 +1814,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
spin_unlock(&vcpu->kvm->mmu_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
Expand Down Expand Up @@ -1870,6 +1871,12 @@ void kvm_enable_tdp(void)
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
struct kvm_mmu_page *sp;
Expand Down
Loading

0 comments on commit b0d8aa0

Please sign in to comment.