KVM: Let KVM_SET_SIGNAL_MASK work as advertised
The KVM API says, for the signal mask you set via KVM_SET_SIGNAL_MASK, that
"any unblocked signal received [...] will cause KVM_RUN to return with
-EINTR" and that "the signal will only be delivered if not blocked by
the original signal mask".

This, however, is only true when the calling task has a signal handler
registered for the signal. If not, signal evaluation is short-circuited for
SIG_IGN and SIG_DFL, and the signal is either ignored without KVM_RUN
returning, or the whole process is terminated.

Make KVM_SET_SIGNAL_MASK behave as advertised by utilizing logic similar
to that in do_sigtimedwait() to avoid short-circuiting of signals.

Signed-off-by: Jan H. Schönherr <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
schnhrr authored and bonzini committed Nov 27, 2017
1 parent b745582 · commit 20b7035
Showing 7 changed files with 37 additions and 25 deletions.
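For context on how userspace exercises this path (editorial note, not part of the commit): a VMM typically keeps its "kick" signal blocked in the vCPU thread and hands KVM a mask that unblocks it only for the duration of KVM_RUN. A minimal sketch in C, assuming a vcpu_fd obtained via KVM_CREATE_VCPU and a hypothetical kick_signum such as SIGUSR1; len = 8 assumes the kernel sigset size of a 64-bit build (the same assumption QEMU makes), and error handling is elided:

/*
 * Hedged sketch (not part of the commit): arrange for a kick signal to
 * interrupt KVM_RUN with -EINTR, per the API text quoted in the commit
 * message. vcpu_fd and kick_signum are placeholders.
 */
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int setup_vcpu_signal_mask(int vcpu_fd, int kick_signum)
{
	sigset_t blocked, run_mask;
	struct kvm_signal_mask *kmask;
	int ret;

	/* Keep the kick signal blocked while the thread runs normal code. */
	sigemptyset(&blocked);
	sigaddset(&blocked, kick_signum);
	pthread_sigmask(SIG_BLOCK, &blocked, NULL);

	/* Mask handed to KVM: block everything except the kick signal, so it
	 * is unblocked only while KVM_RUN is executing. */
	sigfillset(&run_mask);
	sigdelset(&run_mask, kick_signum);

	kmask = malloc(sizeof(*kmask) + 8);
	kmask->len = 8;				/* kernel sigset size on 64-bit */
	memcpy(kmask->sigset, &run_mask, 8);	/* low 64 bits of the glibc set */
	ret = ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, kmask);
	free(kmask);
	return ret;
}

Another thread can then kick the vCPU with pthread_kill(vcpu_thread, kick_signum). With this commit applied, KVM_RUN returns -1 with errno == EINTR even if kick_signum has no handler installed, instead of the signal being silently dropped (SIG_IGN) or terminating the whole process (SIG_DFL), as described in the commit message.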
7 changes: 2 additions & 5 deletions arch/mips/kvm/mips.c
@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r = -EINTR;
-	sigset_t sigsaved;

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	if (vcpu->mmio_needed) {
 		if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	local_irq_enable();

 out:
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);

 	return r;
 }
7 changes: 2 additions & 5 deletions arch/powerpc/kvm/powerpc.c
@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r;
-	sigset_t sigsaved;

 	if (vcpu->mmio_needed) {
 		vcpu->mmio_needed = 0;
@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 #endif
 	}

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	if (run->immediate_exit)
 		r = -EINTR;
 	else
 		r = kvmppc_vcpu_run(run, vcpu);

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);

 	return r;
 }
7 changes: 2 additions & 5 deletions arch/s390/kvm/kvm-s390.c
@@ -3372,7 +3372,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int rc;
-	sigset_t sigsaved;

 	if (kvm_run->immediate_exit)
 		return -EINTR;
@@ -3382,8 +3381,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 0;
 	}

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
 		kvm_s390_vcpu_start(vcpu);
@@ -3417,8 +3415,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	disable_cpu_timer_accounting(vcpu);
 	store_regs(vcpu, kvm_run);

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);

 	vcpu->stat.exit_userspace++;
 	return rc;
7 changes: 2 additions & 5 deletions arch/x86/kvm/x86.c
@@ -7267,12 +7267,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct fpu *fpu = &current->thread.fpu;
 	int r;
-	sigset_t sigsaved;

 	fpu__initialize(fpu);

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
 		if (kvm_run->immediate_exit) {
@@ -7315,8 +7313,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

 out:
 	post_kvm_run_save(vcpu);
-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);

 	return r;
 }
3 changes: 3 additions & 0 deletions include/linux/kvm_host.h
@@ -715,6 +715,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
 			 unsigned long len);
 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
8 changes: 3 additions & 5 deletions virt/kvm/arm/arm.c
@@ -615,7 +615,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
-	sigset_t sigsaved;

 	if (unlikely(!kvm_vcpu_initialized(vcpu)))
 		return -ENOEXEC;
@@ -633,8 +632,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (run->immediate_exit)
 		return -EINTR;

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+	kvm_sigset_activate(vcpu);

 	ret = 1;
 	run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -769,8 +767,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		kvm_pmu_update_run(vcpu);
 	}

-	if (vcpu->sigset_active)
-		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+	kvm_sigset_deactivate(vcpu);
+
 	return ret;
 }

23 changes: 23 additions & 0 deletions virt/kvm/kvm_main.c
@@ -2065,6 +2065,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);

+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->sigset_active)
+		return;
+
+	/*
+	 * This does a lockless modification of ->real_blocked, which is fine
+	 * because, only current can change ->real_blocked and all readers of
+	 * ->real_blocked don't care as long ->real_blocked is always a subset
+	 * of ->blocked.
+	 */
+	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+	if (!vcpu->sigset_active)
+		return;
+
+	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+	sigemptyset(&current->real_blocked);
+}
+
 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
 {
 	unsigned int old, val, grow;
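Editorial note on why parking the original mask in ->real_blocked defeats the SIG_IGN/SIG_DFL short-circuit: the generic signal code of this era treats a signal blocked in either ->blocked or ->real_blocked as never ignored, so it stays pending and makes signal_pending() true instead of being dropped or triggering its default action; kvm_sigset_deactivate() then restores the caller's mask before the signal can be delivered. The same do_sigtimedwait() behavior the commit message cites can be observed from userspace; a minimal sketch (editorial illustration, not part of the commit; SIGUSR1 deliberately keeps its default terminate action):

/*
 * Editorial illustration: sigtimedwait() temporarily unblocks the waited-for
 * signal in the kernel while remembering the original mask in ->real_blocked,
 * so a handler-less SIGUSR1 (default action: terminate) arriving mid-wait is
 * reported instead of killing the process -- the same trick
 * kvm_sigset_activate()/kvm_sigset_deactivate() now reuse around KVM_RUN.
 */
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };
	pid_t parent = getpid();

	/* Block SIGUSR1; its disposition stays SIG_DFL (no handler). */
	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (fork() == 0) {
		/* Child: deliver SIGUSR1 while the parent sits in
		 * sigtimedwait(), i.e. while it is temporarily unblocked. */
		sleep(1);
		kill(parent, SIGUSR1);
		_exit(0);
	}

	/* Returns SIGUSR1 rather than terminating the process. */
	int sig = sigtimedwait(&set, &info, &timeout);
	printf("sigtimedwait() returned %d (SIGUSR1 == %d)\n", sig, SIGUSR1);
	return 0;
}

kvm_sigset_activate() achieves the save step in a single call by passing &current->real_blocked as the old-set argument of sigprocmask(), and kvm_sigset_deactivate() mirrors the tail of do_sigtimedwait() by restoring that mask and clearing ->real_blocked.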
