KVM: Allow adjust_tsc_offset to be in host or guest cycles
Redefine the API to take a parameter indicating whether an
adjustment is in host or guest cycles.

Signed-off-by: Zachary Amsden <[email protected]>
Signed-off-by: Marcelo Tosatti <[email protected]>
Signed-off-by: Avi Kivity <[email protected]>
matosatti authored and avikivity committed Mar 8, 2012
1 parent 6f526ec commit f1e2b26
Showing 4 changed files with 19 additions and 4 deletions.
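
As an editorial aside (not part of the patch), the standalone C model below illustrates the distinction the new parameter encodes: an adjustment measured in host cycles must be scaled into guest cycles before it is folded into the guest-visible TSC offset, while an adjustment already in guest cycles is applied directly. In the patch itself the scaling is done by svm_scale_tsc() on SVM, and the VMX backend simply ignores the flag; the struct, field names, and the 0.5 ratio here are invented for illustration only.

/*
 * Standalone model of the host/guest cycle distinction -- editorial
 * sketch, not kernel code.  Names and the example ratio are invented.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu_model {
	int64_t tsc_offset;  /* guest_tsc = scaled(host_tsc) + tsc_offset */
	double  tsc_ratio;   /* guest cycles per host cycle (TSC scaling) */
};

/* Mirrors the new hook shape: the bool names the unit of 'adjustment'. */
static void adjust_tsc_offset(struct vcpu_model *v, int64_t adjustment, bool host)
{
	if (host)
		/* Host-cycle deltas are converted to guest cycles first. */
		adjustment = (int64_t)(adjustment * v->tsc_ratio);

	v->tsc_offset += adjustment;
}

int main(void)
{
	struct vcpu_model v = { .tsc_offset = 0, .tsc_ratio = 0.5 };

	adjust_tsc_offset(&v, 1000, true);   /* host cycles: scaled to 500  */
	adjust_tsc_offset(&v, 1000, false);  /* guest cycles: applied as-is */

	printf("tsc_offset = %lld\n", (long long)v.tsc_offset);  /* 1500 */
	return 0;
}
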
13 changes: 12 additions & 1 deletion arch/x86/include/asm/kvm_host.h
@@ -646,7 +646,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
-	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -676,6 +676,17 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+					   s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
6 changes: 5 additions & 1 deletion arch/x86/kvm/svm.c
@@ -1016,10 +1016,14 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	WARN_ON(adjustment < 0);
+	if (host)
+		adjustment = svm_scale_tsc(vcpu, adjustment);
+
 	svm->vmcb->control.tsc_offset += adjustment;
 	if (is_guest_mode(vcpu))
 		svm->nested.hsave->control.tsc_offset += adjustment;
2 changes: 1 addition & 1 deletion arch/x86/kvm/vmx.c
@@ -1856,7 +1856,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
 	u64 offset = vmcs_read64(TSC_OFFSET);
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
2 changes: 1 addition & 1 deletion arch/x86/kvm/x86.c
@@ -1116,7 +1116,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	if (vcpu->tsc_catchup) {
 		u64 tsc = compute_guest_tsc(v, kernel_ns);
 		if (tsc > tsc_timestamp) {
-			kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
+			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
 			tsc_timestamp = tsc;
 		}
 	}
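
The x86.c hunk above is the one existing call site converted by the patch: the catch-up delta is produced by compute_guest_tsc(), so it is already expressed in guest cycles and therefore uses the new _guest wrapper, which passes host = false and skips any scaling. Below is a hedged standalone sketch of that reasoning; the linear model standing in for compute_guest_tsc(), the names, and the 2 GHz figure are all invented for the example.

/*
 * Editorial sketch of the catch-up path, not the kernel implementation.
 * A simple linear model stands in for compute_guest_tsc().
 */
#include <stdint.h>
#include <stdio.h>

static int64_t guest_delta;                    /* would feed adjust_tsc_offset_guest() */
static const uint64_t guest_tsc_khz = 2000000; /* advertised guest TSC: 2 GHz */

static uint64_t compute_guest_tsc_model(uint64_t elapsed_ns)
{
	/* Guest TSC value expected after elapsed_ns at the advertised rate. */
	return elapsed_ns * guest_tsc_khz / 1000000;
}

static void catchup(uint64_t elapsed_ns, uint64_t current_guest_tsc)
{
	uint64_t want = compute_guest_tsc_model(elapsed_ns);

	/* The difference is in guest cycles already: no host->guest scaling. */
	if (want > current_guest_tsc)
		guest_delta += (int64_t)(want - current_guest_tsc);
}

int main(void)
{
	catchup(1000000 /* 1 ms */, 1500000);
	printf("guest-cycle delta = %lld\n", (long long)guest_delta);  /* 500000 */
	return 0;
}
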
