KVM: arm64: vgic-v3: Take cpu_if pointer directly instead of vcpu
If we move the used_lrs field to the version-specific cpu interface
structure, the following functions only operate on the struct
vgic_v3_cpu_if and not the full vcpu:

  __vgic_v3_save_state
  __vgic_v3_restore_state
  __vgic_v3_activate_traps
  __vgic_v3_deactivate_traps
  __vgic_v3_save_aprs
  __vgic_v3_restore_aprs

This is going to be very useful for nested virt, so move the used_lrs
field and change the prototypes and implementations of these functions to
take the cpu_if parameter directly.

No functional change.

Reviewed-by: James Morse <[email protected]>
Signed-off-by: Christoffer Dall <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
christofferdall-arm authored and Marc Zyngier committed May 28, 2020
1 parent 0a78791 commit fc5d1f1
Showing 7 changed files with 54 additions and 53 deletions.
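The core of the patch is a parameter-narrowing refactor: each helper takes the version-specific interface struct it actually touches instead of the whole vcpu. A minimal standalone sketch of the pattern, with heavily simplified stand-in types (the real structures carry many more fields):

#include <stdio.h>

/* Heavily simplified stand-ins for the kernel structures. */
struct vgic_v3_cpu_if {
        unsigned int used_lrs;  /* now lives in the per-version struct */
};

struct vgic_cpu {
        struct vgic_v3_cpu_if vgic_v3;
};

struct kvm_vcpu {
        struct {
                struct vgic_cpu vgic_cpu;
        } arch;
};

/* Before: the helper needed the whole vcpu just to reach one member. */
static void save_state_old(struct kvm_vcpu *vcpu)
{
        printf("old: %u LRs in use\n", vcpu->arch.vgic_cpu.vgic_v3.used_lrs);
}

/* After: the helper sees only the interface struct, so the caller is
 * free to hand it any vgic_v3_cpu_if, not just the vcpu's own one. */
static void save_state_new(struct vgic_v3_cpu_if *cpu_if)
{
        printf("new: %u LRs in use\n", cpu_if->used_lrs);
}

int main(void)
{
        struct kvm_vcpu vcpu = { .arch.vgic_cpu.vgic_v3.used_lrs = 4 };

        save_state_old(&vcpu);
        save_state_new(&vcpu.arch.vgic_cpu.vgic_v3);
        return 0;
}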
12 changes: 6 additions & 6 deletions arch/arm64/include/asm/kvm_hyp.h
@@ -56,12 +56,12 @@
 
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
+void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __timer_enable_traps(struct kvm_vcpu *vcpu);
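Why this matters for nested virt: once the prototypes take a cpu_if, the same hyp helpers can be pointed at an interface other than the vcpu's own, for example a shadow interface maintained for a nested guest. A hedged sketch; the 'shadow' field and the nested flag are invented for illustration and do not exist in this commit:

#include <stdio.h>

struct vgic_v3_cpu_if {
        unsigned int used_lrs;
};

/* 'shadow' and 'nested' are hypothetical; this commit only makes
 * supporting them possible later. */
struct kvm_vcpu {
        struct vgic_v3_cpu_if own;
        struct vgic_v3_cpu_if shadow;
        int nested;
};

static void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
        printf("saving %u LRs\n", cpu_if->used_lrs);
}

int main(void)
{
        struct kvm_vcpu vcpu = {
                .own = { .used_lrs = 2 },
                .shadow = { .used_lrs = 3 },
                .nested = 1,
        };

        /* The caller, not the helper, decides which interface is live. */
        __vgic_v3_save_state(vcpu.nested ? &vcpu.shadow : &vcpu.own);
        return 0;
}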
8 changes: 4 additions & 4 deletions arch/arm64/kvm/hyp/switch.c
@@ -270,17 +270,17 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 {
         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-                __vgic_v3_save_state(vcpu);
-                __vgic_v3_deactivate_traps(vcpu);
+                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
+                __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
         }
 }
 
 /* Restore VGICv3 state on non_VHE systems */
 static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
         if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-                __vgic_v3_activate_traps(vcpu);
-                __vgic_v3_restore_state(vcpu);
+                __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
+                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
         }
 }
 
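The updated call sites pass &vcpu->arch.vgic_cpu.vgic_v3; nothing is copied, the expression is plain pointer arithmetic into the vcpu. A small sketch (stand-in types again) making that explicit:

#include <stddef.h>
#include <stdio.h>

struct vgic_v3_cpu_if { unsigned int used_lrs; };
struct vgic_cpu { struct vgic_v3_cpu_if vgic_v3; };
struct kvm_vcpu { struct { struct vgic_cpu vgic_cpu; } arch; };

int main(void)
{
        struct kvm_vcpu vcpu = { 0 };
        struct vgic_v3_cpu_if *cpu_if = &vcpu.arch.vgic_cpu.vgic_v3;

        /* The new argument is just the vcpu plus a constant offset. */
        printf("vgic_v3 offset inside kvm_vcpu: %zu bytes\n",
               offsetof(struct kvm_vcpu, arch.vgic_cpu.vgic_v3));
        printf("cpu_if == vcpu + offset: %d\n",
               (char *)cpu_if == (char *)&vcpu +
               offsetof(struct kvm_vcpu, arch.vgic_cpu.vgic_v3));
        return 0;
}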
33 changes: 10 additions & 23 deletions arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -194,10 +194,9 @@ static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
         return val;
 }
 
-void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
 
         /*
          * Make sure stores to the GIC via the memory mapped interface
@@ -230,10 +229,9 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
         }
 }
 
-void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
         int i;
 
         if (used_lrs || cpu_if->its_vpe.its_vm) {
@@ -257,10 +255,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
         }
 }
 
-void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
         /*
          * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
          * Group0 interrupt (as generated in GICv2 mode) to be
@@ -306,9 +302,8 @@ void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
         write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 }
 
-void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
         u64 val;
 
         if (!cpu_if->vgic_sre) {
@@ -333,15 +328,11 @@ void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
         write_gicreg(0, ICH_HCR_EL2);
 }
 
-void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if;
         u64 val;
         u32 nr_pre_bits;
 
-        vcpu = kern_hyp_va(vcpu);
-        cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
         val = read_gicreg(ICH_VTR_EL2);
         nr_pre_bits = vtr_to_nr_pre_bits(val);
 
@@ -370,15 +361,11 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
         }
 }
 
-void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
-        struct vgic_v3_cpu_if *cpu_if;
         u64 val;
         u32 nr_pre_bits;
 
-        vcpu = kern_hyp_va(vcpu);
-        cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
         val = read_gicreg(ICH_VTR_EL2);
         nr_pre_bits = vtr_to_nr_pre_bits(val);
 
@@ -451,7 +438,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
                                                     u32 vmcr,
                                                     u64 *lr_val)
 {
-        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
         u8 priority = GICv3_IDLE_PRIORITY;
         int i, lr = -1;
 
@@ -490,7 +477,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
 static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
                                                int intid, u64 *lr_val)
 {
-        unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
         int i;
 
         for (i = 0; i < used_lrs; i++) {
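__vgic_v3_save_aprs() and __vgic_v3_restore_aprs() used to translate the vcpu pointer with kern_hyp_va() themselves; now that they only receive a cpu_if, the translation happens once at the call site (see vgic_v3_load()/vgic_v3_put() below). A toy model of that division of labour, with an identity translation standing in for the real VA rewrite:

#include <stdio.h>

struct vgic_v3_cpu_if { unsigned int used_lrs; };

/* Toy translation: the real kern_hyp_va() rewrites a kernel VA into
 * the EL2 (hyp) mapping; identity is good enough for the sketch. */
static struct vgic_v3_cpu_if *kern_hyp_va(struct vgic_v3_cpu_if *p)
{
        return p;
}

/* The callee no longer translates: it cannot, since it has no vcpu. */
static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        printf("APRs saved, %u LRs in use\n", cpu_if->used_lrs);
}

int main(void)
{
        struct vgic_v3_cpu_if cpu_if = { .used_lrs = 2 };

        /* Caller-side translation, as the updated vgic_v3_put() does. */
        __vgic_v3_save_aprs(kern_hyp_va(&cpu_if));
        return 0;
}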
10 changes: 5 additions & 5 deletions arch/arm64/kvm/vgic/vgic-v2.c
@@ -56,7 +56,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
         cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
-        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+        for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
                 u32 val = cpuif->vgic_lr[lr];
                 u32 cpuid, intid = val & GICH_LR_VIRTUALID;
                 struct vgic_irq *irq;
@@ -120,7 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                 vgic_put_irq(vcpu->kvm, irq);
         }
 
-        vgic_cpu->used_lrs = 0;
+        cpuif->used_lrs = 0;
 }
 
 /*
@@ -427,7 +427,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 {
         struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
         u64 elrsr;
         int i;
 
@@ -448,7 +448,7 @@ static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 void vgic_v2_save_state(struct kvm_vcpu *vcpu)
 {
         void __iomem *base = kvm_vgic_global_state.vctrl_base;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
 
         if (!base)
                 return;
@@ -463,7 +463,7 @@ void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
 {
         struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
         void __iomem *base = kvm_vgic_global_state.vctrl_base;
-        u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+        u64 used_lrs = cpu_if->used_lrs;
         int i;
 
         if (!base)
14 changes: 8 additions & 6 deletions arch/arm64/kvm/vgic/vgic-v3.c
@@ -39,7 +39,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 
         cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
-        for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+        for (lr = 0; lr < cpuif->used_lrs; lr++) {
                 u64 val = cpuif->vgic_lr[lr];
                 u32 intid, cpuid;
                 struct vgic_irq *irq;
@@ -111,7 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                 vgic_put_irq(vcpu->kvm, irq);
         }
 
-        vgic_cpu->used_lrs = 0;
+        cpuif->used_lrs = 0;
 }
 
 /* Requires the irq to be locked already */
@@ -662,10 +662,10 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
         if (likely(cpu_if->vgic_sre))
                 kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
 
-        kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
+        kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));
 
         if (has_vhe())
-                __vgic_v3_activate_traps(vcpu);
+                __vgic_v3_activate_traps(cpu_if);
 
         WARN_ON(vgic_v4_load(vcpu));
 }
@@ -680,12 +680,14 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 
 void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
+        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
         WARN_ON(vgic_v4_put(vcpu, false));
 
         vgic_v3_vmcr_sync(vcpu);
 
-        kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+        kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
 
         if (has_vhe())
-                __vgic_v3_deactivate_traps(vcpu);
+                __vgic_v3_deactivate_traps(cpu_if);
 }
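Note the has_vhe() split above: on VHE hosts the traps helpers run directly in the kernel with the kernel-VA cpu_if, while on non-VHE they are reached through kvm_call_hyp() with the hyp-VA pointer. A hedged sketch of just the VHE leg, with stand-in types and a stubbed has_vhe():

#include <stdbool.h>
#include <stdio.h>

struct vgic_v3_cpu_if { unsigned int used_lrs; };

static bool has_vhe(void) { return true; }      /* stand-in for the real check */

static void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
        (void)cpu_if;
        printf("traps deactivated\n");
}

/* Modeled on the updated vgic_v3_put(): on VHE the helper takes the
 * kernel-VA pointer directly; on non-VHE it would instead run at EL2
 * with a hyp-VA pointer, which this sketch does not model. */
static void vgic_v3_put(struct vgic_v3_cpu_if *cpu_if)
{
        if (has_vhe())
                __vgic_v3_deactivate_traps(cpu_if);
}

int main(void)
{
        struct vgic_v3_cpu_if cpu_if = { .used_lrs = 0 };

        vgic_v3_put(&cpu_if);
        return 0;
}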
25 changes: 17 additions & 8 deletions arch/arm64/kvm/vgic/vgic.c
@@ -786,6 +786,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
         int count;
         bool multi_sgi;
         u8 prio = 0xff;
+        int i = 0;
 
         lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
@@ -827,11 +828,14 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
                 }
         }
 
-        vcpu->arch.vgic_cpu.used_lrs = count;
-
         /* Nuke remaining LRs */
-        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
-                vgic_clear_lr(vcpu, count);
+        for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+                vgic_clear_lr(vcpu, i);
+
+        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
+        else
+                vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
 }
 
 static inline bool can_access_vgic_from_kernel(void)
@@ -849,13 +853,13 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
         if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                 vgic_v2_save_state(vcpu);
         else
-                __vgic_v3_save_state(vcpu);
+                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+        int used_lrs;
 
         /* An empty ap_list_head implies used_lrs == 0 */
         if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
@@ -864,7 +868,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
         if (can_access_vgic_from_kernel())
                 vgic_save_state(vcpu);
 
-        if (vgic_cpu->used_lrs)
+        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+                used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
+        else
+                used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+
+        if (used_lrs)
                 vgic_fold_lr_state(vcpu);
         vgic_prune_ap_list(vcpu);
 }
@@ -874,7 +883,7 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
         if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                 vgic_v2_restore_state(vcpu);
         else
-                __vgic_v3_restore_state(vcpu);
+                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
 /* Flush our emulation state into the GIC hardware before entering the guest. */
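The v2-versus-v3 selection of used_lrs now appears twice in this file. A helper along these lines would factor it out; this is hypothetical, the commit deliberately keeps the branches inline on the gicv3_cpuif static key:

#include <stdbool.h>
#include <stdio.h>

struct vgic_v2_cpu_if { unsigned int used_lrs; };
struct vgic_v3_cpu_if { unsigned int used_lrs; };

struct vgic_cpu {
        union {
                struct vgic_v2_cpu_if vgic_v2;
                struct vgic_v3_cpu_if vgic_v3;
        };
};

/* Plain bool standing in for static_branch_unlikely(&...gicv3_cpuif). */
static bool gicv3_cpuif = true;

static unsigned int vgic_used_lrs(struct vgic_cpu *vgic_cpu)
{
        if (!gicv3_cpuif)
                return vgic_cpu->vgic_v2.used_lrs;
        return vgic_cpu->vgic_v3.used_lrs;
}

int main(void)
{
        struct vgic_cpu vgic_cpu = { .vgic_v3 = { .used_lrs = 4 } };

        printf("used_lrs = %u\n", vgic_used_lrs(&vgic_cpu));
        return 0;
}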
5 changes: 4 additions & 1 deletion include/kvm/arm_vgic.h
@@ -274,6 +274,8 @@ struct vgic_v2_cpu_if {
         u32             vgic_vmcr;
         u32             vgic_apr;
         u32             vgic_lr[VGIC_V2_MAX_LRS];
+
+        unsigned int used_lrs;
 };
 
 struct vgic_v3_cpu_if {
@@ -291,6 +293,8 @@ struct vgic_v3_cpu_if {
          * linking the Linux IRQ subsystem and the ITS together.
          */
         struct its_vpe its_vpe;
+
+        unsigned int used_lrs;
 };
 
 struct vgic_cpu {
@@ -300,7 +304,6 @@ struct vgic_cpu {
                 struct vgic_v3_cpu_if  vgic_v3;
         };
 
-        unsigned int used_lrs;
         struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
 
         raw_spinlock_t ap_list_lock;    /* Protects the ap_list */
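For reference, the post-patch shape of the structures, abridged to the fields this commit touches (everything else elided):

/* Abridged post-patch layout; untouched fields elided. */
struct vgic_v2_cpu_if {
        /* ... vgic_vmcr, vgic_apr, vgic_lr[] ... */
        unsigned int used_lrs;          /* moved here */
};

struct vgic_v3_cpu_if {
        /* ... vgic_lr[], its_vpe ... */
        unsigned int used_lrs;          /* moved here */
};

struct vgic_cpu {
        union {
                struct vgic_v2_cpu_if vgic_v2;
                struct vgic_v3_cpu_if vgic_v3;
        };
        /* used_lrs no longer lives here */
};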
