
Commit 7d6c40d

paulusmack authored and agraf committed
KVM: PPC: Book3S HV: Use bitmap of active threads rather than count
Currently, the entry_exit_count field in the kvmppc_vcore struct contains two 8-bit counts, one of the threads that have started entering the guest, and one of the threads that have started exiting the guest. This changes it to an entry_exit_map field which contains two bitmaps of 8 bits each.

The advantage of doing this is that it gives us a bitmap of which threads need to be signalled when exiting the guest. That means that we no longer need to use the trick of setting the HDEC to 0 to pull the other threads out of the guest, which led in some cases to a spurious HDEC interrupt on the next guest entry.

Signed-off-by: Paul Mackerras <[email protected]>
Signed-off-by: Alexander Graf <[email protected]>
1 parent fd6d53b commit 7d6c40d

5 files changed: +44 −49 lines changed

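As a reading aid before the per-file diffs, here is a minimal C sketch of the new encoding. The macro names are taken from the kvm_host.h diff below; the struct and helper are hypothetical stand-ins, not kernel code.

/* Sketch of the entry_exit_map layout: bits 0-7 form the entry bitmap,
 * bits 8-15 the exit bitmap, one bit per hardware thread of the core. */
struct vcore_sketch {			/* hypothetical stand-in for kvmppc_vcore */
	int entry_exit_map;
};

#define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
#define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)

/* The payoff described in the commit message: an exiting thread can read
 * off exactly which threads still need to be signalled out of the guest. */
static int threads_to_signal(struct vcore_sketch *vc, int ptid)
{
	return VCORE_ENTRY_MAP(vc) & ~(1 << ptid);	/* everyone but us */
}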

arch/powerpc/include/asm/kvm_host.h  (+8 −7)

@@ -263,15 +263,15 @@ struct kvm_arch {
 
 /*
  * Struct for a virtual core.
- * Note: entry_exit_count combines an entry count in the bottom 8 bits
- * and an exit count in the next 8 bits. This is so that we can
- * atomically increment the entry count iff the exit count is 0
- * without taking the lock.
+ * Note: entry_exit_map combines a bitmap of threads that have entered
+ * in the bottom 8 bits and a bitmap of threads that have exited in the
+ * next 8 bits. This is so that we can atomically set the entry bit
+ * iff the exit map is 0 without taking a lock.
  */
 struct kvmppc_vcore {
 	int n_runnable;
 	int num_threads;
-	int entry_exit_count;
+	int entry_exit_map;
 	int napping_threads;
 	int first_vcpuid;
 	u16 pcpu;
@@ -296,8 +296,9 @@ struct kvmppc_vcore {
 	ulong conferring_threads;
 };
 
-#define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
-#define VCORE_EXIT_COUNT(vc)	((vc)->entry_exit_count >> 8)
+#define VCORE_ENTRY_MAP(vc)	((vc)->entry_exit_map & 0xff)
+#define VCORE_EXIT_MAP(vc)	((vc)->entry_exit_map >> 8)
+#define VCORE_IS_EXITING(vc)	(VCORE_EXIT_MAP(vc) != 0)
 
 /* Values for vcore_state */
 #define VCORE_INACTIVE	0
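A worked example with made-up state (illustrative fragment, not part of the commit): suppose threads 0 and 2 have entered the guest and thread 2 has started exiting, so entry_exit_map == 0x0405.

/* Hypothetical state, to show how the three macros read the field. */
static void vcore_map_example(struct kvmppc_vcore *vc)
{
	vc->entry_exit_map = 0x0405;
	/* VCORE_ENTRY_MAP(vc)  == 0x05 : threads 0 and 2 are in the guest */
	/* VCORE_EXIT_MAP(vc)   == 0x04 : thread 2 has started exiting     */
	/* VCORE_IS_EXITING(vc) != 0    : a late vcpu must not join now    */
}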

arch/powerpc/kernel/asm-offsets.c  (+1 −1)

@@ -562,7 +562,7 @@ int main(void)
 	DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
 	DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
 	DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1));
-	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count));
+	DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
 	DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
 	DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
 	DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));

arch/powerpc/kvm/book3s_hv.c  (+2 −3)

@@ -1952,7 +1952,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	/*
 	 * Initialize *vc.
 	 */
-	vc->entry_exit_count = 0;
+	vc->entry_exit_map = 0;
 	vc->preempt_tb = TB_NIL;
 	vc->in_guest = 0;
 	vc->napping_threads = 0;
@@ -2119,8 +2119,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	 * this thread straight away and have it join in.
 	 */
 	if (!signal_pending(current)) {
-		if (vc->vcore_state == VCORE_RUNNING &&
-		    VCORE_EXIT_COUNT(vc) == 0) {
+		if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) {
 			kvmppc_create_dtl_entry(vcpu, vc);
 			kvmppc_start_thread(vcpu);
 			trace_kvm_guest_enter(vcpu);

arch/powerpc/kvm/book3s_hv_builtin.c  (+5 −5)

@@ -115,11 +115,11 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
 	int rv = H_SUCCESS; /* => don't yield */
 
 	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
-	while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
-		threads_running = VCORE_ENTRY_COUNT(vc);
-		threads_ceded = hweight32(vc->napping_threads);
-		threads_conferring = hweight32(vc->conferring_threads);
-		if (threads_ceded + threads_conferring >= threads_running) {
+	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
+		threads_running = VCORE_ENTRY_MAP(vc);
+		threads_ceded = vc->napping_threads;
+		threads_conferring = vc->conferring_threads;
+		if ((threads_ceded | threads_conferring) == threads_running) {
 			rv = H_TOO_HARD; /* => do yield */
 			break;
 		}
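The move from popcounts to raw bitmaps also makes the idle test exact rather than a count comparison. A sketch of what the new condition computes; the function name and example values are hypothetical:

/* Every thread that entered the guest must be napping or conferring. */
static int all_entered_threads_idle(int entry_map, int napping, int conferring)
{
	return (napping | conferring) == entry_map;
}

/* e.g. entry_map = 0x07, napping = 0x01, conferring = 0x04:
 * 0x01 | 0x04 == 0x05 != 0x07, so thread 1 is still running the guest
 * and kvmppc_rm_h_confer keeps spinning instead of yielding. */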

arch/powerpc/kvm/book3s_hv_rmhandlers.S  (+28 −33)

@@ -185,7 +185,7 @@ kvmppc_primary_no_guest:
 	or	r3, r3, r0
 	stwcx.	r3, 0, r6
 	bne	1b
-	/* order napping_threads update vs testing entry_exit_count */
+	/* order napping_threads update vs testing entry_exit_map */
 	isync
 	li	r12, 0
 	lwz	r7, VCORE_ENTRY_EXIT(r5)
@@ -406,19 +406,21 @@ kvmppc_hv_entry:
 	 * We don't have to lock against concurrent tlbies,
 	 * but we do have to coordinate across hardware threads.
 	 */
-	/* Increment entry count iff exit count is zero. */
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	addi	r9,r5,VCORE_ENTRY_EXIT
-21:	lwarx	r3,0,r9
-	cmpwi	r3,0x100		/* any threads starting to exit? */
+	/* Set bit in entry map iff exit map is zero. */
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	li	r7, 1
+	lbz	r6, HSTATE_PTID(r13)
+	sld	r7, r7, r6
+	addi	r9, r5, VCORE_ENTRY_EXIT
+21:	lwarx	r3, 0, r9
+	cmpwi	r3, 0x100		/* any threads starting to exit? */
 	bge	secondary_too_late	/* if so we're too late to the party */
-	addi	r3,r3,1
-	stwcx.	r3,0,r9
+	or	r3, r3, r7
+	stwcx.	r3, 0, r9
 	bne	21b
 
 	/* Primary thread switches to guest partition. */
 	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
-	lbz	r6,HSTATE_PTID(r13)
 	cmpwi	r6,0
 	bne	20f
 	ld	r6,KVM_SDR1(r9)
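In C terms, the 21: lwarx/stwcx. loop above is a compare-and-swap retry loop: set our entry bit only while no exit bit is set. A sketch using GCC atomic builtins, purely as an illustration; the kernel has to do this in real-mode assembly:

#include <stdbool.h>

/* Illustrative C equivalent of the entry loop (not the kernel code). */
static bool try_set_entry_bit(int *entry_exit_map, int ptid)
{
	int old = __atomic_load_n(entry_exit_map, __ATOMIC_RELAXED);

	do {
		if (old >= 0x100)	/* cmpwi r3,0x100: a thread is exiting */
			return false;	/* bge secondary_too_late */
	} while (!__atomic_compare_exchange_n(entry_exit_map, &old,
					      old | (1 << ptid),  /* or r3,r3,r7 */
					      false, __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
	return true;
}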
@@ -1477,13 +1479,16 @@ kvmhv_do_exit: /* r12 = trap, r13 = paca */
 	 * We don't have to lock against tlbies but we do
 	 * have to coordinate the hardware threads.
 	 */
-	/* Increment the threads-exiting-guest count in the 0xff00
-	   bits of vcore->entry_exit_count */
-	ld	r5,HSTATE_KVM_VCORE(r13)
-	addi	r6,r5,VCORE_ENTRY_EXIT
-41:	lwarx	r3,0,r6
-	addi	r0,r3,0x100
-	stwcx.	r0,0,r6
+	/* Set our bit in the threads-exiting-guest map in the 0xff00
+	   bits of vcore->entry_exit_map */
+	ld	r5, HSTATE_KVM_VCORE(r13)
+	lbz	r4, HSTATE_PTID(r13)
+	li	r7, 0x100
+	sld	r7, r7, r4
+	addi	r6, r5, VCORE_ENTRY_EXIT
+41:	lwarx	r3, 0, r6
+	or	r0, r3, r7
+	stwcx.	r0, 0, r6
 	bne	41b
 	isync		/* order stwcx. vs. reading napping_threads */
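The exit side needs no precondition, so the 41: loop reduces to an atomic OR of our exit-map bit; in C, with the same illustrative caveats as the entry sketch above:

/* Illustrative C equivalent of the 41: loop: unconditionally set our
 * bit in the exit map (bits 8-15); the isync in the asm provides the
 * ordering against the later read of napping_threads. */
static void set_exit_bit(int *entry_exit_map, int ptid)
{
	__atomic_fetch_or(entry_exit_map, 0x100 << ptid, __ATOMIC_SEQ_CST);
}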

@@ -1492,9 +1497,9 @@ kvmhv_do_exit: /* r12 = trap, r13 = paca */
 	 * up to the kernel or qemu; we can't handle it in real mode.
 	 * Thus we have to do a partition switch, so we have to
 	 * collect the other threads, if we are the first thread
-	 * to take an interrupt.  To do this, we set the HDEC to 0,
-	 * which causes an HDEC interrupt in all threads within 2ns
-	 * because the HDEC register is shared between all 4 threads.
+	 * to take an interrupt.  To do this, we send a message or
+	 * IPI to all the threads that have their bit set in the entry
+	 * map in vcore->entry_exit_map (other than ourselves).
 	 * However, we don't need to bother if this is an HDEC
 	 * interrupt, since the other threads will already be on their
 	 * way here in that case.
@@ -1503,17 +1508,8 @@ kvmhv_do_exit: /* r12 = trap, r13 = paca */
 	bge	43f
 	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
 	beq	43f
-	li	r0,0
-	mtspr	SPRN_HDEC,r0
 
-	/*
-	 * Send an IPI to any napping threads, since an HDEC interrupt
-	 * doesn't wake CPUs up from nap.
-	 */
-	lwz	r3,VCORE_NAPPING_THREADS(r5)
-	lbz	r4,HSTATE_PTID(r13)
-	li	r0,1
-	sld	r0,r0,r4
+	srwi	r0,r7,8
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
 	/* Order entry/exit update vs. IPIs */
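The new srwi r0,r7,8 leans on the map layout: r7 still holds this thread's exit-map bit (0x100 << ptid) from the 41: loop above, so a right shift by 8 recovers the plain per-thread bit used to mask ourselves out of the set of threads to signal. As arithmetic (ptid is this thread's number; sketch only):

/* srwi r0,r7,8 in C: turn our exit-map bit back into our thread bit. */
static int self_mask(int ptid)
{
	int my_exit_bit = 0x100 << ptid;	/* r7, set in the 41: loop */
	return my_exit_bit >> 8;		/* == 1 << ptid, cleared by andc. */
}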
@@ -2091,12 +2087,11 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
 	addi	r6,r5,VCORE_NAPPING_THREADS
 31:	lwarx	r4,0,r6
 	or	r4,r4,r0
-	PPC_POPCNTW(R7,R4)
-	cmpw	r7,r8
-	bge	kvm_cede_exit
+	cmpw	r4,r8
+	beq	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
-	/* order napping_threads update vs testing entry_exit_count */
+	/* order napping_threads update vs testing entry_exit_map */
 	isync
 	li	r0,NAPPING_CEDE
 	stb	r0,HSTATE_NAPPING(r13)
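Finally, the cede path: instead of popcounting the nappers and comparing against a thread count, the new code tests bitmap equality. A sketch, assuming (this is not shown in the hunk) that r8 was loaded with the entry map earlier in kvmppc_h_cede:

/* Sketch of the new last-thread-to-cede test (assumes r8 == entry map). */
static int last_to_cede(int napping_threads, int entry_map, int ptid)
{
	int new_napping = napping_threads | (1 << ptid);	/* or r4,r4,r0 */

	/* cmpw r4,r8; beq kvm_cede_exit: if every thread that entered
	 * the guest is now napping, exit the guest rather than nap. */
	return new_napping == entry_map;
}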
