xen/events: Use immediate affinity setting
There is absolutely no reason to mimic the x86 deferred affinity
setting. This mechanism is required to handle the hardware-induced issues
of the IO/APIC and MSI and is not in use when the interrupts are remapped.

XEN does not need this and can simply change the affinity from the calling
context. The core code invokes this with the interrupt descriptor lock
held, so it is fully serialized against any other operation.
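
[Editor's note] For illustration, a condensed sketch of that decision in
the generic core (paraphrased from kernel/irq/manage.c, not the verbatim
code; the wrapper name set_affinity_sketch() is made up for this note):

	/*
	 * Condensed sketch, not verbatim kernel code. With IRQ_MOVE_PCNTXT
	 * set, irq_can_move_pcntxt() is true and the affinity change is
	 * applied immediately; otherwise it is recorded and replayed later
	 * from the interrupt ack path.
	 */
	static int set_affinity_sketch(struct irq_data *data,
				       const struct cpumask *mask, bool force)
	{
		struct irq_desc *desc = irq_data_to_desc(data);

		if (irq_can_move_pcntxt(data))
			/* Immediate: caller holds the descriptor lock. */
			return irq_try_set_affinity(data, mask, force);

		/* Deferred: remember the request for the ack path. */
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
		return 0;
	}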

Mark the interrupts with IRQ_MOVE_PCNTXT to disable the deferred affinity
setting. The conditional mask/unmask operation is already handled in
xen_rebind_evtchn_to_cpu().
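
[Editor's note] For reference, that sequence looks roughly as follows
(simplified from xen_rebind_evtchn_to_cpu() in events_base.c, not the
exact code; the name rebind_sketch() is made up for this note):

	/* Simplified sketch of the conditional mask/unmask handling. */
	static int rebind_sketch(evtchn_port_t evtchn, unsigned int tcpu)
	{
		struct evtchn_bind_vcpu bind_vcpu = {
			.port = evtchn,
			.vcpu = xen_vcpu_nr(tcpu),
		};
		/* Mask the channel, remembering whether it already was. */
		int masked = test_and_set_mask(evtchn);

		/* Ask the hypervisor to deliver it to the target vCPU. */
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu,
						&bind_vcpu) >= 0)
			bind_evtchn_to_cpu(evtchn, tcpu);

		/* Unmask only if this function masked the channel. */
		if (!masked)
			unmask_evtchn(evtchn);
		return 0;
	}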

This makes XEN on x86 use the same mechanics as, e.g., ARM64, where
deferred affinity setting is neither required nor implemented and the code
path in the ack functions is compiled out.

Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Stefano Stabellini <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
KAGA-KOKO committed Dec 15, 2020
1 parent 67473b8 commit 1ca1b4e
Showing 1 changed file with 9 additions and 26 deletions.
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -628,6 +628,11 @@ static void xen_irq_init(unsigned irq)
 	info->refcnt = -1;
 
 	set_info_for_irq(irq, info);
+	/*
+	 * Interrupt affinity setting can be immediate. No point
+	 * in delaying it until an interrupt is handled.
+	 */
+	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
 
 	INIT_LIST_HEAD(&info->eoi_list);
 	list_add_tail(&info->list, &xen_irq_list_head);
@@ -739,18 +744,7 @@ static void eoi_pirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data)) &&
-	    likely(!irqd_irq_disabled(data))) {
-		int masked = test_and_set_mask(evtchn);
-
-		clear_evtchn(evtchn);
-
-		irq_move_masked_irq(data);
-
-		if (!masked)
-			unmask_evtchn(evtchn);
-	} else
-		clear_evtchn(evtchn);
+	clear_evtchn(evtchn);
 
 	if (pirq_needs_eoi(data->irq)) {
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -1641,7 +1635,6 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
 	mutex_unlock(&irq_mapping_update_lock);
 
 	bind_evtchn_to_cpu(evtchn, info->cpu);
-	/* This will be deferred until interrupt is processed */
 	irq_set_affinity(irq, cpumask_of(info->cpu));
 
 	/* Unmask the event channel. */
@@ -1688,8 +1681,9 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
 			    bool force)
 {
 	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
-	int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
+	int ret;
 
+	ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
 	if (!ret)
 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
 
@@ -1719,18 +1713,7 @@ static void ack_dynirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data)) &&
-	    likely(!irqd_irq_disabled(data))) {
-		int masked = test_and_set_mask(evtchn);
-
-		clear_evtchn(evtchn);
-
-		irq_move_masked_irq(data);
-
-		if (!masked)
-			unmask_evtchn(evtchn);
-	} else
-		clear_evtchn(evtchn);
+	clear_evtchn(evtchn);
 }
 
 static void mask_ack_dynirq(struct irq_data *data)
