Skip to content

Commit

Permalink
iommu/dma: Use a large flush queue and timeout for shadow_on_flush
Browse files Browse the repository at this point in the history
Flush queues currently use a fixed compile time size of 256 entries.
This being a power of 2 allows the compiler to use shift and mask
instead of more expensive modulo operations. With per-CPU flush queues
larger queue sizes would hit per-CPU allocation limits, with a single
flush queue these limits do not apply however. Also with single queues
being particularly suitable for virtualized environments with expensive
IOTLB flushes these benefit especially from larger queues and thus fewer
flushes.

To this end re-order struct iova_fq so we can use a dynamic array and
introduce the flush queue size and timeouts as new options in the
iommu_dma_options struct. So as not to lose the shift and mask
optimization, use a power of 2 for the length and use explicit shift and
mask instead of letting the compiler optimize this.

A large queue size and a 1 second timeout are then set for the
shadow-on-flush case used by s390 paged memory guests. This brings
performance on par with the previous s390-specific DMA API implementation.

Acked-by: Robin Murphy <[email protected]>
Reviewed-by: Matthew Rosato <[email protected]> #s390
Signed-off-by: Niklas Schnelle <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Joerg Roedel <[email protected]>
  • Loading branch information
niklas88 authored and joergroedel committed Oct 2, 2023
1 parent 32d5bc8 commit 9f5b681
Showing 1 changed file with 32 additions and 18 deletions.
50 changes: 32 additions & 18 deletions drivers/iommu/dma-iommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ enum iommu_dma_queue_type {

/*
 * Per-device tuning for the deferred IOVA flush queues; filled in by
 * iommu_dma_init_options() based on the device's IOMMU properties.
 */
struct iommu_dma_options {
	enum iommu_dma_queue_type qt;	/* single shared queue or per-CPU queues */
	size_t fq_size;			/* entries per queue; must be a power of 2 */
	unsigned int fq_timeout;	/* queue flush timeout in milliseconds */
};

struct iommu_dma_cookie {
Expand Down Expand Up @@ -98,10 +100,12 @@ static int __init iommu_dma_forcedac_setup(char *str)
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/*
 * Number of entries per flush queue. Both values are powers of 2 so ring
 * index wrap-around can use a mask (mod_mask) instead of a modulo. The
 * SINGLE variants apply to the single shared queue used when
 * shadow_on_flush is set (e.g. s390 paged memory guests), where IOTLB
 * flushes are expensive and a larger queue means fewer flushes.
 */
#define IOVA_DEFAULT_FQ_SIZE	256
#define IOVA_SINGLE_FQ_SIZE	32768

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_DEFAULT_FQ_TIMEOUT	10
#define IOVA_SINGLE_FQ_TIMEOUT	1000

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
Expand All @@ -113,18 +117,19 @@ struct iova_fq_entry {

/*
 * Flush queue structure. The flexible entries[] array lets the queue
 * length be chosen at init time (iommu_dma_options.fq_size) rather than
 * at compile time. mod_mask is fq_size - 1; since fq_size is a power of
 * 2, ring indices advance with a cheap AND instead of a modulo.
 */
struct iova_fq {
	spinlock_t lock;
	unsigned int head, tail;
	unsigned int mod_mask;
	struct iova_fq_entry entries[];
};

#define fq_ring_for_each(i, fq) \
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)

static inline bool fq_full(struct iova_fq *fq)
{
assert_spin_locked(&fq->lock);
return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
return (((fq->tail + 1) & fq->mod_mask) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
Expand All @@ -133,7 +138,7 @@ static inline unsigned int fq_ring_add(struct iova_fq *fq)

assert_spin_locked(&fq->lock);

fq->tail = (idx + 1) % IOVA_FQ_SIZE;
fq->tail = (idx + 1) & fq->mod_mask;

return idx;
}
Expand All @@ -155,7 +160,7 @@ static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq
fq->entries[idx].iova_pfn,
fq->entries[idx].pages);

fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
fq->head = (fq->head + 1) & fq->mod_mask;
}
}

Expand Down Expand Up @@ -240,7 +245,7 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
if (!atomic_read(&cookie->fq_timer_on) &&
!atomic_xchg(&cookie->fq_timer_on, 1))
mod_timer(&cookie->fq_timer,
jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
}

static void iommu_dma_free_fq_single(struct iova_fq *fq)
Expand Down Expand Up @@ -279,43 +284,47 @@ static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
iommu_dma_free_fq_percpu(cookie->percpu_fq);
}

/*
 * Initialize one flush queue of fq_size entries: empty ring, mask derived
 * from the (power-of-2) size, fresh lock, and empty per-entry freelists.
 */
static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
{
	int i;

	fq->head = 0;
	fq->tail = 0;
	fq->mod_mask = fq_size - 1;

	spin_lock_init(&fq->lock);

	for (i = 0; i < fq_size; i++)
		INIT_LIST_HEAD(&fq->entries[i].freelist);
}

/*
 * Allocate and initialize the single shared flush queue, sized per
 * cookie->options.fq_size. Returns 0 on success or -ENOMEM.
 */
static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
{
	size_t fq_size = cookie->options.fq_size;
	struct iova_fq *queue;

	/* struct_size() accounts for the flexible entries[] array */
	queue = vmalloc(struct_size(queue, entries, fq_size));
	if (!queue)
		return -ENOMEM;
	iommu_dma_init_one_fq(queue, fq_size);
	cookie->single_fq = queue;

	return 0;
}

/*
 * Allocate and initialize the per-CPU flush queues, each sized per
 * cookie->options.fq_size. Returns 0 on success or -ENOMEM.
 */
static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
{
	size_t fq_size = cookie->options.fq_size;
	struct iova_fq __percpu *queue;
	int cpu;

	/*
	 * __alloc_percpu() instead of alloc_percpu(): the flexible
	 * entries[] array makes the allocation size runtime-dependent.
	 */
	queue = __alloc_percpu(struct_size(queue, entries, fq_size),
			       __alignof__(*queue));
	if (!queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
	cookie->percpu_fq = queue;
	return 0;
}
Expand Down Expand Up @@ -635,11 +644,16 @@ static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
static void iommu_dma_init_options(struct iommu_dma_options *options,
struct device *dev)
{
/* Shadowing IOTLB flushes do better with a single queue */
if (dev->iommu->shadow_on_flush)
/* Shadowing IOTLB flushes do better with a single large queue */
if (dev->iommu->shadow_on_flush) {
options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
else
options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
options->fq_size = IOVA_SINGLE_FQ_SIZE;
} else {
options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
options->fq_size = IOVA_DEFAULT_FQ_SIZE;
options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
}
}

/**
Expand Down

0 comments on commit 9f5b681

Please sign in to comment.