Merge tag 'dma-mapping-6.10-2024-05-31' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - dma-mapping benchmark error handling fixes (Fedor Pchelkin)

 - correct a config symbol reference in the DMA API documentation (Lukas
   Bulwahn)

* tag 'dma-mapping-6.10-2024-05-31' of git://git.infradead.org/users/hch/dma-mapping:
  Documentation/core-api: correct reference to SWIOTLB_DYNAMIC
  dma-mapping: benchmark: handle NUMA_NO_NODE correctly
  dma-mapping: benchmark: fix node id validation
  dma-mapping: benchmark: avoid needless copy_to_user if benchmark fails
  dma-mapping: benchmark: fix up kthread-related error handling
torvalds committed May 31, 2024
2 parents 7d88cc8 + 82d71b5 commit b050496
Showing 2 changed files with 17 additions and 10 deletions.
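
For orientation, the benchmark these fixes touch is driven from user space through a debugfs ioctl. Below is a rough usage sketch: the debugfs path, the DMA_MAP_BENCHMARK macro, the header name, and the dma_bits/granule defaults are assumptions taken from the in-tree selftest (tools/testing/selftests/dma/dma_map_benchmark.c), not from this commit; threads, seconds, and node are the fields the patched kernel code below actually reads.

/*
 * Rough user-space sketch (assumed details flagged below); the kernel side
 * of this request is the do_map_benchmark()/map_benchmark_ioctl() code in
 * the diff that follows.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/map_benchmark.h>	/* assumed: struct map_benchmark, DMA_MAP_BENCHMARK */

int main(void)
{
	struct map_benchmark bm;
	int fd = open("/sys/kernel/debug/dma_map_benchmark", O_RDWR);	/* assumed path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&bm, 0, sizeof(bm));
	bm.threads = 4;		/* kthreads doing map/unmap in parallel */
	bm.seconds = 10;	/* how long the benchmark runs */
	bm.node = -1;		/* NUMA_NO_NODE: don't bind the kthreads to a node */
	bm.dma_bits = 32;	/* assumed defaults, as in the selftest */
	bm.granule = 1;

	if (ioctl(fd, DMA_MAP_BENCHMARK, &bm) < 0) {
		perror("ioctl");
		return 1;
	}
	/* on success the kernel copies the averaged latencies back into bm */
	return 0;
}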
Documentation/core-api/swiotlb.rst (2 changes: 1 addition & 1 deletion)
@@ -192,7 +192,7 @@ alignment larger than PAGE_SIZE.
 
 Dynamic swiotlb
 ---------------
-When CONFIG_DYNAMIC_SWIOTLB is enabled, swiotlb can do on-demand expansion of
+When CONFIG_SWIOTLB_DYNAMIC is enabled, swiotlb can do on-demand expansion of
 the amount of memory available for allocation as bounce buffers. If a bounce
 buffer request fails due to lack of available space, an asynchronous background
 task is kicked off to allocate memory from general system memory and turn it
kernel/dma/map_benchmark.c (25 changes: 16 additions & 9 deletions)
@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 	struct task_struct **tsk;
 	int threads = map->bparam.threads;
 	int node = map->bparam.node;
-	const cpumask_t *cpu_mask = cpumask_of_node(node);
 	u64 loops;
 	int ret = 0;
 	int i;
@@ -118,11 +117,13 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 		if (IS_ERR(tsk[i])) {
 			pr_err("create dma_map thread failed\n");
 			ret = PTR_ERR(tsk[i]);
+			while (--i >= 0)
+				kthread_stop(tsk[i]);
 			goto out;
 		}
 
 		if (node != NUMA_NO_NODE)
-			kthread_bind_mask(tsk[i], cpu_mask);
+			kthread_bind_mask(tsk[i], cpumask_of_node(node));
 	}
 
 	/* clear the old value in the previous benchmark */
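
The two added lines above follow the usual unwind-on-partial-failure shape: if creating the i-th kthread fails, the kthreads already created at indices 0..i-1 are stopped before taking the shared error path. A self-contained user-space analogue of that shape (pthreads stand in for kthreads; names are illustrative, not from this file):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

/* Start n workers; on failure, unwind the ones already started. */
static int start_all(pthread_t *tids, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = pthread_create(&tids[i], NULL, worker, NULL);
		if (err) {
			fprintf(stderr, "thread %d: %s\n", i, strerror(err));
			while (--i >= 0)	/* same --i walk-back as the fix above */
				pthread_join(tids[i], NULL);
			return -err;
		}
	}
	return 0;
}

int main(void)
{
	pthread_t tids[4];
	int i;

	if (start_all(tids, 4))
		return 1;
	for (i = 0; i < 4; i++)
		pthread_join(tids[i], NULL);
	return 0;
}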
@@ -139,13 +140,17 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 
 	msleep_interruptible(map->bparam.seconds * 1000);
 
-	/* wait for the completion of benchmark threads */
+	/* wait for the completion of all started benchmark threads */
 	for (i = 0; i < threads; i++) {
-		ret = kthread_stop(tsk[i]);
-		if (ret)
-			goto out;
+		int kthread_ret = kthread_stop_put(tsk[i]);
+
+		if (kthread_ret)
+			ret = kthread_ret;
 	}
 
+	if (ret)
+		goto out;
+
 	loops = atomic64_read(&map->loops);
 	if (likely(loops > 0)) {
 		u64 map_variance, unmap_variance;
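
Two things change here. kthread_stop() is replaced by kthread_stop_put(), which also drops the task_struct reference, so the separate put_task_struct() loop disappears from the out: label in the next hunk. And the loop no longer bails out on the first failing thread: every thread is stopped, any error is remembered, and only afterwards does the function take the error path. A small stand-alone illustration of that reap-everything-then-report order (user-space, illustrative names):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Each worker reports 0 on success or a negative error code. */
static void *worker(void *arg)
{
	intptr_t id = (intptr_t)arg;
	intptr_t rc = (id == 2) ? -5 : 0;	/* pretend worker 2 fails */

	return (void *)rc;
}

int main(void)
{
	pthread_t tids[4];
	intptr_t i;
	int ret = 0;

	for (i = 0; i < 4; i++)
		pthread_create(&tids[i], NULL, worker, (void *)i);

	for (i = 0; i < 4; i++) {
		void *status;

		pthread_join(tids[i], &status);		/* always reap every worker */
		if ((intptr_t)status)
			ret = (int)(intptr_t)status;	/* remember the error, don't bail yet */
	}

	if (ret)
		fprintf(stderr, "a worker failed: %d\n", ret);
	return ret ? 1 : 0;
}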
@@ -170,8 +175,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
 	}
 
 out:
-	for (i = 0; i < threads; i++)
-		put_task_struct(tsk[i]);
 	put_device(map->dev);
 	kfree(tsk);
 	return ret;
@@ -208,7 +211,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
 	}
 
 	if (map->bparam.node != NUMA_NO_NODE &&
-	    !node_possible(map->bparam.node)) {
+	    (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
+	     !node_possible(map->bparam.node))) {
 		pr_err("invalid numa node\n");
 		return -EINVAL;
 	}
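
The extra bounds check matters because node_possible() only tests a bit in the possible-node mask and does not itself range-check its argument, so a user-supplied node id has to be validated against [0, MAX_NUMNODES) before the mask is probed. The validation order, reduced to a stand-alone sketch (MAX_NODES and the array are stand-ins for MAX_NUMNODES and the kernel's possible-node mask):

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 64			/* stand-in for MAX_NUMNODES */

static bool possible[MAX_NODES];	/* stand-in for the possible-node mask */

static bool node_param_valid(int node)
{
	if (node == -1)			/* stand-in for NUMA_NO_NODE */
		return true;
	/* Range-check first, so the array probe below can never go out of bounds. */
	if (node < 0 || node >= MAX_NODES)
		return false;
	return possible[node];
}

int main(void)
{
	possible[0] = possible[1] = true;	/* pretend nodes 0 and 1 exist */

	printf("%d %d %d %d\n",
	       node_param_valid(0),		/* 1 */
	       node_param_valid(1 << 20),	/* 0: out of range, rejected safely */
	       node_param_valid(-7),		/* 0 */
	       node_param_valid(-1));		/* 1: NUMA_NO_NODE is allowed */
	return 0;
}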
@@ -252,6 +256,9 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
 		 * dma_mask changed by benchmark
 		 */
 		dma_set_mask(map->dev, old_dma_mask);
+
+		if (ret)
+			return ret;
 		break;
 	default:
 		return -EINVAL;
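
This last hunk is the "avoid needless copy_to_user if benchmark fails" fix: the DMA mask is still restored unconditionally, but when the benchmark returned an error the handler now returns it right away instead of falling through to the code that copies the (meaningless) results back to user space. The control flow, reduced to a stand-alone sketch (names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct result {
	long avg_map_ns;
};

/* Pretend benchmark: writes into *r but reports failure. */
static int run_benchmark(struct result *r)
{
	r->avg_map_ns = 123;
	return -EINVAL;
}

static int handle_request(struct result *user_out)
{
	struct result scratch = { 0 };
	int ret = run_benchmark(&scratch);

	/* ...temporary state is restored here unconditionally... */

	if (ret)
		return ret;		/* don't copy stale results to the caller */

	memcpy(user_out, &scratch, sizeof(*user_out));	/* stands in for copy_to_user() */
	return 0;
}

int main(void)
{
	struct result out = { 0 };
	int ret = handle_request(&out);

	printf("ret=%d avg_map_ns=%ld\n", ret, out.avg_map_ns);	/* ret=-22 avg_map_ns=0 */
	return 0;
}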
