Skip to content

Commit

Permalink
dma-contiguous: support numa CMA for specified node
Browse files Browse the repository at this point in the history
The kernel parameter 'cma_pernuma=' only supports reserving the same
size of CMA area for each node. We need to reserve different sizes of
CMA area for specified nodes if these devices belong to different nodes.

Add another kernel parameter, 'numa_cma=', to reserve a CMA area for
the specified node. To use either of these parameters, DMA_NUMA_CMA
must be enabled.

At the same time, print the node id in cma_declare_contiguous_nid() if
CONFIG_NUMA is enabled.

Signed-off-by: Yajun Deng <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
  • Loading branch information
Yajun Deng authored and Christoph Hellwig committed Jul 31, 2023
1 parent 22e4a34 commit bf29bfa
Show file tree
Hide file tree
Showing 4 changed files with 102 additions and 29 deletions.
11 changes: 11 additions & 0 deletions Documentation/admin-guide/kernel-parameters.txt
Original file line number Diff line number Diff line change
Expand Up @@ -706,6 +706,17 @@
which is located in node nid. If the allocation fails,
they will fall back to the global default memory area.

numa_cma=<node>:nn[MG][,<node>:nn[MG]]
[KNL,CMA]
Sets the size of the kernel NUMA memory area for
contiguous memory allocations. It reserves a CMA
area for the specified node.

With NUMA CMA enabled, DMA users on node nid will
first try to allocate a buffer from the NUMA area
which is located in node nid. If the allocation fails,
they will fall back to the global default memory area.

cmo_free_hint= [PPC] Format: { yes | no }
Specify whether pages are marked as being inactive
when they are freed. This is used in CMO environments
Expand Down
9 changes: 5 additions & 4 deletions kernel/dma/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -145,15 +145,16 @@ config DMA_CMA

if DMA_CMA

config DMA_PERNUMA_CMA
bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
config DMA_NUMA_CMA
bool "Enable separate DMA Contiguous Memory Area for NUMA Node"
default NUMA
help
Enable this option to get pernuma CMA areas so that NUMA devices
Enable this option to get numa CMA areas so that NUMA devices
can get local memory by DMA coherent APIs.

You can set the size of pernuma CMA by specifying "cma_pernuma=size"
on the kernel's command line.
or set the node id and its size of CMA by specifying "numa_cma=
<node>:size[,<node>:size]" on the kernel's command line.

comment "Default contiguous memory area size:"

Expand Down
101 changes: 79 additions & 22 deletions kernel/dma/contiguous.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <linux/nospec.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
Expand Down Expand Up @@ -96,11 +97,44 @@ static int __init early_cma(char *p)
}
early_param("cma", early_cma);

#ifdef CONFIG_DMA_PERNUMA_CMA
#ifdef CONFIG_DMA_NUMA_CMA

static struct cma *dma_contiguous_numa_area[MAX_NUMNODES];
static phys_addr_t numa_cma_size[MAX_NUMNODES] __initdata;
static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

/*
 * Parse the "numa_cma=<node>:nn[MG][,<node>:nn[MG]]" early boot parameter
 * and record the requested per-node CMA size in numa_cma_size[].
 *
 * Parsing stops silently at the first malformed token or out-of-range node
 * id; earlier well-formed entries are kept.  Always returns 0, as is
 * conventional for early_param() handlers.
 */
static int __init early_numa_cma(char *p)
{
int nid, count = 0;
unsigned long tmp;
char *s = p;

while (*s) {
/* Read the node id; %n records how many characters were consumed. */
if (sscanf(s, "%lu%n", &tmp, &count) != 1)
break;

if (s[count] == ':') {
if (tmp >= MAX_NUMNODES)
break;
/* Sanitize the user-controlled index against speculative OOB use. */
nid = array_index_nospec(tmp, MAX_NUMNODES);

/* Skip past the node id and the ':', then parse the size suffix. */
s += count + 1;
tmp = memparse(s, &s);
numa_cma_size[nid] = tmp;

/* A ',' introduces another <node>:size pair; anything else ends parsing. */
if (*s == ',')
s++;
else
break;
} else
break;
}

return 0;
}
early_param("numa_cma", early_numa_cma);

static int __init early_cma_pernuma(char *p)
{
pernuma_size_bytes = memparse(p, &p);
Expand All @@ -127,34 +161,47 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)

#endif

#ifdef CONFIG_DMA_PERNUMA_CMA
static void __init dma_pernuma_cma_reserve(void)
#ifdef CONFIG_DMA_NUMA_CMA
static void __init dma_numa_cma_reserve(void)
{
int nid;

if (!pernuma_size_bytes)
return;

for_each_online_node(nid) {
for_each_node(nid) {
int ret;
char name[CMA_MAX_NAME];
struct cma **cma = &dma_contiguous_pernuma_area[nid];

snprintf(name, sizeof(name), "pernuma%d", nid);
ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
0, false, name, cma, nid);
if (ret) {
pr_warn("%s: reservation failed: err %d, node %d", __func__,
ret, nid);
struct cma **cma;

if (!node_online(nid)) {
if (pernuma_size_bytes || numa_cma_size[nid])
pr_warn("invalid node %d specified\n", nid);
continue;
}

pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
(unsigned long long)pernuma_size_bytes / SZ_1M, nid);
if (pernuma_size_bytes) {

cma = &dma_contiguous_pernuma_area[nid];
snprintf(name, sizeof(name), "pernuma%d", nid);
ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
0, false, name, cma, nid);
if (ret)
pr_warn("%s: reservation failed: err %d, node %d", __func__,
ret, nid);
}

if (numa_cma_size[nid]) {

cma = &dma_contiguous_numa_area[nid];
snprintf(name, sizeof(name), "numa%d", nid);
ret = cma_declare_contiguous_nid(0, numa_cma_size[nid], 0, 0, 0, false,
name, cma, nid);
if (ret)
pr_warn("%s: reservation failed: err %d, node %d", __func__,
ret, nid);
}
}
}
#else
static inline void __init dma_pernuma_cma_reserve(void)
static inline void __init dma_numa_cma_reserve(void)
{
}
#endif
Expand All @@ -175,7 +222,7 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
phys_addr_t selected_limit = limit;
bool fixed = false;

dma_pernuma_cma_reserve();
dma_numa_cma_reserve();

pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

Expand Down Expand Up @@ -309,7 +356,7 @@ static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
*/
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_PERNUMA_CMA
#ifdef CONFIG_DMA_NUMA_CMA
int nid = dev_to_node(dev);
#endif

Expand All @@ -321,7 +368,7 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
if (size <= PAGE_SIZE)
return NULL;

#ifdef CONFIG_DMA_PERNUMA_CMA
#ifdef CONFIG_DMA_NUMA_CMA
if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
struct cma *cma = dma_contiguous_pernuma_area[nid];
struct page *page;
Expand All @@ -331,6 +378,13 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
if (page)
return page;
}

cma = dma_contiguous_numa_area[nid];
if (cma) {
page = cma_alloc_aligned(cma, size, gfp);
if (page)
return page;
}
}
#endif
if (!dma_contiguous_default_area)
Expand Down Expand Up @@ -362,10 +416,13 @@ void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
/*
* otherwise, page is from either per-numa cma or default cma
*/
#ifdef CONFIG_DMA_PERNUMA_CMA
#ifdef CONFIG_DMA_NUMA_CMA
if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
page, count))
return;
if (cma_release(dma_contiguous_numa_area[page_to_nid(page)],
page, count))
return;
#endif
if (cma_release(dma_contiguous_default_area, page, count))
return;
Expand Down
10 changes: 7 additions & 3 deletions mm/cma.c
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,9 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
if (alignment && !is_power_of_2(alignment))
return -EINVAL;

if (!IS_ENABLED(CONFIG_NUMA))
nid = NUMA_NO_NODE;

/* Sanitise input arguments. */
alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES);
if (fixed && base & (alignment - 1)) {
Expand Down Expand Up @@ -372,14 +375,15 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
if (ret)
goto free_mem;

pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
&base);
pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M,
&base, nid);
return 0;

free_mem:
memblock_phys_free(base, size);
err:
pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M,
nid);
return ret;
}

Expand Down

0 comments on commit bf29bfa

Please sign in to comment.