mm/cma: change cma mutex to irq safe spinlock
Patch series "make hugetlb put_page safe for all calling contexts", v5.

This effort is the result of a recent bug report [1].  Syzbot found a
potential deadlock in the hugetlb put_page/free_huge_page path:

  WARNING: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected

Since the free_huge_page path already has code to 'hand off' page free
requests to a workqueue, a suggestion was proposed to make the in_irq()
detection accurate by always enabling PREEMPT_COUNT [2].  The outcome of
that discussion was that the hugetlb put_page path (free_huge_page)
should be properly fixed and made safe for all calling contexts.

[1] https://lore.kernel.org/linux-mm/[email protected]/
[2] http://lkml.kernel.org/r/[email protected]

This patch (of 8):

cma_release is currently a sleepable operation because the bitmap
manipulation is protected by the cma->lock mutex.  Hugetlb code, which
relies on cma_release for CMA backed (giga) hugetlb pages, however needs
to be irq safe.

The lock doesn't protect any sleepable operation, so it can be changed to
an (irq aware) spinlock.  The bitmap processing should be quite fast in
the typical case, but if cma sizes grow to the TB range we will likely
need to replace the lock with a more optimized bitmap implementation.
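As context for the diff below, here is a minimal sketch (not the patch
itself) of the locking pattern being applied.  The names example_lock,
example_bitmap, example_clear and example_set are illustrative stand-ins
for cma->lock, cma->bitmap and the cma helpers.  The bitmap-clearing path
may now be reached from any context via the hugetlb put_page path, so it
uses spin_lock_irqsave()/spin_unlock_irqrestore(); the allocation and
debugfs read paths run in process context and can use the plain
spin_lock_irq()/spin_unlock_irq() variants.

#include <linux/spinlock.h>
#include <linux/bitmap.h>

static DEFINE_SPINLOCK(example_lock);		/* stand-in for cma->lock */
static unsigned long example_bitmap[BITS_TO_LONGS(1024)];	/* stand-in for cma->bitmap */

/* Clear path: callable from any context, so save and restore IRQ state. */
static void example_clear(unsigned long start, unsigned long nr)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	bitmap_clear(example_bitmap, start, nr);
	spin_unlock_irqrestore(&example_lock, flags);
}

/* Allocation path: process context only, unconditional IRQ disable is fine. */
static void example_set(unsigned long start, unsigned long nr)
{
	spin_lock_irq(&example_lock);
	bitmap_set(example_bitmap, start, nr);
	spin_unlock_irq(&example_lock);
}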

Link: https://lkml.kernel.org/r/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mike Kravetz <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Reviewed-by: David Hildenbrand <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: Oscar Salvador <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Miaohe Lin <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: HORIGUCHI NAOYA <[email protected]>
Cc: "Aneesh Kumar K . V" <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Peter Xu <[email protected]>
Cc: Mina Almasry <[email protected]>
Cc: Hillf Danton <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Barry Song <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
mjkravetz authored and torvalds committed May 5, 2021
1 parent 15b8365 commit 0ef7dca
Showing 3 changed files with 14 additions and 14 deletions.
18 changes: 9 additions & 9 deletions mm/cma.c
@@ -24,7 +24,6 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
-#include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/log2.h>
@@ -83,13 +82,14 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
 			     unsigned int count)
 {
 	unsigned long bitmap_no, bitmap_count;
+	unsigned long flags;
 
 	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
 	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
 
-	mutex_lock(&cma->lock);
+	spin_lock_irqsave(&cma->lock, flags);
 	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
-	mutex_unlock(&cma->lock);
+	spin_unlock_irqrestore(&cma->lock, flags);
 }
 
 static void __init cma_activate_area(struct cma *cma)
@@ -118,7 +118,7 @@ static void __init cma_activate_area(struct cma *cma)
 	     pfn += pageblock_nr_pages)
 		init_cma_reserved_pageblock(pfn_to_page(pfn));
 
-	mutex_init(&cma->lock);
+	spin_lock_init(&cma->lock);
 
 #ifdef CONFIG_CMA_DEBUGFS
 	INIT_HLIST_HEAD(&cma->mem_head);
@@ -392,7 +392,7 @@ static void cma_debug_show_areas(struct cma *cma)
 	unsigned long nr_part, nr_total = 0;
 	unsigned long nbits = cma_bitmap_maxno(cma);
 
-	mutex_lock(&cma->lock);
+	spin_lock_irq(&cma->lock);
 	pr_info("number of available pages: ");
 	for (;;) {
 		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
@@ -407,7 +407,7 @@ static void cma_debug_show_areas(struct cma *cma)
 		start = next_zero_bit + nr_zero;
 	}
 	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
-	mutex_unlock(&cma->lock);
+	spin_unlock_irq(&cma->lock);
 }
 #else
 static inline void cma_debug_show_areas(struct cma *cma) { }
@@ -452,12 +452,12 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		return NULL;
 
 	for (;;) {
-		mutex_lock(&cma->lock);
+		spin_lock_irq(&cma->lock);
 		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
+			spin_unlock_irq(&cma->lock);
 			break;
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
@@ -466,7 +466,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
 		 * our exclusive use. If the migration fails we will take the
 		 * lock again and unmark it.
 		 */
-		mutex_unlock(&cma->lock);
+		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
2 changes: 1 addition & 1 deletion mm/cma.h
@@ -9,7 +9,7 @@ struct cma {
 	unsigned long count;
 	unsigned long *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
-	struct mutex lock;
+	spinlock_t lock;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;
8 changes: 4 additions & 4 deletions mm/cma_debug.c
@@ -36,10 +36,10 @@ static int cma_used_get(void *data, u64 *val)
 	struct cma *cma = data;
 	unsigned long used;
 
-	mutex_lock(&cma->lock);
+	spin_lock_irq(&cma->lock);
 	/* pages counter is smaller than sizeof(int) */
 	used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
-	mutex_unlock(&cma->lock);
+	spin_unlock_irq(&cma->lock);
 	*val = (u64)used << cma->order_per_bit;
 
 	return 0;
@@ -53,15 +53,15 @@ static int cma_maxchunk_get(void *data, u64 *val)
 	unsigned long start, end = 0;
 	unsigned long bitmap_maxno = cma_bitmap_maxno(cma);
 
-	mutex_lock(&cma->lock);
+	spin_lock_irq(&cma->lock);
 	for (;;) {
 		start = find_next_zero_bit(cma->bitmap, bitmap_maxno, end);
 		if (start >= bitmap_maxno)
 			break;
 		end = find_next_bit(cma->bitmap, bitmap_maxno, start);
 		maxchunk = max(end - start, maxchunk);
 	}
-	mutex_unlock(&cma->lock);
+	spin_unlock_irq(&cma->lock);
 	*val = (u64)maxchunk << cma->order_per_bit;
 
 	return 0;
