
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "24 patches, based on 4a09d38.

  Subsystems affected by this patch series: mm (thp, vmalloc, hugetlb,
  memory-failure, and pagealloc), nilfs2, kthread, MAINTAINERS, and
  mailmap"

* emailed patches from Andrew Morton <[email protected]>: (24 commits)
  mailmap: add Marek's other e-mail address and identity without diacritics
  MAINTAINERS: fix Marek's identity again
  mm/page_alloc: do bulk array bounds check after checking populated elements
  mm/page_alloc: __alloc_pages_bulk(): do bounds check before accessing array
  mm/hwpoison: do not lock page again when me_huge_page() successfully recovers
  mm,hwpoison: return -EHWPOISON to denote that the page has already been poisoned
  mm/memory-failure: use a mutex to avoid memory_failure() races
  mm, futex: fix shared futex pgoff on shmem huge page
  kthread: prevent deadlock when kthread_mod_delayed_work() races with kthread_cancel_delayed_work_sync()
  kthread_worker: split code for canceling the delayed work timer
  mm/vmalloc: unbreak kasan vmalloc support
  KVM: s390: prepare for hugepage vmalloc
  mm/vmalloc: add vmalloc_no_huge
  nilfs2: fix memory leak in nilfs_sysfs_delete_device_group
  mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk()
  mm/thp: fix page_vma_mapped_walk() if THP mapped by ptes
  mm: page_vma_mapped_walk(): get vma_address_end() earlier
  mm: page_vma_mapped_walk(): use goto instead of while (1)
  mm: page_vma_mapped_walk(): add a level of indentation
  mm: page_vma_mapped_walk(): crossing page table boundary
  ...
torvalds committed Jun 25, 2021
2 parents 808e9df + 72a461a commit 7ce32ac
Showing 14 changed files with 258 additions and 160 deletions.
2 changes: 2 additions & 0 deletions .mailmap
@@ -212,6 +212,8 @@ Manivannan Sadhasivam <[email protected]> <[email protected]>
 Manivannan Sadhasivam <[email protected]> <[email protected]>
 Marcin Nowakowski <[email protected]> <[email protected]>
 Marc Zyngier <[email protected]> <[email protected]>
+Marek Behún <[email protected]> <[email protected]>
+Marek Behún <[email protected]> Marek Behun <[email protected]>
 Mark Brown <[email protected]>
 Mark Starovoytov <[email protected]> <[email protected]>
 Mark Yao <[email protected]> <[email protected]>

4 changes: 2 additions & 2 deletions MAINTAINERS
@@ -1816,7 +1816,7 @@ F: drivers/pinctrl/pinctrl-gemini.c
 F: drivers/rtc/rtc-ftrtc010.c
 
 ARM/CZ.NIC TURRIS SUPPORT
-M: Marek Behun <[email protected]>
+M: Marek Behún <[email protected]>
 S: Maintained
 W: https://www.turris.cz/
 F: Documentation/ABI/testing/debugfs-moxtet
@@ -10945,7 +10945,7 @@ F: include/linux/mv643xx.h
 
 MARVELL MV88X3310 PHY DRIVER
 M: Russell King <[email protected]>
-M: Marek Behun <[email protected]>
+M: Marek Behún <[email protected]>
 L: [email protected]
 S: Maintained
 F: drivers/net/phy/marvell10g.c

7 changes: 6 additions & 1 deletion arch/s390/kvm/pv.c
@@ -140,7 +140,12 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
 	/* Allocate variable storage */
 	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
 	vlen += uv_info.guest_virt_base_stor_len;
-	kvm->arch.pv.stor_var = vzalloc(vlen);
+	/*
+	 * The Create Secure Configuration Ultravisor Call does not support
+	 * using large pages for the virtual memory area.
+	 * This is a hardware limitation.
+	 */
+	kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
 	if (!kvm->arch.pv.stor_var)
 		goto out_err;
 	return 0;

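The new vmalloc_no_huge() helper (declared in include/linux/vmalloc.h later in this diff) is the escape hatch for callers that need base-page mappings. A minimal usage sketch with hypothetical names; it assumes that, unlike vzalloc(), the helper does not zero the area, so the caller clears it explicitly:

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/* Hypothetical example: allocate a table that hardware or firmware
	 * requires to be mapped with base (4KiB) pages, as with the
	 * Ultravisor limitation above. */
	static void *alloc_base_page_buffer(unsigned long len)
	{
		void *buf;

		/* Guarantees base-page mappings even on kernels where
		 * CONFIG_HAVE_ARCH_HUGE_VMALLOC would otherwise let
		 * vmalloc() back the area with huge pages. */
		buf = vmalloc_no_huge(len);
		if (!buf)
			return NULL;

		memset(buf, 0, len);	/* assumption: no implicit zeroing */
		return buf;		/* free with vfree() as usual */
	}
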
1 change: 1 addition & 0 deletions fs/nilfs2/sysfs.c
@@ -1053,6 +1053,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
 	nilfs_sysfs_delete_superblock_group(nilfs);
 	nilfs_sysfs_delete_segctor_group(nilfs);
 	kobject_del(&nilfs->ns_dev_kobj);
+	kobject_put(&nilfs->ns_dev_kobj);
 	kfree(nilfs->ns_dev_subgroups);
 }

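The leak fixed here follows the standard kobject lifetime rule: kobject_del() only unlinks the object from sysfs, while the backing memory is released by the kobject's ->release() callback once the last reference is dropped with kobject_put(). A generic teardown sketch (illustrative names, not nilfs code):

	#include <linux/kobject.h>

	/* Tearing down a kobject registered with kobject_init_and_add(),
	 * which leaves the refcount at 1. A matching kobject_put() is
	 * required; kobject_del() alone leaks the containing structure. */
	static void example_teardown(struct kobject *kobj)
	{
		kobject_del(kobj);	/* unlink from sysfs */
		kobject_put(kobj);	/* drop the initial ref; may free */
	}
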
16 changes: 0 additions & 16 deletions include/linux/hugetlb.h
@@ -741,17 +741,6 @@ static inline int hstate_index(struct hstate *h)
 	return h - hstates;
 }
 
-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
-	if (!PageCompound(page))
-		return page->index;
-
-	return __basepage_index(page);
-}
-
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
@@ -988,11 +977,6 @@ static inline int hstate_index(struct hstate *h)
 	return 0;
 }
 
-static inline pgoff_t basepage_index(struct page *page)
-{
-	return page->index;
-}
-
 static inline int dissolve_free_huge_page(struct page *page)
 {
 	return 0;

13 changes: 7 additions & 6 deletions include/linux/pagemap.h
@@ -516,7 +516,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
 }
 
 /*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
  * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
  */
 static inline pgoff_t page_to_index(struct page *page)
@@ -535,15 +535,16 @@ static inline pgoff_t page_to_index(struct page *page)
 	return pgoff;
 }
 
+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
 /*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
  */
 static inline pgoff_t page_to_pgoff(struct page *page)
 {
-	if (unlikely(PageHeadHuge(page)))
-		return page->index << compound_order(page);
-
+	if (unlikely(PageHuge(page)))
+		return hugetlb_basepage_index(page);
 	return page_to_index(page);
 }
 

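To make the unit mismatch concrete: an ordinary page cache page stores ->index in PAGE_SIZE units, while a hugetlb page stores it in huge-page units, which is why page_to_pgoff() must route hugetlb pages through hugetlb_basepage_index(). A worked sketch under assumed parameters (4KiB base pages, 2MiB huge pages, so compound_order == 9; illustrative, not kernel code):

	/* The arithmetic hugetlb_basepage_index() performs for a
	 * non-gigantic huge page, with hypothetical inputs. */
	static unsigned long example_basepage_index(unsigned long head_index,
						    unsigned long tail_offset)
	{
		/* head_index is in 2MiB units; scale it to 4KiB units,
		 * then add the tail page's slot inside the compound page. */
		return (head_index << 9) + tail_offset;
	}
	/* e.g. a huge page at file offset 6MiB has head_index == 3; its
	 * sixth base page (tail_offset == 5) is at pgoff (3 << 9) + 5 == 1541. */
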
1 change: 1 addition & 0 deletions include/linux/vmalloc.h
@@ -135,6 +135,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 			const void *caller);
 void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller);
+void *vmalloc_no_huge(unsigned long size);
 
 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);

3 changes: 1 addition & 2 deletions kernel/futex.c
@@ -35,7 +35,6 @@
 #include <linux/jhash.h>
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
-#include <linux/hugetlb.h>
 #include <linux/freezer.h>
 #include <linux/memblock.h>
 #include <linux/fault-inject.h>
@@ -650,7 +649,7 @@ static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 
 	key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 	key->shared.i_seq = get_inode_sequence_number(inode);
-	key->shared.pgoff = page_to_pgoff(tail);
+	key->shared.pgoff = page_to_pgoff(tail);
 	rcu_read_unlock();
 }

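Why the one-line change matters: an inode-based futex key must identify the exact 4KiB page holding the futex word, and the commit message says the old basepage_index() got this wrong for shmem huge pages. A worked example with assumed numbers (4KiB base pages, one 2MiB shmem huge page backing file offsets 2MiB..4MiB):

	/* A futex word at file offset 0x201000 lies in base page
	 * 0x201000 >> 12 == 513. The backing huge page's head has index 1
	 * in huge-page units, and the word sits in tail subpage 1, so
	 * page_to_pgoff() yields (1 << 9) + 1 == 513 -- the correct
	 * base-page offset, keeping futexes in different 4KiB subpages of
	 * one huge page on distinct keys. */
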
77 changes: 51 additions & 26 deletions kernel/kthread.c
@@ -1093,37 +1093,47 @@ void kthread_flush_work(struct kthread_work *work)
 EXPORT_SYMBOL_GPL(kthread_flush_work);
 
 /*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and could
+ * not manipulate the work list_head any longer.
  *
  * The function is called under worker->lock. The lock is temporary
  * released but the timer can't be set again in the meantime.
  */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+					      unsigned long *flags)
+{
+	struct kthread_delayed_work *dwork =
+		container_of(work, struct kthread_delayed_work, work);
+	struct kthread_worker *worker = work->worker;
+
+	/*
+	 * del_timer_sync() must be called to make sure that the timer
+	 * callback is not running. The lock must be temporary released
+	 * to avoid a deadlock with the callback. In the meantime,
+	 * any queuing is blocked by setting the canceling counter.
+	 */
+	work->canceling++;
+	raw_spin_unlock_irqrestore(&worker->lock, *flags);
+	del_timer_sync(&dwork->timer);
+	raw_spin_lock_irqsave(&worker->lock, *flags);
+	work->canceling--;
+}
+
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
+ *
+ * The work might still be in use when this function finishes. See the
+ * current_work proceed by the worker.
+ *
+ * Return: %true if @work was pending and successfully canceled,
+ *	%false if @work was not pending
+ */
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
-				  unsigned long *flags)
+static bool __kthread_cancel_work(struct kthread_work *work)
 {
-	/* Try to cancel the timer if exists. */
-	if (is_dwork) {
-		struct kthread_delayed_work *dwork =
-			container_of(work, struct kthread_delayed_work, work);
-		struct kthread_worker *worker = work->worker;
-
-		/*
-		 * del_timer_sync() must be called to make sure that the timer
-		 * callback is not running. The lock must be temporary released
-		 * to avoid a deadlock with the callback. In the meantime,
-		 * any queuing is blocked by setting the canceling counter.
-		 */
-		work->canceling++;
-		raw_spin_unlock_irqrestore(&worker->lock, *flags);
-		del_timer_sync(&dwork->timer);
-		raw_spin_lock_irqsave(&worker->lock, *flags);
-		work->canceling--;
-	}
-
 	/*
 	 * Try to remove the work from a worker list. It might either
 	 * be from worker->work_list or from worker->delayed_work_list.
@@ -1176,11 +1186,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
 	/* Work must not be used with >1 worker, see kthread_queue_work() */
 	WARN_ON_ONCE(work->worker != worker);
 
-	/* Do not fight with another command that is canceling this work. */
+	/*
+	 * Temporary cancel the work but do not fight with another command
+	 * that is canceling the work as well.
+	 *
+	 * It is a bit tricky because of possible races with another
+	 * mod_delayed_work() and cancel_delayed_work() callers.
+	 *
+	 * The timer must be canceled first because worker->lock is released
+	 * when doing so. But the work can be removed from the queue (list)
+	 * only when it can be queued again so that the return value can
+	 * be used for reference counting.
+	 */
+	kthread_cancel_delayed_work_timer(work, &flags);
 	if (work->canceling)
 		goto out;
+	ret = __kthread_cancel_work(work);
 
-	ret = __kthread_cancel_work(work, true, &flags);
 fast_queue:
 	__kthread_queue_delayed_work(worker, dwork, delay);
 out:
@@ -1202,7 +1224,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
 	/* Work must not be used with >1 worker, see kthread_queue_work(). */
 	WARN_ON_ONCE(work->worker != worker);
 
-	ret = __kthread_cancel_work(work, is_dwork, &flags);
+	if (is_dwork)
+		kthread_cancel_delayed_work_timer(work, &flags);
+
+	ret = __kthread_cancel_work(work);
 
 	if (worker->current_work != work)
 		goto out_fast;

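For context, a minimal usage sketch of the kthread-worker delayed-work API whose internals are reworked above (hypothetical module code; the function and variable names are invented for illustration). The deadlock fixed here could occur when kthread_mod_delayed_work() raced with kthread_cancel_delayed_work_sync() on the same work item:

	#include <linux/kthread.h>
	#include <linux/err.h>
	#include <linux/printk.h>

	static struct kthread_worker *example_worker;
	static struct kthread_delayed_work example_dwork;

	static void example_fn(struct kthread_work *work)
	{
		pr_info("delayed work ran\n");
	}

	static int example_start(void)
	{
		example_worker = kthread_create_worker(0, "example-worker");
		if (IS_ERR(example_worker))
			return PTR_ERR(example_worker);

		kthread_init_delayed_work(&example_dwork, example_fn);
		kthread_queue_delayed_work(example_worker, &example_dwork, HZ);

		/* Re-arm: must first cancel the pending timer, which is the
		 * path that could deadlock against a concurrent
		 * kthread_cancel_delayed_work_sync() before this fix. */
		kthread_mod_delayed_work(example_worker, &example_dwork, 2 * HZ);
		return 0;
	}

	static void example_stop(void)
	{
		kthread_cancel_delayed_work_sync(&example_dwork);
		kthread_destroy_worker(example_worker);
	}
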
5 changes: 1 addition & 4 deletions mm/hugetlb.c
@@ -1588,15 +1588,12 @@ struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
 	return NULL;
 }
 
-pgoff_t __basepage_index(struct page *page)
+pgoff_t hugetlb_basepage_index(struct page *page)
 {
 	struct page *page_head = compound_head(page);
 	pgoff_t index = page_index(page_head);
 	unsigned long compound_idx;
 
-	if (!PageHuge(page_head))
-		return page_index(page);
-
 	if (compound_order(page_head) >= MAX_ORDER)
 		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
 	else
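The branch on compound_order(page_head) >= MAX_ORDER exists because gigantic pages span multiple memory sections whose struct page arrays need not be virtually contiguous, so the tail's position is derived from pfns rather than struct page pointer arithmetic. A worked example with assumed numbers (illustrative only):

	/* A 1GiB gigantic page on x86-64 (order 18, >= MAX_ORDER) whose
	 * head sits at pfn 0x40000. For the tail page at pfn 0x40123:
	 *
	 *	compound_idx = 0x40123 - 0x40000 = 0x123
	 *
	 * which stays correct even if the struct pages of the two pfns
	 * live in different, non-contiguous parts of the memmap. */
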
(Diffs for the remaining 4 of the 14 changed files were not loaded.)
