Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "14 patches.

  Subsystems affected by this patch series: mm (hugetlb, kasan, gup,
  selftests, z3fold, kfence, memblock, and highmem), squashfs, ia64,
  gcov, and mailmap"

* emailed patches from Andrew Morton <[email protected]>:
  mailmap: update Andrey Konovalov's email address
  mm/highmem: fix CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
  mm: memblock: fix section mismatch warning again
  kfence: make compatible with kmemleak
  gcov: fix clang-11+ support
  ia64: fix format strings for err_inject
  ia64: mca: allocate early mca with GFP_ATOMIC
  squashfs: fix xattr id and id lookup sanity checks
  squashfs: fix inode lookup sanity checks
  z3fold: prevent reclaim/free race for headless pages
  selftests/vm: fix out-of-tree build
  mm/mmu_notifiers: ensure range_end() is paired with range_start()
  kasan: fix per-page tags for non-page_alloc pages
  hugetlb_cgroup: fix imbalanced css_get and css_put pair for shared mappings
torvalds committed Mar 25, 2021
2 parents 2ba9bea + d3e2ff2 commit 0023224
Showing 20 changed files with 230 additions and 42 deletions.
1 change: 1 addition & 0 deletions .mailmap
@@ -36,6 +36,7 @@ Andrew Morton <[email protected]>
 Andrew Murray <[email protected]> <[email protected]>
 Andrew Murray <[email protected]> <[email protected]>
 Andrew Vasquez <[email protected]>
+Andrey Konovalov <[email protected]> <[email protected]>
 Andrey Ryabinin <[email protected]> <[email protected]>
 Andrey Ryabinin <[email protected]> <[email protected]>
 Andy Adamson <[email protected]>
22 changes: 11 additions & 11 deletions arch/ia64/kernel/err_inject.c
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \
 		char *buf) \
 { \
 	u32 cpu=dev->id; \
-	return sprintf(buf, "%lx\n", name[cpu]); \
+	return sprintf(buf, "%llx\n", name[cpu]); \
 }

 #define store(name) \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,

 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+	printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+	printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+	printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
 			err_data_buffer[cpu].data1,
 			err_data_buffer[cpu].data2,
 			err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,

 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+	printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
 	return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	unsigned int cpu=dev->id;
-	return sprintf(buf, "%lx\n", phys_addr[cpu]);
+	return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }

 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
 	ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
 	if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-		printk("Virtual address %lx is not existing.\n",virt_addr);
+		printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
 		return -EINVAL;
 	}
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
 	unsigned int cpu=dev->id;

-	return sprintf(buf, "%lx, %lx, %lx\n",
+	return sprintf(buf, "%llx, %llx, %llx\n",
 			err_data_buffer[cpu].data1,
 			err_data_buffer[cpu].data2,
 			err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
 	int ret;

 #ifdef ERR_INJ_DEBUG
-	printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+	printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
 		 err_data_buffer[cpu].data1,
 		 err_data_buffer[cpu].data2,
 		 err_data_buffer[cpu].data3,
 		 cpu);
 #endif
-	ret=sscanf(buf, "%lx, %lx, %lx",
+	ret = sscanf(buf, "%llx, %llx, %llx",
 		&err_data_buffer[cpu].data1,
 		&err_data_buffer[cpu].data2,
 		&err_data_buffer[cpu].data3);
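
Context for the format-string changes above: the fields being printed (err_type_info, err_struct_info, phys_addr, the err_data_buffer members) are u64 values, and the kernel defines u64 as unsigned long long, so %llx is the matching conversion specifier; %lx expects unsigned long and trips -Wformat. A minimal userspace illustration of the same rule (not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long long v = 0xdeadbeefULL;   /* same width class as kernel u64 */

        printf("%llx\n", v);    /* %llx matches unsigned long long: no -Wformat warning */
        return 0;
}
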
2 changes: 1 addition & 1 deletion arch/ia64/kernel/mca.c
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
 			data = mca_bootmem();
 			first_time = 0;
 		} else
-			data = (void *)__get_free_pages(GFP_KERNEL,
+			data = (void *)__get_free_pages(GFP_ATOMIC,
 							get_order(sz));
 		if (!data)
 			panic("Could not allocate MCA memory for cpu %d\n",
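
Context: ia64_mca_cpu_init() can be reached in atomic context during CPU bring-up, where a GFP_KERNEL allocation may sleep and triggers the "sleeping function called from invalid context" check; GFP_ATOMIC keeps this fallback allocation non-blocking.
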
8 changes: 6 additions & 2 deletions fs/squashfs/export.c
@@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
 		start = le64_to_cpu(table[n]);
 		end = le64_to_cpu(table[n + 1]);

-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+		if (start >= end
+		    || (end - start) >
+		    (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 			kfree(table);
 			return ERR_PTR(-EINVAL);
 		}
 	}

 	start = le64_to_cpu(table[indexes - 1]);
-	if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
+	if (start >= lookup_table_start ||
+	    (lookup_table_start - start) >
+	    (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
6 changes: 4 additions & 2 deletions fs/squashfs/id.c
@@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
 		start = le64_to_cpu(table[n]);
 		end = le64_to_cpu(table[n + 1]);

-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+		if (start >= end || (end - start) >
+				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 			kfree(table);
 			return ERR_PTR(-EINVAL);
 		}
 	}

 	start = le64_to_cpu(table[indexes - 1]);
-	if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
+	if (start >= id_table_start || (id_table_start - start) >
+			(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
1 change: 1 addition & 0 deletions fs/squashfs/squashfs_fs.h
@@ -17,6 +17,7 @@

 /* size of metadata (inode and directory) blocks */
 #define SQUASHFS_METADATA_SIZE		8192
+#define SQUASHFS_BLOCK_OFFSET		2

 /* default size of block device I/O */
 #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
6 changes: 4 additions & 2 deletions fs/squashfs/xattr_id.c
@@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
 		start = le64_to_cpu(table[n]);
 		end = le64_to_cpu(table[n + 1]);

-		if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+		if (start >= end || (end - start) >
+				(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 			kfree(table);
 			return ERR_PTR(-EINVAL);
 		}
 	}

 	start = le64_to_cpu(table[indexes - 1]);
-	if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+	if (start >= table_start || (table_start - start) >
+			(SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
 		kfree(table);
 		return ERR_PTR(-EINVAL);
 	}
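
Background on the three squashfs checks above: each metadata block on disk is prefixed with a two-byte length header, so two consecutive table entries can legitimately be up to SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET bytes apart; the previous bound omitted the header and could reject valid filesystems. A standalone sketch of the corrected bound (hypothetical helper, not the kernel function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SQUASHFS_METADATA_SIZE  8192    /* max metadata payload */
#define SQUASHFS_BLOCK_OFFSET   2       /* two-byte on-disk length header */

/* Consecutive table entries must be strictly increasing and no further
 * apart than one metadata block plus its length header. */
static bool metadata_span_ok(uint64_t start, uint64_t end)
{
        return start < end &&
               end - start <= SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET;
}

int main(void)
{
        printf("%d\n", metadata_span_ok(0, 8194));      /* 1: full block + header */
        printf("%d\n", metadata_span_ok(0, 8195));      /* 0: one byte too far */
        return 0;
}
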
15 changes: 13 additions & 2 deletions include/linux/hugetlb_cgroup.h
@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
 	return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
 }

+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+	css_put(&h_cg->css);
+}
+
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 					struct hugetlb_cgroup **ptr);
 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,

 extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 						struct file_region *rg,
-						unsigned long nr_pages);
+						unsigned long nr_pages,
+						bool region_del);

 extern void hugetlb_cgroup_file_init(void) __init;
 extern void hugetlb_cgroup_migrate(struct page *oldhpage,
@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
 #else
 static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
 						       struct file_region *rg,
-						       unsigned long nr_pages)
+						       unsigned long nr_pages,
+						       bool region_del)
 {
 }

@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
 	return true;
 }

+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 					       struct hugetlb_cgroup **ptr)
 {
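
Context: the new region_del argument lets hugetlb_cgroup_uncharge_file_region() distinguish plain uncharging from actual removal of a file region, so the css reference held for a shared-mapping reservation can be dropped only when the region really goes away, and hugetlb_cgroup_put_rsvd_cgroup() exposes the matching css_put() to callers; this is what rebalances the css_get()/css_put() pairs named in the patch title.
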
4 changes: 2 additions & 2 deletions include/linux/memblock.h
@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline __init void memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
 {
 	memblock.bottom_up = enable;
 }
@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
  * if this is true, that said, memblock will allocate memory
  * in bottom-up direction.
  */
-static inline __init bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
 {
 	return memblock.bottom_up;
 }
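
Context: both helpers dereference the global memblock structure, which is placed in init memory unless the architecture selects CONFIG_ARCH_KEEP_MEMBLOCK, and the __init_memblock annotation tracks that placement; marking them plain __init is what produced the modpost section mismatch warning. A sketch of the relevant definitions (abbreviated; see include/linux/memblock.h for the authoritative version):

/* sketch of the annotation definitions in include/linux/memblock.h */
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock
#define __initdata_memblock
#else
#define __init_memblock __init
#define __initdata_memblock __initdata
#endif
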
18 changes: 15 additions & 3 deletions include/linux/mm.h
@@ -1461,16 +1461,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)

 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This allows to avoid
+ * setting tags for all pages to native kernel tag value 0xff, as the default
+ * value 0x00 maps to 0xff.
+ */
+
 static inline u8 page_kasan_tag(const struct page *page)
 {
-	if (kasan_enabled())
-		return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
-	return 0xff;
+	u8 tag = 0xff;
+
+	if (kasan_enabled()) {
+		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+		tag ^= 0xff;
+	}
+
+	return tag;
 }

 static inline void page_kasan_tag_set(struct page *page, u8 tag)
 {
 	if (kasan_enabled()) {
+		tag ^= 0xff;
 		page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
 		page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
 	}
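
The new comment above describes the xor-with-0xff scheme. A minimal userspace sketch of the round trip (the constants are illustrative assumptions; the real KASAN_TAG_PGSHIFT comes from the page-flags layout):

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's KASAN tag field. */
#define KASAN_TAG_MASK          0xffULL
#define KASAN_TAG_PGSHIFT       48

static uint8_t demo_page_kasan_tag(uint64_t flags)
{
        return ((flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK) ^ 0xff;
}

static uint64_t demo_page_kasan_tag_set(uint64_t flags, uint8_t tag)
{
        tag ^= 0xff;
        flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
        flags |= ((uint64_t)tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
        return flags;
}

int main(void)
{
        uint64_t flags = 0;     /* freshly allocated page->flags: tag bits are 0 */

        /* Zeroed storage reads back as the native kernel tag 0xff ... */
        printf("default tag: 0x%x\n", demo_page_kasan_tag(flags));
        /* ... and any explicitly set tag round-trips unchanged. */
        flags = demo_page_kasan_tag_set(flags, 0xab);
        printf("round trip:  0x%x\n", demo_page_kasan_tag(flags));
        return 0;
}
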
10 changes: 5 additions & 5 deletions include/linux/mmu_notifier.h
@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
 	 * the last refcount is dropped.
 	 *
 	 * If blockable argument is set to false then the callback cannot
-	 * sleep and has to return with -EAGAIN. 0 should be returned
-	 * otherwise. Please note that if invalidate_range_start approves
-	 * a non-blocking behavior then the same applies to
-	 * invalidate_range_end.
-	 *
+	 * sleep and has to return with -EAGAIN if sleeping would be required.
+	 * 0 should be returned otherwise. Please note that notifiers that can
+	 * fail invalidate_range_start are not allowed to implement
+	 * invalidate_range_end, as there is no mechanism for informing the
+	 * notifier that its start failed.
 	 */
 	int (*invalidate_range_start)(struct mmu_notifier *subscription,
 				      const struct mmu_notifier_range *range);
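
To illustrate the contract in the reworded comment, a minimal sketch of a notifier that honors non-blockable ranges (demo_lock and the surrounding driver are assumptions, not an existing user); it implements only invalidate_range_start, since a notifier whose start can fail must not provide invalidate_range_end:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);         /* hypothetical driver-wide lock */

static int demo_invalidate_range_start(struct mmu_notifier *subscription,
                                       const struct mmu_notifier_range *range)
{
        if (mmu_notifier_range_blockable(range)) {
                mutex_lock(&demo_lock);
        } else if (!mutex_trylock(&demo_lock)) {
                /* Cannot sleep: fail with -EAGAIN; the caller will not
                 * invoke invalidate_range_end for this range. */
                return -EAGAIN;
        }
        /* ... tear down driver mappings in [range->start, range->end) ... */
        mutex_unlock(&demo_lock);
        return 0;
}

static const struct mmu_notifier_ops demo_mmu_notifier_ops = {
        .invalidate_range_start = demo_invalidate_range_start,
        /* deliberately no .invalidate_range_end */
};
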
69 changes: 69 additions & 0 deletions kernel/gcov/clang.c
@@ -75,7 +75,9 @@ struct gcov_fn_info {

 	u32 num_counters;
 	u64 *counters;
+#if CONFIG_CLANG_VERSION < 110000
 	const char *function_name;
+#endif
 };

 static struct gcov_info *current_info;
@@ -105,6 +107,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
 }
 EXPORT_SYMBOL(llvm_gcov_init);

+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_start_file(const char *orig_filename, const char version[4],
 		u32 checksum)
 {
@@ -113,7 +116,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
 	current_info->checksum = checksum;
 }
 EXPORT_SYMBOL(llvm_gcda_start_file);
+#else
+void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
+{
+	current_info->filename = orig_filename;
+	current_info->version = version;
+	current_info->checksum = checksum;
+}
+EXPORT_SYMBOL(llvm_gcda_start_file);
+#endif

+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_emit_function(u32 ident, const char *function_name,
 		u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
 {
@@ -133,6 +146,24 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
 	list_add_tail(&info->head, &current_info->functions);
 }
 EXPORT_SYMBOL(llvm_gcda_emit_function);
+#else
+void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
+		u8 use_extra_checksum, u32 cfg_checksum)
+{
+	struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+	if (!info)
+		return;
+
+	INIT_LIST_HEAD(&info->head);
+	info->ident = ident;
+	info->checksum = func_checksum;
+	info->use_extra_checksum = use_extra_checksum;
+	info->cfg_checksum = cfg_checksum;
+	list_add_tail(&info->head, &current_info->functions);
+}
+EXPORT_SYMBOL(llvm_gcda_emit_function);
+#endif

 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
 {
@@ -295,6 +326,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
 	}
 }

+#if CONFIG_CLANG_VERSION < 110000
 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 {
 	size_t cv_size; /* counter values size */
@@ -322,6 +354,28 @@ static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 	kfree(fn_dup);
 	return NULL;
 }
+#else
+static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+{
+	size_t cv_size; /* counter values size */
+	struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
+			GFP_KERNEL);
+	if (!fn_dup)
+		return NULL;
+	INIT_LIST_HEAD(&fn_dup->head);
+
+	cv_size = fn->num_counters * sizeof(fn->counters[0]);
+	fn_dup->counters = vmalloc(cv_size);
+	if (!fn_dup->counters) {
+		kfree(fn_dup);
+		return NULL;
+	}
+
+	memcpy(fn_dup->counters, fn->counters, cv_size);
+
+	return fn_dup;
+}
+#endif

 /**
  * gcov_info_dup - duplicate profiling data set
@@ -362,6 +416,7 @@ struct gcov_info *gcov_info_dup(struct gcov_info *info)
  * gcov_info_free - release memory for profiling data set duplicate
  * @info: profiling data set duplicate to free
  */
+#if CONFIG_CLANG_VERSION < 110000
 void gcov_info_free(struct gcov_info *info)
 {
 	struct gcov_fn_info *fn, *tmp;
@@ -375,6 +430,20 @@ void gcov_info_free(struct gcov_info *info)
 	kfree(info->filename);
 	kfree(info);
 }
+#else
+void gcov_info_free(struct gcov_info *info)
+{
+	struct gcov_fn_info *fn, *tmp;
+
+	list_for_each_entry_safe(fn, tmp, &info->functions, head) {
+		vfree(fn->counters);
+		list_del(&fn->head);
+		kfree(fn);
+	}
+	kfree(info->filename);
+	kfree(info);
+}
+#endif

 #define ITER_STRIDE	PAGE_SIZE

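
Context: Clang 11 changed the profiling runtime hooks that kernel gcov mimics: llvm_gcda_start_file() now takes the version as a u32 rather than a char[4], and llvm_gcda_emit_function() no longer receives a function name, so clang.c keeps both variants behind CONFIG_CLANG_VERSION guards; the duplicated gcov_fn_info_dup() and gcov_info_free() variants appear to differ only in whether the function_name string must be copied and freed.
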
