Skip to content

Commit

Permalink
hugetlb: do not allow pagesize >= MAX_ORDER pool adjustment
Browse files Browse the repository at this point in the history
Huge pages with order >= MAX_ORDER must be allocated at boot via the
kernel command line, they cannot be allocated or freed once the kernel is
up and running.  Currently we allow values to be written to the sysfs and
sysctl files controlling pool size for these huge page sizes.  This patch
makes the store functions for nr_hugepages and nr_overcommit_hugepages
return -EINVAL when the pool for a page size >= MAX_ORDER is changed.

[[email protected]: avoid multiple return paths in nr_hugepages_store_common()]
[[email protected]: add checking in hugetlb_overcommit_handler()]
Signed-off-by: Eric B Munson <[email protected]>
Reported-by: CAI Qian <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Nishanth Aravamudan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
khers authored and torvalds committed Jan 14, 2011
1 parent 08d4a24 commit adbe872
Showing 1 changed file with 21 additions and 2 deletions.
23 changes: 21 additions & 2 deletions mm/hugetlb.c
Original file line number Diff line number Diff line change
Expand Up @@ -1363,6 +1363,7 @@ static ssize_t nr_hugepages_show_common(struct kobject *kobj,

return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
Expand All @@ -1375,11 +1376,16 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,

err = strict_strtoul(buf, 10, &count);
if (err) {
NODEMASK_FREE(nodes_allowed);
return 0;
err = 0; /* This seems wrong */
goto out;
}

h = kobj_to_hstate(kobj, &nid);
if (h->order >= MAX_ORDER) {
err = -EINVAL;
goto out;
}

if (nid == NUMA_NO_NODE) {
/*
* global hstate attribute
Expand All @@ -1405,6 +1411,9 @@ static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
NODEMASK_FREE(nodes_allowed);

return len;
out:
NODEMASK_FREE(nodes_allowed);
return err;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
Expand Down Expand Up @@ -1447,13 +1456,17 @@ static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
struct hstate *h = kobj_to_hstate(kobj, NULL);

if (h->order >= MAX_ORDER)
return -EINVAL;

err = strict_strtoul(buf, 10, &input);
if (err)
return 0;
Expand Down Expand Up @@ -1864,6 +1877,9 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!write)
tmp = h->max_huge_pages;

if (write && h->order >= MAX_ORDER)
return -EINVAL;

table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
Expand Down Expand Up @@ -1927,6 +1943,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
if (!write)
tmp = h->nr_overcommit_huge_pages;

if (write && h->order >= MAX_ORDER)
return -EINVAL;

table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
Expand Down

0 comments on commit adbe872

Please sign in to comment.