slub: make ->object_size unsigned int
Linux doesn't support negative length objects.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alexey Dobriyan <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
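
Object sizes are never negative, so storing ->object_size in a signed int only narrows its usable range and forces sign conversions wherever the field meets unsigned lengths. A minimal userspace sketch of the point (illustrative only; the struct below is made up and is not the kernel's struct kmem_cache):

#include <stdio.h>

struct fake_cache {
	unsigned int object_size;	/* was: int object_size; */
};

int main(void)
{
	struct fake_cache s = { .object_size = 192 };
	size_t request = 256;

	/*
	 * With a signed field this comparison mixes signedness and
	 * typically draws a -Wsign-compare warning; unsigned against
	 * unsigned converts cleanly, and %u is the matching format.
	 */
	if (s.object_size < request)
		printf("object size %u < request %zu\n",
		       s.object_size, request);
	return 0;
}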
Alexey Dobriyan authored and torvalds committed Apr 6, 2018
1 parent a5035de commit 1b473f2
Showing 3 changed files with 6 additions and 6 deletions.
include/linux/slub_def.h (1 addition, 1 deletion)
@@ -85,7 +85,7 @@ struct kmem_cache {
slab_flags_t flags;
unsigned long min_partial;
int size; /* The size of an object including meta data */
- int object_size; /* The size of an object without meta data */
+ unsigned int object_size;/* The size of an object without meta data */
unsigned int offset; /* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
/* Number of per cpu partial objects to keep around */
mm/slab_common.c (1 addition, 1 deletion)
@@ -103,7 +103,7 @@ static int kmem_cache_sanity_check(const char *name, unsigned int size)
*/
res = probe_kernel_address(s->name, tmp);
if (res) {
- pr_err("Slab cache with size %d has lost its name\n",
+ pr_err("Slab cache with size %u has lost its name\n",
s->object_size);
continue;
}
mm/slub.c (4 additions, 4 deletions)
@@ -681,7 +681,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

print_section(KERN_ERR, "Object ", p,
- min_t(unsigned long, s->object_size, PAGE_SIZE));
+ min_t(unsigned int, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
@@ -2399,7 +2399,7 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)

pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
nid, gfpflags, &gfpflags);
- pr_warn(" cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
+ pr_warn(" cache: %s, object size: %u, buffer size: %d, default order: %d, min order: %d\n",
s->name, s->object_size, s->size, oo_order(s->oo),
oo_order(s->min));

@@ -4255,7 +4255,7 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
*/
- s->object_size = max(s->object_size, (int)size);
+ s->object_size = max(s->object_size, size);
s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));

for_each_memcg_cache(c, s) {
@@ -4901,7 +4901,7 @@ SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
- return sprintf(buf, "%d\n", s->object_size);
+ return sprintf(buf, "%u\n", s->object_size);
}
SLAB_ATTR_RO(object_size);

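The mm/slub.c hunks follow mechanically from the new type: print_trailer() clamps against PAGE_SIZE as unsigned int, the pr_warn() and sprintf() format specifiers move from %d to %u, and __kmem_cache_alias() drops its (int) cast because the kernel's max() is strict about both operands having the same type. A rough userspace approximation of that type-checking idiom (an illustrative sketch, not the kernel's actual macro):

#include <stdio.h>

/*
 * Approximation of a type-checked max(): comparing the addresses of the
 * two temporaries makes the compiler warn about distinct pointer types
 * when the operand types differ, which is roughly why
 * max(s->object_size, (int)size) needed a cast while object_size was
 * still a signed int.
 */
#define max_checked(x, y) ({			\
	typeof(x) _x = (x);			\
	typeof(y) _y = (y);			\
	(void)(&_x == &_y);			\
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned int object_size = 64, size = 96;

	/* Both operands are unsigned int now, so no cast and no warning. */
	printf("%u\n", max_checked(object_size, size));
	return 0;
}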