zsmalloc: remove obj_tagged()
obj_tagged() is not needed at this point, because objects can only have
one tag: OBJ_ALLOCATED_TAG.  We needed obj_tagged() for the zsmalloc LRU
implementation, which has now been removed.  Simplify zsmalloc code and
revert to the previous implementation that was in place before the
zsmalloc LRU series.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Sergey Senozhatsky <[email protected]>
Acked-by: Nhat Pham <[email protected]>
Cc: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
sergey-senozhatsky authored and akpm00 committed Aug 18, 2023
1 parent 99aa772 commit f9044f1
Showing 1 changed file with 7 additions and 22 deletions.
29 changes: 7 additions & 22 deletions mm/zsmalloc.c
@@ -795,8 +795,8 @@ static unsigned long handle_to_obj(unsigned long handle)
 	return *(unsigned long *)handle;
 }
 
-static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
-		int tag)
+static inline bool obj_allocated(struct page *page, void *obj,
+				 unsigned long *phandle)
 {
 	unsigned long handle;
 	struct zspage *zspage = get_zspage(page);
@@ -807,19 +807,14 @@ static bool obj_tagged(struct page *page, void *obj, unsigned long *phandle,
 	} else
 		handle = *(unsigned long *)obj;
 
-	if (!(handle & tag))
+	if (!(handle & OBJ_ALLOCATED_TAG))
 		return false;
 
 	/* Clear all tags before returning the handle */
 	*phandle = handle & ~OBJ_TAG_MASK;
 	return true;
 }
 
-static inline bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
-{
-	return obj_tagged(page, obj, phandle, OBJ_ALLOCATED_TAG);
-}
-
 static void reset_page(struct page *page)
 {
 	__ClearPageMovable(page);
@@ -1551,11 +1546,11 @@ static void zs_object_copy(struct size_class *class, unsigned long dst,
 }
 
 /*
- * Find object with a certain tag in zspage from index object and
+ * Find alloced object in zspage from index object and
  * return handle.
  */
-static unsigned long find_tagged_obj(struct size_class *class,
-				     struct page *page, int *obj_idx, int tag)
+static unsigned long find_alloced_obj(struct size_class *class,
+				      struct page *page, int *obj_idx)
 {
 	unsigned int offset;
 	int index = *obj_idx;
@@ -1566,7 +1561,7 @@ static unsigned long find_tagged_obj(struct size_class *class,
 	offset += class->size * index;
 
 	while (offset < PAGE_SIZE) {
-		if (obj_tagged(page, addr + offset, &handle, tag))
+		if (obj_allocated(page, addr + offset, &handle))
 			break;
 
 		offset += class->size;
@@ -1580,16 +1575,6 @@ static unsigned long find_tagged_obj(struct size_class *class,
 	return handle;
 }
 
-/*
- * Find alloced object in zspage from index object and
- * return handle.
- */
-static unsigned long find_alloced_obj(struct size_class *class,
-				      struct page *page, int *obj_idx)
-{
-	return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG);
-}
-
 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
 			   struct zspage *dst_zspage)
 {
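Note on the simplified check: with obj_tagged() gone, obj_allocated() reduces to testing one tag bit in the stored handle word and masking the tag bits off before returning the handle. The stand-alone sketch below illustrates that pattern outside the kernel; the bit-layout constants and the sketch_obj_allocated() helper are illustrative assumptions for this example, not the definitions in mm/zsmalloc.c.

/*
 * Minimal user-space sketch of the check that obj_allocated() now performs
 * inline.  Assumption for illustration only: a single low-order tag bit;
 * the real OBJ_* values are defined in mm/zsmalloc.c.
 */
#include <stdbool.h>
#include <stdio.h>

#define OBJ_TAG_BITS		1UL
#define OBJ_ALLOCATED_TAG	1UL
#define OBJ_TAG_MASK		((1UL << OBJ_TAG_BITS) - 1)

/* Return true if the stored word carries the allocated tag; strip all tags. */
static bool sketch_obj_allocated(unsigned long stored, unsigned long *phandle)
{
	if (!(stored & OBJ_ALLOCATED_TAG))
		return false;

	*phandle = stored & ~OBJ_TAG_MASK;
	return true;
}

int main(void)
{
	unsigned long handle;
	unsigned long allocated = 0xdeadbee0UL | OBJ_ALLOCATED_TAG;
	unsigned long free_slot = 0xdeadbee0UL;

	if (sketch_obj_allocated(allocated, &handle))
		printf("allocated, handle=%#lx\n", handle);

	if (!sketch_obj_allocated(free_slot, &handle))
		printf("free slot, no handle stored\n");

	return 0;
}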
