Skip to content

Commit

Permalink
[PATCH] swsusp: rework memory freeing on resume
Browse files Browse the repository at this point in the history
The following patch makes swsusp use the PG_nosave and PG_nosave_free flags to
mark pages that should be freed in case of an error during resume.

This allows us to simplify the code and to use swsusp_free() in all of the
swsusp's resume error paths, which makes them actually work.

Signed-off-by: Rafael J. Wysocki <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
rjwysocki authored and Linus Torvalds committed Oct 31, 2005
1 parent a0f4965 commit 2c1b4a5
Show file tree
Hide file tree
Showing 6 changed files with 65 additions and 150 deletions.
84 changes: 19 additions & 65 deletions arch/x86_64/kernel/suspend.c
Original file line number Diff line number Diff line change
Expand Up @@ -147,57 +147,7 @@ extern int restore_image(void);

pgd_t *temp_level4_pgt;

static void **pages;

/*
 * Allocate one usable page and push it onto the global `pages`
 * intrusive stack: the page's first word stores the previous stack
 * head, so no extra bookkeeping memory is needed.
 * Returns the page address, or NULL on allocation failure.
 */
static inline void *__add_page(void)
{
void **c;

c = (void **)get_usable_page(GFP_ATOMIC);
if (c) {
*c = pages; /* link previous head into this page's first word */
pages = c; /* the new page becomes the stack head */
}
return c;
}

/*
 * Pop one page off the global `pages` intrusive stack.
 * Returns the popped page with its link word cleared, or NULL when
 * the stack is empty.
 */
static inline void *__next_page(void)
{
void **c;

c = pages;
if (c) {
pages = *c; /* advance head to the next chained page */
*c = NULL; /* clear the link word before handing the page out */
}
return c;
}

/*
* Try to allocate as many usable pages as needed and daisy chain them.
* If one allocation fails, free the pages allocated so far
*/
/*
 * Pre-allocate n usable pages onto the `pages` daisy chain.
 * Returns 0 on success, or -ENOMEM after unwinding every page
 * allocated so far.  Assumes n > 0 on entry (the do/while decrements
 * before testing).
 */
static int alloc_usable_pages(unsigned long n)
{
void *p;

pages = NULL;
do
if (!__add_page())
break; /* allocation failed mid-way; n is still non-zero */
while (--n);
if (n) {
/* Unwind: pop and free everything acquired before the failure */
p = __next_page();
while (p) {
free_page((unsigned long)p);
p = __next_page();
}
return -ENOMEM;
}
return 0; /* success: exactly the requested pages are chained */
}

static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
long i, j;

Expand All @@ -211,7 +161,9 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
if (paddr >= end)
break;

pmd = (pmd_t *)__next_page();
pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
unsigned long pe;
Expand All @@ -223,13 +175,17 @@ static void res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long e
set_pmd(pmd, __pmd(pe));
}
}
return 0;
}

static void set_up_temporary_mappings(void)
static int set_up_temporary_mappings(void)
{
unsigned long start, end, next;
int error;

temp_level4_pgt = (pgd_t *)__next_page();
temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
if (!temp_level4_pgt)
return -ENOMEM;

/* It is safe to reuse the original kernel mapping */
set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
Expand All @@ -240,29 +196,27 @@ static void set_up_temporary_mappings(void)
end = (unsigned long)pfn_to_kaddr(end_pfn);

for (; start < end; start = next) {
pud_t *pud = (pud_t *)__next_page();
pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
next = start + PGDIR_SIZE;
if (next > end)
next = end;
res_phys_pud_init(pud, __pa(start), __pa(next));
if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
return error;
set_pgd(temp_level4_pgt + pgd_index(start),
mk_kernel_pgd(__pa(pud)));
}
return 0;
}

int swsusp_arch_resume(void)
{
unsigned long n;
int error;

n = ((end_pfn << PAGE_SHIFT) + PUD_SIZE - 1) >> PUD_SHIFT;
n += (n + PTRS_PER_PUD - 1) / PTRS_PER_PUD + 1;
pr_debug("swsusp_arch_resume(): pages needed = %lu\n", n);
if (alloc_usable_pages(n)) {
free_eaten_memory();
return -ENOMEM;
}
/* We have got enough memory and from now on we cannot recover */
set_up_temporary_mappings();
if ((error = set_up_temporary_mappings()))
return error;
restore_image();
return 0;
}
Expand Down
3 changes: 1 addition & 2 deletions include/linux/suspend.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,7 @@ void restore_processor_state(void);
struct saved_context;
void __save_processor_state(struct saved_context *ctxt);
void __restore_processor_state(struct saved_context *ctxt);
extern unsigned long get_usable_page(gfp_t gfp_mask);
extern void free_eaten_memory(void);
unsigned long get_safe_page(gfp_t gfp_mask);

/*
* XXX: We try to keep some more pages free so that I/O operations succeed
Expand Down
14 changes: 7 additions & 7 deletions kernel/power/disk.c
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ extern int swsusp_check(void);
extern int swsusp_read(void);
extern void swsusp_close(void);
extern int swsusp_resume(void);
extern int swsusp_free(void);


static int noresume = 0;
Expand Down Expand Up @@ -252,14 +251,17 @@ static int software_resume(void)

pr_debug("PM: Reading swsusp image.\n");

if ((error = swsusp_read()))
goto Cleanup;
if ((error = swsusp_read())) {
swsusp_free();
goto Thaw;
}

pr_debug("PM: Preparing devices for restore.\n");

if ((error = device_suspend(PMSG_FREEZE))) {
printk("Some devices failed to suspend\n");
goto Free;
swsusp_free();
goto Thaw;
}

mb();
Expand All @@ -268,9 +270,7 @@ static int software_resume(void)
swsusp_resume();
pr_debug("PM: Restore failed, recovering.\n");
device_resume();
Free:
swsusp_free();
Cleanup:
Thaw:
unprepare_processes();
Done:
/* For success case, the suspend path will release the lock */
Expand Down
2 changes: 1 addition & 1 deletion kernel/power/power.h
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@ extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int restore_highmem(void);
extern void free_pagedir(struct pbe *pblist);
extern struct pbe * alloc_pagedir(unsigned nr_pages);
extern void create_pbe_list(struct pbe *pblist, unsigned nr_pages);
extern void swsusp_free(void);
extern int enough_swap(unsigned nr_pages);
2 changes: 1 addition & 1 deletion kernel/power/snapshot.c
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ static void copy_data_pages(struct pbe *pblist)
* free_pagedir - free pages allocated with alloc_pagedir()
*/

void free_pagedir(struct pbe *pblist)
static void free_pagedir(struct pbe *pblist)
{
struct pbe *pbe;

Expand Down
110 changes: 36 additions & 74 deletions kernel/power/swsusp.c
Original file line number Diff line number Diff line change
Expand Up @@ -629,6 +629,11 @@ int swsusp_resume(void)
* execution continues at place where swsusp_arch_suspend was called
*/
BUG_ON(!error);
/* The only reason why swsusp_arch_resume() can fail is memory being
* very tight, so we have to free it as soon as we can to avoid
* subsequent failures
*/
swsusp_free();
restore_processor_state();
restore_highmem();
touch_softlockup_watchdog();
Expand All @@ -644,54 +649,28 @@ int swsusp_resume(void)
*
* We don't know which pages are usable until we allocate them.
*
* Allocated but unusable (ie eaten) memory pages are linked together
* to create a list, so that we can free them easily
*
* We could have used a type other than (void *)
* for this purpose, but ...
* Allocated but unusable (ie eaten) memory pages are marked so that
* swsusp_free() can release them
*/
static void **eaten_memory = NULL;

/*
 * Record an unusable ("eaten") page by pushing it onto the
 * eaten_memory intrusive list; the page's own first word stores the
 * previous list head so the list costs no extra memory.
 */
static inline void eat_page(void *page)
{
void **c;

c = eaten_memory;
eaten_memory = page; /* the eaten page becomes the new list head */
*eaten_memory = c; /* store the old head inside the page itself */
}

unsigned long get_usable_page(gfp_t gfp_mask)
unsigned long get_safe_page(gfp_t gfp_mask)
{
unsigned long m;

m = get_zeroed_page(gfp_mask);
while (!PageNosaveFree(virt_to_page(m))) {
eat_page((void *)m);
do {
m = get_zeroed_page(gfp_mask);
if (!m)
break;
if (m && PageNosaveFree(virt_to_page(m)))
/* This is for swsusp_free() */
SetPageNosave(virt_to_page(m));
} while (m && PageNosaveFree(virt_to_page(m)));
if (m) {
/* This is for swsusp_free() */
SetPageNosave(virt_to_page(m));
SetPageNosaveFree(virt_to_page(m));
}
return m;
}

/*
 * Release every page recorded by eat_page(): walk the eaten_memory
 * intrusive list, free each page, reset the list head, and log how
 * many pages were returned to the allocator.
 */
void free_eaten_memory(void)
{
unsigned long m;
void **c;
int i = 0;

c = eaten_memory;
while (c) {
m = (unsigned long)c;
c = *c; /* follow the link before freeing the page that holds it */
free_page(m);
i++;
}
eaten_memory = NULL;
pr_debug("swsusp: %d unused pages freed\n", i);
}

/**
* check_pagedir - We ensure here that pages that the PBEs point to
* won't collide with pages where we're going to restore from the loaded
Expand All @@ -709,7 +688,7 @@ static int check_pagedir(struct pbe *pblist)
p->address = 0UL;

for_each_pbe (p, pblist) {
p->address = get_usable_page(GFP_ATOMIC);
p->address = get_safe_page(GFP_ATOMIC);
if (!p->address)
return -ENOMEM;
}
Expand All @@ -728,49 +707,45 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
unsigned long zone_pfn;
struct pbe *pbpage, *tail, *p;
void *m;
int rel = 0, error = 0;
int rel = 0;

if (!pblist) /* a sanity check */
return NULL;

pr_debug("swsusp: Relocating pagedir (%lu pages to check)\n",
swsusp_info.pagedir_pages);

/* Set page flags */
/* Clear page flags */

for_each_zone (zone) {
for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
SetPageNosaveFree(pfn_to_page(zone_pfn +
if (pfn_valid(zone_pfn + zone->zone_start_pfn))
ClearPageNosaveFree(pfn_to_page(zone_pfn +
zone->zone_start_pfn));
}

/* Clear orig addresses */
/* Mark orig addresses */

for_each_pbe (p, pblist)
ClearPageNosaveFree(virt_to_page(p->orig_address));
SetPageNosaveFree(virt_to_page(p->orig_address));

tail = pblist + PB_PAGE_SKIP;

/* Relocate colliding pages */

for_each_pb_page (pbpage, pblist) {
if (!PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
m = (void *)get_usable_page(GFP_ATOMIC | __GFP_COLD);
if (!m) {
error = -ENOMEM;
break;
}
if (PageNosaveFree(virt_to_page((unsigned long)pbpage))) {
m = (void *)get_safe_page(GFP_ATOMIC | __GFP_COLD);
if (!m)
return NULL;
memcpy(m, (void *)pbpage, PAGE_SIZE);
if (pbpage == pblist)
pblist = (struct pbe *)m;
else
tail->next = (struct pbe *)m;

eat_page((void *)pbpage);
pbpage = (struct pbe *)m;

/* We have to link the PBEs again */

for (p = pbpage; p < pbpage + PB_PAGE_SKIP; p++)
if (p->next) /* needed to save the end */
p->next = p + 1;
Expand All @@ -780,15 +755,13 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
tail = pbpage + PB_PAGE_SKIP;
}

if (error) {
printk("\nswsusp: Out of memory\n\n");
free_pagedir(pblist);
free_eaten_memory();
pblist = NULL;
/* Is this even worth handling? It should never ever happen, and we
have just lost user's state, anyway... */
} else
printk("swsusp: Relocated %d pages\n", rel);
/* This is for swsusp_free() */
for_each_pb_page (pbpage, pblist) {
SetPageNosave(virt_to_page(pbpage));
SetPageNosaveFree(virt_to_page(pbpage));
}

printk("swsusp: Relocated %d pages\n", rel);

return pblist;
}
Expand Down Expand Up @@ -1006,9 +979,7 @@ static int read_pagedir(struct pbe *pblist)
break;
}

if (error)
free_pagedir(pblist);
else
if (!error)
BUG_ON(i != swsusp_info.pagedir_pages);

return error;
Expand Down Expand Up @@ -1051,15 +1022,6 @@ static int read_suspend_image(void)
if (!error)
error = data_read(pagedir_nosave);

if (error) { /* We fail cleanly */
free_eaten_memory();
for_each_pbe (p, pagedir_nosave)
if (p->address) {
free_page(p->address);
p->address = 0UL;
}
free_pagedir(pagedir_nosave);
}
return error;
}

Expand Down

0 comments on commit 2c1b4a5

Please sign in to comment.