/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only).
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time when there is no room for a new object in
 * the current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
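
/*
 * Illustrative sketch (editorial note, not part of the original file): a
 * typical way the chain allocator above is used.  In this file it backs
 * the struct rtree_node and struct mem_zone_bm_rtree allocations; the
 * snippet below just shows the init/alloc/free-all pattern:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *	if (!node)
 *		return -ENOMEM;
 *	// ... more chain_alloc() calls; objects cannot be freed one by one
 *	// All objects go away together with the chain:
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */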

/**
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
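
/*
 * Worked example (editorial note, assuming a 64-bit machine with 4 KiB
 * pages): PAGE_SIZE = 4096, so one leaf page holds
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768 bits (one bit per PFN) and
 * BM_BLOCK_SHIFT = 12 + 3 = 15.  One inner node holds
 * BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 pointers, so
 * BM_RTREE_LEVEL_SHIFT = 9 and each tree level consumes 9 bits of the
 * block number.
 */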

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
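
/*
 * Worked example (editorial note, 64-bit with 4 KiB pages, so
 * BM_RTREE_LEVEL_SHIFT = 9): inserting block number 0 needs no levels at
 * all, so that block simply becomes the tree root.  Inserting block
 * number 1 makes levels_needed = 1, an inner node is allocated, the old
 * root is moved to node->data[0], and the new block lands in
 * root->data[1].  Blocks 0..511 fit in one level; block 512 grows the
 * tree to two levels, and so on.
 */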

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */
	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
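
/*
 * Illustrative sketch (editorial note, not part of the original file):
 * the canonical pattern for visiting every set bit of a memory bitmap,
 * as used by swsusp_free() and clear_free_pages() below:
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm)) {
 *		// act on pfn, e.g. via pfn_to_page(pfn)
 *	}
 */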

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init __register_nosave_region(unsigned long start_pfn,
				     unsigned long end_pfn, int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* During init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else {
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	}
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
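
/*
 * Worked example (editorial note, assuming 64-bit with 4 KiB pages, so
 * sizeof(struct rtree_node) == 24): for a zone spanning 4 GiB (1048576
 * page frames), one bitmap needs 1048576 / 32768 = 32 leaf pages, plus
 * one page for the 32 rtree_node wrappers (32 * 24 bytes fits in one
 * LINKED_PAGE_DATA_SIZE page) and one inner-node page, i.e. 34 pages.
 * Two such bitmaps (forbidden and free) give 2 * 34 = 68 extra pages.
 */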

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables (this always is the case if CONFIG_DEBUG_PAGEALLOC is not set
 * and in that case kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated.
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}
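
/*
 * Worked example (editorial note, purely hypothetical numbers): with
 * count = 1000000 usable page frames, size = 200 metadata pages,
 * PAGES_FOR_IO = 1024 and reserved_size = 1 MiB (256 pages at 4 KiB),
 * the formula above gives
 *
 *	max_size = (1000000 - (200 + 1024)) / 2 - 2 * 256 = 498876
 *
 * i.e. at most ~499k saveable pages may be left in memory, so at least
 * count - max_size ~= 501k page frames end up preallocated for the
 * image, metadata, I/O and driver reservations.
 */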
1834
1835#ifdef CONFIG_HIGHMEM
1836/**
1837 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1838 *
1839 * Compute the number of non-highmem pages that will be necessary for creating
1840 * copies of highmem pages.
1841 */
1842static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1843{
1844 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1845
1846 if (free_highmem >= nr_highmem)
1847 nr_highmem = 0;
1848 else
1849 nr_highmem -= free_highmem;
1850
1851 return nr_highmem;
1852}
1853#else
1854static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1855#endif /* CONFIG_HIGHMEM */
1856
1857/**
1858 * enough_free_mem - Check if there is enough free memory for the image.
1859 */
1860static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1861{
1862 struct zone *zone;
1863 unsigned int free = alloc_normal;
1864
1865 for_each_populated_zone(zone)
1866 if (!is_highmem(zone))
1867 free += zone_page_state(zone, NR_FREE_PAGES);
1868
1869 nr_pages += count_pages_for_highmem(nr_highmem);
1870 pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1871 nr_pages, PAGES_FOR_IO, free);
1872
1873 return free > nr_pages + PAGES_FOR_IO;
1874}
1875
1876#ifdef CONFIG_HIGHMEM
1877/**
1878 * get_highmem_buffer - Allocate a buffer for highmem pages.
1879 *
1880 * If there are some highmem pages in the hibernation image, we may need a
1881 * buffer to copy them and/or load their data.
1882 */
1883static inline int get_highmem_buffer(int safe_needed)
1884{
1885 buffer = get_image_page(GFP_ATOMIC, safe_needed);
1886 return buffer ? 0 : -ENOMEM;
1887}
1888
1889/**
1890 * alloc_highmem_image_pages - Allocate some highmem pages for the image.
1891 *
1892 * Try to allocate as many pages as needed, but if the number of free highmem
1893 * pages is less than that, allocate them all.
1894 */
1895static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1896 unsigned int nr_highmem)
1897{
1898 unsigned int to_alloc = count_free_highmem_pages();
1899
1900 if (to_alloc > nr_highmem)
1901 to_alloc = nr_highmem;
1902
1903 nr_highmem -= to_alloc;
1904 while (to_alloc-- > 0) {
1905 struct page *page;
1906
1907 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1908 memory_bm_set_bit(bm, page_to_pfn(page));
1909 }
1910 return nr_highmem;
1911}
1912#else
1913static inline int get_highmem_buffer(int safe_needed) { return 0; }
1914
1915static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1916 unsigned int n) { return 0; }
1917#endif /* CONFIG_HIGHMEM */
1918
1919/**
1920 * swsusp_alloc - Allocate memory for hibernation image.
1921 *
1922 * We first try to allocate as many highmem pages as there are
1923 * saveable highmem pages in the system. If that fails, we allocate
1924 * non-highmem pages for the copies of the remaining highmem ones.
1925 *
1926 * In this approach it is likely that the copies of highmem pages will
1927 * also be located in the high memory, because of the way in which
1928 * copy_data_pages() works.
1929 */
1930static int swsusp_alloc(struct memory_bitmap *copy_bm,
1931 unsigned int nr_pages, unsigned int nr_highmem)
1932{
1933 if (nr_highmem > 0) {
1934 if (get_highmem_buffer(PG_ANY))
1935 goto err_out;
1936 if (nr_highmem > alloc_highmem) {
1937 nr_highmem -= alloc_highmem;
1938 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1939 }
1940 }
1941 if (nr_pages > alloc_normal) {
1942 nr_pages -= alloc_normal;
1943 while (nr_pages-- > 0) {
1944 struct page *page;
1945
1946 page = alloc_image_page(GFP_ATOMIC);
1947 if (!page)
1948 goto err_out;
1949 memory_bm_set_bit(copy_bm, page_to_pfn(page));
1950 }
1951 }
1952
1953 return 0;
1954
1955 err_out:
1956 swsusp_free();
1957 return -ENOMEM;
1958}
1959
1960asmlinkage __visible int swsusp_save(void)
1961{
1962 unsigned int nr_pages, nr_highmem;
1963
1964 pr_info("Creating hibernation image:\n");
1965
1966 drain_local_pages(NULL);
1967 nr_pages = count_data_pages();
1968 nr_highmem = count_highmem_pages();
1969 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1970
1971 if (!enough_free_mem(nr_pages, nr_highmem)) {
1972 pr_err("Not enough free memory\n");
1973 return -ENOMEM;
1974 }
1975
	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1977 pr_err("Memory allocation failed\n");
1978 return -ENOMEM;
1979 }
1980
1981 /*
	 * During allocation of the suspend pagedir, new cold pages may appear.
1983 * Kill them.
1984 */
1985 drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);
1987
1988 /*
1989 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. In particular, this means we must _not_
	 * touch swap space! Except, of course, for writing out our image.
1992 */
1993
1994 nr_pages += nr_highmem;
1995 nr_copy_pages = nr_pages;
1996 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1997
	pr_info("Hibernation image created (%u pages copied)\n", nr_pages);
1999
2000 return 0;
2001}
2002
2003#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2004static int init_header_complete(struct swsusp_info *info)
2005{
2006 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2007 info->version_code = LINUX_VERSION_CODE;
2008 return 0;
2009}
2010
2011static char *check_image_kernel(struct swsusp_info *info)
2012{
2013 if (info->version_code != LINUX_VERSION_CODE)
2014 return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
2022 return "machine";
2023 return NULL;
2024}
2025#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2026
2027unsigned long snapshot_get_image_size(void)
2028{
2029 return nr_copy_pages + nr_meta_pages + 1;
2030}
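
/*
 * Worked example (illustrative only, assuming 4 KiB pages and 64-bit longs):
 * each meta page holds PAGE_SIZE / sizeof(long) = 512 PFNs, so an image with
 * nr_copy_pages = 100000 needs DIV_ROUND_UP(100000 * 8, 4096) = 196 meta
 * pages, and snapshot_get_image_size() returns 100000 + 196 + 1 = 100197
 * pages, the one extra page holding the swsusp_info header.
 */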
2031
2032static int init_header(struct swsusp_info *info)
2033{
2034 memset(info, 0, sizeof(struct swsusp_info));
2035 info->num_physpages = get_num_physpages();
2036 info->image_pages = nr_copy_pages;
2037 info->pages = snapshot_get_image_size();
2038 info->size = info->pages;
2039 info->size <<= PAGE_SHIFT;
2040 return init_header_complete(info);
2041}
2042
2043/**
2044 * pack_pfns - Prepare PFNs for saving.
 * @buf: Memory buffer to store the PFNs in.
 * @bm: Memory bitmap.
2047 *
2048 * PFNs corresponding to set bits in @bm are stored in the area of memory
2049 * pointed to by @buf (1 page at a time).
2050 */
2051static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2052{
2053 int j;
2054
2055 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2056 buf[j] = memory_bm_next_pfn(bm);
2057 if (unlikely(buf[j] == BM_END_OF_MAP))
2058 break;
2059 /* Save page key for data page (s390 only). */
2060 page_key_read(buf + j);
2061 }
2062}
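
/*
 * Layout sketch (illustrative): with 4 KiB pages and 64-bit longs, each call
 * to pack_pfns() fills one meta page with up to 512 PFNs. On the last meta
 * page, the slot after the final PFN holds BM_END_OF_MAP and the remaining
 * slots keep the zeros written by clear_page(); unpack_orig_pfns() stops at
 * the same marker when reading the stream back.
 */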
2063
2064/**
2065 * snapshot_read_next - Get the address to read the next image page from.
2066 * @handle: Snapshot handle to be used for the reading.
2067 *
2068 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure is then populated and a pointer to it should be
 * passed to this function on every subsequent call.
2071 *
2072 * On success, the function returns a positive number. Then, the caller
2073 * is allowed to read up to the returned number of bytes from the memory
2074 * location computed by the data_of() macro.
2075 *
 * The function returns 0 to indicate the end-of-data-stream condition,
2077 * and negative numbers are returned on errors. If that happens, the structure
2078 * pointed to by @handle is not updated and should not be used any more.
2079 */
2080int snapshot_read_next(struct snapshot_handle *handle)
2081{
2082 if (handle->cur > nr_meta_pages + nr_copy_pages)
2083 return 0;
2084
2085 if (!buffer) {
		/* This ensures the buffer will be freed by swsusp_free() */
2087 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2088 if (!buffer)
2089 return -ENOMEM;
2090 }
2091 if (!handle->cur) {
2092 int error;
2093
2094 error = init_header((struct swsusp_info *)buffer);
2095 if (error)
2096 return error;
2097 handle->buffer = buffer;
2098 memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
2100 } else if (handle->cur <= nr_meta_pages) {
2101 clear_page(buffer);
2102 pack_pfns(buffer, &orig_bm);
2103 } else {
2104 struct page *page;
2105
		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2107 if (PageHighMem(page)) {
2108 /*
2109 * Highmem pages are copied to the buffer,
2110 * because we can't return with a kmapped
2111 * highmem page (we may not be called again).
2112 */
2113 void *kaddr;
2114
2115 kaddr = kmap_atomic(page);
2116 copy_page(buffer, kaddr);
2117 kunmap_atomic(kaddr);
2118 handle->buffer = buffer;
2119 } else {
2120 handle->buffer = page_address(page);
2121 }
2122 }
2123 handle->cur++;
2124 return PAGE_SIZE;
2125}
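
/*
 * Usage sketch (not part of this file): a writer of the image typically
 * drives snapshot_read_next() in a loop along these lines, where
 * write_page_to_storage() is a hypothetical stand-in for the real I/O path:
 *
 *	struct snapshot_handle handle = {};
 *	int n;
 *
 *	while ((n = snapshot_read_next(&handle)) > 0)
 *		if (write_page_to_storage(data_of(handle), n))
 *			break;
 *	// n == 0 here means the entire image has been produced
 */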
2126
2127static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2128 struct memory_bitmap *src)
2129{
2130 unsigned long pfn;
2131
2132 memory_bm_position_reset(src);
2133 pfn = memory_bm_next_pfn(src);
2134 while (pfn != BM_END_OF_MAP) {
2135 memory_bm_set_bit(dst, pfn);
2136 pfn = memory_bm_next_pfn(src);
2137 }
2138}
2139
2140/**
2141 * mark_unsafe_pages - Mark pages that were used before hibernation.
2142 *
2143 * Mark the pages that cannot be used for storing the image during restoration,
2144 * because they conflict with the pages that had been used before hibernation.
2145 */
2146static void mark_unsafe_pages(struct memory_bitmap *bm)
2147{
2148 unsigned long pfn;
2149
2150 /* Clear the "free"/"unsafe" bit for all PFNs */
2151 memory_bm_position_reset(free_pages_map);
2152 pfn = memory_bm_next_pfn(free_pages_map);
2153 while (pfn != BM_END_OF_MAP) {
2154 memory_bm_clear_current(free_pages_map);
2155 pfn = memory_bm_next_pfn(free_pages_map);
2156 }
2157
2158 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2159 duplicate_memory_bitmap(free_pages_map, bm);
2160
2161 allocated_unsafe_pages = 0;
2162}
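
/*
 * Note that after mark_unsafe_pages() returns, free_pages_map no longer
 * tracks free page frames: for the rest of the restore its set bits answer
 * "was this frame in use by the image kernel?", which is exactly what
 * swsusp_page_is_free() tests when deciding whether a freshly allocated
 * page is "safe" to use.
 */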
2163
2164static int check_header(struct swsusp_info *info)
2165{
2166 char *reason;
2167
2168 reason = check_image_kernel(info);
2169 if (!reason && info->num_physpages != get_num_physpages())
2170 reason = "memory size";
2171 if (reason) {
2172 pr_err("Image mismatch: %s\n", reason);
2173 return -EPERM;
2174 }
2175 return 0;
2176}
2177
2178/**
 * load_header - Check the image header and copy the data from it.
2180 */
2181static int load_header(struct swsusp_info *info)
2182{
2183 int error;
2184
2185 restore_pblist = NULL;
2186 error = check_header(info);
2187 if (!error) {
2188 nr_copy_pages = info->image_pages;
2189 nr_meta_pages = info->pages - info->image_pages - 1;
2190 }
2191 return error;
2192}
2193
2194/**
2195 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @buf: Area of memory containing the PFNs.
 * @bm: Memory bitmap.
2198 *
2199 * For each element of the array pointed to by @buf (1 page at a time), set the
2200 * corresponding bit in @bm.
2201 */
2202static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2203{
2204 int j;
2205
2206 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2207 if (unlikely(buf[j] == BM_END_OF_MAP))
2208 break;
2209
2210 /* Extract and buffer page key for data page (s390 only). */
2211 page_key_memorize(buf + j);
2212
2213 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2214 memory_bm_set_bit(bm, buf[j]);
2215 else
2216 return -EFAULT;
2217 }
2218
2219 return 0;
2220}
2221
2222#ifdef CONFIG_HIGHMEM
2223/*
2224 * struct highmem_pbe is used for creating the list of highmem pages that
2225 * should be restored atomically during the resume from disk, because the page
2226 * frames they have occupied before the suspend are in use.
2227 */
2228struct highmem_pbe {
2229 struct page *copy_page; /* data is here now */
2230 struct page *orig_page; /* data was here before the suspend */
2231 struct highmem_pbe *next;
2232};
2233
2234/*
2235 * List of highmem PBEs needed for restoring the highmem pages that were
2236 * allocated before the suspend and included in the suspend image, but have
2237 * also been allocated by the "resume" kernel, so their contents cannot be
2238 * written directly to their "original" page frames.
2239 */
2240static struct highmem_pbe *highmem_pblist;
2241
2242/**
2243 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2244 * @bm: Memory bitmap.
2245 *
2246 * The bits in @bm that correspond to image pages are assumed to be set.
2247 */
2248static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2249{
2250 unsigned long pfn;
2251 unsigned int cnt = 0;
2252
2253 memory_bm_position_reset(bm);
2254 pfn = memory_bm_next_pfn(bm);
2255 while (pfn != BM_END_OF_MAP) {
2256 if (PageHighMem(pfn_to_page(pfn)))
2257 cnt++;
2258
2259 pfn = memory_bm_next_pfn(bm);
2260 }
2261 return cnt;
2262}
2263
2264static unsigned int safe_highmem_pages;
2265
2266static struct memory_bitmap *safe_highmem_bm;
2267
2268/**
2269 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2270 * @bm: Pointer to an uninitialized memory bitmap structure.
2271 * @nr_highmem_p: Pointer to the number of highmem image pages.
2272 *
2273 * Try to allocate as many highmem pages as there are highmem image pages
2274 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages). The pages that are "safe" (i.e. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
2278 *
2279 * NOTE: This function should not be called if there are no highmem image pages.
2280 */
2281static int prepare_highmem_image(struct memory_bitmap *bm,
2282 unsigned int *nr_highmem_p)
2283{
2284 unsigned int to_alloc;
2285
2286 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2287 return -ENOMEM;
2288
2289 if (get_highmem_buffer(PG_SAFE))
2290 return -ENOMEM;
2291
2292 to_alloc = count_free_highmem_pages();
2293 if (to_alloc > *nr_highmem_p)
2294 to_alloc = *nr_highmem_p;
2295 else
2296 *nr_highmem_p = to_alloc;
2297
2298 safe_highmem_pages = 0;
2299 while (to_alloc-- > 0) {
2300 struct page *page;
2301
2302 page = alloc_page(__GFP_HIGHMEM);
2303 if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
2305 memory_bm_set_bit(bm, page_to_pfn(page));
2306 safe_highmem_pages++;
2307 }
2308 /* Mark the page as allocated */
2309 swsusp_set_page_forbidden(page);
2310 swsusp_set_page_free(page);
2311 }
2312 memory_bm_position_reset(bm);
2313 safe_highmem_bm = bm;
2314 return 0;
2315}
2316
2317static struct page *last_highmem_page;
2318
2319/**
2320 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2321 *
 * For a given highmem image page, get a buffer that snapshot_write_next() should
2323 * return to its caller to write to.
2324 *
2325 * If the page is to be saved to its "original" page frame or a copy of
2326 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2327 * the copy of the page is to be made in normal memory, so the address of
2328 * the copy is returned.
2329 *
 * If @buffer is returned, the caller of snapshot_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to snapshot_write_next(), which is done
2333 * with the help of copy_last_highmem_page(). For this purpose, if
2334 * @buffer is returned, @last_highmem_page is set to the page to which
2335 * the data will have to be copied from @buffer.
2336 */
2337static void *get_highmem_page_buffer(struct page *page,
2338 struct chain_allocator *ca)
2339{
2340 struct highmem_pbe *pbe;
2341 void *kaddr;
2342
2343 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2344 /*
2345 * We have allocated the "original" page frame and we can
2346 * use it directly to store the loaded page.
2347 */
2348 last_highmem_page = page;
2349 return buffer;
2350 }
2351 /*
2352 * The "original" page frame has not been allocated and we have to
2353 * use a "safe" page frame to store the loaded page.
2354 */
2355 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2356 if (!pbe) {
2357 swsusp_free();
2358 return ERR_PTR(-ENOMEM);
2359 }
2360 pbe->orig_page = page;
2361 if (safe_highmem_pages > 0) {
2362 struct page *tmp;
2363
2364 /* Copy of the page will be stored in high memory */
2365 kaddr = buffer;
2366 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2367 safe_highmem_pages--;
2368 last_highmem_page = tmp;
2369 pbe->copy_page = tmp;
2370 } else {
2371 /* Copy of the page will be stored in normal memory */
2372 kaddr = safe_pages_list;
2373 safe_pages_list = safe_pages_list->next;
2374 pbe->copy_page = virt_to_page(kaddr);
2375 }
2376 pbe->next = highmem_pblist;
2377 highmem_pblist = pbe;
2378 return kaddr;
2379}
2380
2381/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image page from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
2387 */
2388static void copy_last_highmem_page(void)
2389{
2390 if (last_highmem_page) {
2391 void *dst;
2392
2393 dst = kmap_atomic(last_highmem_page);
2394 copy_page(dst, buffer);
2395 kunmap_atomic(dst);
2396 last_highmem_page = NULL;
2397 }
2398}
2399
2400static inline int last_highmem_page_copied(void)
2401{
2402 return !last_highmem_page;
2403}
2404
2405static inline void free_highmem_data(void)
2406{
2407 if (safe_highmem_bm)
2408 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2409
2410 if (buffer)
2411 free_image_page(buffer, PG_UNSAFE_CLEAR);
2412}
2413#else
2414static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2415
2416static inline int prepare_highmem_image(struct memory_bitmap *bm,
2417 unsigned int *nr_highmem_p) { return 0; }
2418
2419static inline void *get_highmem_page_buffer(struct page *page,
2420 struct chain_allocator *ca)
2421{
2422 return ERR_PTR(-EINVAL);
2423}
2424
2425static inline void copy_last_highmem_page(void) {}
2426static inline int last_highmem_page_copied(void) { return 1; }
2427static inline void free_highmem_data(void) {}
2428#endif /* CONFIG_HIGHMEM */
2429
2430#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2431
2432/**
2433 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
2435 * @bm: Memory bitmap with unsafe pages marked.
2436 *
2437 * Use @bm to mark the pages that will be overwritten in the process of
2438 * restoring the system memory state from the suspend image ("unsafe" pages)
2439 * and allocate memory for the image.
2440 *
2441 * The idea is to allocate a new memory bitmap first and then allocate
2442 * as many pages as needed for image data, but without specifying what those
2443 * pages will be used for just yet. Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later. On systems with high
 * memory, a list of "safe" highmem pages is created too.
2446 */
2447static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2448{
2449 unsigned int nr_pages, nr_highmem;
2450 struct linked_page *lp;
2451 int error;
2452
2453 /* If there is no highmem, the buffer will not be necessary */
2454 free_image_page(buffer, PG_UNSAFE_CLEAR);
2455 buffer = NULL;
2456
2457 nr_highmem = count_highmem_image_pages(bm);
2458 mark_unsafe_pages(bm);
2459
2460 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2461 if (error)
2462 goto Free;
2463
2464 duplicate_memory_bitmap(new_bm, bm);
2465 memory_bm_free(bm, PG_UNSAFE_KEEP);
2466 if (nr_highmem > 0) {
2467 error = prepare_highmem_image(bm, &nr_highmem);
2468 if (error)
2469 goto Free;
2470 }
2471 /*
2472 * Reserve some safe pages for potential later use.
2473 *
2474 * NOTE: This way we make sure there will be enough safe pages for the
2475 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2476 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2477 *
 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2479 */
2480 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2481 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2482 while (nr_pages > 0) {
2483 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2484 if (!lp) {
2485 error = -ENOMEM;
2486 goto Free;
2487 }
2488 lp->next = safe_pages_list;
2489 safe_pages_list = lp;
2490 nr_pages--;
2491 }
2492 /* Preallocate memory for the image */
2493 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2494 while (nr_pages > 0) {
2495 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2496 if (!lp) {
2497 error = -ENOMEM;
2498 goto Free;
2499 }
2500 if (!swsusp_page_is_free(virt_to_page(lp))) {
2501 /* The page is "safe", add it to the list */
2502 lp->next = safe_pages_list;
2503 safe_pages_list = lp;
2504 }
2505 /* Mark the page as allocated */
2506 swsusp_set_page_forbidden(virt_to_page(lp));
2507 swsusp_set_page_free(virt_to_page(lp));
2508 nr_pages--;
2509 }
2510 return 0;
2511
2512 Free:
2513 swsusp_free();
2514 return error;
2515}
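
/*
 * Reservation arithmetic above, made concrete (illustrative only, assuming
 * 4 KiB pages and a 24-byte struct pbe on 64-bit): LINKED_PAGE_DATA_SIZE is
 * 4096 - 8 = 4088 bytes, so PBES_PER_LINKED_PAGE = 4088 / 24 = 170. An image
 * needing 17000 PBEs therefore reserves DIV_ROUND_UP(17000, 170) = 100 safe
 * pages for chain_alloc() before the image pages themselves are preallocated.
 */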
2516
2517/**
2518 * get_buffer - Get the address to store the next image data page.
2519 *
2520 * Get the address that snapshot_write_next() should return to its caller to
2521 * write to.
2522 */
2523static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2524{
2525 struct pbe *pbe;
2526 struct page *page;
2527 unsigned long pfn = memory_bm_next_pfn(bm);
2528
2529 if (pfn == BM_END_OF_MAP)
2530 return ERR_PTR(-EFAULT);
2531
2532 page = pfn_to_page(pfn);
2533 if (PageHighMem(page))
2534 return get_highmem_page_buffer(page, ca);
2535
2536 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2537 /*
2538 * We have allocated the "original" page frame and we can
2539 * use it directly to store the loaded page.
2540 */
2541 return page_address(page);
2542
2543 /*
2544 * The "original" page frame has not been allocated and we have to
2545 * use a "safe" page frame to store the loaded page.
2546 */
2547 pbe = chain_alloc(ca, sizeof(struct pbe));
2548 if (!pbe) {
2549 swsusp_free();
2550 return ERR_PTR(-ENOMEM);
2551 }
2552 pbe->orig_address = page_address(page);
2553 pbe->address = safe_pages_list;
2554 safe_pages_list = safe_pages_list->next;
2555 pbe->next = restore_pblist;
2556 restore_pblist = pbe;
2557 return pbe->address;
2558}
2559
2560/**
2561 * snapshot_write_next - Get the address to store the next image page.
2562 * @handle: Snapshot handle structure to guide the writing.
2563 *
2564 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure is then populated and a pointer to it should be
 * passed to this function on every subsequent call.
2567 *
2568 * On success, the function returns a positive number. Then, the caller
2569 * is allowed to write up to the returned number of bytes to the memory
2570 * location computed by the data_of() macro.
2571 *
2572 * The function returns 0 to indicate the "end of file" condition. Negative
 * numbers are returned on errors, in which case the structure pointed to by
2574 * @handle is not updated and should not be used any more.
2575 */
2576int snapshot_write_next(struct snapshot_handle *handle)
2577{
2578 static struct chain_allocator ca;
2579 int error = 0;
2580
2581 /* Check if we have already loaded the entire image */
2582 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2583 return 0;
2584
2585 handle->sync_read = 1;
2586
2587 if (!handle->cur) {
2588 if (!buffer)
			/* This ensures the buffer will be freed by swsusp_free() */
2590 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2591
2592 if (!buffer)
2593 return -ENOMEM;
2594
2595 handle->buffer = buffer;
2596 } else if (handle->cur == 1) {
2597 error = load_header(buffer);
2598 if (error)
2599 return error;
2600
2601 safe_pages_list = NULL;
2602
		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2604 if (error)
2605 return error;
2606
2607 /* Allocate buffer for page keys. */
2608 error = page_key_alloc(nr_copy_pages);
2609 if (error)
2610 return error;
2611
2612 hibernate_restore_protection_begin();
2613 } else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
2615 if (error)
2616 return error;
2617
2618 if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
2620 if (error)
2621 return error;
2622
2623 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2624 memory_bm_position_reset(&orig_bm);
2625 restore_pblist = NULL;
2626 handle->buffer = get_buffer(&orig_bm, &ca);
2627 handle->sync_read = 0;
2628 if (IS_ERR(handle->buffer))
2629 return PTR_ERR(handle->buffer);
2630 }
2631 } else {
2632 copy_last_highmem_page();
2633 /* Restore page key for data page (s390 only). */
2634 page_key_write(handle->buffer);
2635 hibernate_restore_protect_page(handle->buffer);
2636 handle->buffer = get_buffer(&orig_bm, &ca);
2637 if (IS_ERR(handle->buffer))
2638 return PTR_ERR(handle->buffer);
2639 if (handle->buffer != buffer)
2640 handle->sync_read = 0;
2641 }
2642 handle->cur++;
2643 return PAGE_SIZE;
2644}
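
/*
 * Usage sketch (not part of this file), mirroring the read side: a loader
 * feeds image pages back through snapshot_write_next(), where
 * read_page_from_storage() is a hypothetical stand-in for the real I/O path:
 *
 *	struct snapshot_handle handle = {};
 *	int n;
 *
 *	while ((n = snapshot_write_next(&handle)) > 0)
 *		if (read_page_from_storage(data_of(handle), n))
 *			break;
 *	snapshot_write_finalize(&handle);
 *	if (!snapshot_image_loaded(&handle))
 *		return -ENODATA;	// image incomplete, do not restore
 */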
2645
2646/**
2647 * snapshot_write_finalize - Complete the loading of a hibernation image.
2648 *
2649 * Must be called after the last call to snapshot_write_next() in case the last
2650 * page in the image happens to be a highmem page and its contents should be
2651 * stored in highmem. Additionally, it recycles bitmap memory that's not
2652 * necessary any more.
2653 */
2654void snapshot_write_finalize(struct snapshot_handle *handle)
2655{
2656 copy_last_highmem_page();
2657 /* Restore page key for data page (s390 only). */
2658 page_key_write(handle->buffer);
2659 page_key_free();
2660 hibernate_restore_protect_page(handle->buffer);
2661 /* Do that only if we have loaded the image entirely */
2662 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2663 memory_bm_recycle(&orig_bm);
2664 free_highmem_data();
2665 }
2666}
2667
2668int snapshot_image_loaded(struct snapshot_handle *handle)
2669{
2670 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2671 handle->cur <= nr_meta_pages + nr_copy_pages);
2672}
2673
2674#ifdef CONFIG_HIGHMEM
2675/* Assumes that @buf is ready and points to a "safe" page */
2676static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2677 void *buf)
2678{
2679 void *kaddr1, *kaddr2;
2680
2681 kaddr1 = kmap_atomic(p1);
2682 kaddr2 = kmap_atomic(p2);
2683 copy_page(buf, kaddr1);
2684 copy_page(kaddr1, kaddr2);
2685 copy_page(kaddr2, buf);
2686 kunmap_atomic(kaddr2);
2687 kunmap_atomic(kaddr1);
2688}
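
/*
 * The three copy_page() calls above implement a classic three-way swap
 * through the spare "safe" page @buf:
 *
 *	buf <- p1	(save p1's data)
 *	p1  <- p2	(p2's data moves into p1)
 *	p2  <- buf	(p1's saved data moves into p2)
 */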
2689
2690/**
2691 * restore_highmem - Put highmem image pages into their original locations.
2692 *
2693 * For each highmem page that was in use before hibernation and is included in
2694 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (i.e. "before hibernation") ones.
2696 *
2697 * If the restore eventually fails, we can call this function once again and
2698 * restore the highmem state as seen by the restore kernel.
2699 */
2700int restore_highmem(void)
2701{
2702 struct highmem_pbe *pbe = highmem_pblist;
2703 void *buf;
2704
2705 if (!pbe)
2706 return 0;
2707
2708 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2709 if (!buf)
2710 return -ENOMEM;
2711
2712 while (pbe) {
2713 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2714 pbe = pbe->next;
2715 }
2716 free_image_page(buf, PG_UNSAFE_CLEAR);
2717 return 0;
2718}
2719#endif /* CONFIG_HIGHMEM */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/power/snapshot.c
4 *
5 * This file provides system snapshot/restore functionality for swsusp.
6 *
7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9 */
10
11#define pr_fmt(fmt) "PM: " fmt
12
13#include <linux/version.h>
14#include <linux/module.h>
15#include <linux/mm.h>
16#include <linux/suspend.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/spinlock.h>
20#include <linux/kernel.h>
21#include <linux/pm.h>
22#include <linux/device.h>
23#include <linux/init.h>
24#include <linux/memblock.h>
25#include <linux/nmi.h>
26#include <linux/syscalls.h>
27#include <linux/console.h>
28#include <linux/highmem.h>
29#include <linux/list.h>
30#include <linux/slab.h>
31#include <linux/compiler.h>
32#include <linux/ktime.h>
33#include <linux/set_memory.h>
34
35#include <linux/uaccess.h>
36#include <asm/mmu_context.h>
37#include <asm/pgtable.h>
38#include <asm/tlbflush.h>
39#include <asm/io.h>
40
41#include "power.h"
42
43#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
44static bool hibernate_restore_protection;
45static bool hibernate_restore_protection_active;
46
47void enable_restore_image_protection(void)
48{
49 hibernate_restore_protection = true;
50}
51
52static inline void hibernate_restore_protection_begin(void)
53{
54 hibernate_restore_protection_active = hibernate_restore_protection;
55}
56
57static inline void hibernate_restore_protection_end(void)
58{
59 hibernate_restore_protection_active = false;
60}
61
62static inline void hibernate_restore_protect_page(void *page_address)
63{
64 if (hibernate_restore_protection_active)
65 set_memory_ro((unsigned long)page_address, 1);
66}
67
68static inline void hibernate_restore_unprotect_page(void *page_address)
69{
70 if (hibernate_restore_protection_active)
71 set_memory_rw((unsigned long)page_address, 1);
72}
73#else
74static inline void hibernate_restore_protection_begin(void) {}
75static inline void hibernate_restore_protection_end(void) {}
76static inline void hibernate_restore_protect_page(void *page_address) {}
77static inline void hibernate_restore_unprotect_page(void *page_address) {}
78#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */
79
80static int swsusp_page_is_free(struct page *);
81static void swsusp_set_page_forbidden(struct page *);
82static void swsusp_unset_page_forbidden(struct page *);
83
84/*
85 * Number of bytes to reserve for memory allocations made by device drivers
86 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
87 * cause image creation to fail (tunable via /sys/power/reserved_size).
88 */
89unsigned long reserved_size;
90
91void __init hibernate_reserved_size_init(void)
92{
93 reserved_size = SPARE_PAGES * PAGE_SIZE;
94}
95
96/*
97 * Preferred image size in bytes (tunable via /sys/power/image_size).
98 * When it is set to N, swsusp will do its best to ensure the image
99 * size will not exceed N bytes, but if that is impossible, it will
100 * try to create the smallest image possible.
101 */
102unsigned long image_size;
103
104void __init hibernate_image_size_init(void)
105{
106 image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
107}
108
109/*
110 * List of PBEs needed for restoring the pages that were allocated before
111 * the suspend and included in the suspend image, but have also been
112 * allocated by the "resume" kernel, so their contents cannot be written
113 * directly to their "original" page frames.
114 */
115struct pbe *restore_pblist;
116
117/* struct linked_page is used to build chains of pages */
118
119#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
120
121struct linked_page {
122 struct linked_page *next;
123 char data[LINKED_PAGE_DATA_SIZE];
124} __packed;
125
126/*
127 * List of "safe" pages (ie. pages that were not used by the image kernel
128 * before hibernation) that may be used as temporary storage for image kernel
129 * memory contents.
130 */
131static struct linked_page *safe_pages_list;
132
133/* Pointer to an auxiliary buffer (1 page) */
134static void *buffer;
135
136#define PG_ANY 0
137#define PG_SAFE 1
138#define PG_UNSAFE_CLEAR 1
139#define PG_UNSAFE_KEEP 0
140
141static unsigned int allocated_unsafe_pages;
142
143/**
144 * get_image_page - Allocate a page for a hibernation image.
145 * @gfp_mask: GFP mask for the allocation.
146 * @safe_needed: Get pages that were not used before hibernation (restore only)
147 *
148 * During image restoration, for storing the PBE list and the image data, we can
149 * only use memory pages that do not conflict with the pages used before
150 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
151 * using allocated_unsafe_pages.
152 *
153 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
154 * swsusp_free() can release it.
155 */
156static void *get_image_page(gfp_t gfp_mask, int safe_needed)
157{
158 void *res;
159
160 res = (void *)get_zeroed_page(gfp_mask);
161 if (safe_needed)
162 while (res && swsusp_page_is_free(virt_to_page(res))) {
163 /* The page is unsafe, mark it for swsusp_free() */
164 swsusp_set_page_forbidden(virt_to_page(res));
165 allocated_unsafe_pages++;
166 res = (void *)get_zeroed_page(gfp_mask);
167 }
168 if (res) {
169 swsusp_set_page_forbidden(virt_to_page(res));
170 swsusp_set_page_free(virt_to_page(res));
171 }
172 return res;
173}
174
175static void *__get_safe_page(gfp_t gfp_mask)
176{
177 if (safe_pages_list) {
178 void *ret = safe_pages_list;
179
180 safe_pages_list = safe_pages_list->next;
181 memset(ret, 0, PAGE_SIZE);
182 return ret;
183 }
184 return get_image_page(gfp_mask, PG_SAFE);
185}
186
187unsigned long get_safe_page(gfp_t gfp_mask)
188{
189 return (unsigned long)__get_safe_page(gfp_mask);
190}
191
192static struct page *alloc_image_page(gfp_t gfp_mask)
193{
194 struct page *page;
195
196 page = alloc_page(gfp_mask);
197 if (page) {
198 swsusp_set_page_forbidden(page);
199 swsusp_set_page_free(page);
200 }
201 return page;
202}
203
204static void recycle_safe_page(void *page_address)
205{
206 struct linked_page *lp = page_address;
207
208 lp->next = safe_pages_list;
209 safe_pages_list = lp;
210}
211
212/**
213 * free_image_page - Free a page allocated for hibernation image.
214 * @addr: Address of the page to free.
215 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
216 *
217 * The page to free should have been allocated by get_image_page() (page flags
218 * set by it are affected).
219 */
220static inline void free_image_page(void *addr, int clear_nosave_free)
221{
222 struct page *page;
223
224 BUG_ON(!virt_addr_valid(addr));
225
226 page = virt_to_page(addr);
227
228 swsusp_unset_page_forbidden(page);
229 if (clear_nosave_free)
230 swsusp_unset_page_free(page);
231
232 __free_page(page);
233}
234
235static inline void free_list_of_pages(struct linked_page *list,
236 int clear_page_nosave)
237{
238 while (list) {
239 struct linked_page *lp = list->next;
240
241 free_image_page(list, clear_page_nosave);
242 list = lp;
243 }
244}
245
246/*
247 * struct chain_allocator is used for allocating small objects out of
248 * a linked list of pages called 'the chain'.
249 *
250 * The chain grows each time when there is no room for a new object in
251 * the current page. The allocated objects cannot be freed individually.
252 * It is only possible to free them all at once, by freeing the entire
253 * chain.
254 *
255 * NOTE: The chain allocator may be inefficient if the allocated objects
256 * are not much smaller than PAGE_SIZE.
257 */
258struct chain_allocator {
259 struct linked_page *chain; /* the chain */
260 unsigned int used_space; /* total size of objects allocated out
261 of the current page */
262 gfp_t gfp_mask; /* mask for allocating pages */
263 int safe_needed; /* if set, only "safe" pages are allocated */
264};
265
266static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
267 int safe_needed)
268{
269 ca->chain = NULL;
270 ca->used_space = LINKED_PAGE_DATA_SIZE;
271 ca->gfp_mask = gfp_mask;
272 ca->safe_needed = safe_needed;
273}
274
275static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
276{
277 void *ret;
278
279 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
280 struct linked_page *lp;
281
282 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
283 get_image_page(ca->gfp_mask, PG_ANY);
284 if (!lp)
285 return NULL;
286
287 lp->next = ca->chain;
288 ca->chain = lp;
289 ca->used_space = 0;
290 }
291 ret = ca->chain->data + ca->used_space;
292 ca->used_space += size;
293 return ret;
294}
295
296/**
297 * Data types related to memory bitmaps.
298 *
299 * Memory bitmap is a structure consiting of many linked lists of
300 * objects. The main list's elements are of type struct zone_bitmap
301 * and each of them corresonds to one zone. For each zone bitmap
302 * object there is a list of objects of type struct bm_block that
303 * represent each blocks of bitmap in which information is stored.
304 *
305 * struct memory_bitmap contains a pointer to the main list of zone
306 * bitmap objects, a struct bm_position used for browsing the bitmap,
307 * and a pointer to the list of pages used for allocating all of the
308 * zone bitmap objects and bitmap block objects.
309 *
310 * NOTE: It has to be possible to lay out the bitmap in memory
311 * using only allocations of order 0. Additionally, the bitmap is
312 * designed to work with arbitrary number of zones (this is over the
313 * top for now, but let's avoid making unnecessary assumptions ;-).
314 *
315 * struct zone_bitmap contains a pointer to a list of bitmap block
316 * objects and a pointer to the bitmap block object that has been
317 * most recently used for setting bits. Additionally, it contains the
318 * PFNs that correspond to the start and end of the represented zone.
319 *
320 * struct bm_block contains a pointer to the memory page in which
321 * information is stored (in the form of a block of bitmap)
322 * It also contains the pfns that correspond to the start and end of
323 * the represented memory area.
324 *
325 * The memory bitmap is organized as a radix tree to guarantee fast random
326 * access to the bits. There is one radix tree for each zone (as returned
327 * from create_mem_extents).
328 *
329 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
330 * two linked lists for the nodes of the tree, one for the inner nodes and
331 * one for the leave nodes. The linked leave nodes are used for fast linear
332 * access of the memory bitmap.
333 *
334 * The struct rtree_node represents one node of the radix tree.
335 */
336
337#define BM_END_OF_MAP (~0UL)
338
339#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
340#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
341#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
342
343/*
344 * struct rtree_node is a wrapper struct to link the nodes
345 * of the rtree together for easy linear iteration over
346 * bits and easy freeing
347 */
348struct rtree_node {
349 struct list_head list;
350 unsigned long *data;
351};
352
353/*
354 * struct mem_zone_bm_rtree represents a bitmap used for one
355 * populated memory zone.
356 */
357struct mem_zone_bm_rtree {
358 struct list_head list; /* Link Zones together */
359 struct list_head nodes; /* Radix Tree inner nodes */
360 struct list_head leaves; /* Radix Tree leaves */
361 unsigned long start_pfn; /* Zone start page frame */
362 unsigned long end_pfn; /* Zone end page frame + 1 */
363 struct rtree_node *rtree; /* Radix Tree Root */
364 int levels; /* Number of Radix Tree Levels */
365 unsigned int blocks; /* Number of Bitmap Blocks */
366};
367
368/* strcut bm_position is used for browsing memory bitmaps */
369
370struct bm_position {
371 struct mem_zone_bm_rtree *zone;
372 struct rtree_node *node;
373 unsigned long node_pfn;
374 int node_bit;
375};
376
377struct memory_bitmap {
378 struct list_head zones;
379 struct linked_page *p_list; /* list of pages used to store zone
380 bitmap objects and bitmap block
381 objects */
382 struct bm_position cur; /* most recently used bit position */
383};
384
385/* Functions that operate on memory bitmaps */
386
387#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
388#if BITS_PER_LONG == 32
389#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
390#else
391#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
392#endif
393#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
394
395/**
396 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
397 *
398 * This function is used to allocate inner nodes as well as the
399 * leave nodes of the radix tree. It also adds the node to the
400 * corresponding linked list passed in by the *list parameter.
401 */
402static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
403 struct chain_allocator *ca,
404 struct list_head *list)
405{
406 struct rtree_node *node;
407
408 node = chain_alloc(ca, sizeof(struct rtree_node));
409 if (!node)
410 return NULL;
411
412 node->data = get_image_page(gfp_mask, safe_needed);
413 if (!node->data)
414 return NULL;
415
416 list_add_tail(&node->list, list);
417
418 return node;
419}
420
421/**
422 * add_rtree_block - Add a new leave node to the radix tree.
423 *
424 * The leave nodes need to be allocated in order to keep the leaves
425 * linked list in order. This is guaranteed by the zone->blocks
426 * counter.
427 */
428static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
429 int safe_needed, struct chain_allocator *ca)
430{
431 struct rtree_node *node, *block, **dst;
432 unsigned int levels_needed, block_nr;
433 int i;
434
435 block_nr = zone->blocks;
436 levels_needed = 0;
437
438 /* How many levels do we need for this block nr? */
439 while (block_nr) {
440 levels_needed += 1;
441 block_nr >>= BM_RTREE_LEVEL_SHIFT;
442 }
443
444 /* Make sure the rtree has enough levels */
445 for (i = zone->levels; i < levels_needed; i++) {
446 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
447 &zone->nodes);
448 if (!node)
449 return -ENOMEM;
450
451 node->data[0] = (unsigned long)zone->rtree;
452 zone->rtree = node;
453 zone->levels += 1;
454 }
455
456 /* Allocate new block */
457 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
458 if (!block)
459 return -ENOMEM;
460
461 /* Now walk the rtree to insert the block */
462 node = zone->rtree;
463 dst = &zone->rtree;
464 block_nr = zone->blocks;
465 for (i = zone->levels; i > 0; i--) {
466 int index;
467
468 if (!node) {
469 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
470 &zone->nodes);
471 if (!node)
472 return -ENOMEM;
473 *dst = node;
474 }
475
476 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
477 index &= BM_RTREE_LEVEL_MASK;
478 dst = (struct rtree_node **)&((*dst)->data[index]);
479 node = *dst;
480 }
481
482 zone->blocks += 1;
483 *dst = block;
484
485 return 0;
486}
487
488static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
489 int clear_nosave_free);
490
491/**
492 * create_zone_bm_rtree - Create a radix tree for one zone.
493 *
494 * Allocated the mem_zone_bm_rtree structure and initializes it.
495 * This function also allocated and builds the radix tree for the
496 * zone.
497 */
498static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
499 int safe_needed,
500 struct chain_allocator *ca,
501 unsigned long start,
502 unsigned long end)
503{
504 struct mem_zone_bm_rtree *zone;
505 unsigned int i, nr_blocks;
506 unsigned long pages;
507
508 pages = end - start;
509 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
510 if (!zone)
511 return NULL;
512
513 INIT_LIST_HEAD(&zone->nodes);
514 INIT_LIST_HEAD(&zone->leaves);
515 zone->start_pfn = start;
516 zone->end_pfn = end;
517 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
518
519 for (i = 0; i < nr_blocks; i++) {
520 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
521 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
522 return NULL;
523 }
524 }
525
526 return zone;
527}
528
529/**
530 * free_zone_bm_rtree - Free the memory of the radix tree.
531 *
532 * Free all node pages of the radix tree. The mem_zone_bm_rtree
533 * structure itself is not freed here nor are the rtree_node
534 * structs.
535 */
536static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
537 int clear_nosave_free)
538{
539 struct rtree_node *node;
540
541 list_for_each_entry(node, &zone->nodes, list)
542 free_image_page(node->data, clear_nosave_free);
543
544 list_for_each_entry(node, &zone->leaves, list)
545 free_image_page(node->data, clear_nosave_free);
546}
547
548static void memory_bm_position_reset(struct memory_bitmap *bm)
549{
550 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
551 list);
552 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
553 struct rtree_node, list);
554 bm->cur.node_pfn = 0;
555 bm->cur.node_bit = 0;
556}
557
558static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
559
560struct mem_extent {
561 struct list_head hook;
562 unsigned long start;
563 unsigned long end;
564};
565
566/**
567 * free_mem_extents - Free a list of memory extents.
568 * @list: List of extents to free.
569 */
570static void free_mem_extents(struct list_head *list)
571{
572 struct mem_extent *ext, *aux;
573
574 list_for_each_entry_safe(ext, aux, list, hook) {
575 list_del(&ext->hook);
576 kfree(ext);
577 }
578}
579
580/**
581 * create_mem_extents - Create a list of memory extents.
582 * @list: List to put the extents into.
583 * @gfp_mask: Mask to use for memory allocations.
584 *
585 * The extents represent contiguous ranges of PFNs.
586 */
587static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
588{
589 struct zone *zone;
590
591 INIT_LIST_HEAD(list);
592
593 for_each_populated_zone(zone) {
594 unsigned long zone_start, zone_end;
595 struct mem_extent *ext, *cur, *aux;
596
597 zone_start = zone->zone_start_pfn;
598 zone_end = zone_end_pfn(zone);
599
600 list_for_each_entry(ext, list, hook)
601 if (zone_start <= ext->end)
602 break;
603
604 if (&ext->hook == list || zone_end < ext->start) {
605 /* New extent is necessary */
606 struct mem_extent *new_ext;
607
608 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
609 if (!new_ext) {
610 free_mem_extents(list);
611 return -ENOMEM;
612 }
613 new_ext->start = zone_start;
614 new_ext->end = zone_end;
615 list_add_tail(&new_ext->hook, &ext->hook);
616 continue;
617 }
618
619 /* Merge this zone's range of PFNs with the existing one */
620 if (zone_start < ext->start)
621 ext->start = zone_start;
622 if (zone_end > ext->end)
623 ext->end = zone_end;
624
625 /* More merging may be possible */
626 cur = ext;
627 list_for_each_entry_safe_continue(cur, aux, list, hook) {
628 if (zone_end < cur->start)
629 break;
630 if (zone_end < cur->end)
631 ext->end = cur->end;
632 list_del(&cur->hook);
633 kfree(cur);
634 }
635 }
636
637 return 0;
638}
639
640/**
641 * memory_bm_create - Allocate memory for a memory bitmap.
642 */
643static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
644 int safe_needed)
645{
646 struct chain_allocator ca;
647 struct list_head mem_extents;
648 struct mem_extent *ext;
649 int error;
650
651 chain_init(&ca, gfp_mask, safe_needed);
652 INIT_LIST_HEAD(&bm->zones);
653
654 error = create_mem_extents(&mem_extents, gfp_mask);
655 if (error)
656 return error;
657
658 list_for_each_entry(ext, &mem_extents, hook) {
659 struct mem_zone_bm_rtree *zone;
660
661 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
662 ext->start, ext->end);
663 if (!zone) {
664 error = -ENOMEM;
665 goto Error;
666 }
667 list_add_tail(&zone->list, &bm->zones);
668 }
669
670 bm->p_list = ca.chain;
671 memory_bm_position_reset(bm);
672 Exit:
673 free_mem_extents(&mem_extents);
674 return error;
675
676 Error:
677 bm->p_list = ca.chain;
678 memory_bm_free(bm, PG_UNSAFE_CLEAR);
679 goto Exit;
680}
681
682/**
683 * memory_bm_free - Free memory occupied by the memory bitmap.
684 * @bm: Memory bitmap.
685 */
686static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
687{
688 struct mem_zone_bm_rtree *zone;
689
690 list_for_each_entry(zone, &bm->zones, list)
691 free_zone_bm_rtree(zone, clear_nosave_free);
692
693 free_list_of_pages(bm->p_list, clear_nosave_free);
694
695 INIT_LIST_HEAD(&bm->zones);
696}
697
698/**
699 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
700 *
701 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
702 * The cur.zone, cur.block and cur.node_pfn members of @bm are updated.
703 *
704 * Walk the radix tree to find the page containing the bit that represents @pfn
705 * and return the position of the bit in @addr and @bit_nr.
706 */
707static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
708 void **addr, unsigned int *bit_nr)
709{
710 struct mem_zone_bm_rtree *curr, *zone;
711 struct rtree_node *node;
712 int i, block_nr;
713
714 zone = bm->cur.zone;
715
716 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
717 goto zone_found;
718
719 zone = NULL;
720
721 /* Find the right zone */
722 list_for_each_entry(curr, &bm->zones, list) {
723 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
724 zone = curr;
725 break;
726 }
727 }
728
729 if (!zone)
730 return -EFAULT;
731
732zone_found:
733 /*
734 * We have found the zone. Now walk the radix tree to find the leaf node
735 * for our PFN.
736 */
737 node = bm->cur.node;
738 if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
739 goto node_found;
740
741 node = zone->rtree;
742 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
743
744 for (i = zone->levels; i > 0; i--) {
745 int index;
746
747 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
748 index &= BM_RTREE_LEVEL_MASK;
749 BUG_ON(node->data[index] == 0);
750 node = (struct rtree_node *)node->data[index];
751 }
752
753node_found:
754 /* Update last position */
755 bm->cur.zone = zone;
756 bm->cur.node = node;
757 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
758
759 /* Set return values */
760 *addr = node->data;
761 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
762
763 return 0;
764}
765
766static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
767{
768 void *addr;
769 unsigned int bit;
770 int error;
771
772 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
773 BUG_ON(error);
774 set_bit(bit, addr);
775}
776
777static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
778{
779 void *addr;
780 unsigned int bit;
781 int error;
782
783 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
784 if (!error)
785 set_bit(bit, addr);
786
787 return error;
788}
789
790static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
791{
792 void *addr;
793 unsigned int bit;
794 int error;
795
796 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
797 BUG_ON(error);
798 clear_bit(bit, addr);
799}
800
801static void memory_bm_clear_current(struct memory_bitmap *bm)
802{
803 int bit;
804
805 bit = max(bm->cur.node_bit - 1, 0);
806 clear_bit(bit, bm->cur.node->data);
807}
808
809static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
810{
811 void *addr;
812 unsigned int bit;
813 int error;
814
815 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
816 BUG_ON(error);
817 return test_bit(bit, addr);
818}
819
820static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
821{
822 void *addr;
823 unsigned int bit;
824
825 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
826}
827
828/*
829 * rtree_next_node - Jump to the next leaf node.
830 *
831 * Set the position to the beginning of the next node in the
832 * memory bitmap. This is either the next node in the current
833 * zone's radix tree or the first node in the radix tree of the
834 * next zone.
835 *
836 * Return true if there is a next node, false otherwise.
837 */
838static bool rtree_next_node(struct memory_bitmap *bm)
839{
840 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
841 bm->cur.node = list_entry(bm->cur.node->list.next,
842 struct rtree_node, list);
843 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
844 bm->cur.node_bit = 0;
845 touch_softlockup_watchdog();
846 return true;
847 }
848
849 /* No more nodes, goto next zone */
850 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
851 bm->cur.zone = list_entry(bm->cur.zone->list.next,
852 struct mem_zone_bm_rtree, list);
853 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
854 struct rtree_node, list);
855 bm->cur.node_pfn = 0;
856 bm->cur.node_bit = 0;
857 return true;
858 }
859
860 /* No more zones */
861 return false;
862}
863
864/**
865 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
866 * @bm: Memory bitmap.
867 *
868 * Starting from the last returned position this function searches for the next
869 * set bit in @bm and returns the PFN represented by it. If no more bits are
870 * set, BM_END_OF_MAP is returned.
871 *
872 * It is required to run memory_bm_position_reset() before the first call to
873 * this function for the given memory bitmap.
874 */
875static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
876{
877 unsigned long bits, pfn, pages;
878 int bit;
879
880 do {
881 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
882 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
883 bit = find_next_bit(bm->cur.node->data, bits,
884 bm->cur.node_bit);
885 if (bit < bits) {
886 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
887 bm->cur.node_bit = bit + 1;
888 return pfn;
889 }
890 } while (rtree_next_node(bm));
891
892 return BM_END_OF_MAP;
893}
894
895/*
896 * This structure represents a range of page frames the contents of which
897 * should not be saved during hibernation.
898 */
899struct nosave_region {
900 struct list_head list;
901 unsigned long start_pfn;
902 unsigned long end_pfn;
903};
904
905static LIST_HEAD(nosave_regions);
906
907static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
908{
909 struct rtree_node *node;
910
911 list_for_each_entry(node, &zone->nodes, list)
912 recycle_safe_page(node->data);
913
914 list_for_each_entry(node, &zone->leaves, list)
915 recycle_safe_page(node->data);
916}
917
918static void memory_bm_recycle(struct memory_bitmap *bm)
919{
920 struct mem_zone_bm_rtree *zone;
921 struct linked_page *p_list;
922
923 list_for_each_entry(zone, &bm->zones, list)
924 recycle_zone_bm_rtree(zone);
925
926 p_list = bm->p_list;
927 while (p_list) {
928 struct linked_page *lp = p_list;
929
930 p_list = lp->next;
931 recycle_safe_page(lp);
932 }
933}
934
935/**
936 * register_nosave_region - Register a region of unsaveable memory.
937 *
938 * Register a range of page frames the contents of which should not be saved
939 * during hibernation (to be used in the early initialization code).
940 */
941void __init __register_nosave_region(unsigned long start_pfn,
942 unsigned long end_pfn, int use_kmalloc)
943{
944 struct nosave_region *region;
945
946 if (start_pfn >= end_pfn)
947 return;
948
949 if (!list_empty(&nosave_regions)) {
950 /* Try to extend the previous region (they should be sorted) */
951 region = list_entry(nosave_regions.prev,
952 struct nosave_region, list);
953 if (region->end_pfn == start_pfn) {
954 region->end_pfn = end_pfn;
955 goto Report;
956 }
957 }
958 if (use_kmalloc) {
959 /* During init, this shouldn't fail */
960 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
961 BUG_ON(!region);
962 } else {
963 /* This allocation cannot fail */
964 region = memblock_alloc(sizeof(struct nosave_region),
965 SMP_CACHE_BYTES);
966 if (!region)
967 panic("%s: Failed to allocate %zu bytes\n", __func__,
968 sizeof(struct nosave_region));
969 }
970 region->start_pfn = start_pfn;
971 region->end_pfn = end_pfn;
972 list_add_tail(®ion->list, &nosave_regions);
973 Report:
974 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
975 (unsigned long long) start_pfn << PAGE_SHIFT,
976 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
977}
978
979/*
980 * Set bits in this map correspond to the page frames the contents of which
981 * should not be saved during the suspend.
982 */
983static struct memory_bitmap *forbidden_pages_map;
984
985/* Set bits in this map correspond to free page frames. */
986static struct memory_bitmap *free_pages_map;
987
988/*
989 * Each page frame allocated for creating the image is marked by setting the
990 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
991 */
992
993void swsusp_set_page_free(struct page *page)
994{
995 if (free_pages_map)
996 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
997}
998
999static int swsusp_page_is_free(struct page *page)
1000{
1001 return free_pages_map ?
1002 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1003}
1004
1005void swsusp_unset_page_free(struct page *page)
1006{
1007 if (free_pages_map)
1008 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1009}
1010
1011static void swsusp_set_page_forbidden(struct page *page)
1012{
1013 if (forbidden_pages_map)
1014 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1015}
1016
1017int swsusp_page_is_forbidden(struct page *page)
1018{
1019 return forbidden_pages_map ?
1020 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1021}
1022
1023static void swsusp_unset_page_forbidden(struct page *page)
1024{
1025 if (forbidden_pages_map)
1026 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1027}
1028
1029/**
1030 * mark_nosave_pages - Mark pages that should not be saved.
1031 * @bm: Memory bitmap.
1032 *
1033 * Set the bits in @bm that correspond to the page frames the contents of which
1034 * should not be saved.
1035 */
1036static void mark_nosave_pages(struct memory_bitmap *bm)
1037{
1038 struct nosave_region *region;
1039
1040 if (list_empty(&nosave_regions))
1041 return;
1042
1043 list_for_each_entry(region, &nosave_regions, list) {
1044 unsigned long pfn;
1045
1046 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1047 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1048 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1049 - 1);
1050
1051 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1052 if (pfn_valid(pfn)) {
1053 /*
1054 * It is safe to ignore the result of
1055 * mem_bm_set_bit_check() here, since we won't
1056 * touch the PFNs for which the error is
1057 * returned anyway.
1058 */
1059 mem_bm_set_bit_check(bm, pfn);
1060 }
1061 }
1062}
1063
1064/**
1065 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1066 *
1067 * Create bitmaps needed for marking page frames that should not be saved and
1068 * free page frames. The forbidden_pages_map and free_pages_map pointers are
1069 * only modified if everything goes well, because we don't want the bits to be
1070 * touched before both bitmaps are set up.
1071 */
1072int create_basic_memory_bitmaps(void)
1073{
1074 struct memory_bitmap *bm1, *bm2;
1075 int error = 0;
1076
1077 if (forbidden_pages_map && free_pages_map)
1078 return 0;
1079 else
1080 BUG_ON(forbidden_pages_map || free_pages_map);
1081
1082 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1083 if (!bm1)
1084 return -ENOMEM;
1085
1086 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1087 if (error)
1088 goto Free_first_object;
1089
1090 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1091 if (!bm2)
1092 goto Free_first_bitmap;
1093
1094 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1095 if (error)
1096 goto Free_second_object;
1097
1098 forbidden_pages_map = bm1;
1099 free_pages_map = bm2;
1100 mark_nosave_pages(forbidden_pages_map);
1101
1102 pr_debug("Basic memory bitmaps created\n");
1103
1104 return 0;
1105
1106 Free_second_object:
1107 kfree(bm2);
1108 Free_first_bitmap:
1109 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1110 Free_first_object:
1111 kfree(bm1);
1112 return -ENOMEM;
1113}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

void clear_free_pages(void)
{
#ifdef CONFIG_PAGE_POISONING_ZERO
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (pfn_valid(pfn))
			clear_highpage(pfn_to_page(pfn));

		pfn = memory_bm_next_pfn(bm);
	}
	memory_bm_position_reset(bm);
	pr_info("free pages cleared after restore\n");
#endif /* PAGE_POISONING_ZERO */
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
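
/*
 * Worked example (hypothetical figures, not from the original source): for a
 * zone spanning 2^20 page frames (4 GiB with 4 KiB pages) and
 * BM_BITS_PER_BLOCK = 32768 bits per bitmap block, the bottom level needs 32
 * blocks, plus the rtree_node bookkeeping and one node per upper radix-tree
 * level. The result is doubled because two such bitmaps (orig_bm and copy_bm)
 * are set up for the image.
 */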

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave, NosaveFree or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, is not in the range of pages
 * statically defined as 'unsaveable', and isn't part of a free chunk of
 * pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed because copy_page() and memcpy() are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
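
/*
 * Explanatory note (not from the original source): the open-coded loop above
 * copies one long at a time, avoiding architecture-specific or instrumented
 * copy routines; some of the pages copied here (e.g. those holding the task
 * structs of the tasks doing the copying) can change while being copied,
 * which such routines may not tolerate.
 */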

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This is always the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set: in that case,
 * kernel_page_present() always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

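/*
 * Explanatory note (not from the original source): copy_data_pages() works in
 * two passes. The first pass walks all populated zones and sets a bit in
 * @orig_bm for every saveable page frame; the second pass walks @orig_bm and
 * @copy_bm in lockstep, copying each saveable page into the next page frame
 * preallocated for the image.
 */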
static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

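/*
 * Explanatory note (not from the original source): page frames allocated for
 * the image are marked both "forbidden" and "free" (see, e.g., the
 * preallocation helpers below and prepare_image()), so the lockstep walk over
 * forbidden_pages_map and free_pages_map in swsusp_free() visits exactly the
 * pages that were allocated for the image.
 */
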
/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}
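
/*
 * Worked example (hypothetical figures, not from the original source): with
 * 2,000,000 saveable pages and 1,200,000 theoretically freeable pages, the
 * estimate is 800,000 pages; if the freeable sum is at least the number of
 * saveable pages, the estimate is 0 and the lower limit does not constrain
 * preallocation.
 */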

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively). To make this happen, we compute the
 * total number of available page frames and allocate at least
 *
 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory... ");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_cont("done (allocated %lu pages)\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	pr_cont("\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
 *
 * Compute the number of non-highmem pages that will be necessary for creating
 * copies of highmem pages.
 */
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Check if there is enough free memory for the image.
 */
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - Allocate a buffer for highmem pages.
 *
 * If there are some highmem pages in the hibernation image, we may need a
 * buffer to copy them and/or load their data.
 */
static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

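/*
 * Note (explanatory, not from the original source): alloc_highmem_pages()
 * below does not check alloc_image_page() for failure; to_alloc is clamped
 * to count_free_highmem_pages(), so the allocations are expected to succeed.
 */
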
/**
 * alloc_highmem_pages - Allocate some highmem pages for the image.
 *
 * Try to allocate as many pages as needed, but if the number of free highmem
 * pages is less than that, allocate them all.
 */
static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
					       unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - Allocate memory for hibernation image.
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system. If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */
static int swsusp_alloc(struct memory_bitmap *copy_bm,
			unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	pr_info("Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		pr_err("Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * During the allocation of the suspend pagedir, new cold pages may
	 * appear. Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section. From now on, we can write to memory,
	 * but we should not touch disk. In particular, we must _not_ touch
	 * swap space, except of course to write out our image.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);

	return 0;
}

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

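/*
 * Explanatory note (not from the original source): the image consists of one
 * header page, nr_meta_pages pages holding the PFNs of the data pages and
 * nr_copy_pages data pages themselves, hence the "+ 1" below; load_header()
 * inverts this arithmetic on restore.
 */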
unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - Prepare PFNs for saving.
 * @bm: Memory bitmap.
 * @buf: Memory buffer to store the PFNs in.
 *
 * PFNs corresponding to set bits in @bm are stored in the area of memory
 * pointed to by @buf (1 page at a time).
 */
static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
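
/*
 * Sizing note (explanatory, not from the original source): with 4 KiB pages
 * and 8-byte longs, each metadata page packed here holds up to 512 PFNs,
 * which matches nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long),
 * PAGE_SIZE) computed in swsusp_save(). unpack_orig_pfns() performs the
 * inverse operation during restore.
 */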

/**
 * snapshot_read_next - Get the address to read the next image page from.
 * @handle: Snapshot handle to be used for the reading.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure is then populated, and a pointer to it should be
 * passed to this function on every subsequent call.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to read up to the returned number of bytes from the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the end of the data stream condition,
 * and negative numbers are returned on errors. If that happens, the structure
 * pointed to by @handle is not updated and should not be used any more.
 */
int snapshot_read_next(struct snapshot_handle *handle)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->cur) {
		int error;

		error = init_header((struct swsusp_info *)buffer);
		if (error)
			return error;
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	} else if (handle->cur <= nr_meta_pages) {
		clear_page(buffer);
		pack_pfns(buffer, &orig_bm);
	} else {
		struct page *page;

		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
		if (PageHighMem(page)) {
			/*
			 * Highmem pages are copied to the buffer,
			 * because we can't return with a kmapped
			 * highmem page (we may not be called again).
			 */
			void *kaddr;

			kaddr = kmap_atomic(page);
			copy_page(buffer, kaddr);
			kunmap_atomic(kaddr);
			handle->buffer = buffer;
		} else {
			handle->buffer = page_address(page);
		}
	}
	handle->cur++;
	return PAGE_SIZE;
}

static void duplicate_memory_bitmap(struct memory_bitmap *dst,
				    struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}

/**
 * mark_unsafe_pages - Mark pages that were used before hibernation.
 *
 * Mark the pages that cannot be used for storing the image during restoration,
 * because they conflict with the pages that had been used before hibernation.
 */
static void mark_unsafe_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;

	/* Clear the "free"/"unsafe" bit for all PFNs */
	memory_bm_position_reset(free_pages_map);
	pfn = memory_bm_next_pfn(free_pages_map);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_clear_current(free_pages_map);
		pfn = memory_bm_next_pfn(free_pages_map);
	}

	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
	duplicate_memory_bitmap(free_pages_map, bm);

	allocated_unsafe_pages = 0;
}
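
/*
 * Explanatory note (not from the original source): during restore,
 * free_pages_map is repurposed so that a set bit means "unsafe". The first
 * loop in mark_unsafe_pages() clears every bit, and duplicate_memory_bitmap()
 * then sets the bits of the page frames that the image data occupied before
 * hibernation; page frames whose bits remain clear are "safe" to store
 * loaded pages in.
 */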

static int check_header(struct swsusp_info *info)
{
	char *reason;

	reason = check_image_kernel(info);
	if (!reason && info->num_physpages != get_num_physpages())
		reason = "memory size";
	if (reason) {
		pr_err("Image mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}

/**
 * load_header - Check the image header and copy the data from it.
 */
static int load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}

/**
 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
 * @bm: Memory bitmap.
 * @buf: Area of memory containing the PFNs.
 *
 * For each element of the array pointed to by @buf (1 page at a time), set the
 * corresponding bit in @bm.
 */
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		/* Extract and buffer page key for data page (s390 only). */
		page_key_memorize(buf + j);

		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
			memory_bm_set_bit(bm, buf[j]);
		else
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_HIGHMEM
/*
 * struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/*
 * List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;

/**
 * count_highmem_image_pages - Compute the number of highmem pages in the image.
 * @bm: Memory bitmap.
 *
 * The bits in @bm that correspond to image pages are assumed to be set.
 */
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

/**
 * prepare_highmem_image - Allocate memory for loading highmem data from image.
 * @bm: Pointer to an uninitialized memory bitmap structure.
 * @nr_highmem_p: Pointer to the number of highmem image pages.
 *
 * Try to allocate as many highmem pages as there are highmem image pages
 * (@nr_highmem_p points to the variable containing the number of highmem image
 * pages). The pages that are "safe" (i.e. will not be overwritten when the
 * hibernation image is restored entirely) have the corresponding bits set in
 * @bm (it must be uninitialized).
 *
 * NOTE: This function should not be called if there are no highmem image pages.
 */
static int prepare_highmem_image(struct memory_bitmap *bm,
				 unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!swsusp_page_is_free(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
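
/*
 * Explanatory note (not from the original source): pages allocated in
 * prepare_highmem_image() that turn out not to be "safe" are still marked
 * forbidden and free, so they are not leaked; swsusp_free() releases them
 * together with the rest of the image memory.
 */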

static struct page *last_highmem_page;

/**
 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
 *
 * For a given highmem image page get a buffer that suspend_write_next() should
 * return to its caller to write to.
 *
 * If the page is to be saved to its "original" page frame or a copy of
 * the page is to be made in the highmem, @buffer is returned. Otherwise,
 * the copy of the page is to be made in normal memory, so the address of
 * the copy is returned.
 *
 * If @buffer is returned, the caller of suspend_write_next() will write
 * the page's contents to @buffer, so they will have to be copied to the
 * right location on the next call to suspend_write_next() and it is done
 * with the help of copy_last_highmem_page(). For this purpose, if
 * @buffer is returned, @last_highmem_page is set to the page to which
 * the data will have to be copied from @buffer.
 */
static void *get_highmem_page_buffer(struct page *page,
				     struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}

/**
 * copy_last_highmem_page - Copy the most recent highmem image page.
 *
 * Copy the contents of a highmem image from @buffer, where the caller of
 * snapshot_write_next() has stored them, to the right location represented by
 * @last_highmem_page.
 */
static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page);
		copy_page(dst, buffer);
		kunmap_atomic(dst);
		last_highmem_page = NULL;
	}
}

static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int prepare_highmem_image(struct memory_bitmap *bm,
					unsigned int *nr_highmem_p) { return 0; }

static inline void *get_highmem_page_buffer(struct page *page,
					    struct chain_allocator *ca)
{
	return ERR_PTR(-EINVAL);
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
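
/*
 * Sizing note (hypothetical figures, not from the original source): on a
 * 64-bit kernel with 4 KiB pages, sizeof(struct pbe) is 24 bytes and
 * LINKED_PAGE_DATA_SIZE is slightly below PAGE_SIZE, so each linked page
 * holds roughly 170 PBEs.
 */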

/**
 * prepare_image - Make room for loading hibernation image.
 * @new_bm: Uninitialized memory bitmap structure.
 * @bm: Memory bitmap with unsafe pages marked.
 *
 * Use @bm to mark the pages that will be overwritten in the process of
 * restoring the system memory state from the suspend image ("unsafe" pages)
 * and allocate memory for the image.
 *
 * The idea is to allocate a new memory bitmap first and then allocate
 * as many pages as needed for image data, but without specifying what those
 * pages will be used for just yet. Instead, we mark them all as allocated and
 * create a list of "safe" pages to be used later. On systems with high
 * memory a list of "safe" highmem pages is created too.
 */
static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	mark_unsafe_pages(bm);

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/*
	 * Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer(). It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 *
	 * nr_copy_pages cannot be less than allocated_unsafe_pages either.
	 */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = safe_pages_list;
		safe_pages_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!swsusp_page_is_free(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		swsusp_set_page_forbidden(virt_to_page(lp));
		swsusp_set_page_free(virt_to_page(lp));
		nr_pages--;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}

/**
 * get_buffer - Get the address to store the next image data page.
 *
 * Get the address that snapshot_write_next() should return to its caller to
 * write to.
 */
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page;
	unsigned long pfn = memory_bm_next_pfn(bm);

	if (pfn == BM_END_OF_MAP)
		return ERR_PTR(-EFAULT);

	page = pfn_to_page(pfn);
	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
		/*
		 * We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/*
	 * The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return ERR_PTR(-ENOMEM);
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}

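/*
 * Explanatory note (not from the original source): snapshot_write_next()
 * below proceeds in phases keyed off handle->cur. The first call only sets
 * up the transfer buffer; the next call parses the image header the caller
 * has written into it; the following nr_meta_pages calls unpack the original
 * PFNs into copy_bm (the last of them also prepares the image memory); every
 * later call returns a buffer for one data page.
 */
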
/**
 * snapshot_write_next - Get the address to store the next image page.
 * @handle: Snapshot handle structure to guide the writing.
 *
 * On the first call, @handle should point to a zeroed snapshot_handle
 * structure. The structure is then populated, and a pointer to it should be
 * passed to this function on every subsequent call.
 *
 * On success, the function returns a positive number. Then, the caller
 * is allowed to write up to the returned number of bytes to the memory
 * location computed by the data_of() macro.
 *
 * The function returns 0 to indicate the "end of file" condition. Negative
 * numbers are returned on errors, in which case the structure pointed to by
 * @handle is not updated and should not be used any more.
 */
int snapshot_write_next(struct snapshot_handle *handle)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	handle->sync_read = 1;

	if (!handle->cur) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	} else if (handle->cur == 1) {
		error = load_header(buffer);
		if (error)
			return error;

		safe_pages_list = NULL;

		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
		if (error)
			return error;

		/* Allocate buffer for page keys. */
		error = page_key_alloc(nr_copy_pages);
		if (error)
			return error;

		hibernate_restore_protection_begin();
	} else if (handle->cur <= nr_meta_pages + 1) {
		error = unpack_orig_pfns(buffer, &copy_bm);
		if (error)
			return error;

		if (handle->cur == nr_meta_pages + 1) {
			error = prepare_image(&orig_bm, &copy_bm);
			if (error)
				return error;

			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
			memory_bm_position_reset(&orig_bm);
			restore_pblist = NULL;
			handle->buffer = get_buffer(&orig_bm, &ca);
			handle->sync_read = 0;
			if (IS_ERR(handle->buffer))
				return PTR_ERR(handle->buffer);
		}
	} else {
		copy_last_highmem_page();
		/* Restore page key for data page (s390 only). */
		page_key_write(handle->buffer);
		hibernate_restore_protect_page(handle->buffer);
		handle->buffer = get_buffer(&orig_bm, &ca);
		if (IS_ERR(handle->buffer))
			return PTR_ERR(handle->buffer);
		if (handle->buffer != buffer)
			handle->sync_read = 0;
	}
	handle->cur++;
	return PAGE_SIZE;
}

/**
 * snapshot_write_finalize - Complete the loading of a hibernation image.
 *
 * Must be called after the last call to snapshot_write_next() in case the last
 * page in the image happens to be a highmem page and its contents should be
 * stored in highmem. Additionally, it recycles bitmap memory that's not
 * necessary any more.
 */
void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Restore page key for data page (s390 only). */
	page_key_write(handle->buffer);
	page_key_free();
	hibernate_restore_protect_page(handle->buffer);
	/* Do that only if we have loaded the image entirely */
	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_recycle(&orig_bm);
		free_highmem_data();
	}
}

int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
		 handle->cur <= nr_meta_pages + nr_copy_pages);
}

#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void swap_two_pages_data(struct page *p1, struct page *p2,
				       void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1);
	kaddr2 = kmap_atomic(p2);
	copy_page(buf, kaddr1);
	copy_page(kaddr1, kaddr2);
	copy_page(kaddr2, buf);
	kunmap_atomic(kaddr2);
	kunmap_atomic(kaddr1);
}

/**
 * restore_highmem - Put highmem image pages into their original locations.
 *
 * For each highmem page that was in use before hibernation and is included in
 * the image, and also has been allocated by the "restore" kernel, swap its
 * current contents with the previous (i.e. "before hibernation") ones.
 *
 * If the restore eventually fails, we can call this function once again and
 * restore the highmem state as seen by the restore kernel.
 */
int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */