// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */

#define pr_fmt(fmt) "PM: hibernation: " fmt

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/nmi.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/set_memory.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
static bool hibernate_restore_protection;
static bool hibernate_restore_protection_active;

void enable_restore_image_protection(void)
{
	hibernate_restore_protection = true;
}

static inline void hibernate_restore_protection_begin(void)
{
	hibernate_restore_protection_active = hibernate_restore_protection;
}

static inline void hibernate_restore_protection_end(void)
{
	hibernate_restore_protection_active = false;
}

static inline void hibernate_restore_protect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_ro((unsigned long)page_address, 1);
}

static inline void hibernate_restore_unprotect_page(void *page_address)
{
	if (hibernate_restore_protection_active)
		set_memory_rw((unsigned long)page_address, 1);
}
#else
static inline void hibernate_restore_protection_begin(void) {}
static inline void hibernate_restore_protection_end(void) {}
static inline void hibernate_restore_protect_page(void *page_address) {}
static inline void hibernate_restore_unprotect_page(void *page_address) {}
#endif /* CONFIG_STRICT_KERNEL_RWX && CONFIG_ARCH_HAS_SET_MEMORY */

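/*
 * Sketch of the intended call pattern (the restore path itself is beyond
 * this excerpt; swsusp_free() below shows the unprotect side):
 *
 *	hibernate_restore_protection_begin();
 *	...write each restored page, then...
 *	hibernate_restore_protect_page(page_address(page));
 *	...
 *	hibernate_restore_protection_end();
 */
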
/*
 * The calls to set_direct_map_*() should not fail because remapping a page
 * here means that we only update protection bits in an existing PTE.
 * It is still worth having a warning here in case something changes and this
 * is no longer the case.
 */
static inline void hibernate_map_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		int ret = set_direct_map_default_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");
	} else {
		debug_pagealloc_map_pages(page, 1);
	}
}

static inline void hibernate_unmap_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
		unsigned long addr = (unsigned long)page_address(page);
		int ret = set_direct_map_invalid_noflush(page);

		if (ret)
			pr_warn_once("Failed to remap page\n");

		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	} else {
		debug_pagealloc_unmap_pages(page, 1);
	}
}

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}
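
/*
 * Worked example (assuming the usual power.h definition of SPARE_PAGES as
 * (1024 * 1024) >> PAGE_SHIFT): the default reserve works out to 1 MiB,
 * e.g. 256 pages * 4096 bytes on a system with 4 KiB pages.
 */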

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
}
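
/*
 * Worked example (assuming 4 KiB pages): with 8 GiB of RAM,
 * totalram_pages() is about 2097152, so the default image_size is
 * (2097152 * 2 / 5) * 4096 bytes, i.e. roughly 3.2 GiB (2/5 of RAM).
 */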

/*
 * List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
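
/*
 * Worked example: with 4 KiB pages and 64-bit pointers each linked_page
 * carries LINKED_PAGE_DATA_SIZE == 4096 - 8 == 4088 bytes of payload;
 * __packed guarantees that no padding is inserted after the next pointer.
 */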

/*
 * List of "safe" pages (i.e. pages that were not used by the image kernel
 * before hibernation) that may be used as temporary storage for image kernel
 * memory contents.
 */
static struct linked_page *safe_pages_list;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

/**
 * get_image_page - Allocate a page for a hibernation image.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages that were not used before hibernation (restore only)
 *
 * During image restoration, for storing the PBE list and the image data, we can
 * only use memory pages that do not conflict with the pages used before
 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
 * using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 * swsusp_free() can release it.
 */
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

static void *__get_safe_page(gfp_t gfp_mask)
{
	if (safe_pages_list) {
		void *ret = safe_pages_list;

		safe_pages_list = safe_pages_list->next;
		memset(ret, 0, PAGE_SIZE);
		return ret;
	}
	return get_image_page(gfp_mask, PG_SAFE);
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)__get_safe_page(gfp_mask);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

static void recycle_safe_page(void *page_address)
{
	struct linked_page *lp = page_address;

	lp->next = safe_pages_list;
	safe_pages_list = lp;
}

/**
 * free_image_page - Free a page allocated for hibernation image.
 * @addr: Address of the page to free.
 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 *
 * The page to free should have been allocated by get_image_page() (page flags
 * set by it are affected).
 */
static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

static inline void free_list_of_pages(struct linked_page *list,
				      int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/*
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in the
 * current page. The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */
struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					   of the current page */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
		       int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
					get_image_page(ca->gfp_mask, PG_ANY);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
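
/*
 * Minimal usage sketch (not taken from this file): carve many small
 * objects out of the chain, then release them all in one go by freeing
 * the backing pages.
 *
 *	struct chain_allocator ca;
 *	struct pbe *p;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	p = chain_alloc(&ca, sizeof(struct pbe));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
 */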

/*
 * Data types related to memory bitmaps.
 *
 * Memory bitmap is a structure consisting of many linked lists of
 * objects. The main list's elements are of type struct zone_bitmap
 * and each of them corresponds to one zone. For each zone bitmap
 * object there is a list of objects of type struct bm_block that
 * represent the blocks of the bitmap in which information is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0. Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * struct zone_bitmap contains a pointer to a list of bitmap block
 * objects and a pointer to the bitmap block object that has been
 * most recently used for setting bits. Additionally, it contains the
 * PFNs that correspond to the start and end of the represented zone.
 *
 * struct bm_block contains a pointer to the memory page in which
 * information is stored (in the form of a block of the bitmap).
 * It also contains the pfns that correspond to the start and end of
 * the represented memory area.
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits. There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
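
/*
 * Worked example (assuming 4 KiB pages): BM_BITS_PER_BLOCK is
 * 4096 * 8 == 32768, so one bitmap page covers 32768 page frames,
 * i.e. 128 MiB of memory; BM_BLOCK_SHIFT is 12 + 3 == 15 and
 * BM_BLOCK_MASK is 0x7fff.
 */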

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together         */
	struct list_head nodes;		/* Radix Tree inner nodes      */
	struct list_head leaves;	/* Radix Tree leaves           */
	unsigned long start_pfn;	/* Zone start page frame       */
	unsigned long end_pfn;		/* Zone end page frame + 1     */
	struct rtree_node *rtree;	/* Radix Tree Root             */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks     */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					   bitmap objects and bitmap block
					   objects */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
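
/*
 * Worked example (64-bit, 4 KiB pages): BM_ENTRIES_PER_LEVEL is
 * 4096 / 8 == 512, so BM_RTREE_LEVEL_SHIFT is 9 and BM_RTREE_LEVEL_MASK
 * is 0x1ff; each inner node fans out to 512 children, so a single-level
 * tree already indexes 512 leaf blocks, i.e. 512 * 128 MiB == 64 GiB of
 * memory per zone.
 */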

/**
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 * @gfp_mask: GFP mask for the allocation.
 * @safe_needed: Get pages not used before hibernation (restore only)
 * @ca: Pointer to a linked list of pages ("a chain") to allocate from
 * @list: Radix Tree node to add.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree. It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/**
 * add_rtree_block - Add a new leaf node to the radix tree.
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order. This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/**
 * create_zone_bm_rtree - Create a radix tree for one zone.
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
						      int safe_needed,
						      struct chain_allocator *ca,
						      unsigned long start,
						      unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/**
 * free_zone_bm_rtree - Free the memory of the radix tree.
 *
 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - Free a list of memory extents.
 * @list: List of extents to free.
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - Create a list of memory extents.
 * @list: List to put the extents into.
 * @gfp_mask: Mask to use for memory allocations.
 *
 * The extents represent contiguous ranges of PFNs.
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}

/**
 * memory_bm_create - Allocate memory for a memory bitmap.
 */
static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
			    int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - Free memory occupied by the memory bitmap.
 * @bm: Memory bitmap.
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 *
 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 *
 * Walk the radix tree to find the page containing the bit that represents @pfn
 * and return the position of the bit in @addr and @bit_nr.
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have found the zone. Now walk the radix tree to find the leaf node
	 * for our PFN.
	 */

	/*
	 * If the zone we wish to scan is the current zone and the
	 * pfn falls into the current node then we do not need to walk
	 * the tree.
	 */
	node = bm->cur.node;
	if (zone == bm->cur.zone &&
	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jump to the next leaf node.
 *
 * Set the position to the beginning of the next node in the
 * memory bitmap. This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Return true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
		bm->cur.node = list_entry(bm->cur.node->list.next,
					  struct rtree_node, list);
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, go to the next zone */
	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
		bm->cur.zone = list_entry(bm->cur.zone->list.next,
					  struct mem_zone_bm_rtree, list);
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 * @bm: Memory bitmap.
 *
 * Starting from the last returned position this function searches for the next
 * set bit in @bm and returns the PFN represented by it. If no more bits are
 * set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the first call to
 * this function for the given memory bitmap.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
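
/*
 * Canonical iteration pattern (clear_or_poison_free_pages() and
 * swsusp_free() below use variants of it):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		...handle pfn...
 */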

/*
 * This structure represents a range of page frames the contents of which
 * should not be saved during hibernation.
 */
struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		recycle_safe_page(node->data);

	list_for_each_entry(node, &zone->leaves, list)
		recycle_safe_page(node->data);
}

static void memory_bm_recycle(struct memory_bitmap *bm)
{
	struct mem_zone_bm_rtree *zone;
	struct linked_page *p_list;

	list_for_each_entry(zone, &bm->zones, list)
		recycle_zone_bm_rtree(zone);

	p_list = bm->p_list;
	while (p_list) {
		struct linked_page *lp = p_list;

		p_list = lp->next;
		recycle_safe_page(lp);
	}
}

/**
 * register_nosave_region - Register a region of unsaveable memory.
 *
 * Register a range of page frames the contents of which should not be saved
 * during hibernation (to be used in the early initialization code).
 */
void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	/* This allocation cannot fail */
	region = memblock_alloc(sizeof(struct nosave_region),
				SMP_CACHE_BYTES);
	if (!region)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(struct nosave_region));
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - Mark pages that should not be saved.
 * @bm: Memory bitmap.
 *
 * Set the bits in @bm that correspond to the page frames the contents of which
 * should not be saved.
 */
static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
 *
 * Create bitmaps needed for marking page frames that should not be saved and
 * free page frames. The forbidden_pages_map and free_pages_map pointers are
 * only modified if everything goes well, because we don't want the bits to be
 * touched before both bitmaps are set up.
 */
int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
 *
 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
 * auxiliary pointers are necessary so that the bitmaps themselves are not
 * referred to while they are being freed.
 */
void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("Basic memory bitmaps freed\n");
}

static void clear_or_poison_free_page(struct page *page)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, 1);
	else if (want_init_on_free())
		clear_highpage(page);
}

void clear_or_poison_free_pages(void)
{
	struct memory_bitmap *bm = free_pages_map;
	unsigned long pfn;

	if (WARN_ON(!(free_pages_map)))
		return;

	if (page_poisoning_enabled() || want_init_on_free()) {
		memory_bm_position_reset(bm);
		pfn = memory_bm_next_pfn(bm);
		while (pfn != BM_END_OF_MAP) {
			if (pfn_valid(pfn))
				clear_or_poison_free_page(pfn_to_page(pfn));

			pfn = memory_bm_next_pfn(bm);
		}
		memory_bm_position_reset(bm);
		pr_info("free pages cleared after restore\n");
	}
}

/**
 * snapshot_additional_pages - Estimate the number of extra pages needed.
 * @zone: Memory zone to carry out the computation for.
 *
 * Estimate the number of additional pages needed for setting up the
 * hibernation image data structures for @zone (usually, the returned value is
 * greater than the exact number).
 */
unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
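
/*
 * Worked example (64-bit, 4 KiB pages): a zone spanning 1 GiB has 262144
 * pages, so rtree = nodes = DIV_ROUND_UP(262144, 32768) = 8 leaf blocks;
 * packing the eight 24-byte rtree_node structs adds one linked page and
 * the single inner level (DIV_ROUND_UP(8, 512) = 1) adds another, giving
 * rtree = 10 and a return value of 20 pages for the two bitmaps.
 */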

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - Compute the total number of free highmem pages.
 *
 * The returned number is system-wide.
 */
static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Check if a highmem page is saveable.
 *
 * Determine whether a highmem page should be included in a hibernation image.
 *
 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page) || PageOffline(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - Compute the total number of saveable highmem pages.
 */
static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}

/*
 * Stub added here: count_highmem_pages() is called unconditionally below
 * (from free_unnecessary_pages() and swsusp_save()), so the !CONFIG_HIGHMEM
 * build needs this trivial counterpart.
 */
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Check if the given page is saveable.
 *
 * Determine whether a non-highmem page should be included in a hibernation
 * image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_online_page(pfn);
	if (!page || page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageOffline(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - Compute the total number of saveable non-highmem pages.
 */
static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/*
 * This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}

/**
 * safe_copy_page - Copy a page in a safe way.
 *
 * Check if the page we are going to copy is marked as present in the kernel
 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
 * always returns 'true'.
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		hibernate_map_page(s_page);
		do_copy_page(dst, page_address(s_page));
		hibernate_unmap_page(s_page);
	}
}

#ifdef CONFIG_HIGHMEM
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/*
			 * The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void copy_data_pages(struct memory_bitmap *copy_bm,
			    struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for(;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
static unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages. During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released. On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - Free pages allocated for hibernation image.
 *
 * Image pages are allocated before snapshot creation, so they need to be
 * released after resume.
 */
void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	if (!forbidden_pages_map || !free_pages_map)
		goto out;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps. This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		hibernate_restore_unprotect_page(page_address(page));
		__free_page(page);
		goto loop;
	}

out:
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
	hibernate_restore_protection_end();
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image.
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base).
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	return div64_u64(x * multiplier, base);
}

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image.
 */
static unsigned long free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem, free;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}
	free = to_free_normal + to_free_highmem;

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}

	return free;
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image.
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free memory too aggressively, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
		+ global_node_page_state(NR_ACTIVE_ANON)
		+ global_node_page_state(NR_INACTIVE_ANON)
		+ global_node_page_state(NR_ACTIVE_FILE)
		+ global_node_page_state(NR_INACTIVE_FILE);

	return saveable <= size ? 0 : saveable - size;
}
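
/*
 * Worked example (hypothetical numbers): with 600000 saveable pages and
 * the five counters above summing to 450000 pages, the minimum image
 * size is 600000 - 450000 == 150000 pages, i.e. about 586 MiB with
 * 4 KiB pages; if the counters exceed the saveable count, the minimum
 * is simply 0.
 */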

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use. We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and by reserved_size divided by PAGE_SIZE, which is tunable
 * through /sys/power/reserved_size, respectively). To make this happen, we
 * compute the total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	ktime_t start, stop;
	int error;

	pr_info("Preallocating image memory\n");
	start = ktime_get();

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate original bitmap\n");
		goto err_out;
	}

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error) {
		pr_err("Cannot allocate copy bitmap\n");
		goto err_out;
	}

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it. First, make room for the largest possible
	 * image and fail if that doesn't work. Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc) {
			pr_err("Image allocation is %lu pages short\n",
				alloc - pages_highmem);
			goto err_out;
		}
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more. Release the excessive
	 * ones now.
	 */
	pages -= free_unnecessary_pages();

 out:
	stop = ktime_get();
	pr_info("Allocated %lu pages for snapshot\n", pages);
	swsusp_show_speed(start, stop, pages, "Allocated");

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}
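
/*
 * Worked example of the max_size formula (hypothetical numbers, 4 KiB
 * pages, default 1 MiB reserved_size == 256 pages): with count == 1000000
 * usable page frames and size == 500 metadata pages,
 *
 *	max_size = (1000000 - (500 + PAGES_FOR_IO)) / 2 - 2 * 256
 *
 * so a little under half of the usable page frames may end up holding
 * image copies, the rest being left free for the saved system.
 */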
1888
1889#ifdef CONFIG_HIGHMEM
1890/**
1891 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1892 *
1893 * Compute the number of non-highmem pages that will be necessary for creating
1894 * copies of highmem pages.
1895 */
1896static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1897{
1898 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1899
1900 if (free_highmem >= nr_highmem)
1901 nr_highmem = 0;
1902 else
1903 nr_highmem -= free_highmem;
1904
1905 return nr_highmem;
1906}
1907#else
1908static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1909#endif /* CONFIG_HIGHMEM */
1910
1911/**
1912 * enough_free_mem - Check if there is enough free memory for the image.
1913 */
1914static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1915{
1916 struct zone *zone;
1917 unsigned int free = alloc_normal;
1918
1919 for_each_populated_zone(zone)
1920 if (!is_highmem(zone))
1921 free += zone_page_state(zone, NR_FREE_PAGES);
1922
1923 nr_pages += count_pages_for_highmem(nr_highmem);
1924 pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1925 nr_pages, PAGES_FOR_IO, free);
1926
1927 return free > nr_pages + PAGES_FOR_IO;
1928}
1929
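/*
 * A sketch of the check above (the exact value of PAGES_FOR_IO comes from
 * <linux/suspend.h>; treat these numbers as an assumption): with
 * nr_pages = 100000 data pages, no highmem deficit, and PAGES_FOR_IO
 * equal to 1024, strictly more than 101024 free non-highmem page frames
 * must be available before image creation may proceed.
 */
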
1930#ifdef CONFIG_HIGHMEM
1931/**
1932 * get_highmem_buffer - Allocate a buffer for highmem pages.
1933 *
1934 * If there are some highmem pages in the hibernation image, we may need a
1935 * buffer to copy them and/or load their data.
1936 */
1937static inline int get_highmem_buffer(int safe_needed)
1938{
1939 buffer = get_image_page(GFP_ATOMIC, safe_needed);
1940 return buffer ? 0 : -ENOMEM;
1941}
1942
1943/**
1944 * alloc_highmem_pages - Allocate some highmem pages for the image.
1945 *
1946 * Try to allocate as many pages as needed, but if the number of free highmem
1947 * pages is less than that, allocate them all.
1948 */
1949static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1950 unsigned int nr_highmem)
1951{
1952 unsigned int to_alloc = count_free_highmem_pages();
1953
1954 if (to_alloc > nr_highmem)
1955 to_alloc = nr_highmem;
1956
1957 nr_highmem -= to_alloc;
1958 while (to_alloc-- > 0) {
1959 struct page *page;
1960
1961 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1962 memory_bm_set_bit(bm, page_to_pfn(page));
1963 }
1964 return nr_highmem;
1965}
1966#else
1967static inline int get_highmem_buffer(int safe_needed) { return 0; }
1968
1969static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1970 unsigned int n) { return 0; }
1971#endif /* CONFIG_HIGHMEM */
1972
1973/**
1974 * swsusp_alloc - Allocate memory for hibernation image.
1975 *
1976 * We first try to allocate as many highmem pages as there are
1977 * saveable highmem pages in the system. If that fails, we allocate
1978 * non-highmem pages for the copies of the remaining highmem ones.
1979 *
1980 * In this approach it is likely that the copies of highmem pages will
1981 * also be located in the high memory, because of the way in which
1982 * copy_data_pages() works.
1983 */
1984static int swsusp_alloc(struct memory_bitmap *copy_bm,
1985 unsigned int nr_pages, unsigned int nr_highmem)
1986{
1987 if (nr_highmem > 0) {
1988 if (get_highmem_buffer(PG_ANY))
1989 goto err_out;
1990 if (nr_highmem > alloc_highmem) {
1991 nr_highmem -= alloc_highmem;
1992 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1993 }
1994 }
1995 if (nr_pages > alloc_normal) {
1996 nr_pages -= alloc_normal;
1997 while (nr_pages-- > 0) {
1998 struct page *page;
1999
2000 page = alloc_image_page(GFP_ATOMIC);
2001 if (!page)
2002 goto err_out;
2003 memory_bm_set_bit(copy_bm, page_to_pfn(page));
2004 }
2005 }
2006
2007 return 0;
2008
2009 err_out:
2010 swsusp_free();
2011 return -ENOMEM;
2012}
2013
2014asmlinkage __visible int swsusp_save(void)
2015{
2016 unsigned int nr_pages, nr_highmem;
2017
2018 pr_info("Creating image:\n");
2019
2020 drain_local_pages(NULL);
2021 nr_pages = count_data_pages();
2022 nr_highmem = count_highmem_pages();
2023 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
2024
2025 if (!enough_free_mem(nr_pages, nr_highmem)) {
2026 pr_err("Not enough free memory\n");
2027 return -ENOMEM;
2028 }
2029
2030	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
2031 pr_err("Memory allocation failed\n");
2032 return -ENOMEM;
2033 }
2034
2035 /*
2036	 * New cold pages may appear while the suspend pagedir is being
2037	 * allocated. Kill them.
2038 */
2039 drain_local_pages(NULL);
2040	copy_data_pages(&copy_bm, &orig_bm);
2041
2042 /*
2043 * End of critical section. From now on, we can write to memory,
2044	 * but we should not touch the disk. In particular, we must _not_
2045	 * touch swap space, except of course to write out the image itself.
2046 */
2047
2048 nr_pages += nr_highmem;
2049 nr_copy_pages = nr_pages;
2050 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2051
2052 pr_info("Image created (%d pages copied)\n", nr_pages);
2053
2054 return 0;
2055}
2056
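/*
 * Worked example for the nr_meta_pages computation above (numbers are
 * illustrative, assuming 4 KiB pages and 8-byte longs): one meta page
 * holds PAGE_SIZE / sizeof(long) = 512 PFNs, so an image of 100000
 * copied pages needs DIV_ROUND_UP(100000 * 8, 4096) = 196 meta pages.
 */
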
2057#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2058static int init_header_complete(struct swsusp_info *info)
2059{
2060 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2061 info->version_code = LINUX_VERSION_CODE;
2062 return 0;
2063}
2064
2065static const char *check_image_kernel(struct swsusp_info *info)
2066{
2067 if (info->version_code != LINUX_VERSION_CODE)
2068 return "kernel version";
2069	if (strcmp(info->uts.sysname, init_utsname()->sysname))
2070		return "system type";
2071	if (strcmp(info->uts.release, init_utsname()->release))
2072		return "kernel release";
2073	if (strcmp(info->uts.version, init_utsname()->version))
2074		return "version";
2075	if (strcmp(info->uts.machine, init_utsname()->machine))
2076		return "machine";
2077 return NULL;
2078}
2079#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2080
2081unsigned long snapshot_get_image_size(void)
2082{
2083 return nr_copy_pages + nr_meta_pages + 1;
2084}
2085
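/*
 * The total image size is therefore the copied data pages plus the meta
 * pages holding their PFNs plus one page for the header. Continuing the
 * illustrative example above: 100000 + 196 + 1 = 100197 page frames,
 * i.e. roughly 391 MiB with 4 KiB pages.
 */
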
2086static int init_header(struct swsusp_info *info)
2087{
2088 memset(info, 0, sizeof(struct swsusp_info));
2089 info->num_physpages = get_num_physpages();
2090 info->image_pages = nr_copy_pages;
2091 info->pages = snapshot_get_image_size();
2092 info->size = info->pages;
2093 info->size <<= PAGE_SHIFT;
2094 return init_header_complete(info);
2095}
2096
2097/**
2098 * pack_pfns - Prepare PFNs for saving.
2099 * @bm: Memory bitmap.
2100 * @buf: Memory buffer to store the PFNs in.
2101 *
2102 * PFNs corresponding to set bits in @bm are stored in the area of memory
2103 * pointed to by @buf (1 page at a time).
2104 */
2105static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2106{
2107 int j;
2108
2109 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2110 buf[j] = memory_bm_next_pfn(bm);
2111 if (unlikely(buf[j] == BM_END_OF_MAP))
2112 break;
2113 }
2114}
2115
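/*
 * Layout sketch of one meta page produced by pack_pfns() (an
 * illustration, assuming 8-byte longs):
 *
 *	buf[0]   = PFN of the 1st image page in this chunk
 *	buf[1]   = PFN of the 2nd image page
 *	...
 *	buf[511] = PFN of the 512th image page
 *
 * If the bitmap runs out before the page is full, the next slot holds
 * BM_END_OF_MAP and the remaining slots stay zero, because the caller
 * clears the buffer with clear_page() first.
 */
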
2116/**
2117 * snapshot_read_next - Get the address to read the next image page from.
2118 * @handle: Snapshot handle to be used for the reading.
2119 *
2120 * On the first call, @handle should point to a zeroed snapshot_handle
2121 * structure. The structure is then populated and a pointer to it should be
2122 * passed to this function on each subsequent call.
2123 *
2124 * On success, the function returns a positive number. Then, the caller
2125 * is allowed to read up to the returned number of bytes from the memory
2126 * location computed by the data_of() macro.
2127 *
2128 * The function returns 0 to indicate the end of the data stream. Negative
2129 * numbers are returned on errors, in which case the structure pointed to
2130 * by @handle is not updated and should not be used any more.
2131 */
2132int snapshot_read_next(struct snapshot_handle *handle)
2133{
2134 if (handle->cur > nr_meta_pages + nr_copy_pages)
2135 return 0;
2136
2137 if (!buffer) {
2138 /* This makes the buffer be freed by swsusp_free() */
2139 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2140 if (!buffer)
2141 return -ENOMEM;
2142 }
2143 if (!handle->cur) {
2144 int error;
2145
2146 error = init_header((struct swsusp_info *)buffer);
2147 if (error)
2148 return error;
2149 handle->buffer = buffer;
2150 memory_bm_position_reset(&orig_bm);
2151		memory_bm_position_reset(&copy_bm);
2152 } else if (handle->cur <= nr_meta_pages) {
2153 clear_page(buffer);
2154 pack_pfns(buffer, &orig_bm);
2155 } else {
2156 struct page *page;
2157
2158		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2159 if (PageHighMem(page)) {
2160 /*
2161 * Highmem pages are copied to the buffer,
2162 * because we can't return with a kmapped
2163 * highmem page (we may not be called again).
2164 */
2165 void *kaddr;
2166
2167 kaddr = kmap_atomic(page);
2168 copy_page(buffer, kaddr);
2169 kunmap_atomic(kaddr);
2170 handle->buffer = buffer;
2171 } else {
2172 handle->buffer = page_address(page);
2173 }
2174 }
2175 handle->cur++;
2176 return PAGE_SIZE;
2177}
2178
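/*
 * Illustrative caller loop (a sketch only; write_page_to_storage() is a
 * hypothetical sink, the real callers live in the swap-writing code):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page_to_storage(data_of(handle));
 *
 *	return ret;	// 0 on end of image, negative on error
 */
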
2179static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2180 struct memory_bitmap *src)
2181{
2182 unsigned long pfn;
2183
2184 memory_bm_position_reset(src);
2185 pfn = memory_bm_next_pfn(src);
2186 while (pfn != BM_END_OF_MAP) {
2187 memory_bm_set_bit(dst, pfn);
2188 pfn = memory_bm_next_pfn(src);
2189 }
2190}
2191
2192/**
2193 * mark_unsafe_pages - Mark pages that were used before hibernation.
2194 *
2195 * Mark the pages that cannot be used for storing the image during restoration,
2196 * because they conflict with the pages that had been used before hibernation.
2197 */
2198static void mark_unsafe_pages(struct memory_bitmap *bm)
2199{
2200 unsigned long pfn;
2201
2202 /* Clear the "free"/"unsafe" bit for all PFNs */
2203 memory_bm_position_reset(free_pages_map);
2204 pfn = memory_bm_next_pfn(free_pages_map);
2205 while (pfn != BM_END_OF_MAP) {
2206 memory_bm_clear_current(free_pages_map);
2207 pfn = memory_bm_next_pfn(free_pages_map);
2208 }
2209
2210 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2211 duplicate_memory_bitmap(free_pages_map, bm);
2212
2213 allocated_unsafe_pages = 0;
2214}
2215
2216static int check_header(struct swsusp_info *info)
2217{
2218 const char *reason;
2219
2220 reason = check_image_kernel(info);
2221 if (!reason && info->num_physpages != get_num_physpages())
2222 reason = "memory size";
2223 if (reason) {
2224 pr_err("Image mismatch: %s\n", reason);
2225 return -EPERM;
2226 }
2227 return 0;
2228}
2229
2230/**
2231 * load_header - Check the image header and copy the data from it.
2232 */
2233static int load_header(struct swsusp_info *info)
2234{
2235 int error;
2236
2237 restore_pblist = NULL;
2238 error = check_header(info);
2239 if (!error) {
2240 nr_copy_pages = info->image_pages;
2241 nr_meta_pages = info->pages - info->image_pages - 1;
2242 }
2243 return error;
2244}
2245
2246/**
2247 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2248 * @bm: Memory bitmap.
2249 * @buf: Area of memory containing the PFNs.
2250 *
2251 * For each element of the array pointed to by @buf (1 page at a time), set the
2252 * corresponding bit in @bm.
2253 */
2254static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2255{
2256 int j;
2257
2258 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2259 if (unlikely(buf[j] == BM_END_OF_MAP))
2260 break;
2261
2262 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j])) {
2263 memory_bm_set_bit(bm, buf[j]);
2264 } else {
2265 if (!pfn_valid(buf[j]))
2266 pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
2267 (unsigned long long)PFN_PHYS(buf[j]));
2268 return -EFAULT;
2269 }
2270 }
2271
2272 return 0;
2273}
2274
2275#ifdef CONFIG_HIGHMEM
2276/*
2277 * struct highmem_pbe is used for creating the list of highmem pages that
2278 * should be restored atomically during the resume from disk, because the page
2279 * frames they have occupied before the suspend are in use.
2280 */
2281struct highmem_pbe {
2282 struct page *copy_page; /* data is here now */
2283 struct page *orig_page; /* data was here before the suspend */
2284 struct highmem_pbe *next;
2285};
2286
2287/*
2288 * List of highmem PBEs needed for restoring the highmem pages that were
2289 * allocated before the suspend and included in the suspend image, but have
2290 * also been allocated by the "resume" kernel, so their contents cannot be
2291 * written directly to their "original" page frames.
2292 */
2293static struct highmem_pbe *highmem_pblist;
2294
2295/**
2296 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2297 * @bm: Memory bitmap.
2298 *
2299 * The bits in @bm that correspond to image pages are assumed to be set.
2300 */
2301static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2302{
2303 unsigned long pfn;
2304 unsigned int cnt = 0;
2305
2306 memory_bm_position_reset(bm);
2307 pfn = memory_bm_next_pfn(bm);
2308 while (pfn != BM_END_OF_MAP) {
2309 if (PageHighMem(pfn_to_page(pfn)))
2310 cnt++;
2311
2312 pfn = memory_bm_next_pfn(bm);
2313 }
2314 return cnt;
2315}
2316
2317static unsigned int safe_highmem_pages;
2318
2319static struct memory_bitmap *safe_highmem_bm;
2320
2321/**
2322 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2323 * @bm: Pointer to an uninitialized memory bitmap structure.
2324 * @nr_highmem_p: Pointer to the number of highmem image pages.
2325 *
2326 * Try to allocate as many highmem pages as there are highmem image pages
2327 * (@nr_highmem_p points to the variable containing the number of highmem image
2328 * pages). The pages that are "safe" (ie. will not be overwritten when the
2329 * hibernation image is restored entirely) have the corresponding bits set in
2330 * @bm (it must be uninitialized).
2331 *
2332 * NOTE: This function should not be called if there are no highmem image pages.
2333 */
2334static int prepare_highmem_image(struct memory_bitmap *bm,
2335 unsigned int *nr_highmem_p)
2336{
2337 unsigned int to_alloc;
2338
2339 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2340 return -ENOMEM;
2341
2342 if (get_highmem_buffer(PG_SAFE))
2343 return -ENOMEM;
2344
2345 to_alloc = count_free_highmem_pages();
2346 if (to_alloc > *nr_highmem_p)
2347 to_alloc = *nr_highmem_p;
2348 else
2349 *nr_highmem_p = to_alloc;
2350
2351 safe_highmem_pages = 0;
2352 while (to_alloc-- > 0) {
2353 struct page *page;
2354
2355 page = alloc_page(__GFP_HIGHMEM);
2356 if (!swsusp_page_is_free(page)) {
2357			/* The page is "safe", set its bit in the bitmap */
2358 memory_bm_set_bit(bm, page_to_pfn(page));
2359 safe_highmem_pages++;
2360 }
2361 /* Mark the page as allocated */
2362 swsusp_set_page_forbidden(page);
2363 swsusp_set_page_free(page);
2364 }
2365 memory_bm_position_reset(bm);
2366 safe_highmem_bm = bm;
2367 return 0;
2368}
2369
2370static struct page *last_highmem_page;
2371
2372/**
2373 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2374 *
2375 * For a given highmem image page get a buffer that snapshot_write_next() should
2376 * return to its caller to write to.
2377 *
2378 * If the page is to be saved to its "original" page frame or a copy of
2379 * the page is to be made in the highmem, @buffer is returned. Otherwise,
2380 * the copy of the page is to be made in normal memory, so the address of
2381 * the copy is returned.
2382 *
2383 * If @buffer is returned, the caller of snapshot_write_next() will write
2384 * the page's contents to @buffer, so they will have to be copied to the
2385 * right location on the next call to snapshot_write_next() and it is done
2386 * with the help of copy_last_highmem_page(). For this purpose, if
2387 * @buffer is returned, @last_highmem_page is set to the page to which
2388 * the data will have to be copied from @buffer.
2389 */
2390static void *get_highmem_page_buffer(struct page *page,
2391 struct chain_allocator *ca)
2392{
2393 struct highmem_pbe *pbe;
2394 void *kaddr;
2395
2396 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2397 /*
2398 * We have allocated the "original" page frame and we can
2399 * use it directly to store the loaded page.
2400 */
2401 last_highmem_page = page;
2402 return buffer;
2403 }
2404 /*
2405 * The "original" page frame has not been allocated and we have to
2406 * use a "safe" page frame to store the loaded page.
2407 */
2408 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2409 if (!pbe) {
2410 swsusp_free();
2411 return ERR_PTR(-ENOMEM);
2412 }
2413 pbe->orig_page = page;
2414 if (safe_highmem_pages > 0) {
2415 struct page *tmp;
2416
2417 /* Copy of the page will be stored in high memory */
2418 kaddr = buffer;
2419 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2420 safe_highmem_pages--;
2421 last_highmem_page = tmp;
2422 pbe->copy_page = tmp;
2423 } else {
2424 /* Copy of the page will be stored in normal memory */
2425 kaddr = safe_pages_list;
2426 safe_pages_list = safe_pages_list->next;
2427 pbe->copy_page = virt_to_page(kaddr);
2428 }
2429 pbe->next = highmem_pblist;
2430 highmem_pblist = pbe;
2431 return kaddr;
2432}
2433
2434/**
2435 * copy_last_highmem_page - Copy the most recent highmem image page.
2436 *
2437 * Copy the contents of a highmem image page from @buffer, where the caller of
2438 * snapshot_write_next() has stored them, to the right location represented by
2439 * @last_highmem_page.
2440 */
2441static void copy_last_highmem_page(void)
2442{
2443 if (last_highmem_page) {
2444 void *dst;
2445
2446 dst = kmap_atomic(last_highmem_page);
2447 copy_page(dst, buffer);
2448 kunmap_atomic(dst);
2449 last_highmem_page = NULL;
2450 }
2451}
2452
2453static inline int last_highmem_page_copied(void)
2454{
2455 return !last_highmem_page;
2456}
2457
2458static inline void free_highmem_data(void)
2459{
2460 if (safe_highmem_bm)
2461 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2462
2463 if (buffer)
2464 free_image_page(buffer, PG_UNSAFE_CLEAR);
2465}
2466#else
2467static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2468
2469static inline int prepare_highmem_image(struct memory_bitmap *bm,
2470 unsigned int *nr_highmem_p) { return 0; }
2471
2472static inline void *get_highmem_page_buffer(struct page *page,
2473 struct chain_allocator *ca)
2474{
2475 return ERR_PTR(-EINVAL);
2476}
2477
2478static inline void copy_last_highmem_page(void) {}
2479static inline int last_highmem_page_copied(void) { return 1; }
2480static inline void free_highmem_data(void) {}
2481#endif /* CONFIG_HIGHMEM */
2482
2483#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2484
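/*
 * Back-of-the-envelope (assuming a 64-bit build with 4 KiB pages):
 * struct pbe is three pointers, i.e. 24 bytes, and LINKED_PAGE_DATA_SIZE
 * is 4096 - 8 = 4088 bytes, so PBES_PER_LINKED_PAGE works out to 170
 * PBEs per linked page.
 */
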
2485/**
2486 * prepare_image - Make room for loading hibernation image.
2487 * @new_bm: Uninitialized memory bitmap structure.
2488 * @bm: Memory bitmap with unsafe pages marked.
2489 *
2490 * Use @bm to mark the pages that will be overwritten in the process of
2491 * restoring the system memory state from the suspend image ("unsafe" pages)
2492 * and allocate memory for the image.
2493 *
2494 * The idea is to allocate a new memory bitmap first and then allocate
2495 * as many pages as needed for image data, but without specifying what those
2496 * pages will be used for just yet. Instead, we mark them all as allocated and
2497 * create a list of "safe" pages to be used later. On systems with high
2498 * memory a list of "safe" highmem pages is created too.
2499 */
2500static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2501{
2502 unsigned int nr_pages, nr_highmem;
2503 struct linked_page *lp;
2504 int error;
2505
2506 /* If there is no highmem, the buffer will not be necessary */
2507 free_image_page(buffer, PG_UNSAFE_CLEAR);
2508 buffer = NULL;
2509
2510 nr_highmem = count_highmem_image_pages(bm);
2511 mark_unsafe_pages(bm);
2512
2513 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2514 if (error)
2515 goto Free;
2516
2517 duplicate_memory_bitmap(new_bm, bm);
2518 memory_bm_free(bm, PG_UNSAFE_KEEP);
2519 if (nr_highmem > 0) {
2520 error = prepare_highmem_image(bm, &nr_highmem);
2521 if (error)
2522 goto Free;
2523 }
2524 /*
2525 * Reserve some safe pages for potential later use.
2526 *
2527 * NOTE: This way we make sure there will be enough safe pages for the
2528 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2529 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2530 *
2531 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2532 */
2533 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2534 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2535 while (nr_pages > 0) {
2536 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2537 if (!lp) {
2538 error = -ENOMEM;
2539 goto Free;
2540 }
2541 lp->next = safe_pages_list;
2542 safe_pages_list = lp;
2543 nr_pages--;
2544 }
2545 /* Preallocate memory for the image */
2546 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2547 while (nr_pages > 0) {
2548 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2549 if (!lp) {
2550 error = -ENOMEM;
2551 goto Free;
2552 }
2553 if (!swsusp_page_is_free(virt_to_page(lp))) {
2554 /* The page is "safe", add it to the list */
2555 lp->next = safe_pages_list;
2556 safe_pages_list = lp;
2557 }
2558 /* Mark the page as allocated */
2559 swsusp_set_page_forbidden(virt_to_page(lp));
2560 swsusp_set_page_free(virt_to_page(lp));
2561 nr_pages--;
2562 }
2563 return 0;
2564
2565 Free:
2566 swsusp_free();
2567 return error;
2568}
2569
2570/**
2571 * get_buffer - Get the address to store the next image data page.
2572 *
2573 * Get the address that snapshot_write_next() should return to its caller to
2574 * write to.
2575 */
2576static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2577{
2578 struct pbe *pbe;
2579 struct page *page;
2580 unsigned long pfn = memory_bm_next_pfn(bm);
2581
2582 if (pfn == BM_END_OF_MAP)
2583 return ERR_PTR(-EFAULT);
2584
2585 page = pfn_to_page(pfn);
2586 if (PageHighMem(page))
2587 return get_highmem_page_buffer(page, ca);
2588
2589 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2590 /*
2591 * We have allocated the "original" page frame and we can
2592 * use it directly to store the loaded page.
2593 */
2594 return page_address(page);
2595
2596 /*
2597 * The "original" page frame has not been allocated and we have to
2598 * use a "safe" page frame to store the loaded page.
2599 */
2600 pbe = chain_alloc(ca, sizeof(struct pbe));
2601 if (!pbe) {
2602 swsusp_free();
2603 return ERR_PTR(-ENOMEM);
2604 }
2605 pbe->orig_address = page_address(page);
2606 pbe->address = safe_pages_list;
2607 safe_pages_list = safe_pages_list->next;
2608 pbe->next = restore_pblist;
2609 restore_pblist = pbe;
2610 return pbe->address;
2611}
2612
2613/**
2614 * snapshot_write_next - Get the address to store the next image page.
2615 * @handle: Snapshot handle structure to guide the writing.
2616 *
2617 * On the first call, @handle should point to a zeroed snapshot_handle
2618 * structure. The structure is then populated and a pointer to it should be
2619 * passed to this function on each subsequent call.
2620 *
2621 * On success, the function returns a positive number. Then, the caller
2622 * is allowed to write up to the returned number of bytes to the memory
2623 * location computed by the data_of() macro.
2624 *
2625 * The function returns 0 to indicate the "end of file" condition. Negative
2626 * numbers are returned on errors, in which cases the structure pointed to by
2627 * @handle is not updated and should not be used any more.
2628 */
2629int snapshot_write_next(struct snapshot_handle *handle)
2630{
2631 static struct chain_allocator ca;
2632 int error = 0;
2633
2634 /* Check if we have already loaded the entire image */
2635 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2636 return 0;
2637
2638 handle->sync_read = 1;
2639
2640 if (!handle->cur) {
2641 if (!buffer)
2642 /* This makes the buffer be freed by swsusp_free() */
2643 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2644
2645 if (!buffer)
2646 return -ENOMEM;
2647
2648 handle->buffer = buffer;
2649 } else if (handle->cur == 1) {
2650 error = load_header(buffer);
2651 if (error)
2652 return error;
2653
2654 safe_pages_list = NULL;
2655
2656		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2657 if (error)
2658 return error;
2659
2660 hibernate_restore_protection_begin();
2661 } else if (handle->cur <= nr_meta_pages + 1) {
2662		error = unpack_orig_pfns(buffer, &copy_bm);
2663 if (error)
2664 return error;
2665
2666 if (handle->cur == nr_meta_pages + 1) {
2667			error = prepare_image(&orig_bm, &copy_bm);
2668 if (error)
2669 return error;
2670
2671 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2672 memory_bm_position_reset(&orig_bm);
2673 restore_pblist = NULL;
2674 handle->buffer = get_buffer(&orig_bm, &ca);
2675 handle->sync_read = 0;
2676 if (IS_ERR(handle->buffer))
2677 return PTR_ERR(handle->buffer);
2678 }
2679 } else {
2680 copy_last_highmem_page();
2681 hibernate_restore_protect_page(handle->buffer);
2682 handle->buffer = get_buffer(&orig_bm, &ca);
2683 if (IS_ERR(handle->buffer))
2684 return PTR_ERR(handle->buffer);
2685 if (handle->buffer != buffer)
2686 handle->sync_read = 0;
2687 }
2688 handle->cur++;
2689 return PAGE_SIZE;
2690}
2691
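/*
 * Illustrative restore-side loop (a sketch only; read_page_from_image()
 * is a hypothetical source of image data, error handling trimmed):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0)
 *		read_page_from_image(data_of(handle));
 *
 *	if (ret >= 0)
 *		snapshot_write_finalize(&handle);
 */
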
2692/**
2693 * snapshot_write_finalize - Complete the loading of a hibernation image.
2694 *
2695 * Must be called after the last call to snapshot_write_next() in case the last
2696 * page in the image happens to be a highmem page and its contents should be
2697 * stored in highmem. Additionally, it recycles bitmap memory that's not
2698 * necessary any more.
2699 */
2700void snapshot_write_finalize(struct snapshot_handle *handle)
2701{
2702 copy_last_highmem_page();
2703 hibernate_restore_protect_page(handle->buffer);
2704 /* Do that only if we have loaded the image entirely */
2705 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2706 memory_bm_recycle(&orig_bm);
2707 free_highmem_data();
2708 }
2709}
2710
2711int snapshot_image_loaded(struct snapshot_handle *handle)
2712{
2713 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2714 handle->cur <= nr_meta_pages + nr_copy_pages);
2715}
2716
2717#ifdef CONFIG_HIGHMEM
2718/* Assumes that @buf is ready and points to a "safe" page */
2719static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2720 void *buf)
2721{
2722 void *kaddr1, *kaddr2;
2723
2724 kaddr1 = kmap_atomic(p1);
2725 kaddr2 = kmap_atomic(p2);
2726 copy_page(buf, kaddr1);
2727 copy_page(kaddr1, kaddr2);
2728 copy_page(kaddr2, buf);
2729 kunmap_atomic(kaddr2);
2730 kunmap_atomic(kaddr1);
2731}
2732
2733/**
2734 * restore_highmem - Put highmem image pages into their original locations.
2735 *
2736 * For each highmem page that was in use before hibernation and is included in
2737 * the image, and also has been allocated by the "restore" kernel, swap its
2738 * current contents with the previous (ie. "before hibernation") ones.
2739 *
2740 * If the restore eventually fails, we can call this function once again and
2741 * restore the highmem state as seen by the restore kernel.
2742 */
2743int restore_highmem(void)
2744{
2745 struct highmem_pbe *pbe = highmem_pblist;
2746 void *buf;
2747
2748 if (!pbe)
2749 return 0;
2750
2751 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2752 if (!buf)
2753 return -ENOMEM;
2754
2755 while (pbe) {
2756 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2757 pbe = pbe->next;
2758 }
2759 free_image_page(buf, PG_UNSAFE_CLEAR);
2760 return 0;
2761}
2762#endif /* CONFIG_HIGHMEM */
119/*
120 * Number of bytes to reserve for memory allocations made by device drivers
121 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
122 * cause image creation to fail (tunable via /sys/power/reserved_size).
123 */
124unsigned long reserved_size;
125
126void __init hibernate_reserved_size_init(void)
127{
128 reserved_size = SPARE_PAGES * PAGE_SIZE;
129}
130
131/*
132 * Preferred image size in bytes (tunable via /sys/power/image_size).
133 * When it is set to N, swsusp will do its best to ensure the image
134 * size will not exceed N bytes, but if that is impossible, it will
135 * try to create the smallest image possible.
136 */
137unsigned long image_size;
138
139void __init hibernate_image_size_init(void)
140{
141 image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
142}
143
144/*
145 * List of PBEs needed for restoring the pages that were allocated before
146 * the suspend and included in the suspend image, but have also been
147 * allocated by the "resume" kernel, so their contents cannot be written
148 * directly to their "original" page frames.
149 */
150struct pbe *restore_pblist;
151
152/* struct linked_page is used to build chains of pages */
153
154#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
155
156struct linked_page {
157 struct linked_page *next;
158 char data[LINKED_PAGE_DATA_SIZE];
159} __packed;
160
161/*
162 * List of "safe" pages (ie. pages that were not used by the image kernel
163 * before hibernation) that may be used as temporary storage for image kernel
164 * memory contents.
165 */
166static struct linked_page *safe_pages_list;
167
168/* Pointer to an auxiliary buffer (1 page) */
169static void *buffer;
170
171#define PG_ANY 0
172#define PG_SAFE 1
173#define PG_UNSAFE_CLEAR 1
174#define PG_UNSAFE_KEEP 0
175
176static unsigned int allocated_unsafe_pages;
177
178/**
179 * get_image_page - Allocate a page for a hibernation image.
180 * @gfp_mask: GFP mask for the allocation.
181 * @safe_needed: Get pages that were not used before hibernation (restore only)
182 *
183 * During image restoration, for storing the PBE list and the image data, we can
184 * only use memory pages that do not conflict with the pages used before
185 * hibernation. The "unsafe" pages have PageNosaveFree set and we count them
186 * using allocated_unsafe_pages.
187 *
188 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
189 * swsusp_free() can release it.
190 */
191static void *get_image_page(gfp_t gfp_mask, int safe_needed)
192{
193 void *res;
194
195 res = (void *)get_zeroed_page(gfp_mask);
196 if (safe_needed)
197 while (res && swsusp_page_is_free(virt_to_page(res))) {
198 /* The page is unsafe, mark it for swsusp_free() */
199 swsusp_set_page_forbidden(virt_to_page(res));
200 allocated_unsafe_pages++;
201 res = (void *)get_zeroed_page(gfp_mask);
202 }
203 if (res) {
204 swsusp_set_page_forbidden(virt_to_page(res));
205 swsusp_set_page_free(virt_to_page(res));
206 }
207 return res;
208}
209
210static void *__get_safe_page(gfp_t gfp_mask)
211{
212 if (safe_pages_list) {
213 void *ret = safe_pages_list;
214
215 safe_pages_list = safe_pages_list->next;
216 memset(ret, 0, PAGE_SIZE);
217 return ret;
218 }
219 return get_image_page(gfp_mask, PG_SAFE);
220}
221
222unsigned long get_safe_page(gfp_t gfp_mask)
223{
224 return (unsigned long)__get_safe_page(gfp_mask);
225}
226
227static struct page *alloc_image_page(gfp_t gfp_mask)
228{
229 struct page *page;
230
231 page = alloc_page(gfp_mask);
232 if (page) {
233 swsusp_set_page_forbidden(page);
234 swsusp_set_page_free(page);
235 }
236 return page;
237}
238
239static void recycle_safe_page(void *page_address)
240{
241 struct linked_page *lp = page_address;
242
243 lp->next = safe_pages_list;
244 safe_pages_list = lp;
245}
246
247/**
248 * free_image_page - Free a page allocated for hibernation image.
249 * @addr: Address of the page to free.
250 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
251 *
252 * The page to free should have been allocated by get_image_page() (page flags
253 * set by it are affected).
254 */
255static inline void free_image_page(void *addr, int clear_nosave_free)
256{
257 struct page *page;
258
259 BUG_ON(!virt_addr_valid(addr));
260
261 page = virt_to_page(addr);
262
263 swsusp_unset_page_forbidden(page);
264 if (clear_nosave_free)
265 swsusp_unset_page_free(page);
266
267 __free_page(page);
268}
269
270static inline void free_list_of_pages(struct linked_page *list,
271 int clear_page_nosave)
272{
273 while (list) {
274 struct linked_page *lp = list->next;
275
276 free_image_page(list, clear_page_nosave);
277 list = lp;
278 }
279}
280
281/*
282 * struct chain_allocator is used for allocating small objects out of
283 * a linked list of pages called 'the chain'.
284 *
285 * The chain grows whenever there is no room for a new object in
286 * the current page. The allocated objects cannot be freed individually.
287 * It is only possible to free them all at once, by freeing the entire
288 * chain.
289 *
290 * NOTE: The chain allocator may be inefficient if the allocated objects
291 * are not much smaller than PAGE_SIZE.
292 */
293struct chain_allocator {
294 struct linked_page *chain; /* the chain */
295 unsigned int used_space; /* total size of objects allocated out
296 of the current page */
297 gfp_t gfp_mask; /* mask for allocating pages */
298 int safe_needed; /* if set, only "safe" pages are allocated */
299};
300
301static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
302 int safe_needed)
303{
304 ca->chain = NULL;
305 ca->used_space = LINKED_PAGE_DATA_SIZE;
306 ca->gfp_mask = gfp_mask;
307 ca->safe_needed = safe_needed;
308}
309
310static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
311{
312 void *ret;
313
314 if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
315 struct linked_page *lp;
316
317 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
318 get_image_page(ca->gfp_mask, PG_ANY);
319 if (!lp)
320 return NULL;
321
322 lp->next = ca->chain;
323 ca->chain = lp;
324 ca->used_space = 0;
325 }
326 ret = ca->chain->data + ca->used_space;
327 ca->used_space += size;
328 return ret;
329}
330
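/*
 * Usage sketch for the chain allocator (chain_init() and chain_alloc()
 * are the functions defined above; the struct pbe allocation mirrors
 * what the restore code does later in this file):
 *
 *	struct chain_allocator ca;
 *	struct pbe *pbe;
 *
 *	chain_init(&ca, GFP_ATOMIC, PG_SAFE);
 *	pbe = chain_alloc(&ca, sizeof(struct pbe));
 *	if (!pbe)
 *		return -ENOMEM;
 *
 * Individual objects cannot be freed; the whole chain is released at
 * once (here via swsusp_free()).
 */
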
331/*
332 * Data types related to memory bitmaps.
333 *
334 * Memory bitmap is a structure consisting of many linked lists of
335 * objects. The main list's elements are of type struct zone_bitmap
336 * and each of them corresponds to one zone. For each zone bitmap
337 * object there is a list of objects of type struct bm_block that
338 * represent the blocks of the bitmap in which the information is stored.
339 *
340 * struct memory_bitmap contains a pointer to the main list of zone
341 * bitmap objects, a struct bm_position used for browsing the bitmap,
342 * and a pointer to the list of pages used for allocating all of the
343 * zone bitmap objects and bitmap block objects.
344 *
345 * NOTE: It has to be possible to lay out the bitmap in memory
346 * using only allocations of order 0. Additionally, the bitmap is
347 * designed to work with an arbitrary number of zones (this is over the
348 * top for now, but let's avoid making unnecessary assumptions ;-).
349 *
350 * struct zone_bitmap contains a pointer to a list of bitmap block
351 * objects and a pointer to the bitmap block object that has been
352 * most recently used for setting bits. Additionally, it contains the
353 * PFNs that correspond to the start and end of the represented zone.
354 *
355 * struct bm_block contains a pointer to the memory page in which
356 * the information is stored (in the form of a block of the bitmap).
357 * It also contains the pfns that correspond to the start and end of
358 * the represented memory area.
359 *
360 * The memory bitmap is organized as a radix tree to guarantee fast random
361 * access to the bits. There is one radix tree for each zone (as returned
362 * from create_mem_extents).
363 *
364 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
365 * two linked lists for the nodes of the tree, one for the inner nodes and
366 * one for the leaf nodes. The linked leaf nodes are used for fast linear
367 * access of the memory bitmap.
368 *
369 * The struct rtree_node represents one node of the radix tree.
370 */
371
372#define BM_END_OF_MAP (~0UL)
373
374#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
375#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
376#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
377
378/*
379 * struct rtree_node is a wrapper struct to link the nodes
380 * of the rtree together for easy linear iteration over the
381 * bits and for easy freeing.
382 */
383struct rtree_node {
384 struct list_head list;
385 unsigned long *data;
386};
387
388/*
389 * struct mem_zone_bm_rtree represents a bitmap used for one
390 * populated memory zone.
391 */
392struct mem_zone_bm_rtree {
393 struct list_head list; /* Link Zones together */
394 struct list_head nodes; /* Radix Tree inner nodes */
395 struct list_head leaves; /* Radix Tree leaves */
396 unsigned long start_pfn; /* Zone start page frame */
397 unsigned long end_pfn; /* Zone end page frame + 1 */
398 struct rtree_node *rtree; /* Radix Tree Root */
399 int levels; /* Number of Radix Tree Levels */
400 unsigned int blocks; /* Number of Bitmap Blocks */
401};
402
403/* struct bm_position is used for browsing memory bitmaps */
404
405struct bm_position {
406 struct mem_zone_bm_rtree *zone;
407 struct rtree_node *node;
408 unsigned long node_pfn;
409 unsigned long cur_pfn;
410 int node_bit;
411};
412
413struct memory_bitmap {
414 struct list_head zones;
415 struct linked_page *p_list; /* list of pages used to store zone
416 bitmap objects and bitmap block
417 objects */
418 struct bm_position cur; /* most recently used bit position */
419};
420
421/* Functions that operate on memory bitmaps */
422
423#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
424#if BITS_PER_LONG == 32
425#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
426#else
427#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
428#endif
429#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
430
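/*
 * Concrete numbers for a common configuration (4 KiB pages, 64-bit
 * longs; stated as an illustration): BM_BLOCK_SHIFT is 15, so one
 * bitmap block covers 2^15 = 32768 page frames (128 MiB of memory),
 * and BM_RTREE_LEVEL_SHIFT is 9, so each radix-tree level fans out to
 * 512 slots. A single inner level can therefore address 512 leaf
 * blocks, i.e. a zone of up to 64 GiB.
 */
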
431/**
432 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
433 * @gfp_mask: GFP mask for the allocation.
434 * @safe_needed: Get pages not used before hibernation (restore only)
435 * @ca: Pointer to a linked list of pages ("a chain") to allocate from
436 * @list: Radix Tree node to add.
437 *
438 * This function is used to allocate inner nodes as well as the
439 * leaf nodes of the radix tree. It also adds the node to the
440 * corresponding linked list passed in via the @list parameter.
441 */
442static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
443 struct chain_allocator *ca,
444 struct list_head *list)
445{
446 struct rtree_node *node;
447
448 node = chain_alloc(ca, sizeof(struct rtree_node));
449 if (!node)
450 return NULL;
451
452 node->data = get_image_page(gfp_mask, safe_needed);
453 if (!node->data)
454 return NULL;
455
456 list_add_tail(&node->list, list);
457
458 return node;
459}
460
461/**
462 * add_rtree_block - Add a new leaf node to the radix tree.
463 *
464 * The leaf nodes need to be allocated in order to keep the leaves
465 * linked list in order. This is guaranteed by the zone->blocks
466 * counter.
467 */
468static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
469 int safe_needed, struct chain_allocator *ca)
470{
471 struct rtree_node *node, *block, **dst;
472 unsigned int levels_needed, block_nr;
473 int i;
474
475 block_nr = zone->blocks;
476 levels_needed = 0;
477
478 /* How many levels do we need for this block nr? */
479 while (block_nr) {
480 levels_needed += 1;
481 block_nr >>= BM_RTREE_LEVEL_SHIFT;
482 }
483
484 /* Make sure the rtree has enough levels */
485 for (i = zone->levels; i < levels_needed; i++) {
486 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
487 &zone->nodes);
488 if (!node)
489 return -ENOMEM;
490
491 node->data[0] = (unsigned long)zone->rtree;
492 zone->rtree = node;
493 zone->levels += 1;
494 }
495
496 /* Allocate new block */
497 block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
498 if (!block)
499 return -ENOMEM;
500
501 /* Now walk the rtree to insert the block */
502 node = zone->rtree;
503 dst = &zone->rtree;
504 block_nr = zone->blocks;
505 for (i = zone->levels; i > 0; i--) {
506 int index;
507
508 if (!node) {
509 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
510 &zone->nodes);
511 if (!node)
512 return -ENOMEM;
513 *dst = node;
514 }
515
516 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
517 index &= BM_RTREE_LEVEL_MASK;
518 dst = (struct rtree_node **)&((*dst)->data[index]);
519 node = *dst;
520 }
521
522 zone->blocks += 1;
523 *dst = block;
524
525 return 0;
526}
527
528static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
529 int clear_nosave_free);
530
531/**
532 * create_zone_bm_rtree - Create a radix tree for one zone.
533 *
534 * Allocates the mem_zone_bm_rtree structure and initializes it.
535 * This function also allocates and builds the radix tree for the
536 * zone.
537 */
538static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
539 int safe_needed,
540 struct chain_allocator *ca,
541 unsigned long start,
542 unsigned long end)
543{
544 struct mem_zone_bm_rtree *zone;
545 unsigned int i, nr_blocks;
546 unsigned long pages;
547
548 pages = end - start;
549 zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
550 if (!zone)
551 return NULL;
552
553 INIT_LIST_HEAD(&zone->nodes);
554 INIT_LIST_HEAD(&zone->leaves);
555 zone->start_pfn = start;
556 zone->end_pfn = end;
557 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
558
559 for (i = 0; i < nr_blocks; i++) {
560 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
561 free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
562 return NULL;
563 }
564 }
565
566 return zone;
567}
568
569/**
570 * free_zone_bm_rtree - Free the memory of the radix tree.
571 *
572 * Free all node pages of the radix tree. The mem_zone_bm_rtree
573 * structure itself is not freed here nor are the rtree_node
574 * structs.
575 */
576static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
577 int clear_nosave_free)
578{
579 struct rtree_node *node;
580
581 list_for_each_entry(node, &zone->nodes, list)
582 free_image_page(node->data, clear_nosave_free);
583
584 list_for_each_entry(node, &zone->leaves, list)
585 free_image_page(node->data, clear_nosave_free);
586}
587
588static void memory_bm_position_reset(struct memory_bitmap *bm)
589{
590 bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
591 list);
592 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
593 struct rtree_node, list);
594 bm->cur.node_pfn = 0;
595 bm->cur.cur_pfn = BM_END_OF_MAP;
596 bm->cur.node_bit = 0;
597}
598
599static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
600
601struct mem_extent {
602 struct list_head hook;
603 unsigned long start;
604 unsigned long end;
605};
606
607/**
608 * free_mem_extents - Free a list of memory extents.
609 * @list: List of extents to free.
610 */
611static void free_mem_extents(struct list_head *list)
612{
613 struct mem_extent *ext, *aux;
614
615 list_for_each_entry_safe(ext, aux, list, hook) {
616 list_del(&ext->hook);
617 kfree(ext);
618 }
619}
620
621/**
622 * create_mem_extents - Create a list of memory extents.
623 * @list: List to put the extents into.
624 * @gfp_mask: Mask to use for memory allocations.
625 *
626 * The extents represent contiguous ranges of PFNs.
627 */
628static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
629{
630 struct zone *zone;
631
632 INIT_LIST_HEAD(list);
633
634 for_each_populated_zone(zone) {
635 unsigned long zone_start, zone_end;
636 struct mem_extent *ext, *cur, *aux;
637
638 zone_start = zone->zone_start_pfn;
639 zone_end = zone_end_pfn(zone);
640
641 list_for_each_entry(ext, list, hook)
642 if (zone_start <= ext->end)
643 break;
644
645 if (&ext->hook == list || zone_end < ext->start) {
646 /* New extent is necessary */
647 struct mem_extent *new_ext;
648
649 new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
650 if (!new_ext) {
651 free_mem_extents(list);
652 return -ENOMEM;
653 }
654 new_ext->start = zone_start;
655 new_ext->end = zone_end;
656 list_add_tail(&new_ext->hook, &ext->hook);
657 continue;
658 }
659
660 /* Merge this zone's range of PFNs with the existing one */
661 if (zone_start < ext->start)
662 ext->start = zone_start;
663 if (zone_end > ext->end)
664 ext->end = zone_end;
665
666 /* More merging may be possible */
667 cur = ext;
668 list_for_each_entry_safe_continue(cur, aux, list, hook) {
669 if (zone_end < cur->start)
670 break;
671 if (zone_end < cur->end)
672 ext->end = cur->end;
673 list_del(&cur->hook);
674 kfree(cur);
675 }
676 }
677
678 return 0;
679}
680
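/*
 * Example of the merging above (the PFN ranges are made up): zones
 * spanning [0, 100), [80, 150) and [200, 300) produce two extents,
 * [0, 150) and [200, 300). The first two overlap and are merged; the
 * third is disjoint and gets its own extent.
 */
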
681/**
682 * memory_bm_create - Allocate memory for a memory bitmap.
683 */
684static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
685 int safe_needed)
686{
687 struct chain_allocator ca;
688 struct list_head mem_extents;
689 struct mem_extent *ext;
690 int error;
691
692 chain_init(&ca, gfp_mask, safe_needed);
693 INIT_LIST_HEAD(&bm->zones);
694
695 error = create_mem_extents(&mem_extents, gfp_mask);
696 if (error)
697 return error;
698
699 list_for_each_entry(ext, &mem_extents, hook) {
700 struct mem_zone_bm_rtree *zone;
701
702 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
703 ext->start, ext->end);
704 if (!zone) {
705 error = -ENOMEM;
706 goto Error;
707 }
708 list_add_tail(&zone->list, &bm->zones);
709 }
710
711 bm->p_list = ca.chain;
712 memory_bm_position_reset(bm);
713 Exit:
714 free_mem_extents(&mem_extents);
715 return error;
716
717 Error:
718 bm->p_list = ca.chain;
719 memory_bm_free(bm, PG_UNSAFE_CLEAR);
720 goto Exit;
721}
722
723/**
724 * memory_bm_free - Free memory occupied by the memory bitmap.
725 * @bm: Memory bitmap.
726 */
727static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
728{
729 struct mem_zone_bm_rtree *zone;
730
731 list_for_each_entry(zone, &bm->zones, list)
732 free_zone_bm_rtree(zone, clear_nosave_free);
733
734 free_list_of_pages(bm->p_list, clear_nosave_free);
735
736 INIT_LIST_HEAD(&bm->zones);
737}
738
739/**
740 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
741 *
742 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
743 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
744 *
745 * Walk the radix tree to find the page containing the bit that represents @pfn
746 * and return the position of the bit in @addr and @bit_nr.
747 */
748static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
749 void **addr, unsigned int *bit_nr)
750{
751 struct mem_zone_bm_rtree *curr, *zone;
752 struct rtree_node *node;
753 int i, block_nr;
754
755 zone = bm->cur.zone;
756
757 if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
758 goto zone_found;
759
760 zone = NULL;
761
762 /* Find the right zone */
763 list_for_each_entry(curr, &bm->zones, list) {
764 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
765 zone = curr;
766 break;
767 }
768 }
769
770 if (!zone)
771 return -EFAULT;
772
773zone_found:
774 /*
775 * We have found the zone. Now walk the radix tree to find the leaf node
776 * for our PFN.
777 */
778
779 /*
780 * If the zone we wish to scan is the current zone and the
781 * pfn falls into the current node then we do not need to walk
782 * the tree.
783 */
784 node = bm->cur.node;
785 if (zone == bm->cur.zone &&
786 ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
787 goto node_found;
788
789 node = zone->rtree;
790 block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
791
792 for (i = zone->levels; i > 0; i--) {
793 int index;
794
795 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
796 index &= BM_RTREE_LEVEL_MASK;
797 BUG_ON(node->data[index] == 0);
798 node = (struct rtree_node *)node->data[index];
799 }
800
801node_found:
802 /* Update last position */
803 bm->cur.zone = zone;
804 bm->cur.node = node;
805 bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
806 bm->cur.cur_pfn = pfn;
807
808 /* Set return values */
809 *addr = node->data;
810 *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
811
812 return 0;
813}
814
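/*
 * Coordinate math used above, as a worked example (values are
 * illustrative): for a zone starting at PFN 0x1000 and pfn = 0x9000,
 * the zone-relative offset is 0x8000; with BM_BLOCK_SHIFT = 15 that is
 * block_nr = 0x8000 >> 15 = 1, and the bit within the block is
 * 0x8000 & BM_BLOCK_MASK = 0.
 */
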
815static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
816{
817 void *addr;
818 unsigned int bit;
819 int error;
820
821 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
822 BUG_ON(error);
823 set_bit(bit, addr);
824}
825
826static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
827{
828 void *addr;
829 unsigned int bit;
830 int error;
831
832 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
833 if (!error)
834 set_bit(bit, addr);
835
836 return error;
837}
838
839static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
840{
841 void *addr;
842 unsigned int bit;
843 int error;
844
845 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
846 BUG_ON(error);
847 clear_bit(bit, addr);
848}
849
850static void memory_bm_clear_current(struct memory_bitmap *bm)
851{
852 int bit;
853
854 bit = max(bm->cur.node_bit - 1, 0);
855 clear_bit(bit, bm->cur.node->data);
856}
857
858static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
859{
860 return bm->cur.cur_pfn;
861}
862
863static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
864{
865 void *addr;
866 unsigned int bit;
867 int error;
868
869 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
870 BUG_ON(error);
871 return test_bit(bit, addr);
872}
873
874static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
875{
876 void *addr;
877 unsigned int bit;
878
879 return !memory_bm_find_bit(bm, pfn, &addr, &bit);
880}
881
882/*
883 * rtree_next_node - Jump to the next leaf node.
884 *
885 * Set the position to the beginning of the next node in the
886 * memory bitmap. This is either the next node in the current
887 * zone's radix tree or the first node in the radix tree of the
888 * next zone.
889 *
890 * Return true if there is a next node, false otherwise.
891 */
892static bool rtree_next_node(struct memory_bitmap *bm)
893{
894 if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
895 bm->cur.node = list_entry(bm->cur.node->list.next,
896 struct rtree_node, list);
897 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
898 bm->cur.node_bit = 0;
899 touch_softlockup_watchdog();
900 return true;
901 }
902
903	/* No more nodes, go to the next zone */
904 if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
905 bm->cur.zone = list_entry(bm->cur.zone->list.next,
906 struct mem_zone_bm_rtree, list);
907 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
908 struct rtree_node, list);
909 bm->cur.node_pfn = 0;
910 bm->cur.node_bit = 0;
911 return true;
912 }
913
914 /* No more zones */
915 return false;
916}
917
918/**
919 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
920 * @bm: Memory bitmap.
921 *
922 * Starting from the last returned position this function searches for the next
923 * set bit in @bm and returns the PFN represented by it. If no more bits are
924 * set, BM_END_OF_MAP is returned.
925 *
926 * It is required to run memory_bm_position_reset() before the first call to
927 * this function for the given memory bitmap.
928 */
929static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
930{
931 unsigned long bits, pfn, pages;
932 int bit;
933
934 do {
935 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
936 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
937 bit = find_next_bit(bm->cur.node->data, bits,
938 bm->cur.node_bit);
939 if (bit < bits) {
940 pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
941 bm->cur.node_bit = bit + 1;
942 bm->cur.cur_pfn = pfn;
943 return pfn;
944 }
945 } while (rtree_next_node(bm));
946
947 bm->cur.cur_pfn = BM_END_OF_MAP;
948 return BM_END_OF_MAP;
949}
950
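/*
 * Typical iteration idiom built on the two functions above (a sketch;
 * handle_pfn() is a hypothetical per-page action):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		handle_pfn(pfn);
 */
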
951/*
952 * This structure represents a range of page frames the contents of which
953 * should not be saved during hibernation.
954 */
955struct nosave_region {
956 struct list_head list;
957 unsigned long start_pfn;
958 unsigned long end_pfn;
959};
960
961static LIST_HEAD(nosave_regions);
962
963static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
964{
965 struct rtree_node *node;
966
967 list_for_each_entry(node, &zone->nodes, list)
968 recycle_safe_page(node->data);
969
970 list_for_each_entry(node, &zone->leaves, list)
971 recycle_safe_page(node->data);
972}
973
974static void memory_bm_recycle(struct memory_bitmap *bm)
975{
976 struct mem_zone_bm_rtree *zone;
977 struct linked_page *p_list;
978
979 list_for_each_entry(zone, &bm->zones, list)
980 recycle_zone_bm_rtree(zone);
981
982 p_list = bm->p_list;
983 while (p_list) {
984 struct linked_page *lp = p_list;
985
986 p_list = lp->next;
987 recycle_safe_page(lp);
988 }
989}
990
991/**
992 * register_nosave_region - Register a region of unsaveable memory.
993 *
994 * Register a range of page frames the contents of which should not be saved
995 * during hibernation (to be used in the early initialization code).
996 */
997void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
998{
999 struct nosave_region *region;
1000
1001 if (start_pfn >= end_pfn)
1002 return;
1003
1004 if (!list_empty(&nosave_regions)) {
1005 /* Try to extend the previous region (they should be sorted) */
1006 region = list_entry(nosave_regions.prev,
1007 struct nosave_region, list);
1008 if (region->end_pfn == start_pfn) {
1009 region->end_pfn = end_pfn;
1010 goto Report;
1011 }
1012 }
1013 /* This allocation cannot fail */
1014 region = memblock_alloc(sizeof(struct nosave_region),
1015 SMP_CACHE_BYTES);
1016 if (!region)
1017 panic("%s: Failed to allocate %zu bytes\n", __func__,
1018 sizeof(struct nosave_region));
1019 region->start_pfn = start_pfn;
1020 region->end_pfn = end_pfn;
1021	list_add_tail(&region->list, &nosave_regions);
1022 Report:
1023 pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
1024 (unsigned long long) start_pfn << PAGE_SHIFT,
1025 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1026}
1027
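/*
 * Illustrative call from early platform code (the addresses are made
 * up): to keep a firmware mailbox at physical 0x8000000-0x8004000 out
 * of the image, the platform would do
 *
 *	register_nosave_region(PFN_DOWN(0x8000000), PFN_UP(0x8004000));
 *
 * PFN_DOWN()/PFN_UP() are the usual helpers from <linux/pfn.h>.
 */
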
1028/*
1029 * Set bits in this map correspond to the page frames the contents of which
1030 * should not be saved during the suspend.
1031 */
1032static struct memory_bitmap *forbidden_pages_map;
1033
1034/* Set bits in this map correspond to free page frames. */
1035static struct memory_bitmap *free_pages_map;
1036
1037/*
1038 * Each page frame allocated for creating the image is marked by setting the
1039 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
1040 */
1041
1042void swsusp_set_page_free(struct page *page)
1043{
1044 if (free_pages_map)
1045 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
1046}
1047
1048static int swsusp_page_is_free(struct page *page)
1049{
1050 return free_pages_map ?
1051 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1052}
1053
1054void swsusp_unset_page_free(struct page *page)
1055{
1056 if (free_pages_map)
1057 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1058}
1059
1060static void swsusp_set_page_forbidden(struct page *page)
1061{
1062 if (forbidden_pages_map)
1063 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1064}
1065
1066int swsusp_page_is_forbidden(struct page *page)
1067{
1068 return forbidden_pages_map ?
1069 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1070}
1071
1072static void swsusp_unset_page_forbidden(struct page *page)
1073{
1074 if (forbidden_pages_map)
1075 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1076}
1077
1078/**
1079 * mark_nosave_pages - Mark pages that should not be saved.
1080 * @bm: Memory bitmap.
1081 *
1082 * Set the bits in @bm that correspond to the page frames the contents of which
1083 * should not be saved.
1084 */
1085static void mark_nosave_pages(struct memory_bitmap *bm)
1086{
1087 struct nosave_region *region;
1088
1089 if (list_empty(&nosave_regions))
1090 return;
1091
1092 list_for_each_entry(region, &nosave_regions, list) {
1093 unsigned long pfn;
1094
1095 pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1096 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1097 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1098 - 1);
1099
1100 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1101 if (pfn_valid(pfn)) {
1102 /*
1103 * It is safe to ignore the result of
1104 * mem_bm_set_bit_check() here, since we won't
1105 * touch the PFNs for which the error is
1106 * returned anyway.
1107 */
1108 mem_bm_set_bit_check(bm, pfn);
1109 }
1110 }
1111}
1112
1113/**
1114 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1115 *
1116 * Create bitmaps needed for marking page frames that should not be saved and
1117 * free page frames. The forbidden_pages_map and free_pages_map pointers are
1118 * only modified if everything goes well, because we don't want the bits to be
1119 * touched before both bitmaps are set up.
1120 */
1121int create_basic_memory_bitmaps(void)
1122{
1123 struct memory_bitmap *bm1, *bm2;
1124 int error;
1125
1126 if (forbidden_pages_map && free_pages_map)
1127 return 0;
1128 else
1129 BUG_ON(forbidden_pages_map || free_pages_map);
1130
1131 bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1132 if (!bm1)
1133 return -ENOMEM;
1134
1135 error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1136 if (error)
1137 goto Free_first_object;
1138
1139 bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1140 if (!bm2)
1141 goto Free_first_bitmap;
1142
1143 error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1144 if (error)
1145 goto Free_second_object;
1146
1147 forbidden_pages_map = bm1;
1148 free_pages_map = bm2;
1149 mark_nosave_pages(forbidden_pages_map);
1150
1151 pr_debug("Basic memory bitmaps created\n");
1152
1153 return 0;
1154
1155 Free_second_object:
1156 kfree(bm2);
1157 Free_first_bitmap:
1158 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1159 Free_first_object:
1160 kfree(bm1);
1161 return -ENOMEM;
1162}
1163
1164/**
1165 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1166 *
1167 * Free memory bitmaps allocated by create_basic_memory_bitmaps(). The
1168 * auxiliary pointers are necessary so that the bitmaps themselves are not
1169 * referred to while they are being freed.
1170 */
1171void free_basic_memory_bitmaps(void)
1172{
1173 struct memory_bitmap *bm1, *bm2;
1174
1175 if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1176 return;
1177
1178 bm1 = forbidden_pages_map;
1179 bm2 = free_pages_map;
1180 forbidden_pages_map = NULL;
1181 free_pages_map = NULL;
1182 memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1183 kfree(bm1);
1184 memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1185 kfree(bm2);
1186
1187 pr_debug("Basic memory bitmaps freed\n");
1188}
1189
1190static void clear_or_poison_free_page(struct page *page)
1191{
1192 if (page_poisoning_enabled_static())
1193 __kernel_poison_pages(page, 1);
1194 else if (want_init_on_free())
1195 clear_highpage(page);
1196}
1197
1198void clear_or_poison_free_pages(void)
1199{
1200 struct memory_bitmap *bm = free_pages_map;
1201 unsigned long pfn;
1202
1203 if (WARN_ON(!free_pages_map))
1204 return;
1205
1206 if (page_poisoning_enabled() || want_init_on_free()) {
1207 memory_bm_position_reset(bm);
1208 pfn = memory_bm_next_pfn(bm);
1209 while (pfn != BM_END_OF_MAP) {
1210 if (pfn_valid(pfn))
1211 clear_or_poison_free_page(pfn_to_page(pfn));
1212
1213 pfn = memory_bm_next_pfn(bm);
1214 }
1215 memory_bm_position_reset(bm);
1216 pr_info("free pages cleared after restore\n");
1217 }
1218}
1219
1220/**
1221 * snapshot_additional_pages - Estimate the number of extra pages needed.
1222 * @zone: Memory zone to carry out the computation for.
1223 *
1224 * Estimate the number of additional pages needed for setting up the
1225 * hibernation image data structures for @zone (usually, the returned value is
1226 * greater than the exact number).
1227 */
1228unsigned int snapshot_additional_pages(struct zone *zone)
1229{
1230 unsigned int rtree, nodes;
1231
1232 rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1233 rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1234 LINKED_PAGE_DATA_SIZE);
1235 while (nodes > 1) {
1236 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1237 rtree += nodes;
1238 }
1239
1240 return 2 * rtree;
1241}
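/*
 * Worked example (illustrative; assumes 4 KiB pages and 64-bit longs, so
 * BM_BITS_PER_BLOCK == 32768 and BM_ENTRIES_PER_LEVEL == 512): a zone
 * spanning 4 GiB (1048576 page frames) needs DIV_ROUND_UP(1048576, 32768)
 * == 32 leaf blocks per bitmap, roughly one more page for the rtree_node
 * structures and one level-1 node, about 34 pages in total. The result is
 * doubled because two such bitmaps are in use during hibernation.
 */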
1242
1243/*
1244 * Touch the watchdog for every WD_PAGE_COUNT pages.
1245 */
1246#define WD_PAGE_COUNT (128*1024)
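/*
 * With 4 KiB pages this corresponds to 512 MiB of scanned memory between
 * consecutive touch_nmi_watchdog() calls, which keeps the watchdog quiet
 * even on very large zones.
 */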
1247
1248static void mark_free_pages(struct zone *zone)
1249{
1250 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
1251 unsigned long flags;
1252 unsigned int order, t;
1253 struct page *page;
1254
1255 if (zone_is_empty(zone))
1256 return;
1257
1258 spin_lock_irqsave(&zone->lock, flags);
1259
1260 max_zone_pfn = zone_end_pfn(zone);
1261 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1262 if (pfn_valid(pfn)) {
1263 page = pfn_to_page(pfn);
1264
1265 if (!--page_count) {
1266 touch_nmi_watchdog();
1267 page_count = WD_PAGE_COUNT;
1268 }
1269
1270 if (page_zone(page) != zone)
1271 continue;
1272
1273 if (!swsusp_page_is_forbidden(page))
1274 swsusp_unset_page_free(page);
1275 }
1276
1277 for_each_migratetype_order(order, t) {
1278 list_for_each_entry(page,
1279 &zone->free_area[order].free_list[t], buddy_list) {
1280 unsigned long i;
1281
1282 pfn = page_to_pfn(page);
1283 for (i = 0; i < (1UL << order); i++) {
1284 if (!--page_count) {
1285 touch_nmi_watchdog();
1286 page_count = WD_PAGE_COUNT;
1287 }
1288 swsusp_set_page_free(pfn_to_page(pfn + i));
1289 }
1290 }
1291 }
1292 spin_unlock_irqrestore(&zone->lock, flags);
1293}
1294
1295#ifdef CONFIG_HIGHMEM
1296/**
1297 * count_free_highmem_pages - Compute the total number of free highmem pages.
1298 *
1299 * The returned number is system-wide.
1300 */
1301static unsigned int count_free_highmem_pages(void)
1302{
1303 struct zone *zone;
1304 unsigned int cnt = 0;
1305
1306 for_each_populated_zone(zone)
1307 if (is_highmem(zone))
1308 cnt += zone_page_state(zone, NR_FREE_PAGES);
1309
1310 return cnt;
1311}
1312
1313/**
1314 * saveable_highmem_page - Check if a highmem page is saveable.
1315 *
1316 * Determine whether a highmem page should be included in a hibernation image.
1317 *
1318 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
1319 * and it isn't part of a free chunk of pages.
1320 */
1321static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1322{
1323 struct page *page;
1324
1325 if (!pfn_valid(pfn))
1326 return NULL;
1327
1328 page = pfn_to_online_page(pfn);
1329 if (!page || page_zone(page) != zone)
1330 return NULL;
1331
1332 BUG_ON(!PageHighMem(page));
1333
1334 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1335 return NULL;
1336
1337 if (PageReserved(page) || PageOffline(page))
1338 return NULL;
1339
1340 if (page_is_guard(page))
1341 return NULL;
1342
1343 return page;
1344}
1345
1346/**
1347 * count_highmem_pages - Compute the total number of saveable highmem pages.
1348 */
1349static unsigned int count_highmem_pages(void)
1350{
1351 struct zone *zone;
1352 unsigned int n = 0;
1353
1354 for_each_populated_zone(zone) {
1355 unsigned long pfn, max_zone_pfn;
1356
1357 if (!is_highmem(zone))
1358 continue;
1359
1360 mark_free_pages(zone);
1361 max_zone_pfn = zone_end_pfn(zone);
1362 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1363 if (saveable_highmem_page(zone, pfn))
1364 n++;
1365 }
1366 return n;
1367}
1368#endif /* CONFIG_HIGHMEM */
1369
1370/**
1371 * saveable_page - Check if the given page is saveable.
1372 *
1373 * Determine whether a non-highmem page should be included in a hibernation
1374 * image.
1375 *
1376 * We should save the page if it isn't Nosave, and is not in the range
1377 * of pages statically defined as 'unsaveable', and it isn't part of
1378 * a free chunk of pages.
1379 */
1380static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1381{
1382 struct page *page;
1383
1384 if (!pfn_valid(pfn))
1385 return NULL;
1386
1387 page = pfn_to_online_page(pfn);
1388 if (!page || page_zone(page) != zone)
1389 return NULL;
1390
1391 BUG_ON(PageHighMem(page));
1392
1393 if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1394 return NULL;
1395
1396 if (PageOffline(page))
1397 return NULL;
1398
1399 if (PageReserved(page) &&
1400 (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1401 return NULL;
1402
1403 if (page_is_guard(page))
1404 return NULL;
1405
1406 return page;
1407}
1408
1409/**
1410 * count_data_pages - Compute the total number of saveable non-highmem pages.
1411 */
1412static unsigned int count_data_pages(void)
1413{
1414 struct zone *zone;
1415 unsigned long pfn, max_zone_pfn;
1416 unsigned int n = 0;
1417
1418 for_each_populated_zone(zone) {
1419 if (is_highmem(zone))
1420 continue;
1421
1422 mark_free_pages(zone);
1423 max_zone_pfn = zone_end_pfn(zone);
1424 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1425 if (saveable_page(zone, pfn))
1426 n++;
1427 }
1428 return n;
1429}
1430
1431/*
1432 * This is needed because copy_page and memcpy are not usable for copying
1433 * task structs. Returns true if the page was filled with only zeros,
1434 * otherwise false.
1435 */
1436static inline bool do_copy_page(long *dst, long *src)
1437{
1438 long z = 0;
1439 int n;
1440
1441 for (n = PAGE_SIZE / sizeof(long); n; n--) {
1442 z |= *src;
1443 *dst++ = *src++;
1444 }
1445 return !z;
1446}
1447
1448/**
1449 * safe_copy_page - Copy a page in a safe way.
1450 *
1451 * Check if the page we are going to copy is marked as present in the kernel
1452 * page tables. This is always the case if neither CONFIG_DEBUG_PAGEALLOC nor
1453 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is set, since kernel_page_present() then
1454 * always returns 'true'. Returns true if the page was entirely composed of
1455 * zeros, false otherwise.
1456 */
1457static bool safe_copy_page(void *dst, struct page *s_page)
1458{
1459 bool zeros_only;
1460
1461 if (kernel_page_present(s_page)) {
1462 zeros_only = do_copy_page(dst, page_address(s_page));
1463 } else {
1464 hibernate_map_page(s_page);
1465 zeros_only = do_copy_page(dst, page_address(s_page));
1466 hibernate_unmap_page(s_page);
1467 }
1468 return zeros_only;
1469}
1470
1471#ifdef CONFIG_HIGHMEM
1472static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1473{
1474 return is_highmem(zone) ?
1475 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1476}
1477
1478static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1479{
1480 struct page *s_page, *d_page;
1481 void *src, *dst;
1482 bool zeros_only;
1483
1484 s_page = pfn_to_page(src_pfn);
1485 d_page = pfn_to_page(dst_pfn);
1486 if (PageHighMem(s_page)) {
1487 src = kmap_local_page(s_page);
1488 dst = kmap_local_page(d_page);
1489 zeros_only = do_copy_page(dst, src);
1490 kunmap_local(dst);
1491 kunmap_local(src);
1492 } else {
1493 if (PageHighMem(d_page)) {
1494 /*
1495 * The page pointed to by src may contain some kernel
1496 * data modified by kmap_atomic()
1497 */
1498 zeros_only = safe_copy_page(buffer, s_page);
1499 dst = kmap_local_page(d_page);
1500 copy_page(dst, buffer);
1501 kunmap_local(dst);
1502 } else {
1503 zeros_only = safe_copy_page(page_address(d_page), s_page);
1504 }
1505 }
1506 return zeros_only;
1507}
1508#else
1509#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
1510
1511static inline bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1512{
1513 return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1514 pfn_to_page(src_pfn));
1515}
1516#endif /* CONFIG_HIGHMEM */
1517
1518/*
1519 * copy_data_pages() copies all saveable pages into page frames pulled from
1520 * @copy_bm; a page filled entirely with zeros is instead only marked in @zero_bm.
1521 *
1522 * Returns the number of pages copied.
1523 */
1524static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
1525 struct memory_bitmap *orig_bm,
1526 struct memory_bitmap *zero_bm)
1527{
1528 unsigned long copied_pages = 0;
1529 struct zone *zone;
1530 unsigned long pfn, copy_pfn;
1531
1532 for_each_populated_zone(zone) {
1533 unsigned long max_zone_pfn;
1534
1535 mark_free_pages(zone);
1536 max_zone_pfn = zone_end_pfn(zone);
1537 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1538 if (page_is_saveable(zone, pfn))
1539 memory_bm_set_bit(orig_bm, pfn);
1540 }
1541 memory_bm_position_reset(orig_bm);
1542 memory_bm_position_reset(copy_bm);
1543 copy_pfn = memory_bm_next_pfn(copy_bm);
1544 for (;;) {
1545 pfn = memory_bm_next_pfn(orig_bm);
1546 if (unlikely(pfn == BM_END_OF_MAP))
1547 break;
1548 if (copy_data_page(copy_pfn, pfn)) {
1549 memory_bm_set_bit(zero_bm, pfn);
1550 /* Use this copy_pfn for a page that is not full of zeros */
1551 continue;
1552 }
1553 copied_pages++;
1554 copy_pfn = memory_bm_next_pfn(copy_bm);
1555 }
1556 return copied_pages;
1557}
1558
1559/* Total number of image pages */
1560static unsigned int nr_copy_pages;
1561/* Number of pages needed for saving the original pfns of the image pages */
1562static unsigned int nr_meta_pages;
1563/* Number of zero pages */
1564static unsigned int nr_zero_pages;
1565
1566/*
1567 * Numbers of normal and highmem page frames allocated for hibernation image
1568 * before suspending devices.
1569 */
1570static unsigned int alloc_normal, alloc_highmem;
1571/*
1572 * Memory bitmap used for marking saveable pages (during hibernation) or
1573 * hibernation image pages (during restore)
1574 */
1575static struct memory_bitmap orig_bm;
1576/*
1577 * Memory bitmap used during hibernation for marking allocated page frames that
1578 * will contain copies of saveable pages. During restore it is initially used
1579 * for marking hibernation image pages, but then the set bits from it are
1580 * duplicated in @orig_bm and it is released. On highmem systems it is next
1581 * used for marking "safe" highmem pages, but it has to be reinitialized for
1582 * this purpose.
1583 */
1584static struct memory_bitmap copy_bm;
1585
1586/* Memory bitmap which tracks which saveable pages were zero filled. */
1587static struct memory_bitmap zero_bm;
1588
1589/**
1590 * swsusp_free - Free pages allocated for hibernation image.
1591 *
1592 * Image pages are allocated before snapshot creation, so they need to be
1593 * released after resume.
1594 */
1595void swsusp_free(void)
1596{
1597 unsigned long fb_pfn, fr_pfn;
1598
1599 if (!forbidden_pages_map || !free_pages_map)
1600 goto out;
1601
1602 memory_bm_position_reset(forbidden_pages_map);
1603 memory_bm_position_reset(free_pages_map);
1604
1605loop:
1606 fr_pfn = memory_bm_next_pfn(free_pages_map);
1607 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1608
1609 /*
1610 * Find the next bit set in both bitmaps. This is guaranteed to
1611 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1612 */
1613 do {
1614 if (fb_pfn < fr_pfn)
1615 fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1616 if (fr_pfn < fb_pfn)
1617 fr_pfn = memory_bm_next_pfn(free_pages_map);
1618 } while (fb_pfn != fr_pfn);
1619
1620 if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1621 struct page *page = pfn_to_page(fr_pfn);
1622
1623 memory_bm_clear_current(forbidden_pages_map);
1624 memory_bm_clear_current(free_pages_map);
1625 hibernate_restore_unprotect_page(page_address(page));
1626 __free_page(page);
1627 goto loop;
1628 }
1629
1630out:
1631 nr_copy_pages = 0;
1632 nr_meta_pages = 0;
1633 nr_zero_pages = 0;
1634 restore_pblist = NULL;
1635 buffer = NULL;
1636 alloc_normal = 0;
1637 alloc_highmem = 0;
1638 hibernate_restore_protection_end();
1639}
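/*
 * Illustrative walk of the loop above: if the set PFNs are
 * free = {3, 5, 9} and forbidden = {5, 7}, the do/while advances whichever
 * iterator lags until both return 5; that page frame is freed and the scan
 * restarts. The iterators next meet at BM_END_OF_MAP, which ends the loop.
 */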
1640
1641/* Helper functions used for the shrinking of memory. */
1642
1643#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
1644
1645/**
1646 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1647 * @nr_pages: Number of page frames to allocate.
1648 * @mask: GFP flags to use for the allocation.
1649 *
1650 * Return value: Number of page frames actually allocated
1651 */
1652static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1653{
1654 unsigned long nr_alloc = 0;
1655
1656 while (nr_pages > 0) {
1657 struct page *page;
1658
1659 page = alloc_image_page(mask);
1660 if (!page)
1661 break;
1662 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1663 if (PageHighMem(page))
1664 alloc_highmem++;
1665 else
1666 alloc_normal++;
1667 nr_pages--;
1668 nr_alloc++;
1669 }
1670
1671 return nr_alloc;
1672}
1673
1674static unsigned long preallocate_image_memory(unsigned long nr_pages,
1675 unsigned long avail_normal)
1676{
1677 unsigned long alloc;
1678
1679 if (avail_normal <= alloc_normal)
1680 return 0;
1681
1682 alloc = avail_normal - alloc_normal;
1683 if (nr_pages < alloc)
1684 alloc = nr_pages;
1685
1686 return preallocate_image_pages(alloc, GFP_IMAGE);
1687}
1688
1689#ifdef CONFIG_HIGHMEM
1690static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1691{
1692 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1693}
1694
1695/**
1696 * __fraction - Compute (an approximation of) x * (multiplier / base).
1697 */
1698static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1699{
1700 return div64_u64(x * multiplier, base);
1701}
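/* E.g. __fraction(1000, 262144, 1048576) == 250, i.e. a quarter of 1000. */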
1702
1703static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1704 unsigned long highmem,
1705 unsigned long total)
1706{
1707 unsigned long alloc = __fraction(nr_pages, highmem, total);
1708
1709 return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1710}
1711#else /* CONFIG_HIGHMEM */
1712static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1713{
1714 return 0;
1715}
1716
1717static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1718 unsigned long highmem,
1719 unsigned long total)
1720{
1721 return 0;
1722}
1723#endif /* CONFIG_HIGHMEM */
1724
1725/**
1726 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1727 */
1728static unsigned long free_unnecessary_pages(void)
1729{
1730 unsigned long save, to_free_normal, to_free_highmem, free;
1731
1732 save = count_data_pages();
1733 if (alloc_normal >= save) {
1734 to_free_normal = alloc_normal - save;
1735 save = 0;
1736 } else {
1737 to_free_normal = 0;
1738 save -= alloc_normal;
1739 }
1740 save += count_highmem_pages();
1741 if (alloc_highmem >= save) {
1742 to_free_highmem = alloc_highmem - save;
1743 } else {
1744 to_free_highmem = 0;
1745 save -= alloc_highmem;
1746 if (to_free_normal > save)
1747 to_free_normal -= save;
1748 else
1749 to_free_normal = 0;
1750 }
1751 free = to_free_normal + to_free_highmem;
1752
1753 memory_bm_position_reset(&copy_bm);
1754
1755 while (to_free_normal > 0 || to_free_highmem > 0) {
1756 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1757 struct page *page = pfn_to_page(pfn);
1758
1759 if (PageHighMem(page)) {
1760 if (!to_free_highmem)
1761 continue;
1762 to_free_highmem--;
1763 alloc_highmem--;
1764 } else {
1765 if (!to_free_normal)
1766 continue;
1767 to_free_normal--;
1768 alloc_normal--;
1769 }
1770 memory_bm_clear_bit(&copy_bm, pfn);
1771 swsusp_unset_page_forbidden(page);
1772 swsusp_unset_page_free(page);
1773 __free_page(page);
1774 }
1775
1776 return free;
1777}
1778
1779/**
1780 * minimum_image_size - Estimate the minimum acceptable size of an image.
1781 * @saveable: Number of saveable pages in the system.
1782 *
1783 * We want to avoid trying too hard to free too much memory, so estimate the
1784 * minimum acceptable size of a hibernation image to use as the lower limit for
1785 * preallocating memory.
1786 *
1787 * We assume that the minimum image size should be proportional to
1788 *
1789 * [number of saveable pages] - [number of pages that can be freed in theory]
1790 *
1791 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1792 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1793 */
1794static unsigned long minimum_image_size(unsigned long saveable)
1795{
1796 unsigned long size;
1797
1798 size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
1799 + global_node_page_state(NR_ACTIVE_ANON)
1800 + global_node_page_state(NR_INACTIVE_ANON)
1801 + global_node_page_state(NR_ACTIVE_FILE)
1802 + global_node_page_state(NR_INACTIVE_FILE);
1803
1804 return saveable <= size ? 0 : saveable - size;
1805}
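/*
 * Worked example (all numbers invented): with 500000 saveable pages and
 * 350000 pages of reclaimable slab plus anonymous and file LRU pages, the
 * estimated minimum image size is 150000 pages (~586 MiB with 4 KiB pages).
 */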
1806
1807/**
1808 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1809 *
1810 * To create a hibernation image it is necessary to make a copy of every page
1811 * frame in use. We also need a number of page frames to be free during
1812 * hibernation for allocations made while saving the image and for device
1813 * drivers, in case they need to allocate memory from their hibernation
1814 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
1815 * estimate, and by reserved_size divided by PAGE_SIZE, which is tunable
1816 * through /sys/power/reserved_size, respectively). To make this happen, we compute the
1817 * total number of available page frames and allocate at least
1818 *
1819 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1820 * - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1821 *
1822 * of them, which corresponds to the maximum size of a hibernation image.
1823 *
1824 * If image_size is set below the number following from the above formula,
1825 * the preallocation of memory is continued until the total number of saveable
1826 * pages in the system is below the requested image size or the minimum
1827 * acceptable image size returned by minimum_image_size(), whichever is greater.
1828 */
1829int hibernate_preallocate_memory(void)
1830{
1831 struct zone *zone;
1832 unsigned long saveable, size, max_size, count, highmem, pages = 0;
1833 unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1834 ktime_t start, stop;
1835 int error;
1836
1837 pr_info("Preallocating image memory\n");
1838 start = ktime_get();
1839
1840 error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1841 if (error) {
1842 pr_err("Cannot allocate original bitmap\n");
1843 goto err_out;
1844 }
1845
1846 error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1847 if (error) {
1848 pr_err("Cannot allocate copy bitmap\n");
1849 goto err_out;
1850 }
1851
1852 error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
1853 if (error) {
1854 pr_err("Cannot allocate zero bitmap\n");
1855 goto err_out;
1856 }
1857
1858 alloc_normal = 0;
1859 alloc_highmem = 0;
1860 nr_zero_pages = 0;
1861
1862 /* Count the number of saveable data pages. */
1863 save_highmem = count_highmem_pages();
1864 saveable = count_data_pages();
1865
1866 /*
1867 * Compute the total number of page frames we can use (count) and the
1868 * number of pages needed for image metadata (size).
1869 */
1870 count = saveable;
1871 saveable += save_highmem;
1872 highmem = save_highmem;
1873 size = 0;
1874 for_each_populated_zone(zone) {
1875 size += snapshot_additional_pages(zone);
1876 if (is_highmem(zone))
1877 highmem += zone_page_state(zone, NR_FREE_PAGES);
1878 else
1879 count += zone_page_state(zone, NR_FREE_PAGES);
1880 }
1881 avail_normal = count;
1882 count += highmem;
1883 count -= totalreserve_pages;
1884
1885 /* Compute the maximum number of saveable pages to leave in memory. */
1886 max_size = (count - (size + PAGES_FOR_IO)) / 2
1887 - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1888 /* Compute the desired number of image pages specified by image_size. */
1889 size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1890 if (size > max_size)
1891 size = max_size;
1892 /*
1893 * If the desired number of image pages is at least as large as the
1894 * current number of saveable pages in memory, allocate page frames for
1895 * the image and we're done.
1896 */
1897 if (size >= saveable) {
1898 pages = preallocate_image_highmem(save_highmem);
1899 pages += preallocate_image_memory(saveable - pages, avail_normal);
1900 goto out;
1901 }
1902
1903 /* Estimate the minimum size of the image. */
1904 pages = minimum_image_size(saveable);
1905 /*
1906 * To avoid excessive pressure on the normal zone, leave room in it to
1907 * accommodate an image of the minimum size (unless it's already too
1908 * small, in which case don't preallocate pages from it at all).
1909 */
1910 if (avail_normal > pages)
1911 avail_normal -= pages;
1912 else
1913 avail_normal = 0;
1914 if (size < pages)
1915 size = min_t(unsigned long, pages, max_size);
1916
1917 /*
1918 * Let the memory management subsystem know that we're going to need a
1919 * large number of page frames to allocate and make it free some memory.
1920 * NOTE: If this is not done, performance will be hurt badly in some
1921 * test cases.
1922 */
1923 shrink_all_memory(saveable - size);
1924
1925 /*
1926 * The number of saveable pages in memory was too high, so apply some
1927 * pressure to decrease it. First, make room for the largest possible
1928 * image and fail if that doesn't work. Next, try to decrease the size
1929 * of the image as much as indicated by 'size' using allocations from
1930 * highmem and non-highmem zones separately.
1931 */
1932 pages_highmem = preallocate_image_highmem(highmem / 2);
1933 alloc = count - max_size;
1934 if (alloc > pages_highmem)
1935 alloc -= pages_highmem;
1936 else
1937 alloc = 0;
1938 pages = preallocate_image_memory(alloc, avail_normal);
1939 if (pages < alloc) {
1940 /* We have exhausted non-highmem pages, try highmem. */
1941 alloc -= pages;
1942 pages += pages_highmem;
1943 pages_highmem = preallocate_image_highmem(alloc);
1944 if (pages_highmem < alloc) {
1945 pr_err("Image allocation is %lu pages short\n",
1946 alloc - pages_highmem);
1947 goto err_out;
1948 }
1949 pages += pages_highmem;
1950 /*
1951 * size is the desired number of saveable pages to leave in
1952 * memory, so try to preallocate (all memory - size) pages.
1953 */
1954 alloc = (count - pages) - size;
1955 pages += preallocate_image_highmem(alloc);
1956 } else {
1957 /*
1958 * There are approximately max_size saveable pages at this point
1959 * and we want to reduce this number down to size.
1960 */
1961 alloc = max_size - size;
1962 size = preallocate_highmem_fraction(alloc, highmem, count);
1963 pages_highmem += size;
1964 alloc -= size;
1965 size = preallocate_image_memory(alloc, avail_normal);
1966 pages_highmem += preallocate_image_highmem(alloc - size);
1967 pages += pages_highmem + size;
1968 }
1969
1970 /*
1971 * We only need as many page frames for the image as there are saveable
1972 * pages in memory, but we have allocated more. Release the excess
1973 * ones now.
1974 */
1975 pages -= free_unnecessary_pages();
1976
1977 out:
1978 stop = ktime_get();
1979 pr_info("Allocated %lu pages for snapshot\n", pages);
1980 swsusp_show_speed(start, stop, pages, "Allocated");
1981
1982 return 0;
1983
1984 err_out:
1985 swsusp_free();
1986 return -ENOMEM;
1987}
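/*
 * Worked example of the max_size formula above (numbers invented, 4 KiB
 * pages assumed): with count == 1000000 usable page frames, size == 2000
 * metadata pages, PAGES_FOR_IO == 1024 and reserved_size == 1 MiB (256
 * pages), max_size == (1000000 - 3024) / 2 - 512 == 497976 pages, so just
 * under half of usable memory may end up in the image.
 */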
1988
1989#ifdef CONFIG_HIGHMEM
1990/**
1991 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1992 *
1993 * Compute the number of non-highmem pages that will be necessary for creating
1994 * copies of highmem pages.
1995 */
1996static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1997{
1998 unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1999
2000 if (free_highmem >= nr_highmem)
2001 nr_highmem = 0;
2002 else
2003 nr_highmem -= free_highmem;
2004
2005 return nr_highmem;
2006}
2007#else
2008static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
2009#endif /* CONFIG_HIGHMEM */
2010
2011/**
2012 * enough_free_mem - Check if there is enough free memory for the image.
2013 */
2014static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
2015{
2016 struct zone *zone;
2017 unsigned int free = alloc_normal;
2018
2019 for_each_populated_zone(zone)
2020 if (!is_highmem(zone))
2021 free += zone_page_state(zone, NR_FREE_PAGES);
2022
2023 nr_pages += count_pages_for_highmem(nr_highmem);
2024 pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
2025 nr_pages, PAGES_FOR_IO, free);
2026
2027 return free > nr_pages + PAGES_FOR_IO;
2028}
2029
2030#ifdef CONFIG_HIGHMEM
2031/**
2032 * get_highmem_buffer - Allocate a buffer for highmem pages.
2033 *
2034 * If there are some highmem pages in the hibernation image, we may need a
2035 * buffer to copy them and/or load their data.
2036 */
2037static inline int get_highmem_buffer(int safe_needed)
2038{
2039 buffer = get_image_page(GFP_ATOMIC, safe_needed);
2040 return buffer ? 0 : -ENOMEM;
2041}
2042
2043/**
2044 * alloc_highmem_pages - Allocate some highmem pages for the image.
2045 *
2046 * Try to allocate as many pages as needed, but if the number of free highmem
2047 * pages is less than that, allocate them all.
2048 */
2049static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2050 unsigned int nr_highmem)
2051{
2052 unsigned int to_alloc = count_free_highmem_pages();
2053
2054 if (to_alloc > nr_highmem)
2055 to_alloc = nr_highmem;
2056
2057 nr_highmem -= to_alloc;
2058 while (to_alloc-- > 0) {
2059 struct page *page;
2060
2061 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
2062 memory_bm_set_bit(bm, page_to_pfn(page));
2063 }
2064 return nr_highmem;
2065}
2066#else
2067static inline int get_highmem_buffer(int safe_needed) { return 0; }
2068
2069static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2070 unsigned int n) { return 0; }
2071#endif /* CONFIG_HIGHMEM */
2072
2073/**
2074 * swsusp_alloc - Allocate memory for hibernation image.
2075 *
2076 * We first try to allocate as many highmem pages as there are
2077 * saveable highmem pages in the system. If that fails, we allocate
2078 * non-highmem pages for the copies of the remaining highmem ones.
2079 *
2080 * In this approach it is likely that the copies of highmem pages will
2081 * also be located in the high memory, because of the way in which
2082 * copy_data_pages() works.
2083 */
2084static int swsusp_alloc(struct memory_bitmap *copy_bm,
2085 unsigned int nr_pages, unsigned int nr_highmem)
2086{
2087 if (nr_highmem > 0) {
2088 if (get_highmem_buffer(PG_ANY))
2089 goto err_out;
2090 if (nr_highmem > alloc_highmem) {
2091 nr_highmem -= alloc_highmem;
2092 nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
2093 }
2094 }
2095 if (nr_pages > alloc_normal) {
2096 nr_pages -= alloc_normal;
2097 while (nr_pages-- > 0) {
2098 struct page *page;
2099
2100 page = alloc_image_page(GFP_ATOMIC);
2101 if (!page)
2102 goto err_out;
2103 memory_bm_set_bit(copy_bm, page_to_pfn(page));
2104 }
2105 }
2106
2107 return 0;
2108
2109 err_out:
2110 swsusp_free();
2111 return -ENOMEM;
2112}
2113
2114asmlinkage __visible int swsusp_save(void)
2115{
2116 unsigned int nr_pages, nr_highmem;
2117
2118 pr_info("Creating image:\n");
2119
2120 drain_local_pages(NULL);
2121 nr_pages = count_data_pages();
2122 nr_highmem = count_highmem_pages();
2123 pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
2124
2125 if (!enough_free_mem(nr_pages, nr_highmem)) {
2126 pr_err("Not enough free memory\n");
2127 return -ENOMEM;
2128 }
2129
2130 if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
2131 pr_err("Memory allocation failed\n");
2132 return -ENOMEM;
2133 }
2134
2135 /*
2136 * While allocating the suspend pagedir, new cold pages may appear.
2137 * Kill them.
2138 */
2139 drain_local_pages(NULL);
2140 nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);
2141
2142 /*
2143 * End of critical section. From now on, we can write to memory,
2144 * but we should not touch disk. In particular, we must _not_
2145 * touch swap space! Except that we must write out our image, of course.
2146 */
2147 nr_pages += nr_highmem;
2148 /* We don't actually copy the zero pages */
2149 nr_zero_pages = nr_pages - nr_copy_pages;
2150 nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2151
2152 pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
2153
2154 return 0;
2155}
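/*
 * Sizing note (illustrative; assumes 4 KiB pages and 8-byte longs): each
 * metadata page holds PAGE_SIZE / sizeof(long) == 512 packed PFNs, so an
 * image spanning 100000 page frames needs DIV_ROUND_UP(100000, 512) == 196
 * metadata pages.
 */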
2156
2157#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2158static int init_header_complete(struct swsusp_info *info)
2159{
2160 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2161 info->version_code = LINUX_VERSION_CODE;
2162 return 0;
2163}
2164
2165static const char *check_image_kernel(struct swsusp_info *info)
2166{
2167 if (info->version_code != LINUX_VERSION_CODE)
2168 return "kernel version";
2169 if (strcmp(info->uts.sysname, init_utsname()->sysname))
2170 return "system type";
2171 if (strcmp(info->uts.release, init_utsname()->release))
2172 return "kernel release";
2173 if (strcmp(info->uts.version, init_utsname()->version))
2174 return "version";
2175 if (strcmp(info->uts.machine, init_utsname()->machine))
2176 return "machine";
2177 return NULL;
2178}
2179#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2180
2181unsigned long snapshot_get_image_size(void)
2182{
2183 return nr_copy_pages + nr_meta_pages + 1;
2184}
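/*
 * E.g. (continuing the invented numbers above, with no zero pages): 100000
 * copied pages plus 196 metadata pages plus 1 header page give an image of
 * 100197 pages, about 391 MiB with 4 KiB pages.
 */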
2185
2186static int init_header(struct swsusp_info *info)
2187{
2188 memset(info, 0, sizeof(struct swsusp_info));
2189 info->num_physpages = get_num_physpages();
2190 info->image_pages = nr_copy_pages;
2191 info->pages = snapshot_get_image_size();
2192 info->size = info->pages;
2193 info->size <<= PAGE_SHIFT;
2194 return init_header_complete(info);
2195}
2196
2197#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
2198#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
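/*
 * Encoding example (64-bit): a zero-filled page with PFN 0x1234 is stored
 * in the metadata as 0x8000000000001234. Clearing the top bit with
 * ENCODED_PFN_MASK recovers the PFN; the flag tells the reader that the
 * page's contents are not part of the image.
 */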
2199
2200/**
2201 * pack_pfns - Prepare PFNs for saving.
2202 * @bm: Memory bitmap.
2203 * @buf: Memory buffer to store the PFNs in.
2204 * @zero_bm: Memory bitmap containing PFNs of zero pages.
2205 *
2206 * PFNs corresponding to set bits in @bm are stored in the area of memory
2207 * pointed to by @buf (1 page at a time). Pages which were filled with only
2208 * zeros will have the highest bit set in the packed format to distinguish
2209 * them from PFNs which will be contained in the image file.
2210 */
2211static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
2212 struct memory_bitmap *zero_bm)
2213{
2214 int j;
2215
2216 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2217 buf[j] = memory_bm_next_pfn(bm);
2218 if (unlikely(buf[j] == BM_END_OF_MAP))
2219 break;
2220 if (memory_bm_test_bit(zero_bm, buf[j]))
2221 buf[j] |= ENCODED_PFN_ZERO_FLAG;
2222 }
2223}
2224
2225/**
2226 * snapshot_read_next - Get the address to read the next image page from.
2227 * @handle: Snapshot handle to be used for the reading.
2228 *
2229 * On the first call, @handle should point to a zeroed snapshot_handle
2230 * structure, which is then populated; a pointer to it should be passed to
2231 * this function on every subsequent call.
2232 *
2233 * On success, the function returns a positive number. Then, the caller
2234 * is allowed to read up to the returned number of bytes from the memory
2235 * location computed by the data_of() macro.
2236 *
2237 * The function returns 0 to indicate the end of the data stream,
2238 * and negative numbers are returned on errors. If that happens, the structure
2239 * pointed to by @handle is not updated and should not be used any more.
2240 */
2241int snapshot_read_next(struct snapshot_handle *handle)
2242{
2243 if (handle->cur > nr_meta_pages + nr_copy_pages)
2244 return 0;
2245
2246 if (!buffer) {
2247 /* This makes the buffer be freed by swsusp_free() */
2248 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2249 if (!buffer)
2250 return -ENOMEM;
2251 }
2252 if (!handle->cur) {
2253 int error;
2254
2255 error = init_header((struct swsusp_info *)buffer);
2256 if (error)
2257 return error;
2258 handle->buffer = buffer;
2259 memory_bm_position_reset(&orig_bm);
2260 memory_bm_position_reset(&copy_bm);
2261 } else if (handle->cur <= nr_meta_pages) {
2262 clear_page(buffer);
2263 pack_pfns(buffer, &orig_bm, &zero_bm);
2264 } else {
2265 struct page *page;
2266
2267 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2268 if (PageHighMem(page)) {
2269 /*
2270 * Highmem pages are copied to the buffer,
2271 * because we can't return with a kmapped
2272 * highmem page (we may not be called again).
2273 */
2274 void *kaddr;
2275
2276 kaddr = kmap_atomic(page);
2277 copy_page(buffer, kaddr);
2278 kunmap_atomic(kaddr);
2279 handle->buffer = buffer;
2280 } else {
2281 handle->buffer = page_address(page);
2282 }
2283 }
2284 handle->cur++;
2285 return PAGE_SIZE;
2286}
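/*
 * Sketch of the expected calling pattern (illustrative; write_page() is a
 * hypothetical helper standing in for the real image writer):
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle)) > 0)
 *		write_page(data_of(handle), n);
 *
 * A return value of 0 means the whole image has been produced; a negative
 * value is an error code.
 */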
2287
2288static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2289 struct memory_bitmap *src)
2290{
2291 unsigned long pfn;
2292
2293 memory_bm_position_reset(src);
2294 pfn = memory_bm_next_pfn(src);
2295 while (pfn != BM_END_OF_MAP) {
2296 memory_bm_set_bit(dst, pfn);
2297 pfn = memory_bm_next_pfn(src);
2298 }
2299}
2300
2301/**
2302 * mark_unsafe_pages - Mark pages that were used before hibernation.
2303 *
2304 * Mark the pages that cannot be used for storing the image during restoration,
2305 * because they conflict with the pages that had been used before hibernation.
2306 */
2307static void mark_unsafe_pages(struct memory_bitmap *bm)
2308{
2309 unsigned long pfn;
2310
2311 /* Clear the "free"/"unsafe" bit for all PFNs */
2312 memory_bm_position_reset(free_pages_map);
2313 pfn = memory_bm_next_pfn(free_pages_map);
2314 while (pfn != BM_END_OF_MAP) {
2315 memory_bm_clear_current(free_pages_map);
2316 pfn = memory_bm_next_pfn(free_pages_map);
2317 }
2318
2319 /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2320 duplicate_memory_bitmap(free_pages_map, bm);
2321
2322 allocated_unsafe_pages = 0;
2323}
2324
2325static int check_header(struct swsusp_info *info)
2326{
2327 const char *reason;
2328
2329 reason = check_image_kernel(info);
2330 if (!reason && info->num_physpages != get_num_physpages())
2331 reason = "memory size";
2332 if (reason) {
2333 pr_err("Image mismatch: %s\n", reason);
2334 return -EPERM;
2335 }
2336 return 0;
2337}
2338
2339/**
2340 * load_header - Check the image header and copy the data from it.
2341 */
2342static int load_header(struct swsusp_info *info)
2343{
2344 int error;
2345
2346 restore_pblist = NULL;
2347 error = check_header(info);
2348 if (!error) {
2349 nr_copy_pages = info->image_pages;
2350 nr_meta_pages = info->pages - info->image_pages - 1;
2351 }
2352 return error;
2353}
2354
2355/**
2356 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2357 * @bm: Memory bitmap.
2358 * @buf: Area of memory containing the PFNs.
2359 * @zero_bm: Memory bitmap with the zero PFNs marked.
2360 *
2361 * For each element of the array pointed to by @buf (1 page at a time), set the
2362 * corresponding bit in @bm. If the page was originally populated with only
2363 * zeros then a corresponding bit will also be set in @zero_bm.
2364 */
2365static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
2366 struct memory_bitmap *zero_bm)
2367{
2368 unsigned long decoded_pfn;
2369 bool zero;
2370 int j;
2371
2372 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2373 if (unlikely(buf[j] == BM_END_OF_MAP))
2374 break;
2375
2376 zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
2377 decoded_pfn = buf[j] & ENCODED_PFN_MASK;
2378 if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
2379 memory_bm_set_bit(bm, decoded_pfn);
2380 if (zero) {
2381 memory_bm_set_bit(zero_bm, decoded_pfn);
2382 nr_zero_pages++;
2383 }
2384 } else {
2385 if (!pfn_valid(decoded_pfn))
2386 pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
2387 (unsigned long long)PFN_PHYS(decoded_pfn));
2388 return -EFAULT;
2389 }
2390 }
2391
2392 return 0;
2393}
2394
2395#ifdef CONFIG_HIGHMEM
2396/*
2397 * struct highmem_pbe is used for creating the list of highmem pages that
2398 * should be restored atomically during the resume from disk, because the page
2399 * frames they had occupied before the suspend are in use.
2400 */
2401struct highmem_pbe {
2402 struct page *copy_page; /* data is here now */
2403 struct page *orig_page; /* data was here before the suspend */
2404 struct highmem_pbe *next;
2405};
2406
2407/*
2408 * List of highmem PBEs needed for restoring the highmem pages that were
2409 * allocated before the suspend and included in the suspend image, but have
2410 * also been allocated by the "resume" kernel, so their contents cannot be
2411 * written directly to their "original" page frames.
2412 */
2413static struct highmem_pbe *highmem_pblist;
2414
2415/**
2416 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2417 * @bm: Memory bitmap.
2418 *
2419 * The bits in @bm that correspond to image pages are assumed to be set.
2420 */
2421static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2422{
2423 unsigned long pfn;
2424 unsigned int cnt = 0;
2425
2426 memory_bm_position_reset(bm);
2427 pfn = memory_bm_next_pfn(bm);
2428 while (pfn != BM_END_OF_MAP) {
2429 if (PageHighMem(pfn_to_page(pfn)))
2430 cnt++;
2431
2432 pfn = memory_bm_next_pfn(bm);
2433 }
2434 return cnt;
2435}
2436
2437static unsigned int safe_highmem_pages;
2438
2439static struct memory_bitmap *safe_highmem_bm;
2440
2441/**
2442 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2443 * @bm: Pointer to an uninitialized memory bitmap structure.
2444 * @nr_highmem_p: Pointer to the number of highmem image pages.
2445 *
2446 * Try to allocate as many highmem pages as there are highmem image pages
2447 * (@nr_highmem_p points to the variable containing the number of highmem image
2448 * pages). The pages that are "safe" (ie. will not be overwritten when the
2449 * hibernation image is restored entirely) have the corresponding bits set in
2450 * @bm (it must be uninitialized).
2451 *
2452 * NOTE: This function should not be called if there are no highmem image pages.
2453 */
2454static int prepare_highmem_image(struct memory_bitmap *bm,
2455 unsigned int *nr_highmem_p)
2456{
2457 unsigned int to_alloc;
2458
2459 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2460 return -ENOMEM;
2461
2462 if (get_highmem_buffer(PG_SAFE))
2463 return -ENOMEM;
2464
2465 to_alloc = count_free_highmem_pages();
2466 if (to_alloc > *nr_highmem_p)
2467 to_alloc = *nr_highmem_p;
2468 else
2469 *nr_highmem_p = to_alloc;
2470
2471 safe_highmem_pages = 0;
2472 while (to_alloc-- > 0) {
2473 struct page *page;
2474
2475 page = alloc_page(__GFP_HIGHMEM);
2476 if (!swsusp_page_is_free(page)) {
2477 /* The page is "safe", set its bit in the bitmap */
2478 memory_bm_set_bit(bm, page_to_pfn(page));
2479 safe_highmem_pages++;
2480 }
2481 /* Mark the page as allocated */
2482 swsusp_set_page_forbidden(page);
2483 swsusp_set_page_free(page);
2484 }
2485 memory_bm_position_reset(bm);
2486 safe_highmem_bm = bm;
2487 return 0;
2488}
2489
2490static struct page *last_highmem_page;
2491
2492/**
2493 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2494 *
2495 * For a given highmem image page get a buffer that suspend_write_next() should
2496 * return to its caller to write to.
2497 *
2498 * If the page is to be saved to its "original" page frame or a copy of
2499 * the page is to be made in highmem, @buffer is returned. Otherwise,
2500 * the copy of the page is to be made in normal memory, so the address of
2501 * the copy is returned.
2502 *
2503 * If @buffer is returned, the caller of suspend_write_next() will write
2504 * the page's contents to @buffer, so they will have to be copied to the
2505 * right location on the next call to suspend_write_next() and it is done
2506 * with the help of copy_last_highmem_page(). For this purpose, if
2507 * @buffer is returned, @last_highmem_page is set to the page to which
2508 * the data will have to be copied from @buffer.
2509 */
2510static void *get_highmem_page_buffer(struct page *page,
2511 struct chain_allocator *ca)
2512{
2513 struct highmem_pbe *pbe;
2514 void *kaddr;
2515
2516 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2517 /*
2518 * We have allocated the "original" page frame and we can
2519 * use it directly to store the loaded page.
2520 */
2521 last_highmem_page = page;
2522 return buffer;
2523 }
2524 /*
2525 * The "original" page frame has not been allocated and we have to
2526 * use a "safe" page frame to store the loaded page.
2527 */
2528 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2529 if (!pbe) {
2530 swsusp_free();
2531 return ERR_PTR(-ENOMEM);
2532 }
2533 pbe->orig_page = page;
2534 if (safe_highmem_pages > 0) {
2535 struct page *tmp;
2536
2537 /* Copy of the page will be stored in high memory */
2538 kaddr = buffer;
2539 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2540 safe_highmem_pages--;
2541 last_highmem_page = tmp;
2542 pbe->copy_page = tmp;
2543 } else {
2544 /* Copy of the page will be stored in normal memory */
2545 kaddr = __get_safe_page(ca->gfp_mask);
2546 if (!kaddr)
2547 return ERR_PTR(-ENOMEM);
2548 pbe->copy_page = virt_to_page(kaddr);
2549 }
2550 pbe->next = highmem_pblist;
2551 highmem_pblist = pbe;
2552 return kaddr;
2553}
2554
2555/**
2556 * copy_last_highmem_page - Copy the most recent highmem image page.
2557 *
2558 * Copy the contents of a highmem image page from @buffer, where the caller of
2559 * snapshot_write_next() has stored them, to the right location represented by
2560 * @last_highmem_page.
2561 */
2562static void copy_last_highmem_page(void)
2563{
2564 if (last_highmem_page) {
2565 void *dst;
2566
2567 dst = kmap_atomic(last_highmem_page);
2568 copy_page(dst, buffer);
2569 kunmap_atomic(dst);
2570 last_highmem_page = NULL;
2571 }
2572}
2573
2574static inline int last_highmem_page_copied(void)
2575{
2576 return !last_highmem_page;
2577}
2578
2579static inline void free_highmem_data(void)
2580{
2581 if (safe_highmem_bm)
2582 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2583
2584 if (buffer)
2585 free_image_page(buffer, PG_UNSAFE_CLEAR);
2586}
2587#else
2588static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2589
2590static inline int prepare_highmem_image(struct memory_bitmap *bm,
2591 unsigned int *nr_highmem_p) { return 0; }
2592
2593static inline void *get_highmem_page_buffer(struct page *page,
2594 struct chain_allocator *ca)
2595{
2596 return ERR_PTR(-EINVAL);
2597}
2598
2599static inline void copy_last_highmem_page(void) {}
2600static inline int last_highmem_page_copied(void) { return 1; }
2601static inline void free_highmem_data(void) {}
2602#endif /* CONFIG_HIGHMEM */
2603
2604#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
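/*
 * E.g. with 4 KiB pages and 64-bit pointers, LINKED_PAGE_DATA_SIZE is
 * PAGE_SIZE minus one pointer (4088 bytes) and sizeof(struct pbe) is 24
 * bytes, so each linked page holds 170 PBEs.
 */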
2605
2606/**
2607 * prepare_image - Make room for loading hibernation image.
2608 * @new_bm: Uninitialized memory bitmap structure.
2609 * @bm: Memory bitmap with unsafe pages marked.
2610 * @zero_bm: Memory bitmap containing the zero pages.
2611 *
2612 * Use @bm to mark the pages that will be overwritten in the process of
2613 * restoring the system memory state from the suspend image ("unsafe" pages)
2614 * and allocate memory for the image.
2615 *
2616 * The idea is to allocate a new memory bitmap first and then allocate
2617 * as many pages as needed for image data, but without specifying what those
2618 * pages will be used for just yet. Instead, we mark them all as allocated and
2619 * create a list of "safe" pages to be used later. On systems with high
2620 * memory a list of "safe" highmem pages is created too.
2621 *
2622 * Because it was not known which pages were unsafe when @zero_bm was created,
2623 * make a copy of it and recreate it within safe pages.
2624 */
2625static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
2626 struct memory_bitmap *zero_bm)
2627{
2628 unsigned int nr_pages, nr_highmem;
2629 struct memory_bitmap tmp;
2630 struct linked_page *lp;
2631 int error;
2632
2633 /* If there is no highmem, the buffer will not be necessary */
2634 free_image_page(buffer, PG_UNSAFE_CLEAR);
2635 buffer = NULL;
2636
2637 nr_highmem = count_highmem_image_pages(bm);
2638 mark_unsafe_pages(bm);
2639
2640 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2641 if (error)
2642 goto Free;
2643
2644 duplicate_memory_bitmap(new_bm, bm);
2645 memory_bm_free(bm, PG_UNSAFE_KEEP);
2646
2647 /* Make a copy of zero_bm so it can be created in safe pages */
2648 error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
2649 if (error)
2650 goto Free;
2651
2652 duplicate_memory_bitmap(&tmp, zero_bm);
2653 memory_bm_free(zero_bm, PG_UNSAFE_KEEP);
2654
2655 /* Recreate zero_bm in safe pages */
2656 error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
2657 if (error)
2658 goto Free;
2659
2660 duplicate_memory_bitmap(zero_bm, &tmp);
2661 memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
2662 /* At this point zero_bm is in safe pages and it can be used for restoring. */
2663
2664 if (nr_highmem > 0) {
2665 error = prepare_highmem_image(bm, &nr_highmem);
2666 if (error)
2667 goto Free;
2668 }
2669 /*
2670 * Reserve some safe pages for potential later use.
2671 *
2672 * NOTE: This way we make sure there will be enough safe pages for the
2673 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2674 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2675 *
2676 * Note that nr_copy_pages cannot be less than allocated_unsafe_pages either.
2677 */
2678 nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2679 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2680 while (nr_pages > 0) {
2681 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2682 if (!lp) {
2683 error = -ENOMEM;
2684 goto Free;
2685 }
2686 lp->next = safe_pages_list;
2687 safe_pages_list = lp;
2688 nr_pages--;
2689 }
2690 /* Preallocate memory for the image */
2691 nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2692 while (nr_pages > 0) {
2693 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2694 if (!lp) {
2695 error = -ENOMEM;
2696 goto Free;
2697 }
2698 if (!swsusp_page_is_free(virt_to_page(lp))) {
2699 /* The page is "safe", add it to the list */
2700 lp->next = safe_pages_list;
2701 safe_pages_list = lp;
2702 }
2703 /* Mark the page as allocated */
2704 swsusp_set_page_forbidden(virt_to_page(lp));
2705 swsusp_set_page_free(virt_to_page(lp));
2706 nr_pages--;
2707 }
2708 return 0;
2709
2710 Free:
2711 swsusp_free();
2712 return error;
2713}
2714
2715/**
2716 * get_buffer - Get the address to store the next image data page.
2717 *
2718 * Get the address that snapshot_write_next() should return to its caller to
2719 * write to.
2720 */
2721static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2722{
2723 struct pbe *pbe;
2724 struct page *page;
2725 unsigned long pfn = memory_bm_next_pfn(bm);
2726
2727 if (pfn == BM_END_OF_MAP)
2728 return ERR_PTR(-EFAULT);
2729
2730 page = pfn_to_page(pfn);
2731 if (PageHighMem(page))
2732 return get_highmem_page_buffer(page, ca);
2733
2734 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2735 /*
2736 * We have allocated the "original" page frame and we can
2737 * use it directly to store the loaded page.
2738 */
2739 return page_address(page);
2740
2741 /*
2742 * The "original" page frame has not been allocated and we have to
2743 * use a "safe" page frame to store the loaded page.
2744 */
2745 pbe = chain_alloc(ca, sizeof(struct pbe));
2746 if (!pbe) {
2747 swsusp_free();
2748 return ERR_PTR(-ENOMEM);
2749 }
2750 pbe->orig_address = page_address(page);
2751 pbe->address = __get_safe_page(ca->gfp_mask);
2752 if (!pbe->address)
2753 return ERR_PTR(-ENOMEM);
2754 pbe->next = restore_pblist;
2755 restore_pblist = pbe;
2756 return pbe->address;
2757}
2758
2759/**
2760 * snapshot_write_next - Get the address to store the next image page.
2761 * @handle: Snapshot handle structure to guide the writing.
2762 *
2763 * On the first call, @handle should point to a zeroed snapshot_handle
2764 * structure, which is then populated; a pointer to it should be passed to
2765 * this function on every subsequent call.
2766 *
2767 * On success, the function returns a positive number. Then, the caller
2768 * is allowed to write up to the returned number of bytes to the memory
2769 * location computed by the data_of() macro.
2770 *
2771 * The function returns 0 to indicate the "end of file" condition. Negative
2772 * numbers are returned on errors, in which case the structure pointed to by
2773 * @handle is not updated and should not be used any more.
2774 */
2775int snapshot_write_next(struct snapshot_handle *handle)
2776{
2777 static struct chain_allocator ca;
2778 int error;
2779
2780next:
2781 /* Check if we have already loaded the entire image */
2782 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
2783 return 0;
2784
2785 if (!handle->cur) {
2786 if (!buffer)
2787 /* This makes the buffer be freed by swsusp_free() */
2788 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2789
2790 if (!buffer)
2791 return -ENOMEM;
2792
2793 handle->buffer = buffer;
2794 } else if (handle->cur == 1) {
2795 error = load_header(buffer);
2796 if (error)
2797 return error;
2798
2799 safe_pages_list = NULL;
2800
2801 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2802 if (error)
2803 return error;
2804
2805 error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
2806 if (error)
2807 return error;
2808
2809 nr_zero_pages = 0;
2810
2811 hibernate_restore_protection_begin();
2812 } else if (handle->cur <= nr_meta_pages + 1) {
2813 error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
2814 if (error)
2815 return error;
2816
2817 if (handle->cur == nr_meta_pages + 1) {
2818 error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
2819 if (error)
2820 return error;
2821
2822 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2823 memory_bm_position_reset(&orig_bm);
2824 memory_bm_position_reset(&zero_bm);
2825 restore_pblist = NULL;
2826 handle->buffer = get_buffer(&orig_bm, &ca);
2827 if (IS_ERR(handle->buffer))
2828 return PTR_ERR(handle->buffer);
2829 }
2830 } else {
2831 copy_last_highmem_page();
2832 error = hibernate_restore_protect_page(handle->buffer);
2833 if (error)
2834 return error;
2835 handle->buffer = get_buffer(&orig_bm, &ca);
2836 if (IS_ERR(handle->buffer))
2837 return PTR_ERR(handle->buffer);
2838 }
2839 handle->sync_read = (handle->buffer == buffer);
2840 handle->cur++;
2841
2842 /* Zero pages were not included in the image; zero the buffer and move on. */
2843 if (handle->cur > nr_meta_pages + 1 &&
2844 memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
2845 memset(handle->buffer, 0, PAGE_SIZE);
2846 goto next;
2847 }
2848
2849 return PAGE_SIZE;
2850}
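/*
 * Mirror of the snapshot_read_next() pattern (illustrative; read_page() is
 * a hypothetical helper standing in for the real image reader):
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_write_next(&handle)) > 0)
 *		read_page(data_of(handle), n);
 *
 * followed by snapshot_write_finalize() once the stream is exhausted.
 */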
2851
2852/**
2853 * snapshot_write_finalize - Complete the loading of a hibernation image.
2854 *
2855 * Must be called after the last call to snapshot_write_next() in case the last
2856 * page in the image happens to be a highmem page and its contents should be
2857 * stored in highmem. Additionally, it recycles bitmap memory that's not
2858 * necessary any more.
2859 */
2860int snapshot_write_finalize(struct snapshot_handle *handle)
2861{
2862 int error;
2863
2864 copy_last_highmem_page();
2865 error = hibernate_restore_protect_page(handle->buffer);
2866 /* Do that only if we have loaded the image entirely */
2867 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
2868 memory_bm_recycle(&orig_bm);
2869 free_highmem_data();
2870 }
2871 return error;
2872}
2873
2874int snapshot_image_loaded(struct snapshot_handle *handle)
2875{
2876 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2877 handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
2878}
2879
2880#ifdef CONFIG_HIGHMEM
2881/* Assumes that @buf is ready and points to a "safe" page */
2882static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2883 void *buf)
2884{
2885 void *kaddr1, *kaddr2;
2886
2887 kaddr1 = kmap_atomic(p1);
2888 kaddr2 = kmap_atomic(p2);
2889 copy_page(buf, kaddr1);
2890 copy_page(kaddr1, kaddr2);
2891 copy_page(kaddr2, buf);
2892 kunmap_atomic(kaddr2);
2893 kunmap_atomic(kaddr1);
2894}
2895
2896/**
2897 * restore_highmem - Put highmem image pages into their original locations.
2898 *
2899 * For each highmem page that was in use before hibernation and is included in
2900 * the image, and also has been allocated by the "restore" kernel, swap its
2901 * current contents with the previous (ie. "before hibernation") ones.
2902 *
2903 * If the restore eventually fails, we can call this function once again and
2904 * restore the highmem state as seen by the restore kernel.
2905 */
2906int restore_highmem(void)
2907{
2908 struct highmem_pbe *pbe = highmem_pblist;
2909 void *buf;
2910
2911 if (!pbe)
2912 return 0;
2913
2914 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2915 if (!buf)
2916 return -ENOMEM;
2917
2918 while (pbe) {
2919 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2920 pbe = pbe->next;
2921 }
2922 free_image_page(buf, PG_UNSAFE_CLEAR);
2923 return 0;
2924}
2925#endif /* CONFIG_HIGHMEM */