   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/power/snapshot.c
   4 *
   5 * This file provides system snapshot/restore functionality for swsusp.
   6 *
   7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
   8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 */
  10
  11#define pr_fmt(fmt) "PM: " fmt
  12
  13#include <linux/version.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/suspend.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/spinlock.h>
  20#include <linux/kernel.h>
  21#include <linux/pm.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/memblock.h>
  25#include <linux/nmi.h>
  26#include <linux/syscalls.h>
  27#include <linux/console.h>
  28#include <linux/highmem.h>
  29#include <linux/list.h>
  30#include <linux/slab.h>
  31#include <linux/compiler.h>
  32#include <linux/ktime.h>
  33#include <linux/set_memory.h>
  34
  35#include <linux/uaccess.h>
  36#include <asm/mmu_context.h>
  37#include <asm/pgtable.h>
  38#include <asm/tlbflush.h>
  39#include <asm/io.h>
  40
  41#include "power.h"
  42
  43#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
  44static bool hibernate_restore_protection;
  45static bool hibernate_restore_protection_active;
  46
  47void enable_restore_image_protection(void)
  48{
  49	hibernate_restore_protection = true;
  50}
  51
  52static inline void hibernate_restore_protection_begin(void)
  53{
  54	hibernate_restore_protection_active = hibernate_restore_protection;
  55}
  56
  57static inline void hibernate_restore_protection_end(void)
  58{
  59	hibernate_restore_protection_active = false;
  60}
  61
  62static inline void hibernate_restore_protect_page(void *page_address)
  63{
  64	if (hibernate_restore_protection_active)
  65		set_memory_ro((unsigned long)page_address, 1);
  66}
  67
  68static inline void hibernate_restore_unprotect_page(void *page_address)
  69{
  70	if (hibernate_restore_protection_active)
  71		set_memory_rw((unsigned long)page_address, 1);
  72}
  73#else
  74static inline void hibernate_restore_protection_begin(void) {}
  75static inline void hibernate_restore_protection_end(void) {}
  76static inline void hibernate_restore_protect_page(void *page_address) {}
  77static inline void hibernate_restore_unprotect_page(void *page_address) {}
  78#endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
  79
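    /*
     * Rough pairing sketch for the helpers above (they are driven by the
     * image-loading code later in this file and by swsusp_free() below):
     *
     *	hibernate_restore_protection_begin();
     *	...
     *	hibernate_restore_protect_page(page_address(page));
     *	...
     *	hibernate_restore_unprotect_page(page_address(page));
     *	hibernate_restore_protection_end();
     */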
  80static int swsusp_page_is_free(struct page *);
  81static void swsusp_set_page_forbidden(struct page *);
  82static void swsusp_unset_page_forbidden(struct page *);
  83
  84/*
  85 * Number of bytes to reserve for memory allocations made by device drivers
  86 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
  87 * cause image creation to fail (tunable via /sys/power/reserved_size).
  88 */
  89unsigned long reserved_size;
  90
  91void __init hibernate_reserved_size_init(void)
  92{
  93	reserved_size = SPARE_PAGES * PAGE_SIZE;
  94}
  95
  96/*
  97 * Preferred image size in bytes (tunable via /sys/power/image_size).
  98 * When it is set to N, swsusp will do its best to ensure the image
  99 * size will not exceed N bytes, but if that is impossible, it will
 100 * try to create the smallest image possible.
 101 */
 102unsigned long image_size;
 103
 104void __init hibernate_image_size_init(void)
 105{
 106	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
 107}
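    /*
     * Worked example, assuming 4 KiB pages: with 8 GiB of RAM,
     * totalram_pages() is about 2097152, so the default image_size is
     * ((2097152 * 2) / 5) * 4096 bytes, i.e. roughly 3.2 GiB (2/5 of RAM).
     */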
 108
 109/*
 110 * List of PBEs needed for restoring the pages that were allocated before
 111 * the suspend and included in the suspend image, but have also been
 112 * allocated by the "resume" kernel, so their contents cannot be written
 113 * directly to their "original" page frames.
 114 */
 115struct pbe *restore_pblist;
 116
 117/* struct linked_page is used to build chains of pages */
 118
 119#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
 120
 121struct linked_page {
 122	struct linked_page *next;
 123	char data[LINKED_PAGE_DATA_SIZE];
 124} __packed;
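    /* On 64-bit with 4 KiB pages this makes LINKED_PAGE_DATA_SIZE 4096 - 8 = 4088 bytes. */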
 125
 126/*
 127 * List of "safe" pages (ie. pages that were not used by the image kernel
 128 * before hibernation) that may be used as temporary storage for image kernel
 129 * memory contents.
 130 */
 131static struct linked_page *safe_pages_list;
 132
 133/* Pointer to an auxiliary buffer (1 page) */
 134static void *buffer;
 135
 136#define PG_ANY		0
 137#define PG_SAFE		1
 138#define PG_UNSAFE_CLEAR	1
 139#define PG_UNSAFE_KEEP	0
 140
 141static unsigned int allocated_unsafe_pages;
 142
 143/**
 144 * get_image_page - Allocate a page for a hibernation image.
 145 * @gfp_mask: GFP mask for the allocation.
 146 * @safe_needed: Get pages that were not used before hibernation (restore only)
 147 *
 148 * During image restoration, for storing the PBE list and the image data, we can
 149 * only use memory pages that do not conflict with the pages used before
 150 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 151 * using allocated_unsafe_pages.
 152 *
 153 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 154 * swsusp_free() can release it.
 155 */
 156static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 157{
 158	void *res;
 159
 160	res = (void *)get_zeroed_page(gfp_mask);
 161	if (safe_needed)
 162		while (res && swsusp_page_is_free(virt_to_page(res))) {
 163			/* The page is unsafe, mark it for swsusp_free() */
 164			swsusp_set_page_forbidden(virt_to_page(res));
 165			allocated_unsafe_pages++;
 166			res = (void *)get_zeroed_page(gfp_mask);
 167		}
 168	if (res) {
 169		swsusp_set_page_forbidden(virt_to_page(res));
 170		swsusp_set_page_free(virt_to_page(res));
 171	}
 172	return res;
 173}
 174
 175static void *__get_safe_page(gfp_t gfp_mask)
 176{
 177	if (safe_pages_list) {
 178		void *ret = safe_pages_list;
 179
 180		safe_pages_list = safe_pages_list->next;
 181		memset(ret, 0, PAGE_SIZE);
 182		return ret;
 183	}
 184	return get_image_page(gfp_mask, PG_SAFE);
 185}
 186
 187unsigned long get_safe_page(gfp_t gfp_mask)
 188{
 189	return (unsigned long)__get_safe_page(gfp_mask);
 190}
 191
 192static struct page *alloc_image_page(gfp_t gfp_mask)
 193{
 194	struct page *page;
 195
 196	page = alloc_page(gfp_mask);
 197	if (page) {
 198		swsusp_set_page_forbidden(page);
 199		swsusp_set_page_free(page);
 200	}
 201	return page;
 202}
 203
 204static void recycle_safe_page(void *page_address)
 205{
 206	struct linked_page *lp = page_address;
 207
 208	lp->next = safe_pages_list;
 209	safe_pages_list = lp;
 210}
 211
 212/**
 213 * free_image_page - Free a page allocated for hibernation image.
 214 * @addr: Address of the page to free.
 215 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 216 *
 217 * The page to free should have been allocated by get_image_page() (page flags
 218 * set by it are affected).
 219 */
 220static inline void free_image_page(void *addr, int clear_nosave_free)
 221{
 222	struct page *page;
 223
 224	BUG_ON(!virt_addr_valid(addr));
 225
 226	page = virt_to_page(addr);
 227
 228	swsusp_unset_page_forbidden(page);
 229	if (clear_nosave_free)
 230		swsusp_unset_page_free(page);
 231
 232	__free_page(page);
 233}
 234
 235static inline void free_list_of_pages(struct linked_page *list,
 236				      int clear_page_nosave)
 237{
 238	while (list) {
 239		struct linked_page *lp = list->next;
 240
 241		free_image_page(list, clear_page_nosave);
 242		list = lp;
 243	}
 244}
 245
 246/*
 247 * struct chain_allocator is used for allocating small objects out of
 248 * a linked list of pages called 'the chain'.
 249 *
 250 * The chain grows each time there is no room for a new object in
 251 * the current page.  The allocated objects cannot be freed individually.
 252 * It is only possible to free them all at once, by freeing the entire
 253 * chain.
 254 *
 255 * NOTE: The chain allocator may be inefficient if the allocated objects
 256 * are not much smaller than PAGE_SIZE.
 257 */
 258struct chain_allocator {
 259	struct linked_page *chain;	/* the chain */
 260	unsigned int used_space;	/* total size of objects allocated out
 261					   of the current page */
 262	gfp_t gfp_mask;		/* mask for allocating pages */
 263	int safe_needed;	/* if set, only "safe" pages are allocated */
 264};
 265
 266static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
 267		       int safe_needed)
 268{
 269	ca->chain = NULL;
 270	ca->used_space = LINKED_PAGE_DATA_SIZE;
 271	ca->gfp_mask = gfp_mask;
 272	ca->safe_needed = safe_needed;
 273}
 274
 275static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 276{
 277	void *ret;
 278
 279	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 280		struct linked_page *lp;
 281
 282		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
 283					get_image_page(ca->gfp_mask, PG_ANY);
 284		if (!lp)
 285			return NULL;
 286
 287		lp->next = ca->chain;
 288		ca->chain = lp;
 289		ca->used_space = 0;
 290	}
 291	ret = ca->chain->data + ca->used_space;
 292	ca->used_space += size;
 293	return ret;
 294}
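    /*
     * Minimal usage sketch (memory_bm_create() below follows this
     * pattern); objects can only be released all at once, by freeing
     * the whole chain:
     *
     *	struct chain_allocator ca;
     *	void *obj;
     *
     *	chain_init(&ca, GFP_KERNEL, PG_ANY);
     *	obj = chain_alloc(&ca, sizeof(struct rtree_node));
     *	...
     *	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
     */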
 295
 296/**
 297 * Data types related to memory bitmaps.
 298 *
 299 * Memory bitmap is a structure consisting of many linked lists of
 300 * objects.  The main list's elements are of type struct zone_bitmap
 301 * and each of them corresponds to one zone.  For each zone bitmap
 302 * object there is a list of objects of type struct bm_block that
 303 * represent the blocks of the bitmap in which information is stored.
 304 *
 305 * struct memory_bitmap contains a pointer to the main list of zone
 306 * bitmap objects, a struct bm_position used for browsing the bitmap,
 307 * and a pointer to the list of pages used for allocating all of the
 308 * zone bitmap objects and bitmap block objects.
 309 *
 310 * NOTE: It has to be possible to lay out the bitmap in memory
 311 * using only allocations of order 0.  Additionally, the bitmap is
 312 * designed to work with an arbitrary number of zones (this is over the
 313 * top for now, but let's avoid making unnecessary assumptions ;-).
 314 *
 315 * struct zone_bitmap contains a pointer to a list of bitmap block
 316 * objects and a pointer to the bitmap block object that has been
 317 * most recently used for setting bits.  Additionally, it contains the
 318 * PFNs that correspond to the start and end of the represented zone.
 319 *
 320 * struct bm_block contains a pointer to the memory page in which
 321 * information is stored (in the form of a block of bitmap)
 322 * It also contains the pfns that correspond to the start and end of
 323 * the represented memory area.
 324 *
 325 * The memory bitmap is organized as a radix tree to guarantee fast random
 326 * access to the bits. There is one radix tree for each zone (as returned
 327 * from create_mem_extents).
 328 *
 329 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 330 * two linked lists for the nodes of the tree, one for the inner nodes and
 331 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 332 * access of the memory bitmap.
 333 *
 334 * The struct rtree_node represents one node of the radix tree.
 335 */
 336
 337#define BM_END_OF_MAP	(~0UL)
 338
 339#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
 340#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
 341#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
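    /*
     * With 4 KiB pages each bitmap block is a single page covering
     * BM_BITS_PER_BLOCK = 32768 page frames, so memory_bm_find_bit()
     * below locates a PFN within its zone as:
     *
     *	block = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
     *	bit   = (pfn - zone->start_pfn) &  BM_BLOCK_MASK;
     */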
 342
 343/*
 344 * struct rtree_node is a wrapper struct to link the nodes
 345 * of the rtree together for easy linear iteration over
 346 * bits and easy freeing
 347 */
 348struct rtree_node {
 349	struct list_head list;
 350	unsigned long *data;
 351};
 352
 353/*
 354 * struct mem_zone_bm_rtree represents a bitmap used for one
 355 * populated memory zone.
 356 */
 357struct mem_zone_bm_rtree {
 358	struct list_head list;		/* Link Zones together         */
 359	struct list_head nodes;		/* Radix Tree inner nodes      */
 360	struct list_head leaves;	/* Radix Tree leaves           */
 361	unsigned long start_pfn;	/* Zone start page frame       */
 362	unsigned long end_pfn;		/* Zone end page frame + 1     */
 363	struct rtree_node *rtree;	/* Radix Tree Root             */
 364	int levels;			/* Number of Radix Tree Levels */
 365	unsigned int blocks;		/* Number of Bitmap Blocks     */
 366};
 367
 368/* struct bm_position is used for browsing memory bitmaps */
 369
 370struct bm_position {
 371	struct mem_zone_bm_rtree *zone;
 372	struct rtree_node *node;
 373	unsigned long node_pfn;
 374	int node_bit;
 375};
 376
 377struct memory_bitmap {
 378	struct list_head zones;
 379	struct linked_page *p_list;	/* list of pages used to store zone
 380					   bitmap objects and bitmap block
 381					   objects */
 382	struct bm_position cur;	/* most recently used bit position */
 383};
 384
 385/* Functions that operate on memory bitmaps */
 386
 387#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
 388#if BITS_PER_LONG == 32
 389#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
 390#else
 391#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
 392#endif
 393#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
 394
 395/**
 396 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 397 *
 398 * This function is used to allocate inner nodes as well as the
 399 * leaf nodes of the radix tree. It also adds the node to the
 400 * corresponding linked list passed in by the *list parameter.
 401 */
 402static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
 403					   struct chain_allocator *ca,
 404					   struct list_head *list)
 405{
 406	struct rtree_node *node;
 407
 408	node = chain_alloc(ca, sizeof(struct rtree_node));
 409	if (!node)
 410		return NULL;
 411
 412	node->data = get_image_page(gfp_mask, safe_needed);
 413	if (!node->data)
 414		return NULL;
 415
 416	list_add_tail(&node->list, list);
 417
 418	return node;
 419}
 420
 421/**
 422 * add_rtree_block - Add a new leaf node to the radix tree.
 423 *
 424 * The leaf nodes need to be allocated in order to keep the leaves
 425 * linked list in order. This is guaranteed by the zone->blocks
 426 * counter.
 427 */
 428static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
 429			   int safe_needed, struct chain_allocator *ca)
 430{
 431	struct rtree_node *node, *block, **dst;
 432	unsigned int levels_needed, block_nr;
 433	int i;
 434
 435	block_nr = zone->blocks;
 436	levels_needed = 0;
 437
 438	/* How many levels do we need for this block nr? */
 439	while (block_nr) {
 440		levels_needed += 1;
 441		block_nr >>= BM_RTREE_LEVEL_SHIFT;
 442	}
 443
 444	/* Make sure the rtree has enough levels */
 445	for (i = zone->levels; i < levels_needed; i++) {
 446		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 447					&zone->nodes);
 448		if (!node)
 449			return -ENOMEM;
 450
 451		node->data[0] = (unsigned long)zone->rtree;
 452		zone->rtree = node;
 453		zone->levels += 1;
 454	}
 455
 456	/* Allocate new block */
 457	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
 458	if (!block)
 459		return -ENOMEM;
 460
 461	/* Now walk the rtree to insert the block */
 462	node = zone->rtree;
 463	dst = &zone->rtree;
 464	block_nr = zone->blocks;
 465	for (i = zone->levels; i > 0; i--) {
 466		int index;
 467
 468		if (!node) {
 469			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 470						&zone->nodes);
 471			if (!node)
 472				return -ENOMEM;
 473			*dst = node;
 474		}
 475
 476		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 477		index &= BM_RTREE_LEVEL_MASK;
 478		dst = (struct rtree_node **)&((*dst)->data[index]);
 479		node = *dst;
 480	}
 481
 482	zone->blocks += 1;
 483	*dst = block;
 484
 485	return 0;
 486}
 487
 488static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 489			       int clear_nosave_free);
 490
 491/**
 492 * create_zone_bm_rtree - Create a radix tree for one zone.
 493 *
 494 * Allocates the mem_zone_bm_rtree structure and initializes it.
 495 * This function also allocates and builds the radix tree for the
 496 * zone.
 497 */
 498static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
 499						      int safe_needed,
 500						      struct chain_allocator *ca,
 501						      unsigned long start,
 502						      unsigned long end)
 503{
 504	struct mem_zone_bm_rtree *zone;
 505	unsigned int i, nr_blocks;
 506	unsigned long pages;
 507
 508	pages = end - start;
 509	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
 510	if (!zone)
 511		return NULL;
 512
 513	INIT_LIST_HEAD(&zone->nodes);
 514	INIT_LIST_HEAD(&zone->leaves);
 515	zone->start_pfn = start;
 516	zone->end_pfn = end;
 517	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 518
 519	for (i = 0; i < nr_blocks; i++) {
 520		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
 521			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
 522			return NULL;
 523		}
 524	}
 525
 526	return zone;
 527}
 528
 529/**
 530 * free_zone_bm_rtree - Free the memory of the radix tree.
 531 *
 532 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 533 * structure itself is not freed here nor are the rtree_node
 534 * structs.
 535 */
 536static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 537			       int clear_nosave_free)
 538{
 539	struct rtree_node *node;
 540
 541	list_for_each_entry(node, &zone->nodes, list)
 542		free_image_page(node->data, clear_nosave_free);
 543
 544	list_for_each_entry(node, &zone->leaves, list)
 545		free_image_page(node->data, clear_nosave_free);
 546}
 547
 548static void memory_bm_position_reset(struct memory_bitmap *bm)
 549{
 550	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
 551				  list);
 552	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 553				  struct rtree_node, list);
 554	bm->cur.node_pfn = 0;
 555	bm->cur.node_bit = 0;
 556}
 557
 558static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 559
 560struct mem_extent {
 561	struct list_head hook;
 562	unsigned long start;
 563	unsigned long end;
 564};
 565
 566/**
 567 * free_mem_extents - Free a list of memory extents.
 568 * @list: List of extents to free.
 569 */
 570static void free_mem_extents(struct list_head *list)
 571{
 572	struct mem_extent *ext, *aux;
 573
 574	list_for_each_entry_safe(ext, aux, list, hook) {
 575		list_del(&ext->hook);
 576		kfree(ext);
 577	}
 578}
 579
 580/**
 581 * create_mem_extents - Create a list of memory extents.
 582 * @list: List to put the extents into.
 583 * @gfp_mask: Mask to use for memory allocations.
 584 *
 585 * The extents represent contiguous ranges of PFNs.
 586 */
 587static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 588{
 589	struct zone *zone;
 590
 591	INIT_LIST_HEAD(list);
 592
 593	for_each_populated_zone(zone) {
 594		unsigned long zone_start, zone_end;
 595		struct mem_extent *ext, *cur, *aux;
 596
 597		zone_start = zone->zone_start_pfn;
 598		zone_end = zone_end_pfn(zone);
 599
 600		list_for_each_entry(ext, list, hook)
 601			if (zone_start <= ext->end)
 602				break;
 603
 604		if (&ext->hook == list || zone_end < ext->start) {
 605			/* New extent is necessary */
 606			struct mem_extent *new_ext;
 607
 608			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
 609			if (!new_ext) {
 610				free_mem_extents(list);
 611				return -ENOMEM;
 612			}
 613			new_ext->start = zone_start;
 614			new_ext->end = zone_end;
 615			list_add_tail(&new_ext->hook, &ext->hook);
 616			continue;
 617		}
 618
 619		/* Merge this zone's range of PFNs with the existing one */
 620		if (zone_start < ext->start)
 621			ext->start = zone_start;
 622		if (zone_end > ext->end)
 623			ext->end = zone_end;
 624
 625		/* More merging may be possible */
 626		cur = ext;
 627		list_for_each_entry_safe_continue(cur, aux, list, hook) {
 628			if (zone_end < cur->start)
 629				break;
 630			if (zone_end < cur->end)
 631				ext->end = cur->end;
 632			list_del(&cur->hook);
 633			kfree(cur);
 634		}
 635	}
 636
 637	return 0;
 638}
 639
 640/**
 641 * memory_bm_create - Allocate memory for a memory bitmap.
 642 */
 643static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
 644			    int safe_needed)
 645{
 646	struct chain_allocator ca;
 647	struct list_head mem_extents;
 648	struct mem_extent *ext;
 649	int error;
 650
 651	chain_init(&ca, gfp_mask, safe_needed);
 652	INIT_LIST_HEAD(&bm->zones);
 653
 654	error = create_mem_extents(&mem_extents, gfp_mask);
 655	if (error)
 656		return error;
 657
 658	list_for_each_entry(ext, &mem_extents, hook) {
 659		struct mem_zone_bm_rtree *zone;
 660
 661		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
 662					    ext->start, ext->end);
 663		if (!zone) {
 664			error = -ENOMEM;
 665			goto Error;
 666		}
 667		list_add_tail(&zone->list, &bm->zones);
 668	}
 669
 670	bm->p_list = ca.chain;
 671	memory_bm_position_reset(bm);
 672 Exit:
 673	free_mem_extents(&mem_extents);
 674	return error;
 675
 676 Error:
 677	bm->p_list = ca.chain;
 678	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 679	goto Exit;
 680}
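    /*
     * Typical life cycle of a bitmap (create_basic_memory_bitmaps()
     * below uses essentially this pattern):
     *
     *	struct memory_bitmap bm;
     *
     *	if (memory_bm_create(&bm, GFP_KERNEL, PG_ANY))
     *		return -ENOMEM;
     *	memory_bm_set_bit(&bm, pfn);
     *	...
     *	memory_bm_free(&bm, PG_UNSAFE_CLEAR);
     */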
 681
 682/**
 683 * memory_bm_free - Free memory occupied by the memory bitmap.
 684 * @bm: Memory bitmap.
 685 */
 686static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 687{
 688	struct mem_zone_bm_rtree *zone;
 689
 690	list_for_each_entry(zone, &bm->zones, list)
 691		free_zone_bm_rtree(zone, clear_nosave_free);
 692
 693	free_list_of_pages(bm->p_list, clear_nosave_free);
 694
 695	INIT_LIST_HEAD(&bm->zones);
 696}
 697
 698/**
 699 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 700 *
 701 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 702 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 703 *
 704 * Walk the radix tree to find the page containing the bit that represents @pfn
 705 * and return the position of the bit in @addr and @bit_nr.
 706 */
 707static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 708			      void **addr, unsigned int *bit_nr)
 709{
 710	struct mem_zone_bm_rtree *curr, *zone;
 711	struct rtree_node *node;
 712	int i, block_nr;
 713
 714	zone = bm->cur.zone;
 715
 716	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
 717		goto zone_found;
 718
 719	zone = NULL;
 720
 721	/* Find the right zone */
 722	list_for_each_entry(curr, &bm->zones, list) {
 723		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
 724			zone = curr;
 725			break;
 726		}
 727	}
 728
 729	if (!zone)
 730		return -EFAULT;
 731
 732zone_found:
 733	/*
 734	 * We have found the zone. Now walk the radix tree to find the leaf node
 735	 * for our PFN.
 736	 */
 737	node = bm->cur.node;
 738	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
 739		goto node_found;
 740
 741	node      = zone->rtree;
 742	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
 743
 744	for (i = zone->levels; i > 0; i--) {
 745		int index;
 746
 747		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 748		index &= BM_RTREE_LEVEL_MASK;
 749		BUG_ON(node->data[index] == 0);
 750		node = (struct rtree_node *)node->data[index];
 751	}
 752
 753node_found:
 754	/* Update last position */
 755	bm->cur.zone = zone;
 756	bm->cur.node = node;
 757	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
 758
 759	/* Set return values */
 760	*addr = node->data;
 761	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 762
 763	return 0;
 764}
 765
 766static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 767{
 768	void *addr;
 769	unsigned int bit;
 770	int error;
 771
 772	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 773	BUG_ON(error);
 774	set_bit(bit, addr);
 775}
 776
 777static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 778{
 779	void *addr;
 780	unsigned int bit;
 781	int error;
 782
 783	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 784	if (!error)
 785		set_bit(bit, addr);
 786
 787	return error;
 788}
 789
 790static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 791{
 792	void *addr;
 793	unsigned int bit;
 794	int error;
 795
 796	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 797	BUG_ON(error);
 798	clear_bit(bit, addr);
 799}
 800
 801static void memory_bm_clear_current(struct memory_bitmap *bm)
 802{
 803	int bit;
 804
 805	bit = max(bm->cur.node_bit - 1, 0);
 806	clear_bit(bit, bm->cur.node->data);
 807}
 808
 809static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 810{
 811	void *addr;
 812	unsigned int bit;
 813	int error;
 814
 815	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 816	BUG_ON(error);
 817	return test_bit(bit, addr);
 818}
 819
 820static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 821{
 822	void *addr;
 823	unsigned int bit;
 824
 825	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 826}
 827
 828/*
 829 * rtree_next_node - Jump to the next leaf node.
 830 *
 831 * Set the position to the beginning of the next node in the
 832 * memory bitmap. This is either the next node in the current
 833 * zone's radix tree or the first node in the radix tree of the
 834 * next zone.
 835 *
 836 * Return true if there is a next node, false otherwise.
 837 */
 838static bool rtree_next_node(struct memory_bitmap *bm)
 839{
 840	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
 841		bm->cur.node = list_entry(bm->cur.node->list.next,
 842					  struct rtree_node, list);
 843		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 844		bm->cur.node_bit  = 0;
 845		touch_softlockup_watchdog();
 846		return true;
 847	}
 848
 849	/* No more nodes, goto next zone */
 850	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
 851		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 852				  struct mem_zone_bm_rtree, list);
 853		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 854					  struct rtree_node, list);
 855		bm->cur.node_pfn = 0;
 856		bm->cur.node_bit = 0;
 857		return true;
 858	}
 859
 860	/* No more zones */
 861	return false;
 862}
 863
 864/**
 865 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
 866 * @bm: Memory bitmap.
 867 *
 868 * Starting from the last returned position this function searches for the next
 869 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 870 * set, BM_END_OF_MAP is returned.
 871 *
 872 * It is required to run memory_bm_position_reset() before the first call to
 873 * this function for the given memory bitmap.
 874 */
 875static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 876{
 877	unsigned long bits, pfn, pages;
 878	int bit;
 879
 880	do {
 881		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
 882		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
 883		bit	  = find_next_bit(bm->cur.node->data, bits,
 884					  bm->cur.node_bit);
 885		if (bit < bits) {
 886			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
 887			bm->cur.node_bit = bit + 1;
 888			return pfn;
 889		}
 890	} while (rtree_next_node(bm));
 891
 892	return BM_END_OF_MAP;
 893}
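    /*
     * Canonical walk over all set bits, as done (in while-loop form) by
     * clear_free_pages() and swsusp_free() below; process_pfn() stands
     * in for whatever the caller does with each PFN:
     *
     *	memory_bm_position_reset(bm);
     *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
     *	     pfn = memory_bm_next_pfn(bm))
     *		process_pfn(pfn);
     */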
 894
 895/*
 896 * This structure represents a range of page frames the contents of which
 897 * should not be saved during hibernation.
 898 */
 899struct nosave_region {
 900	struct list_head list;
 901	unsigned long start_pfn;
 902	unsigned long end_pfn;
 903};
 904
 905static LIST_HEAD(nosave_regions);
 906
 907static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
 908{
 909	struct rtree_node *node;
 910
 911	list_for_each_entry(node, &zone->nodes, list)
 912		recycle_safe_page(node->data);
 913
 914	list_for_each_entry(node, &zone->leaves, list)
 915		recycle_safe_page(node->data);
 916}
 917
 918static void memory_bm_recycle(struct memory_bitmap *bm)
 919{
 920	struct mem_zone_bm_rtree *zone;
 921	struct linked_page *p_list;
 922
 923	list_for_each_entry(zone, &bm->zones, list)
 924		recycle_zone_bm_rtree(zone);
 925
 926	p_list = bm->p_list;
 927	while (p_list) {
 928		struct linked_page *lp = p_list;
 929
 930		p_list = lp->next;
 931		recycle_safe_page(lp);
 932	}
 933}
 934
 935/**
 936 * register_nosave_region - Register a region of unsaveable memory.
 937 *
 938 * Register a range of page frames the contents of which should not be saved
 939 * during hibernation (to be used in the early initialization code).
 940 */
 941void __init __register_nosave_region(unsigned long start_pfn,
 942				     unsigned long end_pfn, int use_kmalloc)
 943{
 944	struct nosave_region *region;
 945
 946	if (start_pfn >= end_pfn)
 947		return;
 948
 949	if (!list_empty(&nosave_regions)) {
 950		/* Try to extend the previous region (they should be sorted) */
 951		region = list_entry(nosave_regions.prev,
 952					struct nosave_region, list);
 953		if (region->end_pfn == start_pfn) {
 954			region->end_pfn = end_pfn;
 955			goto Report;
 956		}
 957	}
 958	if (use_kmalloc) {
 959		/* During init, this shouldn't fail */
 960		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
 961		BUG_ON(!region);
 962	} else {
 963		/* This allocation cannot fail */
 964		region = memblock_alloc(sizeof(struct nosave_region),
 965					SMP_CACHE_BYTES);
 966		if (!region)
 967			panic("%s: Failed to allocate %zu bytes\n", __func__,
 968			      sizeof(struct nosave_region));
 969	}
 970	region->start_pfn = start_pfn;
 971	region->end_pfn = end_pfn;
 972	list_add_tail(&region->list, &nosave_regions);
 973 Report:
 974	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
 975		(unsigned long long) start_pfn << PAGE_SHIFT,
 976		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 977}
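    /*
     * Platform code normally reaches this through the wrappers declared
     * in <linux/suspend.h>: register_nosave_region() while memblock is
     * still the allocator and register_nosave_region_late() once
     * kmalloc() works.  E.g., to exclude a single page frame:
     *
     *	register_nosave_region(pfn, pfn + 1);
     */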
 978
 979/*
 980 * Set bits in this map correspond to the page frames the contents of which
 981 * should not be saved during the suspend.
 982 */
 983static struct memory_bitmap *forbidden_pages_map;
 984
 985/* Set bits in this map correspond to free page frames. */
 986static struct memory_bitmap *free_pages_map;
 987
 988/*
 989 * Each page frame allocated for creating the image is marked by setting the
 990 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 991 */
 992
 993void swsusp_set_page_free(struct page *page)
 994{
 995	if (free_pages_map)
 996		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
 997}
 998
 999static int swsusp_page_is_free(struct page *page)
1000{
1001	return free_pages_map ?
1002		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1003}
1004
1005void swsusp_unset_page_free(struct page *page)
1006{
1007	if (free_pages_map)
1008		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1009}
1010
1011static void swsusp_set_page_forbidden(struct page *page)
1012{
1013	if (forbidden_pages_map)
1014		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1015}
1016
1017int swsusp_page_is_forbidden(struct page *page)
1018{
1019	return forbidden_pages_map ?
1020		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1021}
1022
1023static void swsusp_unset_page_forbidden(struct page *page)
1024{
1025	if (forbidden_pages_map)
1026		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1027}
1028
1029/**
1030 * mark_nosave_pages - Mark pages that should not be saved.
1031 * @bm: Memory bitmap.
1032 *
1033 * Set the bits in @bm that correspond to the page frames the contents of which
1034 * should not be saved.
1035 */
1036static void mark_nosave_pages(struct memory_bitmap *bm)
1037{
1038	struct nosave_region *region;
1039
1040	if (list_empty(&nosave_regions))
1041		return;
1042
1043	list_for_each_entry(region, &nosave_regions, list) {
1044		unsigned long pfn;
1045
1046		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1047			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1048			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1049				- 1);
1050
1051		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1052			if (pfn_valid(pfn)) {
1053				/*
1054				 * It is safe to ignore the result of
1055				 * mem_bm_set_bit_check() here, since we won't
1056				 * touch the PFNs for which the error is
1057				 * returned anyway.
1058				 */
1059				mem_bm_set_bit_check(bm, pfn);
1060			}
1061	}
1062}
1063
1064/**
1065 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1066 *
1067 * Create bitmaps needed for marking page frames that should not be saved and
1068 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1069 * only modified if everything goes well, because we don't want the bits to be
1070 * touched before both bitmaps are set up.
1071 */
1072int create_basic_memory_bitmaps(void)
1073{
1074	struct memory_bitmap *bm1, *bm2;
1075	int error = 0;
1076
1077	if (forbidden_pages_map && free_pages_map)
1078		return 0;
1079	else
1080		BUG_ON(forbidden_pages_map || free_pages_map);
1081
1082	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1083	if (!bm1)
1084		return -ENOMEM;
1085
1086	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1087	if (error)
1088		goto Free_first_object;
1089
1090	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1091	if (!bm2)
1092		goto Free_first_bitmap;
1093
1094	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1095	if (error)
1096		goto Free_second_object;
1097
1098	forbidden_pages_map = bm1;
1099	free_pages_map = bm2;
1100	mark_nosave_pages(forbidden_pages_map);
1101
1102	pr_debug("Basic memory bitmaps created\n");
1103
1104	return 0;
1105
1106 Free_second_object:
1107	kfree(bm2);
1108 Free_first_bitmap:
1109 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1110 Free_first_object:
1111	kfree(bm1);
1112	return -ENOMEM;
1113}
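    /*
     * Pairing sketch: callers such as the /dev/snapshot interface
     * bracket a hibernation cycle with
     *
     *	error = create_basic_memory_bitmaps();
     *	...
     *	free_basic_memory_bitmaps();
     */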
1114
1115/**
1116 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1117 *
1118 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1119 * auxiliary pointers are necessary so that the bitmaps themselves are not
1120 * referred to while they are being freed.
1121 */
1122void free_basic_memory_bitmaps(void)
1123{
1124	struct memory_bitmap *bm1, *bm2;
1125
1126	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1127		return;
1128
1129	bm1 = forbidden_pages_map;
1130	bm2 = free_pages_map;
1131	forbidden_pages_map = NULL;
1132	free_pages_map = NULL;
1133	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1134	kfree(bm1);
1135	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1136	kfree(bm2);
1137
1138	pr_debug("Basic memory bitmaps freed\n");
1139}
1140
1141void clear_free_pages(void)
1142{
1143#ifdef CONFIG_PAGE_POISONING_ZERO
1144	struct memory_bitmap *bm = free_pages_map;
1145	unsigned long pfn;
1146
1147	if (WARN_ON(!(free_pages_map)))
1148		return;
1149
1150	memory_bm_position_reset(bm);
1151	pfn = memory_bm_next_pfn(bm);
1152	while (pfn != BM_END_OF_MAP) {
1153		if (pfn_valid(pfn))
1154			clear_highpage(pfn_to_page(pfn));
1155
1156		pfn = memory_bm_next_pfn(bm);
1157	}
1158	memory_bm_position_reset(bm);
1159	pr_info("free pages cleared after restore\n");
1160#endif /* PAGE_POISONING_ZERO */
1161}
1162
1163/**
1164 * snapshot_additional_pages - Estimate the number of extra pages needed.
1165 * @zone: Memory zone to carry out the computation for.
1166 *
1167 * Estimate the number of additional pages needed for setting up a hibernation
1168 * image data structures for @zone (usually, the returned value is greater than
1169 * the exact number).
1170 */
1171unsigned int snapshot_additional_pages(struct zone *zone)
1172{
1173	unsigned int rtree, nodes;
1174
1175	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1176	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1177			      LINKED_PAGE_DATA_SIZE);
1178	while (nodes > 1) {
1179		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1180		rtree += nodes;
1181	}
1182
1183	return 2 * rtree;
1184}
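    /*
     * Worked example, assuming 64-bit and 4 KiB pages: a zone spanning
     * 2^20 page frames (4 GiB) needs DIV_ROUND_UP(2^20, 32768) = 32
     * bitmap pages, one linked page for the 32 struct rtree_node
     * objects (32 * 24 = 768 bytes) and one page for the single inner
     * tree level (DIV_ROUND_UP(32, 512) = 1), i.e. rtree = 34.  The
     * result is doubled because two such bitmaps (orig_bm and copy_bm)
     * will span the zone, giving 68 pages.
     */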
1185
1186#ifdef CONFIG_HIGHMEM
1187/**
1188 * count_free_highmem_pages - Compute the total number of free highmem pages.
1189 *
1190 * The returned number is system-wide.
1191 */
1192static unsigned int count_free_highmem_pages(void)
1193{
1194	struct zone *zone;
1195	unsigned int cnt = 0;
1196
1197	for_each_populated_zone(zone)
1198		if (is_highmem(zone))
1199			cnt += zone_page_state(zone, NR_FREE_PAGES);
1200
1201	return cnt;
1202}
1203
1204/**
1205 * saveable_highmem_page - Check if a highmem page is saveable.
1206 *
1207 * Determine whether a highmem page should be included in a hibernation image.
1208 *
1209 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
1210 * and it isn't part of a free chunk of pages.
1211 */
1212static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1213{
1214	struct page *page;
1215
1216	if (!pfn_valid(pfn))
1217		return NULL;
1218
1219	page = pfn_to_online_page(pfn);
1220	if (!page || page_zone(page) != zone)
1221		return NULL;
1222
1223	BUG_ON(!PageHighMem(page));
1224
1225	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page))
1226		return NULL;
1227
1228	if (PageReserved(page) || PageOffline(page))
1229		return NULL;
1230
1231	if (page_is_guard(page))
1232		return NULL;
1233
1234	return page;
1235}
1236
1237/**
1238 * count_highmem_pages - Compute the total number of saveable highmem pages.
1239 */
1240static unsigned int count_highmem_pages(void)
1241{
1242	struct zone *zone;
1243	unsigned int n = 0;
1244
1245	for_each_populated_zone(zone) {
1246		unsigned long pfn, max_zone_pfn;
1247
1248		if (!is_highmem(zone))
1249			continue;
1250
1251		mark_free_pages(zone);
1252		max_zone_pfn = zone_end_pfn(zone);
1253		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1254			if (saveable_highmem_page(zone, pfn))
1255				n++;
1256	}
1257	return n;
1258}
1259#else
1260static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1261{
1262	return NULL;
1263}
1264#endif /* CONFIG_HIGHMEM */
1265
1266/**
1267 * saveable_page - Check if the given page is saveable.
1268 *
1269 * Determine whether a non-highmem page should be included in a hibernation
1270 * image.
1271 *
1272 * We should save the page if it isn't Nosave, and is not in the range
1273 * of pages statically defined as 'unsaveable', and it isn't part of
1274 * a free chunk of pages.
1275 */
1276static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1277{
1278	struct page *page;
1279
1280	if (!pfn_valid(pfn))
1281		return NULL;
1282
1283	page = pfn_to_online_page(pfn);
1284	if (!page || page_zone(page) != zone)
1285		return NULL;
1286
1287	BUG_ON(PageHighMem(page));
1288
1289	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1290		return NULL;
1291
1292	if (PageOffline(page))
1293		return NULL;
1294
1295	if (PageReserved(page)
1296	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1297		return NULL;
1298
1299	if (page_is_guard(page))
1300		return NULL;
1301
1302	return page;
1303}
1304
1305/**
1306 * count_data_pages - Compute the total number of saveable non-highmem pages.
1307 */
1308static unsigned int count_data_pages(void)
1309{
1310	struct zone *zone;
1311	unsigned long pfn, max_zone_pfn;
1312	unsigned int n = 0;
1313
1314	for_each_populated_zone(zone) {
1315		if (is_highmem(zone))
1316			continue;
1317
1318		mark_free_pages(zone);
1319		max_zone_pfn = zone_end_pfn(zone);
1320		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1321			if (saveable_page(zone, pfn))
1322				n++;
1323	}
1324	return n;
1325}
1326
1327/*
1328 * This is needed because copy_page and memcpy are not usable for copying
1329 * task structs.
1330 */
1331static inline void do_copy_page(long *dst, long *src)
1332{
1333	int n;
1334
1335	for (n = PAGE_SIZE / sizeof(long); n; n--)
1336		*dst++ = *src++;
1337}
1338
1339/**
1340 * safe_copy_page - Copy a page in a safe way.
1341 *
1342 * Check if the page we are going to copy is marked as present in the kernel
1343 * page tables. This always is the case if CONFIG_DEBUG_PAGEALLOC or
1344 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
1345 * always returns 'true'.
1346 */
1347static void safe_copy_page(void *dst, struct page *s_page)
1348{
1349	if (kernel_page_present(s_page)) {
1350		do_copy_page(dst, page_address(s_page));
1351	} else {
1352		kernel_map_pages(s_page, 1, 1);
1353		do_copy_page(dst, page_address(s_page));
1354		kernel_map_pages(s_page, 1, 0);
1355	}
1356}
1357
1358#ifdef CONFIG_HIGHMEM
1359static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1360{
1361	return is_highmem(zone) ?
1362		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1363}
1364
1365static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1366{
1367	struct page *s_page, *d_page;
1368	void *src, *dst;
1369
1370	s_page = pfn_to_page(src_pfn);
1371	d_page = pfn_to_page(dst_pfn);
1372	if (PageHighMem(s_page)) {
1373		src = kmap_atomic(s_page);
1374		dst = kmap_atomic(d_page);
1375		do_copy_page(dst, src);
1376		kunmap_atomic(dst);
1377		kunmap_atomic(src);
1378	} else {
1379		if (PageHighMem(d_page)) {
1380			/*
1381			 * The page pointed to by src may contain some kernel
1382			 * data modified by kmap_atomic()
1383			 */
1384			safe_copy_page(buffer, s_page);
1385			dst = kmap_atomic(d_page);
1386			copy_page(dst, buffer);
1387			kunmap_atomic(dst);
1388		} else {
1389			safe_copy_page(page_address(d_page), s_page);
1390		}
1391	}
1392}
1393#else
1394#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1395
1396static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1397{
1398	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1399				pfn_to_page(src_pfn));
1400}
1401#endif /* CONFIG_HIGHMEM */
1402
1403static void copy_data_pages(struct memory_bitmap *copy_bm,
1404			    struct memory_bitmap *orig_bm)
1405{
1406	struct zone *zone;
1407	unsigned long pfn;
1408
1409	for_each_populated_zone(zone) {
1410		unsigned long max_zone_pfn;
1411
1412		mark_free_pages(zone);
1413		max_zone_pfn = zone_end_pfn(zone);
1414		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1415			if (page_is_saveable(zone, pfn))
1416				memory_bm_set_bit(orig_bm, pfn);
1417	}
1418	memory_bm_position_reset(orig_bm);
1419	memory_bm_position_reset(copy_bm);
1420	for(;;) {
1421		pfn = memory_bm_next_pfn(orig_bm);
1422		if (unlikely(pfn == BM_END_OF_MAP))
1423			break;
1424		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1425	}
1426}
1427
1428/* Total number of image pages */
1429static unsigned int nr_copy_pages;
1430/* Number of pages needed for saving the original pfns of the image pages */
1431static unsigned int nr_meta_pages;
1432/*
1433 * Numbers of normal and highmem page frames allocated for hibernation image
1434 * before suspending devices.
1435 */
1436static unsigned int alloc_normal, alloc_highmem;
1437/*
1438 * Memory bitmap used for marking saveable pages (during hibernation) or
1439 * hibernation image pages (during restore)
1440 */
1441static struct memory_bitmap orig_bm;
1442/*
1443 * Memory bitmap used during hibernation for marking allocated page frames that
1444 * will contain copies of saveable pages.  During restore it is initially used
1445 * for marking hibernation image pages, but then the set bits from it are
1446 * duplicated in @orig_bm and it is released.  On highmem systems it is next
1447 * used for marking "safe" highmem pages, but it has to be reinitialized for
1448 * this purpose.
1449 */
1450static struct memory_bitmap copy_bm;
1451
1452/**
1453 * swsusp_free - Free pages allocated for hibernation image.
1454 *
1455 * Image pages are allocated before snapshot creation, so they need to be
1456 * released after resume.
1457 */
1458void swsusp_free(void)
1459{
1460	unsigned long fb_pfn, fr_pfn;
1461
1462	if (!forbidden_pages_map || !free_pages_map)
1463		goto out;
1464
1465	memory_bm_position_reset(forbidden_pages_map);
1466	memory_bm_position_reset(free_pages_map);
1467
1468loop:
1469	fr_pfn = memory_bm_next_pfn(free_pages_map);
1470	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1471
1472	/*
1473	 * Find the next bit set in both bitmaps. This is guaranteed to
1474	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1475	 */
1476	do {
1477		if (fb_pfn < fr_pfn)
1478			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1479		if (fr_pfn < fb_pfn)
1480			fr_pfn = memory_bm_next_pfn(free_pages_map);
1481	} while (fb_pfn != fr_pfn);
1482
1483	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1484		struct page *page = pfn_to_page(fr_pfn);
1485
1486		memory_bm_clear_current(forbidden_pages_map);
1487		memory_bm_clear_current(free_pages_map);
1488		hibernate_restore_unprotect_page(page_address(page));
1489		__free_page(page);
1490		goto loop;
1491	}
1492
1493out:
1494	nr_copy_pages = 0;
1495	nr_meta_pages = 0;
1496	restore_pblist = NULL;
1497	buffer = NULL;
1498	alloc_normal = 0;
1499	alloc_highmem = 0;
1500	hibernate_restore_protection_end();
1501}
1502
1503/* Helper functions used for the shrinking of memory. */
1504
1505#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1506
1507/**
1508 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1509 * @nr_pages: Number of page frames to allocate.
1510 * @mask: GFP flags to use for the allocation.
1511 *
1512 * Return value: Number of page frames actually allocated
1513 */
1514static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1515{
1516	unsigned long nr_alloc = 0;
1517
1518	while (nr_pages > 0) {
1519		struct page *page;
1520
1521		page = alloc_image_page(mask);
1522		if (!page)
1523			break;
1524		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1525		if (PageHighMem(page))
1526			alloc_highmem++;
1527		else
1528			alloc_normal++;
1529		nr_pages--;
1530		nr_alloc++;
1531	}
1532
1533	return nr_alloc;
1534}
1535
1536static unsigned long preallocate_image_memory(unsigned long nr_pages,
1537					      unsigned long avail_normal)
1538{
1539	unsigned long alloc;
1540
1541	if (avail_normal <= alloc_normal)
1542		return 0;
1543
1544	alloc = avail_normal - alloc_normal;
1545	if (nr_pages < alloc)
1546		alloc = nr_pages;
1547
1548	return preallocate_image_pages(alloc, GFP_IMAGE);
1549}
1550
1551#ifdef CONFIG_HIGHMEM
1552static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1553{
1554	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1555}
1556
1557/**
1558 *  __fraction - Compute (an approximation of) x * (multiplier / base).
1559 */
1560static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1561{
1562	x *= multiplier;
1563	do_div(x, base);
1564	return (unsigned long)x;
1565}
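    /*
     * E.g. __fraction(1000, 3, 8) == 375, whereas naive integer math
     * computing 1000 * (3 / 8) would truncate to 0 - hence multiply
     * first, then divide in 64 bits to avoid overflow.
     */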
1566
1567static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1568						  unsigned long highmem,
1569						  unsigned long total)
1570{
1571	unsigned long alloc = __fraction(nr_pages, highmem, total);
1572
1573	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1574}
1575#else /* CONFIG_HIGHMEM */
1576static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1577{
1578	return 0;
1579}
1580
1581static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1582							 unsigned long highmem,
1583							 unsigned long total)
1584{
1585	return 0;
1586}
1587#endif /* CONFIG_HIGHMEM */
1588
1589/**
1590 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1591 */
1592static unsigned long free_unnecessary_pages(void)
1593{
1594	unsigned long save, to_free_normal, to_free_highmem, free;
1595
1596	save = count_data_pages();
1597	if (alloc_normal >= save) {
1598		to_free_normal = alloc_normal - save;
1599		save = 0;
1600	} else {
1601		to_free_normal = 0;
1602		save -= alloc_normal;
1603	}
1604	save += count_highmem_pages();
1605	if (alloc_highmem >= save) {
1606		to_free_highmem = alloc_highmem - save;
1607	} else {
1608		to_free_highmem = 0;
1609		save -= alloc_highmem;
1610		if (to_free_normal > save)
1611			to_free_normal -= save;
1612		else
1613			to_free_normal = 0;
1614	}
1615	free = to_free_normal + to_free_highmem;
1616
1617	memory_bm_position_reset(&copy_bm);
1618
1619	while (to_free_normal > 0 || to_free_highmem > 0) {
1620		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1621		struct page *page = pfn_to_page(pfn);
1622
1623		if (PageHighMem(page)) {
1624			if (!to_free_highmem)
1625				continue;
1626			to_free_highmem--;
1627			alloc_highmem--;
1628		} else {
1629			if (!to_free_normal)
1630				continue;
1631			to_free_normal--;
1632			alloc_normal--;
1633		}
1634		memory_bm_clear_bit(&copy_bm, pfn);
1635		swsusp_unset_page_forbidden(page);
1636		swsusp_unset_page_free(page);
1637		__free_page(page);
1638	}
1639
1640	return free;
1641}
1642
1643/**
1644 * minimum_image_size - Estimate the minimum acceptable size of an image.
1645 * @saveable: Number of saveable pages in the system.
1646 *
1647 * We want to avoid attempting to free too much memory too hard, so estimate the
1648 * minimum acceptable size of a hibernation image to use as the lower limit for
1649 * preallocating memory.
1650 *
1651 * We assume that the minimum image size should be proportional to
1652 *
1653 * [number of saveable pages] - [number of pages that can be freed in theory]
1654 *
1655 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1656 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1657 */
1658static unsigned long minimum_image_size(unsigned long saveable)
1659{
1660	unsigned long size;
1661
1662	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
1663		+ global_node_page_state(NR_ACTIVE_ANON)
1664		+ global_node_page_state(NR_INACTIVE_ANON)
1665		+ global_node_page_state(NR_ACTIVE_FILE)
1666		+ global_node_page_state(NR_INACTIVE_FILE);
1667
1668	return saveable <= size ? 0 : saveable - size;
1669}
1670
1671/**
1672 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1673 *
1674 * To create a hibernation image it is necessary to make a copy of every page
1675 * frame in use.  We also need a number of page frames to be free during
1676 * hibernation for allocations made while saving the image and for device
1677 * drivers, in case they need to allocate memory from their hibernation
1678 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1679 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1680 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1681 * total number of available page frames and allocate at least
1682 *
1683 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1684 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1685 *
1686 * of them, which corresponds to the maximum size of a hibernation image.
1687 *
1688 * If image_size is set below the number following from the above formula,
1689 * the preallocation of memory is continued until the total number of saveable
1690 * pages in the system is below the requested image size or the minimum
1691 * acceptable image size returned by minimum_image_size(), whichever is greater.
1692 */
1693int hibernate_preallocate_memory(void)
1694{
1695	struct zone *zone;
1696	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1697	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1698	ktime_t start, stop;
1699	int error;
1700
1701	pr_info("Preallocating image memory... ");
1702	start = ktime_get();
1703
1704	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1705	if (error)
1706		goto err_out;
1707
1708	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1709	if (error)
1710		goto err_out;
1711
1712	alloc_normal = 0;
1713	alloc_highmem = 0;
1714
1715	/* Count the number of saveable data pages. */
1716	save_highmem = count_highmem_pages();
1717	saveable = count_data_pages();
1718
1719	/*
1720	 * Compute the total number of page frames we can use (count) and the
1721	 * number of pages needed for image metadata (size).
1722	 */
1723	count = saveable;
1724	saveable += save_highmem;
1725	highmem = save_highmem;
1726	size = 0;
1727	for_each_populated_zone(zone) {
1728		size += snapshot_additional_pages(zone);
1729		if (is_highmem(zone))
1730			highmem += zone_page_state(zone, NR_FREE_PAGES);
1731		else
1732			count += zone_page_state(zone, NR_FREE_PAGES);
1733	}
1734	avail_normal = count;
1735	count += highmem;
1736	count -= totalreserve_pages;
1737
1738	/* Add number of pages required for page keys (s390 only). */
1739	size += page_key_additional_pages(saveable);
1740
1741	/* Compute the maximum number of saveable pages to leave in memory. */
1742	max_size = (count - (size + PAGES_FOR_IO)) / 2
1743			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1744	/* Compute the desired number of image pages specified by image_size. */
1745	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1746	if (size > max_size)
1747		size = max_size;
1748	/*
1749	 * If the desired number of image pages is at least as large as the
1750	 * current number of saveable pages in memory, allocate page frames for
1751	 * the image and we're done.
1752	 */
1753	if (size >= saveable) {
1754		pages = preallocate_image_highmem(save_highmem);
1755		pages += preallocate_image_memory(saveable - pages, avail_normal);
1756		goto out;
1757	}
1758
1759	/* Estimate the minimum size of the image. */
1760	pages = minimum_image_size(saveable);
1761	/*
1762	 * To avoid excessive pressure on the normal zone, leave room in it to
1763	 * accommodate an image of the minimum size (unless it's already too
1764	 * small, in which case don't preallocate pages from it at all).
1765	 */
1766	if (avail_normal > pages)
1767		avail_normal -= pages;
1768	else
1769		avail_normal = 0;
1770	if (size < pages)
1771		size = min_t(unsigned long, pages, max_size);
1772
1773	/*
1774	 * Let the memory management subsystem know that we're going to need a
1775	 * large number of page frames to allocate and make it free some memory.
1776	 * NOTE: If this is not done, performance will be hurt badly in some
1777	 * test cases.
1778	 */
1779	shrink_all_memory(saveable - size);
1780
1781	/*
1782	 * The number of saveable pages in memory was too high, so apply some
1783	 * pressure to decrease it.  First, make room for the largest possible
1784	 * image and fail if that doesn't work.  Next, try to decrease the size
1785	 * of the image as much as indicated by 'size' using allocations from
1786	 * highmem and non-highmem zones separately.
1787	 */
1788	pages_highmem = preallocate_image_highmem(highmem / 2);
1789	alloc = count - max_size;
1790	if (alloc > pages_highmem)
1791		alloc -= pages_highmem;
1792	else
1793		alloc = 0;
1794	pages = preallocate_image_memory(alloc, avail_normal);
1795	if (pages < alloc) {
1796		/* We have exhausted non-highmem pages, try highmem. */
1797		alloc -= pages;
1798		pages += pages_highmem;
1799		pages_highmem = preallocate_image_highmem(alloc);
1800		if (pages_highmem < alloc)
1801			goto err_out;
1802		pages += pages_highmem;
1803		/*
1804		 * size is the desired number of saveable pages to leave in
1805		 * memory, so try to preallocate (all memory - size) pages.
1806		 */
1807		alloc = (count - pages) - size;
1808		pages += preallocate_image_highmem(alloc);
1809	} else {
1810		/*
1811		 * There are approximately max_size saveable pages at this point
1812		 * and we want to reduce this number down to size.
1813		 */
1814		alloc = max_size - size;
1815		size = preallocate_highmem_fraction(alloc, highmem, count);
1816		pages_highmem += size;
1817		alloc -= size;
1818		size = preallocate_image_memory(alloc, avail_normal);
1819		pages_highmem += preallocate_image_highmem(alloc - size);
1820		pages += pages_highmem + size;
1821	}
1822
1823	/*
1824	 * We only need as many page frames for the image as there are saveable
1825	 * pages in memory, but we have allocated more.  Release the excessive
1826	 * ones now.
1827	 */
1828	pages -= free_unnecessary_pages();
1829
1830 out:
1831	stop = ktime_get();
1832	pr_cont("done (allocated %lu pages)\n", pages);
1833	swsusp_show_speed(start, stop, pages, "Allocated");
1834
1835	return 0;
1836
1837 err_out:
1838	pr_cont("\n");
1839	swsusp_free();
1840	return -ENOMEM;
1841}
1842
1843#ifdef CONFIG_HIGHMEM
1844/**
1845 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1846 *
1847 * Compute the number of non-highmem pages that will be necessary for creating
1848 * copies of highmem pages.
1849 */
1850static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1851{
1852	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1853
1854	if (free_highmem >= nr_highmem)
1855		nr_highmem = 0;
1856	else
1857		nr_highmem -= free_highmem;
1858
1859	return nr_highmem;
1860}
1861#else
1862static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1863#endif /* CONFIG_HIGHMEM */
1864
1865/**
1866 * enough_free_mem - Check if there is enough free memory for the image.
1867 */
1868static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1869{
1870	struct zone *zone;
1871	unsigned int free = alloc_normal;
1872
1873	for_each_populated_zone(zone)
1874		if (!is_highmem(zone))
1875			free += zone_page_state(zone, NR_FREE_PAGES);
1876
1877	nr_pages += count_pages_for_highmem(nr_highmem);
1878	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1879		 nr_pages, PAGES_FOR_IO, free);
1880
1881	return free > nr_pages + PAGES_FOR_IO;
1882}
1883
1884#ifdef CONFIG_HIGHMEM
1885/**
1886 * get_highmem_buffer - Allocate a buffer for highmem pages.
1887 *
1888 * If there are some highmem pages in the hibernation image, we may need a
1889 * buffer to copy them and/or load their data.
1890 */
1891static inline int get_highmem_buffer(int safe_needed)
1892{
1893	buffer = get_image_page(GFP_ATOMIC, safe_needed);
1894	return buffer ? 0 : -ENOMEM;
1895}
1896
1897/**
1898 * alloc_highmem_pages - Allocate some highmem pages for the image.
1899 *
1900 * Try to allocate as many pages as needed, but if the number of free highmem
1901 * pages is less than that, allocate them all.
1902 */
1903static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1904					       unsigned int nr_highmem)
1905{
1906	unsigned int to_alloc = count_free_highmem_pages();
1907
1908	if (to_alloc > nr_highmem)
1909		to_alloc = nr_highmem;
1910
1911	nr_highmem -= to_alloc;
1912	while (to_alloc-- > 0) {
1913		struct page *page;
1914
1915		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1916		memory_bm_set_bit(bm, page_to_pfn(page));
1917	}
1918	return nr_highmem;
1919}
1920#else
1921static inline int get_highmem_buffer(int safe_needed) { return 0; }
1922
1923static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1924					       unsigned int n) { return 0; }
1925#endif /* CONFIG_HIGHMEM */
1926
1927/**
1928 * swsusp_alloc - Allocate memory for hibernation image.
1929 *
1930 * We first try to allocate as many highmem pages as there are
1931 * saveable highmem pages in the system.  If that fails, we allocate
1932 * non-highmem pages for the copies of the remaining highmem ones.
1933 *
1934 * In this approach it is likely that the copies of highmem pages will
1935 * also be located in high memory, because of the way in which
1936 * copy_data_pages() works.
1937 */
1938static int swsusp_alloc(struct memory_bitmap *copy_bm,
1939			unsigned int nr_pages, unsigned int nr_highmem)
1940{
1941	if (nr_highmem > 0) {
1942		if (get_highmem_buffer(PG_ANY))
1943			goto err_out;
1944		if (nr_highmem > alloc_highmem) {
1945			nr_highmem -= alloc_highmem;
1946			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1947		}
1948	}
1949	if (nr_pages > alloc_normal) {
1950		nr_pages -= alloc_normal;
1951		while (nr_pages-- > 0) {
1952			struct page *page;
1953
1954			page = alloc_image_page(GFP_ATOMIC);
1955			if (!page)
1956				goto err_out;
1957			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1958		}
1959	}
1960
1961	return 0;
1962
1963 err_out:
1964	swsusp_free();
1965	return -ENOMEM;
1966}
1967
1968asmlinkage __visible int swsusp_save(void)
1969{
1970	unsigned int nr_pages, nr_highmem;
1971
1972	pr_info("Creating hibernation image:\n");
1973
1974	drain_local_pages(NULL);
1975	nr_pages = count_data_pages();
1976	nr_highmem = count_highmem_pages();
1977	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1978
1979	if (!enough_free_mem(nr_pages, nr_highmem)) {
1980		pr_err("Not enough free memory\n");
1981		return -ENOMEM;
1982	}
1983
1984	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1985		pr_err("Memory allocation failed\n");
1986		return -ENOMEM;
1987	}
1988
1989	/*
1990	 * During allocation of the suspend pagedir, new cold pages may
1991	 * appear.  Drain them.
1992	 */
1993	drain_local_pages(NULL);
1994	copy_data_pages(&copy_bm, &orig_bm);
1995
1996	/*
1997	 * End of critical section. From now on, we can write to memory,
1998	 * but we should not touch disk.  This especially means we must _not_
1999	 * touch swap space, except of course to write out our image.
2000	 */
2001
2002	nr_pages += nr_highmem;
2003	nr_copy_pages = nr_pages;
2004	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2005
2006	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
2007
2008	return 0;
2009}
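/*
 * Sanity check of the nr_meta_pages arithmetic above, assuming 4 KiB
 * pages and 64-bit longs (other configurations scale accordingly):
 * each meta page holds PAGE_SIZE / sizeof(long) = 512 PFNs, so an
 * image of 262144 data pages (1 GiB) needs
 * DIV_ROUND_UP(262144 * 8, 4096) = 512 meta pages.
 */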
2010
2011#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2012static int init_header_complete(struct swsusp_info *info)
2013{
2014	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2015	info->version_code = LINUX_VERSION_CODE;
2016	return 0;
2017}
2018
2019static char *check_image_kernel(struct swsusp_info *info)
2020{
2021	if (info->version_code != LINUX_VERSION_CODE)
2022		return "kernel version";
2023	if (strcmp(info->uts.sysname, init_utsname()->sysname))
2024		return "system type";
2025	if (strcmp(info->uts.release, init_utsname()->release))
2026		return "kernel release";
2027	if (strcmp(info->uts.version, init_utsname()->version))
2028		return "version";
2029	if (strcmp(info->uts.machine, init_utsname()->machine))
2030		return "machine";
2031	return NULL;
2032}
2033#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2034
2035unsigned long snapshot_get_image_size(void)
2036{
2037	return nr_copy_pages + nr_meta_pages + 1;
2038}
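/*
 * The sum above reflects the image layout, first page to last: one
 * header page (struct swsusp_info), nr_meta_pages pages of packed
 * PFNs, then nr_copy_pages pages of data.  For the hypothetical 1 GiB
 * image used as an example above (4 KiB pages, 64-bit longs), that is
 * 1 + 512 + 262144 pages in total.
 */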
2039
2040static int init_header(struct swsusp_info *info)
2041{
2042	memset(info, 0, sizeof(struct swsusp_info));
2043	info->num_physpages = get_num_physpages();
2044	info->image_pages = nr_copy_pages;
2045	info->pages = snapshot_get_image_size();
2046	info->size = info->pages;
2047	info->size <<= PAGE_SHIFT;
2048	return init_header_complete(info);
2049}
2050
2051/**
2052 * pack_pfns - Prepare PFNs for saving.
2053 * @bm: Memory bitmap.
2054 * @buf: Memory buffer to store the PFNs in.
2055 *
2056 * PFNs corresponding to set bits in @bm are stored in the area of memory
2057 * pointed to by @buf (1 page at a time).
2058 */
2059static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2060{
2061	int j;
2062
2063	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2064		buf[j] = memory_bm_next_pfn(bm);
2065		if (unlikely(buf[j] == BM_END_OF_MAP))
2066			break;
2067		/* Save page key for data page (s390 only). */
2068		page_key_read(buf + j);
2069	}
2070}
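/*
 * Shape of one meta page produced by pack_pfns(), assuming 64-bit
 * longs (the PFN values are made up):
 *
 *	buf[0] = 0x1000;	// first image PFN
 *	buf[1] = 0x1001;
 *	...
 *	buf[n] = BM_END_OF_MAP;	// only on the last meta page, when
 *				// fewer than 512 PFNs remain
 *
 * unpack_orig_pfns() below performs the exact inverse walk on restore.
 */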
2071
2072/**
2073 * snapshot_read_next - Get the address to read the next image page from.
2074 * @handle: Snapshot handle to be used for the reading.
2075 *
2076 * On the first call, @handle should point to a zeroed snapshot_handle
2077 * structure.  The structure gets populated then and a pointer to it should be
2078 * passed to this function every next time.
2079 *
2080 * On success, the function returns a positive number.  Then, the caller
2081 * is allowed to read up to the returned number of bytes from the memory
2082 * location computed by the data_of() macro.
2083 *
2084 * The function returns 0 to indicate the end of the data stream condition,
2085 * and negative numbers are returned on errors.  If that happens, the structure
2086 * pointed to by @handle is not updated and should not be used any more.
2087 */
2088int snapshot_read_next(struct snapshot_handle *handle)
2089{
2090	if (handle->cur > nr_meta_pages + nr_copy_pages)
2091		return 0;
2092
2093	if (!buffer) {
2094		/* This ensures the buffer will be freed by swsusp_free() */
2095		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2096		if (!buffer)
2097			return -ENOMEM;
2098	}
2099	if (!handle->cur) {
2100		int error;
2101
2102		error = init_header((struct swsusp_info *)buffer);
2103		if (error)
2104			return error;
2105		handle->buffer = buffer;
2106		memory_bm_position_reset(&orig_bm);
2107		memory_bm_position_reset(&copy_bm);
2108	} else if (handle->cur <= nr_meta_pages) {
2109		clear_page(buffer);
2110		pack_pfns(buffer, &orig_bm);
2111	} else {
2112		struct page *page;
2113
2114		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2115		if (PageHighMem(page)) {
2116			/*
2117			 * Highmem pages are copied to the buffer,
2118			 * because we can't return with a kmapped
2119			 * highmem page (we may not be called again).
2120			 */
2121			void *kaddr;
2122
2123			kaddr = kmap_atomic(page);
2124			copy_page(buffer, kaddr);
2125			kunmap_atomic(kaddr);
2126			handle->buffer = buffer;
2127		} else {
2128			handle->buffer = page_address(page);
2129		}
2130	}
2131	handle->cur++;
2132	return PAGE_SIZE;
2133}
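/*
 * Illustrative consumer loop (a sketch only; the real callers live in
 * kernel/power/user.c and kernel/power/swap.c, and write_out() here is
 * a made-up placeholder):
 *
 *	struct snapshot_handle handle = {};
 *	int n;
 *
 *	while ((n = snapshot_read_next(&handle)) > 0)
 *		write_out(data_of(handle), n);
 *	if (n < 0)
 *		return n;	// error: handle must not be used again
 */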
2134
2135static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2136				    struct memory_bitmap *src)
2137{
2138	unsigned long pfn;
2139
2140	memory_bm_position_reset(src);
2141	pfn = memory_bm_next_pfn(src);
2142	while (pfn != BM_END_OF_MAP) {
2143		memory_bm_set_bit(dst, pfn);
2144		pfn = memory_bm_next_pfn(src);
2145	}
2146}
2147
2148/**
2149 * mark_unsafe_pages - Mark pages that were used before hibernation.
2150 *
2151 * Mark the pages that cannot be used for storing the image during restoration,
2152 * because they conflict with the pages that had been used before hibernation.
2153 */
2154static void mark_unsafe_pages(struct memory_bitmap *bm)
2155{
2156	unsigned long pfn;
2157
2158	/* Clear the "free"/"unsafe" bit for all PFNs */
2159	memory_bm_position_reset(free_pages_map);
2160	pfn = memory_bm_next_pfn(free_pages_map);
2161	while (pfn != BM_END_OF_MAP) {
2162		memory_bm_clear_current(free_pages_map);
2163		pfn = memory_bm_next_pfn(free_pages_map);
2164	}
2165
2166	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2167	duplicate_memory_bitmap(free_pages_map, bm);
2168
2169	allocated_unsafe_pages = 0;
2170}
2171
2172static int check_header(struct swsusp_info *info)
2173{
2174	char *reason;
2175
2176	reason = check_image_kernel(info);
2177	if (!reason && info->num_physpages != get_num_physpages())
2178		reason = "memory size";
2179	if (reason) {
2180		pr_err("Image mismatch: %s\n", reason);
2181		return -EPERM;
2182	}
2183	return 0;
2184}
2185
2186/**
2187 * load_header - Check the image header and copy the data from it.
2188 */
2189static int load_header(struct swsusp_info *info)
2190{
2191	int error;
2192
2193	restore_pblist = NULL;
2194	error = check_header(info);
2195	if (!error) {
2196		nr_copy_pages = info->image_pages;
2197		nr_meta_pages = info->pages - info->image_pages - 1;
2198	}
2199	return error;
2200}
2201
2202/**
2203 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2204 * @bm: Memory bitmap.
2205 * @buf: Area of memory containing the PFNs.
2206 *
2207 * For each element of the array pointed to by @buf (1 page at a time), set the
2208 * corresponding bit in @bm.
2209 */
2210static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2211{
2212	int j;
2213
2214	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2215		if (unlikely(buf[j] == BM_END_OF_MAP))
2216			break;
2217
2218		/* Extract and buffer page key for data page (s390 only). */
2219		page_key_memorize(buf + j);
2220
2221		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2222			memory_bm_set_bit(bm, buf[j]);
2223		else
2224			return -EFAULT;
2225	}
2226
2227	return 0;
2228}
2229
2230#ifdef CONFIG_HIGHMEM
2231/*
2232 * struct highmem_pbe is used for creating the list of highmem pages that
2233 * should be restored atomically during the resume from disk, because the page
2234 * frames they have occupied before the suspend are in use.
2235 */
2236struct highmem_pbe {
2237	struct page *copy_page;	/* data is here now */
2238	struct page *orig_page;	/* data was here before the suspend */
2239	struct highmem_pbe *next;
2240};
2241
2242/*
2243 * List of highmem PBEs needed for restoring the highmem pages that were
2244 * allocated before the suspend and included in the suspend image, but have
2245 * also been allocated by the "resume" kernel, so their contents cannot be
2246 * written directly to their "original" page frames.
2247 */
2248static struct highmem_pbe *highmem_pblist;
2249
2250/**
2251 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2252 * @bm: Memory bitmap.
2253 *
2254 * The bits in @bm that correspond to image pages are assumed to be set.
2255 */
2256static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2257{
2258	unsigned long pfn;
2259	unsigned int cnt = 0;
2260
2261	memory_bm_position_reset(bm);
2262	pfn = memory_bm_next_pfn(bm);
2263	while (pfn != BM_END_OF_MAP) {
2264		if (PageHighMem(pfn_to_page(pfn)))
2265			cnt++;
2266
2267		pfn = memory_bm_next_pfn(bm);
2268	}
2269	return cnt;
2270}
2271
2272static unsigned int safe_highmem_pages;
2273
2274static struct memory_bitmap *safe_highmem_bm;
2275
2276/**
2277 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2278 * @bm: Pointer to an uninitialized memory bitmap structure.
2279 * @nr_highmem_p: Pointer to the number of highmem image pages.
2280 *
2281 * Try to allocate as many highmem pages as there are highmem image pages
2282 * (@nr_highmem_p points to the variable containing the number of highmem image
2283 * pages).  The pages that are "safe" (ie. will not be overwritten when the
2284 * hibernation image is restored entirely) have the corresponding bits set in
2285 * @bm (it must be uninitialized).
2286 *
2287 * NOTE: This function should not be called if there are no highmem image pages.
2288 */
2289static int prepare_highmem_image(struct memory_bitmap *bm,
2290				 unsigned int *nr_highmem_p)
2291{
2292	unsigned int to_alloc;
2293
2294	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2295		return -ENOMEM;
2296
2297	if (get_highmem_buffer(PG_SAFE))
2298		return -ENOMEM;
2299
2300	to_alloc = count_free_highmem_pages();
2301	if (to_alloc > *nr_highmem_p)
2302		to_alloc = *nr_highmem_p;
2303	else
2304		*nr_highmem_p = to_alloc;
2305
2306	safe_highmem_pages = 0;
2307	while (to_alloc-- > 0) {
2308		struct page *page;
2309
2310		page = alloc_page(__GFP_HIGHMEM);
2311		if (!swsusp_page_is_free(page)) {
2312			/* The page is "safe", set its bit in the bitmap */
2313			memory_bm_set_bit(bm, page_to_pfn(page));
2314			safe_highmem_pages++;
2315		}
2316		/* Mark the page as allocated */
2317		swsusp_set_page_forbidden(page);
2318		swsusp_set_page_free(page);
2319	}
2320	memory_bm_position_reset(bm);
2321	safe_highmem_bm = bm;
2322	return 0;
2323}
2324
2325static struct page *last_highmem_page;
2326
2327/**
2328 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2329 *
2330 * For a given highmem image page get a buffer that suspend_write_next() should
2331 * return to its caller to write to.
2332 *
2333 * If the page is to be saved to its "original" page frame or a copy of
2334 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2335 * the copy of the page is to be made in normal memory, so the address of
2336 * the copy is returned.
2337 *
2338 * If @buffer is returned, the caller of suspend_write_next() will write
2339 * the page's contents to @buffer, so they will have to be copied to the
2340 * right location on the next call to suspend_write_next() and it is done
2341 * with the help of copy_last_highmem_page().  For this purpose, if
2342 * @buffer is returned, @last_highmem_page is set to the page to which
2343 * the data will have to be copied from @buffer.
2344 */
2345static void *get_highmem_page_buffer(struct page *page,
2346				     struct chain_allocator *ca)
2347{
2348	struct highmem_pbe *pbe;
2349	void *kaddr;
2350
2351	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2352		/*
2353		 * We have allocated the "original" page frame and we can
2354		 * use it directly to store the loaded page.
2355		 */
2356		last_highmem_page = page;
2357		return buffer;
2358	}
2359	/*
2360	 * The "original" page frame has not been allocated and we have to
2361	 * use a "safe" page frame to store the loaded page.
2362	 */
2363	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2364	if (!pbe) {
2365		swsusp_free();
2366		return ERR_PTR(-ENOMEM);
2367	}
2368	pbe->orig_page = page;
2369	if (safe_highmem_pages > 0) {
2370		struct page *tmp;
2371
2372		/* Copy of the page will be stored in high memory */
2373		kaddr = buffer;
2374		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2375		safe_highmem_pages--;
2376		last_highmem_page = tmp;
2377		pbe->copy_page = tmp;
2378	} else {
2379		/* Copy of the page will be stored in normal memory */
2380		kaddr = safe_pages_list;
2381		safe_pages_list = safe_pages_list->next;
2382		pbe->copy_page = virt_to_page(kaddr);
2383	}
2384	pbe->next = highmem_pblist;
2385	highmem_pblist = pbe;
2386	return kaddr;
2387}
2388
2389/**
2390 * copy_last_highmem_page - Copy the most recent highmem image page.
2391 *
2392 * Copy the contents of a highmem image page from @buffer, where the caller
2393 * of snapshot_write_next() has stored them, to the right location
2394 * represented by @last_highmem_page.
2395 */
2396static void copy_last_highmem_page(void)
2397{
2398	if (last_highmem_page) {
2399		void *dst;
2400
2401		dst = kmap_atomic(last_highmem_page);
2402		copy_page(dst, buffer);
2403		kunmap_atomic(dst);
2404		last_highmem_page = NULL;
2405	}
2406}
2407
2408static inline int last_highmem_page_copied(void)
2409{
2410	return !last_highmem_page;
2411}
2412
2413static inline void free_highmem_data(void)
2414{
2415	if (safe_highmem_bm)
2416		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2417
2418	if (buffer)
2419		free_image_page(buffer, PG_UNSAFE_CLEAR);
2420}
2421#else
2422static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2423
2424static inline int prepare_highmem_image(struct memory_bitmap *bm,
2425					unsigned int *nr_highmem_p) { return 0; }
2426
2427static inline void *get_highmem_page_buffer(struct page *page,
2428					    struct chain_allocator *ca)
2429{
2430	return ERR_PTR(-EINVAL);
2431}
2432
2433static inline void copy_last_highmem_page(void) {}
2434static inline int last_highmem_page_copied(void) { return 1; }
2435static inline void free_highmem_data(void) {}
2436#endif /* CONFIG_HIGHMEM */
2437
2438#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2439
2440/**
2441 * prepare_image - Make room for loading hibernation image.
2442 * @new_bm: Uninitialized memory bitmap structure.
2443 * @bm: Memory bitmap with unsafe pages marked.
2444 *
2445 * Use @bm to mark the pages that will be overwritten in the process of
2446 * restoring the system memory state from the suspend image ("unsafe" pages)
2447 * and allocate memory for the image.
2448 *
2449 * The idea is to allocate a new memory bitmap first and then allocate
2450 * as many pages as needed for image data, but without specifying what those
2451 * pages will be used for just yet.  Instead, we mark them all as allocated and
2452 * create a list of "safe" pages to be used later.  On systems with high
2453 * memory, a list of "safe" highmem pages is created too.
2454 */
2455static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2456{
2457	unsigned int nr_pages, nr_highmem;
2458	struct linked_page *lp;
2459	int error;
2460
2461	/* If there is no highmem, the buffer will not be necessary */
2462	free_image_page(buffer, PG_UNSAFE_CLEAR);
2463	buffer = NULL;
2464
2465	nr_highmem = count_highmem_image_pages(bm);
2466	mark_unsafe_pages(bm);
2467
2468	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2469	if (error)
2470		goto Free;
2471
2472	duplicate_memory_bitmap(new_bm, bm);
2473	memory_bm_free(bm, PG_UNSAFE_KEEP);
2474	if (nr_highmem > 0) {
2475		error = prepare_highmem_image(bm, &nr_highmem);
2476		if (error)
2477			goto Free;
2478	}
2479	/*
2480	 * Reserve some safe pages for potential later use.
2481	 *
2482	 * NOTE: This way we make sure there will be enough safe pages for the
2483	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2484	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2485	 *
2486	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2487	 */
2488	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2489	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2490	while (nr_pages > 0) {
2491		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2492		if (!lp) {
2493			error = -ENOMEM;
2494			goto Free;
2495		}
2496		lp->next = safe_pages_list;
2497		safe_pages_list = lp;
2498		nr_pages--;
2499	}
2500	/* Preallocate memory for the image */
2501	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2502	while (nr_pages > 0) {
2503		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2504		if (!lp) {
2505			error = -ENOMEM;
2506			goto Free;
2507		}
2508		if (!swsusp_page_is_free(virt_to_page(lp))) {
2509			/* The page is "safe", add it to the list */
2510			lp->next = safe_pages_list;
2511			safe_pages_list = lp;
2512		}
2513		/* Mark the page as allocated */
2514		swsusp_set_page_forbidden(virt_to_page(lp));
2515		swsusp_set_page_free(virt_to_page(lp));
2516		nr_pages--;
2517	}
2518	return 0;
2519
2520 Free:
2521	swsusp_free();
2522	return error;
2523}
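/*
 * Rough numbers for the reservation above, assuming 4 KiB pages and a
 * 64-bit struct pbe of 24 bytes (three pointer-sized members):
 * LINKED_PAGE_DATA_SIZE = 4096 - 8 = 4088 bytes, so
 * PBES_PER_LINKED_PAGE = 4088 / 24 = 170, i.e. one safe page is
 * reserved for every 170 image pages that may need a PBE.
 */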
2524
2525/**
2526 * get_buffer - Get the address to store the next image data page.
2527 *
2528 * Get the address that snapshot_write_next() should return to its caller to
2529 * write to.
2530 */
2531static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2532{
2533	struct pbe *pbe;
2534	struct page *page;
2535	unsigned long pfn = memory_bm_next_pfn(bm);
2536
2537	if (pfn == BM_END_OF_MAP)
2538		return ERR_PTR(-EFAULT);
2539
2540	page = pfn_to_page(pfn);
2541	if (PageHighMem(page))
2542		return get_highmem_page_buffer(page, ca);
2543
2544	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2545		/*
2546		 * We have allocated the "original" page frame and we can
2547		 * use it directly to store the loaded page.
2548		 */
2549		return page_address(page);
2550
2551	/*
2552	 * The "original" page frame has not been allocated and we have to
2553	 * use a "safe" page frame to store the loaded page.
2554	 */
2555	pbe = chain_alloc(ca, sizeof(struct pbe));
2556	if (!pbe) {
2557		swsusp_free();
2558		return ERR_PTR(-ENOMEM);
2559	}
2560	pbe->orig_address = page_address(page);
2561	pbe->address = safe_pages_list;
2562	safe_pages_list = safe_pages_list->next;
2563	pbe->next = restore_pblist;
2564	restore_pblist = pbe;
2565	return pbe->address;
2566}
2567
2568/**
2569 * snapshot_write_next - Get the address to store the next image page.
2570 * @handle: Snapshot handle structure to guide the writing.
2571 *
2572 * On the first call, @handle should point to a zeroed snapshot_handle
2573 * structure.  The structure gets populated then and a pointer to it should be
2574 * passed to this function every next time.
2575 *
2576 * On success, the function returns a positive number.  Then, the caller
2577 * is allowed to write up to the returned number of bytes to the memory
2578 * location computed by the data_of() macro.
2579 *
2580 * The function returns 0 to indicate the "end of file" condition.  Negative
2581 * numbers are returned on errors, in which cases the structure pointed to by
2582 * @handle is not updated and should not be used any more.
2583 */
2584int snapshot_write_next(struct snapshot_handle *handle)
2585{
2586	static struct chain_allocator ca;
2587	int error = 0;
2588
2589	/* Check if we have already loaded the entire image */
2590	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2591		return 0;
2592
2593	handle->sync_read = 1;
2594
2595	if (!handle->cur) {
2596		if (!buffer)
2597			/* This ensures the buffer will be freed by swsusp_free() */
2598			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2599
2600		if (!buffer)
2601			return -ENOMEM;
2602
2603		handle->buffer = buffer;
2604	} else if (handle->cur == 1) {
2605		error = load_header(buffer);
2606		if (error)
2607			return error;
2608
2609		safe_pages_list = NULL;
2610
2611		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2612		if (error)
2613			return error;
2614
2615		/* Allocate buffer for page keys. */
2616		error = page_key_alloc(nr_copy_pages);
2617		if (error)
2618			return error;
2619
2620		hibernate_restore_protection_begin();
2621	} else if (handle->cur <= nr_meta_pages + 1) {
2622		error = unpack_orig_pfns(buffer, &copy_bm);
2623		if (error)
2624			return error;
2625
2626		if (handle->cur == nr_meta_pages + 1) {
2627			error = prepare_image(&orig_bm, &copy_bm);
2628			if (error)
2629				return error;
2630
2631			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2632			memory_bm_position_reset(&orig_bm);
2633			restore_pblist = NULL;
2634			handle->buffer = get_buffer(&orig_bm, &ca);
2635			handle->sync_read = 0;
2636			if (IS_ERR(handle->buffer))
2637				return PTR_ERR(handle->buffer);
2638		}
2639	} else {
2640		copy_last_highmem_page();
2641		/* Restore page key for data page (s390 only). */
2642		page_key_write(handle->buffer);
2643		hibernate_restore_protect_page(handle->buffer);
2644		handle->buffer = get_buffer(&orig_bm, &ca);
2645		if (IS_ERR(handle->buffer))
2646			return PTR_ERR(handle->buffer);
2647		if (handle->buffer != buffer)
2648			handle->sync_read = 0;
2649	}
2650	handle->cur++;
2651	return PAGE_SIZE;
2652}
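/*
 * Illustrative producer loop mirroring the read side (a sketch only;
 * the real callers live in kernel/power/user.c and kernel/power/swap.c,
 * and read_in() is a made-up placeholder):
 *
 *	struct snapshot_handle handle = {};
 *	int n;
 *
 *	while ((n = snapshot_write_next(&handle)) > 0) {
 *		if (read_in(data_of(handle), n) < n)
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (!snapshot_image_loaded(&handle))
 *		return -ENODATA;
 */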
2653
2654/**
2655 * snapshot_write_finalize - Complete the loading of a hibernation image.
2656 *
2657 * Must be called after the last call to snapshot_write_next() in case the last
2658 * page in the image happens to be a highmem page and its contents should be
2659 * stored in highmem.  Additionally, it recycles bitmap memory that's not
2660 * necessary any more.
2661 */
2662void snapshot_write_finalize(struct snapshot_handle *handle)
2663{
2664	copy_last_highmem_page();
2665	/* Restore page key for data page (s390 only). */
2666	page_key_write(handle->buffer);
2667	page_key_free();
2668	hibernate_restore_protect_page(handle->buffer);
2669	/* Do that only if we have loaded the image entirely */
2670	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2671		memory_bm_recycle(&orig_bm);
2672		free_highmem_data();
2673	}
2674}
2675
2676int snapshot_image_loaded(struct snapshot_handle *handle)
2677{
2678	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2679			handle->cur <= nr_meta_pages + nr_copy_pages);
2680}
2681
2682#ifdef CONFIG_HIGHMEM
2683/* Assumes that @buf is ready and points to a "safe" page */
2684static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2685				       void *buf)
2686{
2687	void *kaddr1, *kaddr2;
2688
2689	kaddr1 = kmap_atomic(p1);
2690	kaddr2 = kmap_atomic(p2);
2691	copy_page(buf, kaddr1);
2692	copy_page(kaddr1, kaddr2);
2693	copy_page(kaddr2, buf);
2694	kunmap_atomic(kaddr2);
2695	kunmap_atomic(kaddr1);
2696}
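/*
 * This is the classic three-copy swap through a scratch page:
 *
 *	buf <- p1	// save p1
 *	p1  <- p2
 *	p2  <- buf	// p2 now holds the old contents of p1
 *
 * @buf must be a "safe" page precisely because it is live scratch
 * space while image data is being put back in place.
 */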
2697
2698/**
2699 * restore_highmem - Put highmem image pages into their original locations.
2700 *
2701 * For each highmem page that was in use before hibernation and is included in
2702 * the image, and also has been allocated by the "restore" kernel, swap its
2703 * current contents with the previous (ie. "before hibernation") ones.
2704 *
2705 * If the restore eventually fails, we can call this function once again and
2706 * restore the highmem state as seen by the restore kernel.
2707 */
2708int restore_highmem(void)
2709{
2710	struct highmem_pbe *pbe = highmem_pblist;
2711	void *buf;
2712
2713	if (!pbe)
2714		return 0;
2715
2716	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2717	if (!buf)
2718		return -ENOMEM;
2719
2720	while (pbe) {
2721		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2722		pbe = pbe->next;
2723	}
2724	free_image_page(buf, PG_UNSAFE_CLEAR);
2725	return 0;
2726}
2727#endif /* CONFIG_HIGHMEM */
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/power/snapshot.c
   4 *
   5 * This file provides system snapshot/restore functionality for swsusp.
   6 *
   7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
   8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 */
  10
  11#define pr_fmt(fmt) "PM: hibernation: " fmt
  12
  13#include <linux/version.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/suspend.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/spinlock.h>
  20#include <linux/kernel.h>
  21#include <linux/pm.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/memblock.h>
  25#include <linux/nmi.h>
  26#include <linux/syscalls.h>
  27#include <linux/console.h>
  28#include <linux/highmem.h>
  29#include <linux/list.h>
  30#include <linux/slab.h>
  31#include <linux/compiler.h>
  32#include <linux/ktime.h>
  33#include <linux/set_memory.h>
  34
  35#include <linux/uaccess.h>
  36#include <asm/mmu_context.h>
  37#include <asm/tlbflush.h>
  38#include <asm/io.h>
  39
  40#include "power.h"
  41
  42#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
  43static bool hibernate_restore_protection;
  44static bool hibernate_restore_protection_active;
  45
  46void enable_restore_image_protection(void)
  47{
  48	hibernate_restore_protection = true;
  49}
  50
  51static inline void hibernate_restore_protection_begin(void)
  52{
  53	hibernate_restore_protection_active = hibernate_restore_protection;
  54}
  55
  56static inline void hibernate_restore_protection_end(void)
  57{
  58	hibernate_restore_protection_active = false;
  59}
  60
  61static inline void hibernate_restore_protect_page(void *page_address)
  62{
  63	if (hibernate_restore_protection_active)
  64		set_memory_ro((unsigned long)page_address, 1);
  65}
  66
  67static inline void hibernate_restore_unprotect_page(void *page_address)
  68{
  69	if (hibernate_restore_protection_active)
  70		set_memory_rw((unsigned long)page_address, 1);
  71}
  72#else
  73static inline void hibernate_restore_protection_begin(void) {}
  74static inline void hibernate_restore_protection_end(void) {}
  75static inline void hibernate_restore_protect_page(void *page_address) {}
  76static inline void hibernate_restore_unprotect_page(void *page_address) {}
  77#endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
  78
  79
  80/*
  81 * The calls to set_direct_map_*() should not fail because remapping a page
  82 * here means that we only update protection bits in an existing PTE.
   83 * It is still worth having a warning here in case something changes
   84 * and this is no longer the case.
  85 */
  86static inline void hibernate_map_page(struct page *page)
  87{
  88	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
  89		int ret = set_direct_map_default_noflush(page);
  90
  91		if (ret)
  92			pr_warn_once("Failed to remap page\n");
  93	} else {
  94		debug_pagealloc_map_pages(page, 1);
  95	}
  96}
  97
  98static inline void hibernate_unmap_page(struct page *page)
  99{
 100	if (IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
 101		unsigned long addr = (unsigned long)page_address(page);
 102		int ret  = set_direct_map_invalid_noflush(page);
 103
 104		if (ret)
 105			pr_warn_once("Failed to remap page\n");
 106
 107		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 108	} else {
 109		debug_pagealloc_unmap_pages(page, 1);
 110	}
 111}
 112
 113static int swsusp_page_is_free(struct page *);
 114static void swsusp_set_page_forbidden(struct page *);
 115static void swsusp_unset_page_forbidden(struct page *);
 116
 117/*
 118 * Number of bytes to reserve for memory allocations made by device drivers
 119 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 120 * cause image creation to fail (tunable via /sys/power/reserved_size).
 121 */
 122unsigned long reserved_size;
 123
 124void __init hibernate_reserved_size_init(void)
 125{
 126	reserved_size = SPARE_PAGES * PAGE_SIZE;
 127}
 128
 129/*
 130 * Preferred image size in bytes (tunable via /sys/power/image_size).
 131 * When it is set to N, swsusp will do its best to ensure the image
 132 * size will not exceed N bytes, but if that is impossible, it will
 133 * try to create the smallest image possible.
 134 */
 135unsigned long image_size;
 136
 137void __init hibernate_image_size_init(void)
 138{
 139	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
 140}
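/*
 * Worked example for the default above: on a machine with 8 GiB of RAM
 * (2097152 pages of 4 KiB), image_size starts out at
 * ((2097152 * 2) / 5) * 4096 bytes, i.e. about 3.2 GiB or 2/5 of RAM.
 * It can be overridden through /sys/power/image_size.
 */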
 141
 142/*
 143 * List of PBEs needed for restoring the pages that were allocated before
 144 * the suspend and included in the suspend image, but have also been
 145 * allocated by the "resume" kernel, so their contents cannot be written
 146 * directly to their "original" page frames.
 147 */
 148struct pbe *restore_pblist;
 149
 150/* struct linked_page is used to build chains of pages */
 151
 152#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
 153
 154struct linked_page {
 155	struct linked_page *next;
 156	char data[LINKED_PAGE_DATA_SIZE];
 157} __packed;
 158
 159/*
 160 * List of "safe" pages (ie. pages that were not used by the image kernel
 161 * before hibernation) that may be used as temporary storage for image kernel
 162 * memory contents.
 163 */
 164static struct linked_page *safe_pages_list;
 165
 166/* Pointer to an auxiliary buffer (1 page) */
 167static void *buffer;
 168
 169#define PG_ANY		0
 170#define PG_SAFE		1
 171#define PG_UNSAFE_CLEAR	1
 172#define PG_UNSAFE_KEEP	0
 173
 174static unsigned int allocated_unsafe_pages;
 175
 176/**
 177 * get_image_page - Allocate a page for a hibernation image.
 178 * @gfp_mask: GFP mask for the allocation.
 179 * @safe_needed: Get pages that were not used before hibernation (restore only)
 180 *
 181 * During image restoration, for storing the PBE list and the image data, we can
 182 * only use memory pages that do not conflict with the pages used before
 183 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 184 * using allocated_unsafe_pages.
 185 *
 186 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 187 * swsusp_free() can release it.
 188 */
 189static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 190{
 191	void *res;
 192
 193	res = (void *)get_zeroed_page(gfp_mask);
 194	if (safe_needed)
 195		while (res && swsusp_page_is_free(virt_to_page(res))) {
 196			/* The page is unsafe, mark it for swsusp_free() */
 197			swsusp_set_page_forbidden(virt_to_page(res));
 198			allocated_unsafe_pages++;
 199			res = (void *)get_zeroed_page(gfp_mask);
 200		}
 201	if (res) {
 202		swsusp_set_page_forbidden(virt_to_page(res));
 203		swsusp_set_page_free(virt_to_page(res));
 204	}
 205	return res;
 206}
 207
 208static void *__get_safe_page(gfp_t gfp_mask)
 209{
 210	if (safe_pages_list) {
 211		void *ret = safe_pages_list;
 212
 213		safe_pages_list = safe_pages_list->next;
 214		memset(ret, 0, PAGE_SIZE);
 215		return ret;
 216	}
 217	return get_image_page(gfp_mask, PG_SAFE);
 218}
 219
 220unsigned long get_safe_page(gfp_t gfp_mask)
 221{
 222	return (unsigned long)__get_safe_page(gfp_mask);
 223}
 224
 225static struct page *alloc_image_page(gfp_t gfp_mask)
 226{
 227	struct page *page;
 228
 229	page = alloc_page(gfp_mask);
 230	if (page) {
 231		swsusp_set_page_forbidden(page);
 232		swsusp_set_page_free(page);
 233	}
 234	return page;
 235}
 236
 237static void recycle_safe_page(void *page_address)
 238{
 239	struct linked_page *lp = page_address;
 240
 241	lp->next = safe_pages_list;
 242	safe_pages_list = lp;
 243}
 244
 245/**
 246 * free_image_page - Free a page allocated for hibernation image.
 247 * @addr: Address of the page to free.
 248 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 249 *
 250 * The page to free should have been allocated by get_image_page() (page flags
 251 * set by it are affected).
 252 */
 253static inline void free_image_page(void *addr, int clear_nosave_free)
 254{
 255	struct page *page;
 256
 257	BUG_ON(!virt_addr_valid(addr));
 258
 259	page = virt_to_page(addr);
 260
 261	swsusp_unset_page_forbidden(page);
 262	if (clear_nosave_free)
 263		swsusp_unset_page_free(page);
 264
 265	__free_page(page);
 266}
 267
 268static inline void free_list_of_pages(struct linked_page *list,
 269				      int clear_page_nosave)
 270{
 271	while (list) {
 272		struct linked_page *lp = list->next;
 273
 274		free_image_page(list, clear_page_nosave);
 275		list = lp;
 276	}
 277}
 278
 279/*
 280 * struct chain_allocator is used for allocating small objects out of
 281 * a linked list of pages called 'the chain'.
 282 *
 283 * The chain grows each time when there is no room for a new object in
 284 * the current page.  The allocated objects cannot be freed individually.
 285 * It is only possible to free them all at once, by freeing the entire
 286 * chain.
 287 *
 288 * NOTE: The chain allocator may be inefficient if the allocated objects
 289 * are not much smaller than PAGE_SIZE.
 290 */
 291struct chain_allocator {
 292	struct linked_page *chain;	/* the chain */
 293	unsigned int used_space;	/* total size of objects allocated out
 294					   of the current page */
 295	gfp_t gfp_mask;		/* mask for allocating pages */
 296	int safe_needed;	/* if set, only "safe" pages are allocated */
 297};
 298
 299static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
 300		       int safe_needed)
 301{
 302	ca->chain = NULL;
 303	ca->used_space = LINKED_PAGE_DATA_SIZE;
 304	ca->gfp_mask = gfp_mask;
 305	ca->safe_needed = safe_needed;
 306}
 307
 308static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 309{
 310	void *ret;
 311
 312	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 313		struct linked_page *lp;
 314
 315		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
 316					get_image_page(ca->gfp_mask, PG_ANY);
 317		if (!lp)
 318			return NULL;
 319
 320		lp->next = ca->chain;
 321		ca->chain = lp;
 322		ca->used_space = 0;
 323	}
 324	ret = ca->chain->data + ca->used_space;
 325	ca->used_space += size;
 326	return ret;
 327}
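/*
 * Typical use of the chain allocator (a condensed sketch of what
 * memory_bm_create() below does):
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 *
 * Note there is no chain_free(): the objects are only freed all at
 * once, by handing ca.chain to free_list_of_pages().
 */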
 328
 329/*
 330 * Data types related to memory bitmaps.
 331 *
 332 * Memory bitmap is a structure consisting of many linked lists of
 333 * objects.  The main list's elements are of type struct zone_bitmap
 334 * and each of them corresponds to one zone.  For each zone bitmap
 335 * object there is a list of objects of type struct bm_block that
  336 * represent the blocks of the bitmap in which information is stored.
 337 *
 338 * struct memory_bitmap contains a pointer to the main list of zone
 339 * bitmap objects, a struct bm_position used for browsing the bitmap,
 340 * and a pointer to the list of pages used for allocating all of the
 341 * zone bitmap objects and bitmap block objects.
 342 *
 343 * NOTE: It has to be possible to lay out the bitmap in memory
 344 * using only allocations of order 0.  Additionally, the bitmap is
  345 * designed to work with an arbitrary number of zones (this is over the
 346 * top for now, but let's avoid making unnecessary assumptions ;-).
 347 *
 348 * struct zone_bitmap contains a pointer to a list of bitmap block
 349 * objects and a pointer to the bitmap block object that has been
 350 * most recently used for setting bits.  Additionally, it contains the
 351 * PFNs that correspond to the start and end of the represented zone.
 352 *
 353 * struct bm_block contains a pointer to the memory page in which
  354 * information is stored (in the form of a block of the bitmap).
  355 * It also contains the PFNs that correspond to the start and end of
 356 * the represented memory area.
 357 *
 358 * The memory bitmap is organized as a radix tree to guarantee fast random
 359 * access to the bits. There is one radix tree for each zone (as returned
 360 * from create_mem_extents).
 361 *
 362 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 363 * two linked lists for the nodes of the tree, one for the inner nodes and
  364 * one for the leaf nodes. The linked leaf nodes are used for fast linear
 365 * access of the memory bitmap.
 366 *
 367 * The struct rtree_node represents one node of the radix tree.
 368 */
 369
 370#define BM_END_OF_MAP	(~0UL)
 371
 372#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
 373#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
 374#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
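/*
 * Geometry implied by the constants above, assuming 4 KiB pages: one
 * bitmap block is a full page of bits, i.e.
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768, so every leaf node of the
 * radix tree covers 32768 PFNs (128 MiB of memory), and
 * BM_BLOCK_SHIFT = 12 + 3 = 15 turns a zone-relative PFN into its
 * block number.
 */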
 375
 376/*
 377 * struct rtree_node is a wrapper struct to link the nodes
 378 * of the rtree together for easy linear iteration over
 379 * bits and easy freeing
 380 */
 381struct rtree_node {
 382	struct list_head list;
 383	unsigned long *data;
 384};
 385
 386/*
 387 * struct mem_zone_bm_rtree represents a bitmap used for one
 388 * populated memory zone.
 389 */
 390struct mem_zone_bm_rtree {
 391	struct list_head list;		/* Link Zones together         */
 392	struct list_head nodes;		/* Radix Tree inner nodes      */
 393	struct list_head leaves;	/* Radix Tree leaves           */
 394	unsigned long start_pfn;	/* Zone start page frame       */
 395	unsigned long end_pfn;		/* Zone end page frame + 1     */
 396	struct rtree_node *rtree;	/* Radix Tree Root             */
 397	int levels;			/* Number of Radix Tree Levels */
 398	unsigned int blocks;		/* Number of Bitmap Blocks     */
 399};
 400
 401/* struct bm_position is used for browsing memory bitmaps */
 402
 403struct bm_position {
 404	struct mem_zone_bm_rtree *zone;
 405	struct rtree_node *node;
 406	unsigned long node_pfn;
 407	unsigned long cur_pfn;
 408	int node_bit;
 409};
 410
 411struct memory_bitmap {
 412	struct list_head zones;
 413	struct linked_page *p_list;	/* list of pages used to store zone
 414					   bitmap objects and bitmap block
 415					   objects */
 416	struct bm_position cur;	/* most recently used bit position */
 417};
 418
 419/* Functions that operate on memory bitmaps */
 420
 421#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
 422#if BITS_PER_LONG == 32
 423#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
 424#else
 425#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
 426#endif
 427#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
 428
 429/**
 430 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 431 * @gfp_mask: GFP mask for the allocation.
 432 * @safe_needed: Get pages not used before hibernation (restore only)
 433 * @ca: Pointer to a linked list of pages ("a chain") to allocate from
 434 * @list: Radix Tree node to add.
 435 *
 436 * This function is used to allocate inner nodes as well as the
  437 * leaf nodes of the radix tree. It also adds the node to the
  438 * corresponding linked list passed in via the @list parameter.
 439 */
 440static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
 441					   struct chain_allocator *ca,
 442					   struct list_head *list)
 443{
 444	struct rtree_node *node;
 445
 446	node = chain_alloc(ca, sizeof(struct rtree_node));
 447	if (!node)
 448		return NULL;
 449
 450	node->data = get_image_page(gfp_mask, safe_needed);
 451	if (!node->data)
 452		return NULL;
 453
 454	list_add_tail(&node->list, list);
 455
 456	return node;
 457}
 458
 459/**
  460 * add_rtree_block - Add a new leaf node to the radix tree.
 461 *
  462 * The leaf nodes need to be allocated in order to keep the leaves
 463 * linked list in order. This is guaranteed by the zone->blocks
 464 * counter.
 465 */
 466static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
 467			   int safe_needed, struct chain_allocator *ca)
 468{
 469	struct rtree_node *node, *block, **dst;
 470	unsigned int levels_needed, block_nr;
 471	int i;
 472
 473	block_nr = zone->blocks;
 474	levels_needed = 0;
 475
 476	/* How many levels do we need for this block nr? */
 477	while (block_nr) {
 478		levels_needed += 1;
 479		block_nr >>= BM_RTREE_LEVEL_SHIFT;
 480	}
 481
 482	/* Make sure the rtree has enough levels */
 483	for (i = zone->levels; i < levels_needed; i++) {
 484		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 485					&zone->nodes);
 486		if (!node)
 487			return -ENOMEM;
 488
 489		node->data[0] = (unsigned long)zone->rtree;
 490		zone->rtree = node;
 491		zone->levels += 1;
 492	}
 493
 494	/* Allocate new block */
 495	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
 496	if (!block)
 497		return -ENOMEM;
 498
 499	/* Now walk the rtree to insert the block */
 500	node = zone->rtree;
 501	dst = &zone->rtree;
 502	block_nr = zone->blocks;
 503	for (i = zone->levels; i > 0; i--) {
 504		int index;
 505
 506		if (!node) {
 507			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 508						&zone->nodes);
 509			if (!node)
 510				return -ENOMEM;
 511			*dst = node;
 512		}
 513
 514		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 515		index &= BM_RTREE_LEVEL_MASK;
 516		dst = (struct rtree_node **)&((*dst)->data[index]);
 517		node = *dst;
 518	}
 519
 520	zone->blocks += 1;
 521	*dst = block;
 522
 523	return 0;
 524}
 525
 526static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 527			       int clear_nosave_free);
 528
 529/**
 530 * create_zone_bm_rtree - Create a radix tree for one zone.
 531 *
  532 * Allocates the mem_zone_bm_rtree structure and initializes it.
  533 * This function also allocates and builds the radix tree for the
 534 * zone.
 535 */
 536static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
 537						      int safe_needed,
 538						      struct chain_allocator *ca,
 539						      unsigned long start,
 540						      unsigned long end)
 541{
 542	struct mem_zone_bm_rtree *zone;
 543	unsigned int i, nr_blocks;
 544	unsigned long pages;
 545
 546	pages = end - start;
 547	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
 548	if (!zone)
 549		return NULL;
 550
 551	INIT_LIST_HEAD(&zone->nodes);
 552	INIT_LIST_HEAD(&zone->leaves);
 553	zone->start_pfn = start;
 554	zone->end_pfn = end;
 555	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 556
 557	for (i = 0; i < nr_blocks; i++) {
 558		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
 559			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
 560			return NULL;
 561		}
 562	}
 563
 564	return zone;
 565}
 566
 567/**
 568 * free_zone_bm_rtree - Free the memory of the radix tree.
 569 *
 570 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 571 * structure itself is not freed here nor are the rtree_node
 572 * structs.
 573 */
 574static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 575			       int clear_nosave_free)
 576{
 577	struct rtree_node *node;
 578
 579	list_for_each_entry(node, &zone->nodes, list)
 580		free_image_page(node->data, clear_nosave_free);
 581
 582	list_for_each_entry(node, &zone->leaves, list)
 583		free_image_page(node->data, clear_nosave_free);
 584}
 585
 586static void memory_bm_position_reset(struct memory_bitmap *bm)
 587{
 588	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
 589				  list);
 590	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 591				  struct rtree_node, list);
 592	bm->cur.node_pfn = 0;
 593	bm->cur.cur_pfn = BM_END_OF_MAP;
 594	bm->cur.node_bit = 0;
 595}
 596
 597static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 598
 599struct mem_extent {
 600	struct list_head hook;
 601	unsigned long start;
 602	unsigned long end;
 603};
 604
 605/**
 606 * free_mem_extents - Free a list of memory extents.
 607 * @list: List of extents to free.
 608 */
 609static void free_mem_extents(struct list_head *list)
 610{
 611	struct mem_extent *ext, *aux;
 612
 613	list_for_each_entry_safe(ext, aux, list, hook) {
 614		list_del(&ext->hook);
 615		kfree(ext);
 616	}
 617}
 618
 619/**
 620 * create_mem_extents - Create a list of memory extents.
 621 * @list: List to put the extents into.
 622 * @gfp_mask: Mask to use for memory allocations.
 623 *
 624 * The extents represent contiguous ranges of PFNs.
 625 */
 626static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 627{
 628	struct zone *zone;
 629
 630	INIT_LIST_HEAD(list);
 631
 632	for_each_populated_zone(zone) {
 633		unsigned long zone_start, zone_end;
 634		struct mem_extent *ext, *cur, *aux;
 635
 636		zone_start = zone->zone_start_pfn;
 637		zone_end = zone_end_pfn(zone);
 638
 639		list_for_each_entry(ext, list, hook)
 640			if (zone_start <= ext->end)
 641				break;
 642
 643		if (&ext->hook == list || zone_end < ext->start) {
 644			/* New extent is necessary */
 645			struct mem_extent *new_ext;
 646
 647			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
 648			if (!new_ext) {
 649				free_mem_extents(list);
 650				return -ENOMEM;
 651			}
 652			new_ext->start = zone_start;
 653			new_ext->end = zone_end;
 654			list_add_tail(&new_ext->hook, &ext->hook);
 655			continue;
 656		}
 657
 658		/* Merge this zone's range of PFNs with the existing one */
 659		if (zone_start < ext->start)
 660			ext->start = zone_start;
 661		if (zone_end > ext->end)
 662			ext->end = zone_end;
 663
 664		/* More merging may be possible */
 665		cur = ext;
 666		list_for_each_entry_safe_continue(cur, aux, list, hook) {
 667			if (zone_end < cur->start)
 668				break;
 669			if (zone_end < cur->end)
 670				ext->end = cur->end;
 671			list_del(&cur->hook);
 672			kfree(cur);
 673		}
 674	}
 675
 676	return 0;
 677}
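/*
 * Example of the merging above (made-up PFN ranges): visiting zones
 * [0, 100), [300, 400) and then [90, 310) first yields the extents
 * [0, 100) and [300, 400); the third zone overlaps both, so it extends
 * the first extent and swallows the second, leaving the single extent
 * [0, 400).
 */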
 678
 679/**
 680 * memory_bm_create - Allocate memory for a memory bitmap.
 681 */
 682static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
 683			    int safe_needed)
 684{
 685	struct chain_allocator ca;
 686	struct list_head mem_extents;
 687	struct mem_extent *ext;
 688	int error;
 689
 690	chain_init(&ca, gfp_mask, safe_needed);
 691	INIT_LIST_HEAD(&bm->zones);
 692
 693	error = create_mem_extents(&mem_extents, gfp_mask);
 694	if (error)
 695		return error;
 696
 697	list_for_each_entry(ext, &mem_extents, hook) {
 698		struct mem_zone_bm_rtree *zone;
 699
 700		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
 701					    ext->start, ext->end);
 702		if (!zone) {
 703			error = -ENOMEM;
 704			goto Error;
 705		}
 706		list_add_tail(&zone->list, &bm->zones);
 707	}
 708
 709	bm->p_list = ca.chain;
 710	memory_bm_position_reset(bm);
 711 Exit:
 712	free_mem_extents(&mem_extents);
 713	return error;
 714
 715 Error:
 716	bm->p_list = ca.chain;
 717	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 718	goto Exit;
 719}
 720
 721/**
 722 * memory_bm_free - Free memory occupied by the memory bitmap.
 723 * @bm: Memory bitmap.
 724 */
 725static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 726{
 727	struct mem_zone_bm_rtree *zone;
 728
 729	list_for_each_entry(zone, &bm->zones, list)
 730		free_zone_bm_rtree(zone, clear_nosave_free);
 731
 732	free_list_of_pages(bm->p_list, clear_nosave_free);
 733
 734	INIT_LIST_HEAD(&bm->zones);
 735}
 736
 737/**
 738 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 739 *
 740 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
  741 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 742 *
 743 * Walk the radix tree to find the page containing the bit that represents @pfn
 744 * and return the position of the bit in @addr and @bit_nr.
 745 */
 746static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 747			      void **addr, unsigned int *bit_nr)
 748{
 749	struct mem_zone_bm_rtree *curr, *zone;
 750	struct rtree_node *node;
 751	int i, block_nr;
 752
 753	zone = bm->cur.zone;
 754
 755	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
 756		goto zone_found;
 757
 758	zone = NULL;
 759
 760	/* Find the right zone */
 761	list_for_each_entry(curr, &bm->zones, list) {
 762		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
 763			zone = curr;
 764			break;
 765		}
 766	}
 767
 768	if (!zone)
 769		return -EFAULT;
 770
 771zone_found:
 772	/*
 773	 * We have found the zone. Now walk the radix tree to find the leaf node
 774	 * for our PFN.
 775	 */
 776
 777	/*
 778	 * If the zone we wish to scan is the current zone and the
 779	 * pfn falls into the current node then we do not need to walk
 780	 * the tree.
 781	 */
 782	node = bm->cur.node;
 783	if (zone == bm->cur.zone &&
 784	    ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
 785		goto node_found;
 786
 787	node      = zone->rtree;
 788	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
 789
 790	for (i = zone->levels; i > 0; i--) {
 791		int index;
 792
 793		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 794		index &= BM_RTREE_LEVEL_MASK;
 795		BUG_ON(node->data[index] == 0);
 796		node = (struct rtree_node *)node->data[index];
 797	}
 798
 799node_found:
 800	/* Update last position */
 801	bm->cur.zone = zone;
 802	bm->cur.node = node;
 803	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
 804	bm->cur.cur_pfn = pfn;
 805
 806	/* Set return values */
 807	*addr = node->data;
 808	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 809
 810	return 0;
 811}
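/*
 * Lookup arithmetic used above, with example numbers (4 KiB pages):
 * for pfn = 0x48013 in a zone with start_pfn = 0x40000, the
 * zone-relative PFN is 0x8013, so block_nr = 0x8013 >> 15 = 1 selects
 * the leaf page and *bit_nr = 0x8013 & 0x7fff = 0x13 is the bit within
 * it.
 */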
 812
 813static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 814{
 815	void *addr;
 816	unsigned int bit;
 817	int error;
 818
 819	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 820	BUG_ON(error);
 821	set_bit(bit, addr);
 822}
 823
 824static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 825{
 826	void *addr;
 827	unsigned int bit;
 828	int error;
 829
 830	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 831	if (!error)
 832		set_bit(bit, addr);
 833
 834	return error;
 835}
 836
 837static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 838{
 839	void *addr;
 840	unsigned int bit;
 841	int error;
 842
 843	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 844	BUG_ON(error);
 845	clear_bit(bit, addr);
 846}
 847
 848static void memory_bm_clear_current(struct memory_bitmap *bm)
 849{
 850	int bit;
 851
 852	bit = max(bm->cur.node_bit - 1, 0);
 853	clear_bit(bit, bm->cur.node->data);
 854}
 855
 856static unsigned long memory_bm_get_current(struct memory_bitmap *bm)
 857{
 858	return bm->cur.cur_pfn;
 859}
 860
 861static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 862{
 863	void *addr;
 864	unsigned int bit;
 865	int error;
 866
 867	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 868	BUG_ON(error);
 869	return test_bit(bit, addr);
 870}
 871
 872static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 873{
 874	void *addr;
 875	unsigned int bit;
 876
 877	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 878}
 879
 880/*
 881 * rtree_next_node - Jump to the next leaf node.
 882 *
 883 * Set the position to the beginning of the next node in the
 884 * memory bitmap. This is either the next node in the current
 885 * zone's radix tree or the first node in the radix tree of the
 886 * next zone.
 887 *
 888 * Return true if there is a next node, false otherwise.
 889 */
 890static bool rtree_next_node(struct memory_bitmap *bm)
 891{
 892	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
 893		bm->cur.node = list_entry(bm->cur.node->list.next,
 894					  struct rtree_node, list);
 895		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 896		bm->cur.node_bit  = 0;
 897		touch_softlockup_watchdog();
 898		return true;
 899	}
 900
  901	/* No more nodes, go to the next zone */
 902	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
 903		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 904				  struct mem_zone_bm_rtree, list);
 905		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 906					  struct rtree_node, list);
 907		bm->cur.node_pfn = 0;
 908		bm->cur.node_bit = 0;
 909		return true;
 910	}
 911
 912	/* No more zones */
 913	return false;
 914}
 915
 916/**
 917 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 918 * @bm: Memory bitmap.
 919 *
 920 * Starting from the last returned position this function searches for the next
 921 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 922 * set, BM_END_OF_MAP is returned.
 923 *
 924 * It is required to run memory_bm_position_reset() before the first call to
 925 * this function for the given memory bitmap.
 926 */
 927static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 928{
 929	unsigned long bits, pfn, pages;
 930	int bit;
 931
 932	do {
 933		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
 934		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
 935		bit	  = find_next_bit(bm->cur.node->data, bits,
 936					  bm->cur.node_bit);
 937		if (bit < bits) {
 938			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
 939			bm->cur.node_bit = bit + 1;
 940			bm->cur.cur_pfn = pfn;
 941			return pfn;
 942		}
 943	} while (rtree_next_node(bm));
 944
 945	bm->cur.cur_pfn = BM_END_OF_MAP;
 946	return BM_END_OF_MAP;
 947}
 948
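/*
 * Illustrative sketch (not part of the kernel sources): the canonical way
 * to walk every set bit in a memory bitmap is to reset the iterator and
 * then call memory_bm_next_pfn() until BM_END_OF_MAP is returned:
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		handle_pfn(pfn);	(handle_pfn() is a hypothetical callback)
 *
 * This is the pattern used by duplicate_memory_bitmap() and swsusp_free()
 * later in this file.
 */
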
 949/*
 950 * This structure represents a range of page frames the contents of which
 951 * should not be saved during hibernation.
 952 */
 953struct nosave_region {
 954	struct list_head list;
 955	unsigned long start_pfn;
 956	unsigned long end_pfn;
 957};
 958
 959static LIST_HEAD(nosave_regions);
 960
 961static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
 962{
 963	struct rtree_node *node;
 964
 965	list_for_each_entry(node, &zone->nodes, list)
 966		recycle_safe_page(node->data);
 967
 968	list_for_each_entry(node, &zone->leaves, list)
 969		recycle_safe_page(node->data);
 970}
 971
 972static void memory_bm_recycle(struct memory_bitmap *bm)
 973{
 974	struct mem_zone_bm_rtree *zone;
 975	struct linked_page *p_list;
 976
 977	list_for_each_entry(zone, &bm->zones, list)
 978		recycle_zone_bm_rtree(zone);
 979
 980	p_list = bm->p_list;
 981	while (p_list) {
 982		struct linked_page *lp = p_list;
 983
 984		p_list = lp->next;
 985		recycle_safe_page(lp);
 986	}
 987}
 988
 989/**
 990 * register_nosave_region - Register a region of unsaveable memory.
 991 *
 992 * Register a range of page frames the contents of which should not be saved
 993 * during hibernation (to be used in the early initialization code).
 994 */
 995void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
 996{
 997	struct nosave_region *region;
 998
 999	if (start_pfn >= end_pfn)
1000		return;
1001
1002	if (!list_empty(&nosave_regions)) {
1003		/* Try to extend the previous region (they should be sorted) */
1004		region = list_entry(nosave_regions.prev,
1005					struct nosave_region, list);
1006		if (region->end_pfn == start_pfn) {
1007			region->end_pfn = end_pfn;
1008			goto Report;
1009		}
1010	}
1011	/* This allocation cannot fail */
1012	region = memblock_alloc(sizeof(struct nosave_region),
1013				SMP_CACHE_BYTES);
1014	if (!region)
1015		panic("%s: Failed to allocate %zu bytes\n", __func__,
1016		      sizeof(struct nosave_region));
1017	region->start_pfn = start_pfn;
1018	region->end_pfn = end_pfn;
1019	list_add_tail(&region->list, &nosave_regions);
1020 Report:
1021	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
1022		(unsigned long long) start_pfn << PAGE_SHIFT,
1023		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
1024}
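
/*
 * Illustrative sketch (not part of the kernel sources): early architecture
 * setup code excludes firmware-owned ranges this way; the PFNs below are
 * made up for the example:
 *
 *	register_nosave_region(0x100, 0x200);
 *
 * Note that end_pfn is exclusive, so this covers page frames 0x100 through
 * 0x1ff, and adjacent registrations are merged by the code above.
 */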
1025
1026/*
1027 * Set bits in this map correspond to the page frames the contents of which
1028 * should not be saved during the suspend.
1029 */
1030static struct memory_bitmap *forbidden_pages_map;
1031
1032/* Set bits in this map correspond to free page frames. */
1033static struct memory_bitmap *free_pages_map;
1034
1035/*
1036 * Each page frame allocated for creating the image is marked by setting the
1037 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
1038 */
1039
1040void swsusp_set_page_free(struct page *page)
1041{
1042	if (free_pages_map)
1043		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
1044}
1045
1046static int swsusp_page_is_free(struct page *page)
1047{
1048	return free_pages_map ?
1049		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1050}
1051
1052void swsusp_unset_page_free(struct page *page)
1053{
1054	if (free_pages_map)
1055		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1056}
1057
1058static void swsusp_set_page_forbidden(struct page *page)
1059{
1060	if (forbidden_pages_map)
1061		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1062}
1063
1064int swsusp_page_is_forbidden(struct page *page)
1065{
1066	return forbidden_pages_map ?
1067		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1068}
1069
1070static void swsusp_unset_page_forbidden(struct page *page)
1071{
1072	if (forbidden_pages_map)
1073		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1074}
1075
1076/**
1077 * mark_nosave_pages - Mark pages that should not be saved.
1078 * @bm: Memory bitmap.
1079 *
1080 * Set the bits in @bm that correspond to the page frames the contents of which
1081 * should not be saved.
1082 */
1083static void mark_nosave_pages(struct memory_bitmap *bm)
1084{
1085	struct nosave_region *region;
1086
1087	if (list_empty(&nosave_regions))
1088		return;
1089
1090	list_for_each_entry(region, &nosave_regions, list) {
1091		unsigned long pfn;
1092
1093		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1094			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1095			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1096				- 1);
1097
1098		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1099			if (pfn_valid(pfn)) {
1100				/*
1101				 * It is safe to ignore the result of
1102				 * mem_bm_set_bit_check() here, since we won't
1103				 * touch the PFNs for which the error is
1104				 * returned anyway.
1105				 */
1106				mem_bm_set_bit_check(bm, pfn);
1107			}
1108	}
1109}
1110
1111/**
1112 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1113 *
1114 * Create bitmaps needed for marking page frames that should not be saved and
1115 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1116 * only modified if everything goes well, because we don't want the bits to be
1117 * touched before both bitmaps are set up.
1118 */
1119int create_basic_memory_bitmaps(void)
1120{
1121	struct memory_bitmap *bm1, *bm2;
1122	int error;
1123
1124	if (forbidden_pages_map && free_pages_map)
1125		return 0;
1126	else
1127		BUG_ON(forbidden_pages_map || free_pages_map);
1128
1129	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1130	if (!bm1)
1131		return -ENOMEM;
1132
1133	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1134	if (error)
1135		goto Free_first_object;
1136
1137	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1138	if (!bm2)
1139		goto Free_first_bitmap;
1140
1141	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1142	if (error)
1143		goto Free_second_object;
1144
1145	forbidden_pages_map = bm1;
1146	free_pages_map = bm2;
1147	mark_nosave_pages(forbidden_pages_map);
1148
1149	pr_debug("Basic memory bitmaps created\n");
1150
1151	return 0;
1152
1153 Free_second_object:
1154	kfree(bm2);
1155 Free_first_bitmap:
1156	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1157 Free_first_object:
1158	kfree(bm1);
1159	return -ENOMEM;
1160}
1161
1162/**
1163 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1164 *
1165 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1166 * auxiliary pointers are necessary so that the bitmaps themselves are not
1167 * referred to while they are being freed.
1168 */
1169void free_basic_memory_bitmaps(void)
1170{
1171	struct memory_bitmap *bm1, *bm2;
1172
1173	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1174		return;
1175
1176	bm1 = forbidden_pages_map;
1177	bm2 = free_pages_map;
1178	forbidden_pages_map = NULL;
1179	free_pages_map = NULL;
1180	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1181	kfree(bm1);
1182	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1183	kfree(bm2);
1184
1185	pr_debug("Basic memory bitmaps freed\n");
1186}
1187
1188static void clear_or_poison_free_page(struct page *page)
1189{
1190	if (page_poisoning_enabled_static())
1191		__kernel_poison_pages(page, 1);
1192	else if (want_init_on_free())
1193		clear_highpage(page);
1194}
1195
1196void clear_or_poison_free_pages(void)
1197{
1198	struct memory_bitmap *bm = free_pages_map;
1199	unsigned long pfn;
1200
1201	if (WARN_ON(!(free_pages_map)))
1202		return;
1203
1204	if (page_poisoning_enabled() || want_init_on_free()) {
1205		memory_bm_position_reset(bm);
1206		pfn = memory_bm_next_pfn(bm);
1207		while (pfn != BM_END_OF_MAP) {
1208			if (pfn_valid(pfn))
1209				clear_or_poison_free_page(pfn_to_page(pfn));
1210
1211			pfn = memory_bm_next_pfn(bm);
1212		}
1213		memory_bm_position_reset(bm);
1214		pr_info("free pages cleared after restore\n");
1215	}
1216}
1217
1218/**
1219 * snapshot_additional_pages - Estimate the number of extra pages needed.
1220 * @zone: Memory zone to carry out the computation for.
1221 *
1222 * Estimate the number of additional pages needed for setting up the hibernation
1223 * image data structures for @zone (usually, the returned value is greater than
1224 * the exact number).
1225 */
1226unsigned int snapshot_additional_pages(struct zone *zone)
1227{
1228	unsigned int rtree, nodes;
1229
1230	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1231	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1232			      LINKED_PAGE_DATA_SIZE);
1233	while (nodes > 1) {
1234		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1235		rtree += nodes;
1236	}
1237
1238	return 2 * rtree;
1239}
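
/*
 * Worked example of the estimate above, assuming 4 KiB pages (so
 * BM_BITS_PER_BLOCK is 32768): a zone spanning 262144 pages (1 GiB) needs
 * 8 leaf pages, roughly one page for the struct rtree_node entries and one
 * page of inner nodes, i.e. rtree == 10, and the function returns
 * 2 * 10 == 20 to leave room for two bitmaps of that shape.
 */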
1240
1241/*
1242 * Touch the watchdog for every WD_PAGE_COUNT pages.
1243 */
1244#define WD_PAGE_COUNT	(128*1024)
1245
1246static void mark_free_pages(struct zone *zone)
1247{
1248	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
1249	unsigned long flags;
1250	unsigned int order, t;
1251	struct page *page;
1252
1253	if (zone_is_empty(zone))
1254		return;
1255
1256	spin_lock_irqsave(&zone->lock, flags);
1257
1258	max_zone_pfn = zone_end_pfn(zone);
1259	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1260		if (pfn_valid(pfn)) {
1261			page = pfn_to_page(pfn);
1262
1263			if (!--page_count) {
1264				touch_nmi_watchdog();
1265				page_count = WD_PAGE_COUNT;
1266			}
1267
1268			if (page_zone(page) != zone)
1269				continue;
1270
1271			if (!swsusp_page_is_forbidden(page))
1272				swsusp_unset_page_free(page);
1273		}
1274
1275	for_each_migratetype_order(order, t) {
1276		list_for_each_entry(page,
1277				&zone->free_area[order].free_list[t], buddy_list) {
1278			unsigned long i;
1279
1280			pfn = page_to_pfn(page);
1281			for (i = 0; i < (1UL << order); i++) {
1282				if (!--page_count) {
1283					touch_nmi_watchdog();
1284					page_count = WD_PAGE_COUNT;
1285				}
1286				swsusp_set_page_free(pfn_to_page(pfn + i));
1287			}
1288		}
1289	}
1290	spin_unlock_irqrestore(&zone->lock, flags);
1291}
1292
1293#ifdef CONFIG_HIGHMEM
1294/**
1295 * count_free_highmem_pages - Compute the total number of free highmem pages.
1296 *
1297 * The returned number is system-wide.
1298 */
1299static unsigned int count_free_highmem_pages(void)
1300{
1301	struct zone *zone;
1302	unsigned int cnt = 0;
1303
1304	for_each_populated_zone(zone)
1305		if (is_highmem(zone))
1306			cnt += zone_page_state(zone, NR_FREE_PAGES);
1307
1308	return cnt;
1309}
1310
1311/**
1312 * saveable_highmem_page - Check if a highmem page is saveable.
1313 *
1314 * Determine whether a highmem page should be included in a hibernation image.
1315 *
1316 * We should save the page if it isn't Nosave, NosaveFree or Reserved,
1317 * and it isn't part of a free chunk of pages.
1318 */
1319static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1320{
1321	struct page *page;
1322
1323	if (!pfn_valid(pfn))
1324		return NULL;
1325
1326	page = pfn_to_online_page(pfn);
1327	if (!page || page_zone(page) != zone)
1328		return NULL;
1329
1330	BUG_ON(!PageHighMem(page));
1331
1332	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1333		return NULL;
1334
1335	if (PageReserved(page) || PageOffline(page))
1336		return NULL;
1337
1338	if (page_is_guard(page))
1339		return NULL;
1340
1341	return page;
1342}
1343
1344/**
1345 * count_highmem_pages - Compute the total number of saveable highmem pages.
1346 */
1347static unsigned int count_highmem_pages(void)
1348{
1349	struct zone *zone;
1350	unsigned int n = 0;
1351
1352	for_each_populated_zone(zone) {
1353		unsigned long pfn, max_zone_pfn;
1354
1355		if (!is_highmem(zone))
1356			continue;
1357
1358		mark_free_pages(zone);
1359		max_zone_pfn = zone_end_pfn(zone);
1360		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1361			if (saveable_highmem_page(zone, pfn))
1362				n++;
1363	}
1364	return n;
1365}
1366#else
1367static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1368{
1369	return NULL;
1370}
1371#endif /* CONFIG_HIGHMEM */
1372
1373/**
1374 * saveable_page - Check if the given page is saveable.
1375 *
1376 * Determine whether a non-highmem page should be included in a hibernation
1377 * image.
1378 *
1379 * We should save the page if it isn't Nosave, and is not in the range
1380 * of pages statically defined as 'unsaveable', and it isn't part of
1381 * a free chunk of pages.
1382 */
1383static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1384{
1385	struct page *page;
1386
1387	if (!pfn_valid(pfn))
1388		return NULL;
1389
1390	page = pfn_to_online_page(pfn);
1391	if (!page || page_zone(page) != zone)
1392		return NULL;
1393
1394	BUG_ON(PageHighMem(page));
1395
1396	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1397		return NULL;
1398
1399	if (PageOffline(page))
1400		return NULL;
1401
1402	if (PageReserved(page)
1403	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1404		return NULL;
1405
1406	if (page_is_guard(page))
1407		return NULL;
1408
1409	return page;
1410}
1411
1412/**
1413 * count_data_pages - Compute the total number of saveable non-highmem pages.
1414 */
1415static unsigned int count_data_pages(void)
1416{
1417	struct zone *zone;
1418	unsigned long pfn, max_zone_pfn;
1419	unsigned int n = 0;
1420
1421	for_each_populated_zone(zone) {
1422		if (is_highmem(zone))
1423			continue;
1424
1425		mark_free_pages(zone);
1426		max_zone_pfn = zone_end_pfn(zone);
1427		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1428			if (saveable_page(zone, pfn))
1429				n++;
1430	}
1431	return n;
1432}
1433
1434/*
1435 * This is needed because copy_page and memcpy are not usable for copying
1436 * task structs. Returns true if the page was filled with only zeros,
1437 * otherwise false.
1438 */
1439static inline bool do_copy_page(long *dst, long *src)
1440{
1441	long z = 0;
1442	int n;
1443
1444	for (n = PAGE_SIZE / sizeof(long); n; n--) {
1445		z |= *src;
1446		*dst++ = *src++;
1447	}
1448	return !z;
1449}
1450
1451/**
1452 * safe_copy_page - Copy a page in a safe way.
1453 *
1454 * Check if the page we are going to copy is marked as present in the kernel
1455 * page tables.  This is always the case if neither CONFIG_DEBUG_PAGEALLOC nor
1456 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is set, in which case kernel_page_present()
1457 * always returns 'true'.  Returns 'true' if the page was entirely composed of
1458 * zeros, 'false' otherwise.
1459 */
1460static bool safe_copy_page(void *dst, struct page *s_page)
1461{
1462	bool zeros_only;
1463
1464	if (kernel_page_present(s_page)) {
1465		zeros_only = do_copy_page(dst, page_address(s_page));
1466	} else {
1467		hibernate_map_page(s_page);
1468		zeros_only = do_copy_page(dst, page_address(s_page));
1469		hibernate_unmap_page(s_page);
1470	}
1471	return zeros_only;
1472}
1473
1474#ifdef CONFIG_HIGHMEM
1475static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1476{
1477	return is_highmem(zone) ?
1478		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1479}
1480
1481static bool copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1482{
1483	struct page *s_page, *d_page;
1484	void *src, *dst;
1485	bool zeros_only;
1486
1487	s_page = pfn_to_page(src_pfn);
1488	d_page = pfn_to_page(dst_pfn);
1489	if (PageHighMem(s_page)) {
1490		src = kmap_local_page(s_page);
1491		dst = kmap_local_page(d_page);
1492		zeros_only = do_copy_page(dst, src);
1493		kunmap_local(dst);
1494		kunmap_local(src);
1495	} else {
1496		if (PageHighMem(d_page)) {
1497			/*
1498			 * The page pointed to by src may contain some kernel
1499			 * data modified by kmap_local_page().
1500			 */
1501			zeros_only = safe_copy_page(buffer, s_page);
1502			dst = kmap_local_page(d_page);
1503			copy_page(dst, buffer);
1504			kunmap_local(dst);
1505		} else {
1506			zeros_only = safe_copy_page(page_address(d_page), s_page);
1507		}
1508	}
1509	return zeros_only;
1510}
1511#else
1512#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1513
1514static inline int copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1515{
1516	return safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1517				pfn_to_page(src_pfn));
1518}
1519#endif /* CONFIG_HIGHMEM */
1520
1521/*
1522 * copy_data_pages() copies each saveable page into a page frame pulled from
1523 * @copy_bm; a page entirely filled with zeros is instead marked in @zero_bm.
1524 *
1525 * Returns the number of pages copied.
1526 */
1527static unsigned long copy_data_pages(struct memory_bitmap *copy_bm,
1528			    struct memory_bitmap *orig_bm,
1529			    struct memory_bitmap *zero_bm)
1530{
1531	unsigned long copied_pages = 0;
1532	struct zone *zone;
1533	unsigned long pfn, copy_pfn;
1534
1535	for_each_populated_zone(zone) {
1536		unsigned long max_zone_pfn;
1537
1538		mark_free_pages(zone);
1539		max_zone_pfn = zone_end_pfn(zone);
1540		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1541			if (page_is_saveable(zone, pfn))
1542				memory_bm_set_bit(orig_bm, pfn);
1543	}
1544	memory_bm_position_reset(orig_bm);
1545	memory_bm_position_reset(copy_bm);
1546	copy_pfn = memory_bm_next_pfn(copy_bm);
1547	for (;;) {
1548		pfn = memory_bm_next_pfn(orig_bm);
1549		if (unlikely(pfn == BM_END_OF_MAP))
1550			break;
1551		if (copy_data_page(copy_pfn, pfn)) {
1552			memory_bm_set_bit(zero_bm, pfn);
1553			/* Keep this copy_pfn for the next page that is not full of zeros */
1554			continue;
1555		}
1556		copied_pages++;
1557		copy_pfn = memory_bm_next_pfn(copy_bm);
1558	}
1559	return copied_pages;
1560}
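
/*
 * For example, if 1000 saveable pages are found and 100 of them turn out to
 * be entirely zero-filled, those 100 PFNs are only marked in @zero_bm, no
 * page frame from @copy_bm is consumed for them, and the function returns
 * 900.
 */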
1561
1562/* Total number of image pages */
1563static unsigned int nr_copy_pages;
1564/* Number of pages needed for saving the original pfns of the image pages */
1565static unsigned int nr_meta_pages;
1566/* Number of zero pages */
1567static unsigned int nr_zero_pages;
1568
1569/*
1570 * Numbers of normal and highmem page frames allocated for hibernation image
1571 * before suspending devices.
1572 */
1573static unsigned int alloc_normal, alloc_highmem;
1574/*
1575 * Memory bitmap used for marking saveable pages (during hibernation) or
1576 * hibernation image pages (during restore)
1577 */
1578static struct memory_bitmap orig_bm;
1579/*
1580 * Memory bitmap used during hibernation for marking allocated page frames that
1581 * will contain copies of saveable pages.  During restore it is initially used
1582 * for marking hibernation image pages, but then the set bits from it are
1583 * duplicated in @orig_bm and it is released.  On highmem systems it is next
1584 * used for marking "safe" highmem pages, but it has to be reinitialized for
1585 * this purpose.
1586 */
1587static struct memory_bitmap copy_bm;
1588
1589/* Memory bitmap which tracks which saveable pages were zero filled. */
1590static struct memory_bitmap zero_bm;
1591
1592/**
1593 * swsusp_free - Free pages allocated for hibernation image.
1594 *
1595 * Image pages are allocated before snapshot creation, so they need to be
1596 * released after resume.
1597 */
1598void swsusp_free(void)
1599{
1600	unsigned long fb_pfn, fr_pfn;
1601
1602	if (!forbidden_pages_map || !free_pages_map)
1603		goto out;
1604
1605	memory_bm_position_reset(forbidden_pages_map);
1606	memory_bm_position_reset(free_pages_map);
1607
1608loop:
1609	fr_pfn = memory_bm_next_pfn(free_pages_map);
1610	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1611
1612	/*
1613	 * Find the next bit set in both bitmaps. This is guaranteed to
1614	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1615	 */
1616	do {
1617		if (fb_pfn < fr_pfn)
1618			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1619		if (fr_pfn < fb_pfn)
1620			fr_pfn = memory_bm_next_pfn(free_pages_map);
1621	} while (fb_pfn != fr_pfn);
1622
1623	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1624		struct page *page = pfn_to_page(fr_pfn);
1625
1626		memory_bm_clear_current(forbidden_pages_map);
1627		memory_bm_clear_current(free_pages_map);
1628		hibernate_restore_unprotect_page(page_address(page));
1629		__free_page(page);
1630		goto loop;
1631	}
1632
1633out:
1634	nr_copy_pages = 0;
1635	nr_meta_pages = 0;
1636	nr_zero_pages = 0;
1637	restore_pblist = NULL;
1638	buffer = NULL;
1639	alloc_normal = 0;
1640	alloc_highmem = 0;
1641	hibernate_restore_protection_end();
1642}
1643
1644/* Helper functions used for the shrinking of memory. */
1645
1646#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1647
1648/**
1649 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1650 * @nr_pages: Number of page frames to allocate.
1651 * @mask: GFP flags to use for the allocation.
1652 *
1653 * Return value: Number of page frames actually allocated
1654 */
1655static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1656{
1657	unsigned long nr_alloc = 0;
1658
1659	while (nr_pages > 0) {
1660		struct page *page;
1661
1662		page = alloc_image_page(mask);
1663		if (!page)
1664			break;
1665		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1666		if (PageHighMem(page))
1667			alloc_highmem++;
1668		else
1669			alloc_normal++;
1670		nr_pages--;
1671		nr_alloc++;
1672	}
1673
1674	return nr_alloc;
1675}
1676
1677static unsigned long preallocate_image_memory(unsigned long nr_pages,
1678					      unsigned long avail_normal)
1679{
1680	unsigned long alloc;
1681
1682	if (avail_normal <= alloc_normal)
1683		return 0;
1684
1685	alloc = avail_normal - alloc_normal;
1686	if (nr_pages < alloc)
1687		alloc = nr_pages;
1688
1689	return preallocate_image_pages(alloc, GFP_IMAGE);
1690}
1691
1692#ifdef CONFIG_HIGHMEM
1693static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1694{
1695	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1696}
1697
1698/**
1699 *  __fraction - Compute (an approximation of) x * (multiplier / base).
1700 */
1701static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1702{
1703	return div64_u64(x * multiplier, base);
1704}
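
/*
 * For instance, __fraction(1000, 1, 4) computes div64_u64(1000 * 1, 4) and
 * yields 250; the multiplication is done in 64 bits so that the intermediate
 * product does not overflow for realistic page counts.
 */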
1705
1706static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1707						  unsigned long highmem,
1708						  unsigned long total)
1709{
1710	unsigned long alloc = __fraction(nr_pages, highmem, total);
1711
1712	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1713}
1714#else /* CONFIG_HIGHMEM */
1715static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1716{
1717	return 0;
1718}
1719
1720static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1721							 unsigned long highmem,
1722							 unsigned long total)
1723{
1724	return 0;
1725}
1726#endif /* CONFIG_HIGHMEM */
1727
1728/**
1729 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1730 */
1731static unsigned long free_unnecessary_pages(void)
1732{
1733	unsigned long save, to_free_normal, to_free_highmem, free;
1734
1735	save = count_data_pages();
1736	if (alloc_normal >= save) {
1737		to_free_normal = alloc_normal - save;
1738		save = 0;
1739	} else {
1740		to_free_normal = 0;
1741		save -= alloc_normal;
1742	}
1743	save += count_highmem_pages();
1744	if (alloc_highmem >= save) {
1745		to_free_highmem = alloc_highmem - save;
1746	} else {
1747		to_free_highmem = 0;
1748		save -= alloc_highmem;
1749		if (to_free_normal > save)
1750			to_free_normal -= save;
1751		else
1752			to_free_normal = 0;
1753	}
1754	free = to_free_normal + to_free_highmem;
1755
1756	memory_bm_position_reset(&copy_bm);
1757
1758	while (to_free_normal > 0 || to_free_highmem > 0) {
1759		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1760		struct page *page = pfn_to_page(pfn);
1761
1762		if (PageHighMem(page)) {
1763			if (!to_free_highmem)
1764				continue;
1765			to_free_highmem--;
1766			alloc_highmem--;
1767		} else {
1768			if (!to_free_normal)
1769				continue;
1770			to_free_normal--;
1771			alloc_normal--;
1772		}
1773		memory_bm_clear_bit(&copy_bm, pfn);
1774		swsusp_unset_page_forbidden(page);
1775		swsusp_unset_page_free(page);
1776		__free_page(page);
1777	}
1778
1779	return free;
1780}
1781
1782/**
1783 * minimum_image_size - Estimate the minimum acceptable size of an image.
1784 * @saveable: Number of saveable pages in the system.
1785 *
1786 * We do not want to try to free memory too aggressively, so estimate the
1787 * minimum acceptable size of a hibernation image to use as the lower limit for
1788 * preallocating memory.
1789 *
1790 * We assume that the minimum image size should be proportional to
1791 *
1792 * [number of saveable pages] - [number of pages that can be freed in theory]
1793 *
1794 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1795 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1796 */
1797static unsigned long minimum_image_size(unsigned long saveable)
1798{
1799	unsigned long size;
1800
1801	size = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B)
1802		+ global_node_page_state(NR_ACTIVE_ANON)
1803		+ global_node_page_state(NR_INACTIVE_ANON)
1804		+ global_node_page_state(NR_ACTIVE_FILE)
1805		+ global_node_page_state(NR_INACTIVE_FILE);
1806
1807	return saveable <= size ? 0 : saveable - size;
1808}
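
/*
 * For example, with 500000 saveable pages and a reclaimable total (slab +
 * anonymous + file LRU) of 350000 pages, the estimated minimum image size
 * is 150000 pages; if the reclaimable total meets or exceeds the saveable
 * count, the estimate is 0.
 */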
1809
1810/**
1811 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1812 *
1813 * To create a hibernation image it is necessary to make a copy of every page
1814 * frame in use.  We also need a number of page frames to be free during
1815 * hibernation for allocations made while saving the image and for device
1816 * drivers, in case they need to allocate memory from their hibernation
1817 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
1818 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
1819 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1820 * total number of available page frames and allocate at least
1821 *
1822 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
1823 *  - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1824 *
1825 * of them, which corresponds to the maximum size of a hibernation image.
1826 *
1827 * If image_size is set below the number following from the above formula,
1828 * the preallocation of memory is continued until the total number of saveable
1829 * pages in the system is below the requested image size or the minimum
1830 * acceptable image size returned by minimum_image_size(), whichever is greater.
1831 */
1832int hibernate_preallocate_memory(void)
1833{
1834	struct zone *zone;
1835	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1836	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1837	ktime_t start, stop;
1838	int error;
1839
1840	pr_info("Preallocating image memory\n");
1841	start = ktime_get();
1842
1843	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1844	if (error) {
1845		pr_err("Cannot allocate original bitmap\n");
1846		goto err_out;
1847	}
1848
1849	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1850	if (error) {
1851		pr_err("Cannot allocate copy bitmap\n");
1852		goto err_out;
1853	}
1854
1855	error = memory_bm_create(&zero_bm, GFP_IMAGE, PG_ANY);
1856	if (error) {
1857		pr_err("Cannot allocate zero bitmap\n");
1858		goto err_out;
1859	}
1860
1861	alloc_normal = 0;
1862	alloc_highmem = 0;
1863	nr_zero_pages = 0;
1864
1865	/* Count the number of saveable data pages. */
1866	save_highmem = count_highmem_pages();
1867	saveable = count_data_pages();
1868
1869	/*
1870	 * Compute the total number of page frames we can use (count) and the
1871	 * number of pages needed for image metadata (size).
1872	 */
1873	count = saveable;
1874	saveable += save_highmem;
1875	highmem = save_highmem;
1876	size = 0;
1877	for_each_populated_zone(zone) {
1878		size += snapshot_additional_pages(zone);
1879		if (is_highmem(zone))
1880			highmem += zone_page_state(zone, NR_FREE_PAGES);
1881		else
1882			count += zone_page_state(zone, NR_FREE_PAGES);
1883	}
1884	avail_normal = count;
1885	count += highmem;
1886	count -= totalreserve_pages;
1887
1888	/* Compute the maximum number of saveable pages to leave in memory. */
1889	max_size = (count - (size + PAGES_FOR_IO)) / 2
1890			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1891	/* Compute the desired number of image pages specified by image_size. */
1892	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1893	if (size > max_size)
1894		size = max_size;
1895	/*
1896	 * If the desired number of image pages is at least as large as the
1897	 * current number of saveable pages in memory, allocate page frames for
1898	 * the image and we're done.
1899	 */
1900	if (size >= saveable) {
1901		pages = preallocate_image_highmem(save_highmem);
1902		pages += preallocate_image_memory(saveable - pages, avail_normal);
1903		goto out;
1904	}
1905
1906	/* Estimate the minimum size of the image. */
1907	pages = minimum_image_size(saveable);
1908	/*
1909	 * To avoid excessive pressure on the normal zone, leave room in it to
1910	 * accommodate an image of the minimum size (unless it's already too
1911	 * small, in which case don't preallocate pages from it at all).
1912	 */
1913	if (avail_normal > pages)
1914		avail_normal -= pages;
1915	else
1916		avail_normal = 0;
1917	if (size < pages)
1918		size = min_t(unsigned long, pages, max_size);
1919
1920	/*
1921	 * Let the memory management subsystem know that we're going to need a
1922	 * large number of page frames to allocate and make it free some memory.
1923	 * NOTE: If this is not done, performance will be hurt badly in some
1924	 * test cases.
1925	 */
1926	shrink_all_memory(saveable - size);
1927
1928	/*
1929	 * The number of saveable pages in memory was too high, so apply some
1930	 * pressure to decrease it.  First, make room for the largest possible
1931	 * image and fail if that doesn't work.  Next, try to decrease the size
1932	 * of the image as much as indicated by 'size' using allocations from
1933	 * highmem and non-highmem zones separately.
1934	 */
1935	pages_highmem = preallocate_image_highmem(highmem / 2);
1936	alloc = count - max_size;
1937	if (alloc > pages_highmem)
1938		alloc -= pages_highmem;
1939	else
1940		alloc = 0;
1941	pages = preallocate_image_memory(alloc, avail_normal);
1942	if (pages < alloc) {
1943		/* We have exhausted non-highmem pages, try highmem. */
1944		alloc -= pages;
1945		pages += pages_highmem;
1946		pages_highmem = preallocate_image_highmem(alloc);
1947		if (pages_highmem < alloc) {
1948			pr_err("Image allocation is %lu pages short\n",
1949				alloc - pages_highmem);
1950			goto err_out;
1951		}
1952		pages += pages_highmem;
1953		/*
1954		 * size is the desired number of saveable pages to leave in
1955		 * memory, so try to preallocate (all memory - size) pages.
1956		 */
1957		alloc = (count - pages) - size;
1958		pages += preallocate_image_highmem(alloc);
1959	} else {
1960		/*
1961		 * There are approximately max_size saveable pages at this point
1962		 * and we want to reduce this number down to size.
1963		 */
1964		alloc = max_size - size;
1965		size = preallocate_highmem_fraction(alloc, highmem, count);
1966		pages_highmem += size;
1967		alloc -= size;
1968		size = preallocate_image_memory(alloc, avail_normal);
1969		pages_highmem += preallocate_image_highmem(alloc - size);
1970		pages += pages_highmem + size;
1971	}
1972
1973	/*
1974	 * We only need as many page frames for the image as there are saveable
1975	 * pages in memory, but we have allocated more.  Release the excessive
1976	 * ones now.
1977	 */
1978	pages -= free_unnecessary_pages();
1979
1980 out:
1981	stop = ktime_get();
1982	pr_info("Allocated %lu pages for snapshot\n", pages);
1983	swsusp_show_speed(start, stop, pages, "Allocated");
1984
1985	return 0;
1986
1987 err_out:
1988	swsusp_free();
1989	return -ENOMEM;
1990}
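
/*
 * Worked example of the max_size formula above (the numbers are made up and
 * PAGES_FOR_IO is assumed to be 1024 with 4 KiB pages): with count ==
 * 1000000 usable page frames, size == 2000 metadata pages and
 * reserved_size == 4 MiB (1024 pages),
 *
 *	max_size = (1000000 - (2000 + 1024)) / 2 - 2 * 1024 = 496440
 *
 * so the image could contain at most 496440 pages.
 */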
1991
1992#ifdef CONFIG_HIGHMEM
1993/**
1994 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1995 *
1996 * Compute the number of non-highmem pages that will be necessary for creating
1997 * copies of highmem pages.
1998 */
1999static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
2000{
2001	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
2002
2003	if (free_highmem >= nr_highmem)
2004		nr_highmem = 0;
2005	else
2006		nr_highmem -= free_highmem;
2007
2008	return nr_highmem;
2009}
2010#else
2011static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
2012#endif /* CONFIG_HIGHMEM */
2013
2014/**
2015 * enough_free_mem - Check if there is enough free memory for the image.
2016 */
2017static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
2018{
2019	struct zone *zone;
2020	unsigned int free = alloc_normal;
2021
2022	for_each_populated_zone(zone)
2023		if (!is_highmem(zone))
2024			free += zone_page_state(zone, NR_FREE_PAGES);
2025
2026	nr_pages += count_pages_for_highmem(nr_highmem);
2027	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
2028		 nr_pages, PAGES_FOR_IO, free);
2029
2030	return free > nr_pages + PAGES_FOR_IO;
2031}
2032
2033#ifdef CONFIG_HIGHMEM
2034/**
2035 * get_highmem_buffer - Allocate a buffer for highmem pages.
2036 *
2037 * If there are some highmem pages in the hibernation image, we may need a
2038 * buffer to copy them and/or load their data.
2039 */
2040static inline int get_highmem_buffer(int safe_needed)
2041{
2042	buffer = get_image_page(GFP_ATOMIC, safe_needed);
2043	return buffer ? 0 : -ENOMEM;
2044}
2045
2046/**
2047 * alloc_highmem_pages - Allocate some highmem pages for the image.
2048 *
2049 * Try to allocate as many pages as needed, but if the number of free highmem
2050 * pages is less than that, allocate them all.
2051 */
2052static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2053					       unsigned int nr_highmem)
2054{
2055	unsigned int to_alloc = count_free_highmem_pages();
2056
2057	if (to_alloc > nr_highmem)
2058		to_alloc = nr_highmem;
2059
2060	nr_highmem -= to_alloc;
2061	while (to_alloc-- > 0) {
2062		struct page *page;
2063
2064		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
2065		memory_bm_set_bit(bm, page_to_pfn(page));
2066	}
2067	return nr_highmem;
2068}
2069#else
2070static inline int get_highmem_buffer(int safe_needed) { return 0; }
2071
2072static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
2073					       unsigned int n) { return 0; }
2074#endif /* CONFIG_HIGHMEM */
2075
2076/**
2077 * swsusp_alloc - Allocate memory for hibernation image.
2078 *
2079 * We first try to allocate as many highmem pages as there are
2080 * saveable highmem pages in the system.  If that fails, we allocate
2081 * non-highmem pages for the copies of the remaining highmem ones.
2082 *
2083 * In this approach it is likely that the copies of highmem pages will
2084 * also be located in the high memory, because of the way in which
2085 * copy_data_pages() works.
2086 */
2087static int swsusp_alloc(struct memory_bitmap *copy_bm,
2088			unsigned int nr_pages, unsigned int nr_highmem)
2089{
2090	if (nr_highmem > 0) {
2091		if (get_highmem_buffer(PG_ANY))
2092			goto err_out;
2093		if (nr_highmem > alloc_highmem) {
2094			nr_highmem -= alloc_highmem;
2095			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
2096		}
2097	}
2098	if (nr_pages > alloc_normal) {
2099		nr_pages -= alloc_normal;
2100		while (nr_pages-- > 0) {
2101			struct page *page;
2102
2103			page = alloc_image_page(GFP_ATOMIC);
2104			if (!page)
2105				goto err_out;
2106			memory_bm_set_bit(copy_bm, page_to_pfn(page));
2107		}
2108	}
2109
2110	return 0;
2111
2112 err_out:
2113	swsusp_free();
2114	return -ENOMEM;
2115}
2116
2117asmlinkage __visible int swsusp_save(void)
2118{
2119	unsigned int nr_pages, nr_highmem;
2120
2121	pr_info("Creating image:\n");
2122
2123	drain_local_pages(NULL);
2124	nr_pages = count_data_pages();
2125	nr_highmem = count_highmem_pages();
2126	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
2127
2128	if (!enough_free_mem(nr_pages, nr_highmem)) {
2129		pr_err("Not enough free memory\n");
2130		return -ENOMEM;
2131	}
2132
2133	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
2134		pr_err("Memory allocation failed\n");
2135		return -ENOMEM;
2136	}
2137
2138	/*
2139	 * During allocating of suspend pagedir, new cold pages may appear.
2140	 * Kill them.
2141	 */
2142	drain_local_pages(NULL);
2143	nr_copy_pages = copy_data_pages(&copy_bm, &orig_bm, &zero_bm);
2144
2145	/*
2146	 * End of critical section. From now on, we can write to memory,
2147	 * but we should not touch disk. This specially means we must _not_
2148	 * touch swap space! Except we must write out our image of course.
2149	 */
2150	nr_pages += nr_highmem;
2151	/* We don't actually copy the zero pages */
2152	nr_zero_pages = nr_pages - nr_copy_pages;
2153	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2154
2155	pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
2156
2157	return 0;
2158}
2159
2160#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2161static int init_header_complete(struct swsusp_info *info)
2162{
2163	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2164	info->version_code = LINUX_VERSION_CODE;
2165	return 0;
2166}
2167
2168static const char *check_image_kernel(struct swsusp_info *info)
2169{
2170	if (info->version_code != LINUX_VERSION_CODE)
2171		return "kernel version";
2172	if (strcmp(info->uts.sysname, init_utsname()->sysname))
2173		return "system type";
2174	if (strcmp(info->uts.release, init_utsname()->release))
2175		return "kernel release";
2176	if (strcmp(info->uts.version, init_utsname()->version))
2177		return "version";
2178	if (strcmp(info->uts.machine, init_utsname()->machine))
2179		return "machine";
2180	return NULL;
2181}
2182#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2183
2184unsigned long snapshot_get_image_size(void)
2185{
2186	return nr_copy_pages + nr_meta_pages + 1;
2187}
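
/*
 * For example, with nr_copy_pages == 100000, no zero pages, 8-byte PFN
 * entries and 4 KiB pages, nr_meta_pages is DIV_ROUND_UP(100000 * 8, 4096)
 * == 196, so the image spans 100000 + 196 + 1 pages, the extra page being
 * the header.
 */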
2188
2189static int init_header(struct swsusp_info *info)
2190{
2191	memset(info, 0, sizeof(struct swsusp_info));
2192	info->num_physpages = get_num_physpages();
2193	info->image_pages = nr_copy_pages;
2194	info->pages = snapshot_get_image_size();
2195	info->size = info->pages;
2196	info->size <<= PAGE_SHIFT;
2197	return init_header_complete(info);
2198}
2199
2200#define ENCODED_PFN_ZERO_FLAG ((unsigned long)1 << (BITS_PER_LONG - 1))
2201#define ENCODED_PFN_MASK (~ENCODED_PFN_ZERO_FLAG)
2202
2203/**
2204 * pack_pfns - Prepare PFNs for saving.
2205 * @bm: Memory bitmap.
2206 * @buf: Memory buffer to store the PFNs in.
2207 * @zero_bm: Memory bitmap containing PFNs of zero pages.
2208 *
2209 * PFNs corresponding to set bits in @bm are stored in the area of memory
2210 * pointed to by @buf (1 page at a time). Pages which were filled with only
2211 * zeros will have the highest bit set in the packed format to distinguish
2212 * them from PFNs which will be contained in the image file.
2213 */
2214static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm,
2215		struct memory_bitmap *zero_bm)
2216{
2217	int j;
2218
2219	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2220		buf[j] = memory_bm_next_pfn(bm);
2221		if (unlikely(buf[j] == BM_END_OF_MAP))
2222			break;
2223		if (memory_bm_test_bit(zero_bm, buf[j]))
2224			buf[j] |= ENCODED_PFN_ZERO_FLAG;
2225	}
2226}
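
/*
 * Encoding example: on a 64-bit system ENCODED_PFN_ZERO_FLAG is bit 63, so
 * a zero-filled page at PFN 0x1234 is stored as 0x8000000000001234 while an
 * ordinary data page at the same PFN is stored as 0x1234.
 */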
2227
2228/**
2229 * snapshot_read_next - Get the address to read the next image page from.
2230 * @handle: Snapshot handle to be used for the reading.
2231 *
2232 * On the first call, @handle should point to a zeroed snapshot_handle
2233 * structure.  The structure gets populated then and a pointer to it should be
2234 * passed to this function every next time.
2235 *
2236 * On success, the function returns a positive number.  Then, the caller
2237 * is allowed to read up to the returned number of bytes from the memory
2238 * location computed by the data_of() macro.
2239 *
2240 * The function returns 0 to indicate the end of the data stream condition,
2241 * and negative numbers are returned on errors.  If that happens, the structure
2242 * pointed to by @handle is not updated and should not be used any more.
2243 */
2244int snapshot_read_next(struct snapshot_handle *handle)
2245{
2246	if (handle->cur > nr_meta_pages + nr_copy_pages)
2247		return 0;
2248
2249	if (!buffer) {
2250		/* This makes the buffer be freed by swsusp_free() */
2251		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2252		if (!buffer)
2253			return -ENOMEM;
2254	}
2255	if (!handle->cur) {
2256		int error;
2257
2258		error = init_header((struct swsusp_info *)buffer);
2259		if (error)
2260			return error;
2261		handle->buffer = buffer;
2262		memory_bm_position_reset(&orig_bm);
2263		memory_bm_position_reset(&copy_bm);
2264	} else if (handle->cur <= nr_meta_pages) {
2265		clear_page(buffer);
2266		pack_pfns(buffer, &orig_bm, &zero_bm);
2267	} else {
2268		struct page *page;
2269
2270		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2271		if (PageHighMem(page)) {
2272			/*
2273			 * Highmem pages are copied to the buffer,
2274			 * because we can't return with a kmapped
2275			 * highmem page (we may not be called again).
2276			 */
2277			void *kaddr;
2278
2279			kaddr = kmap_atomic(page);
2280			copy_page(buffer, kaddr);
2281			kunmap_atomic(kaddr);
2282			handle->buffer = buffer;
2283		} else {
2284			handle->buffer = page_address(page);
2285		}
2286	}
2287	handle->cur++;
2288	return PAGE_SIZE;
2289}
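
/*
 * Illustrative caller sketch (simplified; the real consumers live in the
 * swap and user-space snapshot code): the image is streamed out one page at
 * a time until 0 or an error is returned:
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0)
 *		write_page_out(data_of(handle));	(write_page_out() is hypothetical)
 *	if (ret < 0)
 *		return ret;
 */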
2290
2291static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2292				    struct memory_bitmap *src)
2293{
2294	unsigned long pfn;
2295
2296	memory_bm_position_reset(src);
2297	pfn = memory_bm_next_pfn(src);
2298	while (pfn != BM_END_OF_MAP) {
2299		memory_bm_set_bit(dst, pfn);
2300		pfn = memory_bm_next_pfn(src);
2301	}
2302}
2303
2304/**
2305 * mark_unsafe_pages - Mark pages that were used before hibernation.
2306 *
2307 * Mark the pages that cannot be used for storing the image during restoration,
2308 * because they conflict with the pages that had been used before hibernation.
2309 */
2310static void mark_unsafe_pages(struct memory_bitmap *bm)
2311{
2312	unsigned long pfn;
2313
2314	/* Clear the "free"/"unsafe" bit for all PFNs */
2315	memory_bm_position_reset(free_pages_map);
2316	pfn = memory_bm_next_pfn(free_pages_map);
2317	while (pfn != BM_END_OF_MAP) {
2318		memory_bm_clear_current(free_pages_map);
2319		pfn = memory_bm_next_pfn(free_pages_map);
2320	}
2321
2322	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2323	duplicate_memory_bitmap(free_pages_map, bm);
2324
2325	allocated_unsafe_pages = 0;
2326}
2327
2328static int check_header(struct swsusp_info *info)
2329{
2330	const char *reason;
2331
2332	reason = check_image_kernel(info);
2333	if (!reason && info->num_physpages != get_num_physpages())
2334		reason = "memory size";
2335	if (reason) {
2336		pr_err("Image mismatch: %s\n", reason);
2337		return -EPERM;
2338	}
2339	return 0;
2340}
2341
2342/**
2343 * load_header - Check the image header and copy the data from it.
2344 */
2345static int load_header(struct swsusp_info *info)
2346{
2347	int error;
2348
2349	restore_pblist = NULL;
2350	error = check_header(info);
2351	if (!error) {
2352		nr_copy_pages = info->image_pages;
2353		nr_meta_pages = info->pages - info->image_pages - 1;
2354	}
2355	return error;
2356}
2357
2358/**
2359 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2360 * @bm: Memory bitmap.
2361 * @buf: Area of memory containing the PFNs.
2362 * @zero_bm: Memory bitmap with the zero PFNs marked.
2363 *
2364 * For each element of the array pointed to by @buf (1 page at a time), set the
2365 * corresponding bit in @bm. If the page was originally populated with only
2366 * zeros then a corresponding bit will also be set in @zero_bm.
2367 */
2368static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm,
2369		struct memory_bitmap *zero_bm)
2370{
2371	unsigned long decoded_pfn;
2372	bool zero;
2373	int j;
2374
2375	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2376		if (unlikely(buf[j] == BM_END_OF_MAP))
2377			break;
2378
2379		zero = !!(buf[j] & ENCODED_PFN_ZERO_FLAG);
2380		decoded_pfn = buf[j] & ENCODED_PFN_MASK;
2381		if (pfn_valid(decoded_pfn) && memory_bm_pfn_present(bm, decoded_pfn)) {
2382			memory_bm_set_bit(bm, decoded_pfn);
2383			if (zero) {
2384				memory_bm_set_bit(zero_bm, decoded_pfn);
2385				nr_zero_pages++;
2386			}
2387		} else {
2388			if (!pfn_valid(decoded_pfn))
2389				pr_err(FW_BUG "Memory map mismatch at 0x%llx after hibernation\n",
2390				       (unsigned long long)PFN_PHYS(decoded_pfn));
2391			return -EFAULT;
2392		}
2393	}
2394
2395	return 0;
2396}
2397
2398#ifdef CONFIG_HIGHMEM
2399/*
2400 * struct highmem_pbe is used for creating the list of highmem pages that
2401 * should be restored atomically during the resume from disk, because the page
2402 * frames they have occupied before the suspend are in use.
2403 */
2404struct highmem_pbe {
2405	struct page *copy_page;	/* data is here now */
2406	struct page *orig_page;	/* data was here before the suspend */
2407	struct highmem_pbe *next;
2408};
2409
2410/*
2411 * List of highmem PBEs needed for restoring the highmem pages that were
2412 * allocated before the suspend and included in the suspend image, but have
2413 * also been allocated by the "resume" kernel, so their contents cannot be
2414 * written directly to their "original" page frames.
2415 */
2416static struct highmem_pbe *highmem_pblist;
2417
2418/**
2419 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2420 * @bm: Memory bitmap.
2421 *
2422 * The bits in @bm that correspond to image pages are assumed to be set.
2423 */
2424static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2425{
2426	unsigned long pfn;
2427	unsigned int cnt = 0;
2428
2429	memory_bm_position_reset(bm);
2430	pfn = memory_bm_next_pfn(bm);
2431	while (pfn != BM_END_OF_MAP) {
2432		if (PageHighMem(pfn_to_page(pfn)))
2433			cnt++;
2434
2435		pfn = memory_bm_next_pfn(bm);
2436	}
2437	return cnt;
2438}
2439
2440static unsigned int safe_highmem_pages;
2441
2442static struct memory_bitmap *safe_highmem_bm;
2443
2444/**
2445 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2446 * @bm: Pointer to an uninitialized memory bitmap structure.
2447 * @nr_highmem_p: Pointer to the number of highmem image pages.
2448 *
2449 * Try to allocate as many highmem pages as there are highmem image pages
2450 * (@nr_highmem_p points to the variable containing the number of highmem image
2451 * pages).  The pages that are "safe" (i.e. will not be overwritten when the
2452 * hibernation image is restored entirely) have the corresponding bits set in
2453 * @bm (it must be uninitialized).
2454 *
2455 * NOTE: This function should not be called if there are no highmem image pages.
2456 */
2457static int prepare_highmem_image(struct memory_bitmap *bm,
2458				 unsigned int *nr_highmem_p)
2459{
2460	unsigned int to_alloc;
2461
2462	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2463		return -ENOMEM;
2464
2465	if (get_highmem_buffer(PG_SAFE))
2466		return -ENOMEM;
2467
2468	to_alloc = count_free_highmem_pages();
2469	if (to_alloc > *nr_highmem_p)
2470		to_alloc = *nr_highmem_p;
2471	else
2472		*nr_highmem_p = to_alloc;
2473
2474	safe_highmem_pages = 0;
2475	while (to_alloc-- > 0) {
2476		struct page *page;
2477
2478		page = alloc_page(__GFP_HIGHMEM);
2479		if (!swsusp_page_is_free(page)) {
2480			/* The page is "safe", set its bit in the bitmap */
2481			memory_bm_set_bit(bm, page_to_pfn(page));
2482			safe_highmem_pages++;
2483		}
2484		/* Mark the page as allocated */
2485		swsusp_set_page_forbidden(page);
2486		swsusp_set_page_free(page);
2487	}
2488	memory_bm_position_reset(bm);
2489	safe_highmem_bm = bm;
2490	return 0;
2491}
2492
2493static struct page *last_highmem_page;
2494
2495/**
2496 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2497 *
2498 * For a given highmem image page get a buffer that suspend_write_next() should
2499 * return to its caller to write to.
2500 *
2501 * If the page is to be saved to its "original" page frame or a copy of
2502 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2503 * the copy of the page is to be made in normal memory, so the address of
2504 * the copy is returned.
2505 *
2506 * If @buffer is returned, the caller of suspend_write_next() will write
2507 * the page's contents to @buffer, so they will have to be copied to the
2508 * right location on the next call to suspend_write_next() and it is done
2509 * with the help of copy_last_highmem_page().  For this purpose, if
2510 * @buffer is returned, @last_highmem_page is set to the page to which
2511 * the data will have to be copied from @buffer.
2512 */
2513static void *get_highmem_page_buffer(struct page *page,
2514				     struct chain_allocator *ca)
2515{
2516	struct highmem_pbe *pbe;
2517	void *kaddr;
2518
2519	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2520		/*
2521		 * We have allocated the "original" page frame and we can
2522		 * use it directly to store the loaded page.
2523		 */
2524		last_highmem_page = page;
2525		return buffer;
2526	}
2527	/*
2528	 * The "original" page frame has not been allocated and we have to
2529	 * use a "safe" page frame to store the loaded page.
2530	 */
2531	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2532	if (!pbe) {
2533		swsusp_free();
2534		return ERR_PTR(-ENOMEM);
2535	}
2536	pbe->orig_page = page;
2537	if (safe_highmem_pages > 0) {
2538		struct page *tmp;
2539
2540		/* Copy of the page will be stored in high memory */
2541		kaddr = buffer;
2542		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2543		safe_highmem_pages--;
2544		last_highmem_page = tmp;
2545		pbe->copy_page = tmp;
2546	} else {
2547		/* Copy of the page will be stored in normal memory */
2548		kaddr = __get_safe_page(ca->gfp_mask);
2549		if (!kaddr)
2550			return ERR_PTR(-ENOMEM);
2551		pbe->copy_page = virt_to_page(kaddr);
2552	}
2553	pbe->next = highmem_pblist;
2554	highmem_pblist = pbe;
2555	return kaddr;
2556}
2557
2558/**
2559 * copy_last_highmem_page - Copy the most recent highmem image page.
2560 *
2561 * Copy the contents of a highmem image page from @buffer, where the caller of
2562 * snapshot_write_next() has stored them, to the right location represented by
2563 * @last_highmem_page.
2564 */
2565static void copy_last_highmem_page(void)
2566{
2567	if (last_highmem_page) {
2568		void *dst;
2569
2570		dst = kmap_atomic(last_highmem_page);
2571		copy_page(dst, buffer);
2572		kunmap_atomic(dst);
2573		last_highmem_page = NULL;
2574	}
2575}
2576
2577static inline int last_highmem_page_copied(void)
2578{
2579	return !last_highmem_page;
2580}
2581
2582static inline void free_highmem_data(void)
2583{
2584	if (safe_highmem_bm)
2585		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2586
2587	if (buffer)
2588		free_image_page(buffer, PG_UNSAFE_CLEAR);
2589}
2590#else
2591static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2592
2593static inline int prepare_highmem_image(struct memory_bitmap *bm,
2594					unsigned int *nr_highmem_p) { return 0; }
2595
2596static inline void *get_highmem_page_buffer(struct page *page,
2597					    struct chain_allocator *ca)
2598{
2599	return ERR_PTR(-EINVAL);
2600}
2601
2602static inline void copy_last_highmem_page(void) {}
2603static inline int last_highmem_page_copied(void) { return 1; }
2604static inline void free_highmem_data(void) {}
2605#endif /* CONFIG_HIGHMEM */
2606
2607#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2608
2609/**
2610 * prepare_image - Make room for loading hibernation image.
2611 * @new_bm: Uninitialized memory bitmap structure.
2612 * @bm: Memory bitmap with unsafe pages marked.
2613 * @zero_bm: Memory bitmap containing the zero pages.
2614 *
2615 * Use @bm to mark the pages that will be overwritten in the process of
2616 * restoring the system memory state from the suspend image ("unsafe" pages)
2617 * and allocate memory for the image.
2618 *
2619 * The idea is to allocate a new memory bitmap first and then allocate
2620 * as many pages as needed for image data, but without specifying what those
2621 * pages will be used for just yet.  Instead, we mark them all as allocated and
2622 * create a list of "safe" pages to be used later.  On systems with high
2623 * memory a list of "safe" highmem pages is created too.
2624 *
2625 * Because it was not known which pages were unsafe when @zero_bm was created,
2626 * make a copy of it and recreate it within safe pages.
2627 */
2628static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm,
2629		struct memory_bitmap *zero_bm)
2630{
2631	unsigned int nr_pages, nr_highmem;
2632	struct memory_bitmap tmp;
2633	struct linked_page *lp;
2634	int error;
2635
2636	/* If there is no highmem, the buffer will not be necessary */
2637	free_image_page(buffer, PG_UNSAFE_CLEAR);
2638	buffer = NULL;
2639
2640	nr_highmem = count_highmem_image_pages(bm);
2641	mark_unsafe_pages(bm);
2642
2643	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2644	if (error)
2645		goto Free;
2646
2647	duplicate_memory_bitmap(new_bm, bm);
2648	memory_bm_free(bm, PG_UNSAFE_KEEP);
2649
2650	/* Make a copy of zero_bm so it can be created in safe pages */
2651	error = memory_bm_create(&tmp, GFP_ATOMIC, PG_SAFE);
2652	if (error)
2653		goto Free;
2654
2655	duplicate_memory_bitmap(&tmp, zero_bm);
2656	memory_bm_free(zero_bm, PG_UNSAFE_KEEP);
2657
2658	/* Recreate zero_bm in safe pages */
2659	error = memory_bm_create(zero_bm, GFP_ATOMIC, PG_SAFE);
2660	if (error)
2661		goto Free;
2662
2663	duplicate_memory_bitmap(zero_bm, &tmp);
2664	memory_bm_free(&tmp, PG_UNSAFE_CLEAR);
2665	/* At this point zero_bm is in safe pages and it can be used for restoring. */
2666
2667	if (nr_highmem > 0) {
2668		error = prepare_highmem_image(bm, &nr_highmem);
2669		if (error)
2670			goto Free;
2671	}
2672	/*
2673	 * Reserve some safe pages for potential later use.
2674	 *
2675	 * NOTE: This way we make sure there will be enough safe pages for the
2676	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2677	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2678	 *
2679 * nr_copy_pages cannot be less than allocated_unsafe_pages, either.
2680	 */
2681	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2682	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2683	while (nr_pages > 0) {
2684		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2685		if (!lp) {
2686			error = -ENOMEM;
2687			goto Free;
2688		}
2689		lp->next = safe_pages_list;
2690		safe_pages_list = lp;
2691		nr_pages--;
2692	}
2693	/* Preallocate memory for the image */
2694	nr_pages = (nr_zero_pages + nr_copy_pages) - nr_highmem - allocated_unsafe_pages;
2695	while (nr_pages > 0) {
2696		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2697		if (!lp) {
2698			error = -ENOMEM;
2699			goto Free;
2700		}
2701		if (!swsusp_page_is_free(virt_to_page(lp))) {
2702			/* The page is "safe", add it to the list */
2703			lp->next = safe_pages_list;
2704			safe_pages_list = lp;
2705		}
2706		/* Mark the page as allocated */
2707		swsusp_set_page_forbidden(virt_to_page(lp));
2708		swsusp_set_page_free(virt_to_page(lp));
2709		nr_pages--;
2710	}
2711	return 0;
2712
2713 Free:
2714	swsusp_free();
2715	return error;
2716}
2717
2718/**
2719 * get_buffer - Get the address to store the next image data page.
2720 *
2721 * Get the address that snapshot_write_next() should return to its caller to
2722 * write to.
2723 */
2724static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2725{
2726	struct pbe *pbe;
2727	struct page *page;
2728	unsigned long pfn = memory_bm_next_pfn(bm);
2729
2730	if (pfn == BM_END_OF_MAP)
2731		return ERR_PTR(-EFAULT);
2732
2733	page = pfn_to_page(pfn);
2734	if (PageHighMem(page))
2735		return get_highmem_page_buffer(page, ca);
2736
2737	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2738		/*
2739		 * We have allocated the "original" page frame and we can
2740		 * use it directly to store the loaded page.
2741		 */
2742		return page_address(page);
2743
2744	/*
2745	 * The "original" page frame has not been allocated and we have to
2746	 * use a "safe" page frame to store the loaded page.
2747	 */
2748	pbe = chain_alloc(ca, sizeof(struct pbe));
2749	if (!pbe) {
2750		swsusp_free();
2751		return ERR_PTR(-ENOMEM);
2752	}
2753	pbe->orig_address = page_address(page);
2754	pbe->address = __get_safe_page(ca->gfp_mask);
2755	if (!pbe->address)
2756		return ERR_PTR(-ENOMEM);
2757	pbe->next = restore_pblist;
2758	restore_pblist = pbe;
2759	return pbe->address;
2760}
2761
2762/**
2763 * snapshot_write_next - Get the address to store the next image page.
2764 * @handle: Snapshot handle structure to guide the writing.
2765 *
2766 * On the first call, @handle should point to a zeroed snapshot_handle
2767 * structure.  The structure is populated during that call, and a pointer to
2768 * it should be passed to this function in every subsequent call.
2769 *
2770 * On success, the function returns a positive number.  Then, the caller
2771 * is allowed to write up to the returned number of bytes to the memory
2772 * location computed by the data_of() macro.
2773 *
2774 * The function returns 0 to indicate the "end of file" condition.  Negative
2775 * numbers are returned on errors, in which case the structure pointed to by
2776 * @handle is not updated and should not be used any more.
2777 */
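/*
 * A minimal sketch of the expected calling sequence (hypothetical caller;
 * the real ones are load_image() in swap.c and snapshot_write() in user.c,
 * and read_image_data() below is a made-up helper that reads up to the given
 * number of bytes of image data into the buffer):
 *
 *	int ret;
 *
 *	do {
 *		ret = snapshot_write_next(handle);
 *		if (ret > 0)
 *			ret = read_image_data(data_of(*handle), ret);
 *	} while (ret > 0);
 *	snapshot_write_finalize(handle);
 *	if (!ret && !snapshot_image_loaded(handle))
 *		ret = -ENODATA;
 */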
2778int snapshot_write_next(struct snapshot_handle *handle)
2779{
2780	static struct chain_allocator ca;
2781	int error;
2782
2783next:
2784	/* Check if we have already loaded the entire image */
2785	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages)
2786		return 0;
2787
2788	if (!handle->cur) {
2789		if (!buffer)
2790			/* Allocated this way, the buffer will be freed by swsusp_free() */
2791			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2792
2793		if (!buffer)
2794			return -ENOMEM;
2795
2796		handle->buffer = buffer;
2797	} else if (handle->cur == 1) {
2798		error = load_header(buffer);
2799		if (error)
2800			return error;
2801
2802		safe_pages_list = NULL;
2803
2804		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2805		if (error)
2806			return error;
2807
2808		error = memory_bm_create(&zero_bm, GFP_ATOMIC, PG_ANY);
2809		if (error)
2810			return error;
2811
2812		nr_zero_pages = 0;
2813
2814		hibernate_restore_protection_begin();
2815	} else if (handle->cur <= nr_meta_pages + 1) {
2816		error = unpack_orig_pfns(buffer, &copy_bm, &zero_bm);
2817		if (error)
2818			return error;
2819
2820		if (handle->cur == nr_meta_pages + 1) {
2821			error = prepare_image(&orig_bm, &copy_bm, &zero_bm);
2822			if (error)
2823				return error;
2824
2825			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2826			memory_bm_position_reset(&orig_bm);
2827			memory_bm_position_reset(&zero_bm);
2828			restore_pblist = NULL;
2829			handle->buffer = get_buffer(&orig_bm, &ca);
2830			if (IS_ERR(handle->buffer))
2831				return PTR_ERR(handle->buffer);
2832		}
2833	} else {
2834		copy_last_highmem_page();
2835		hibernate_restore_protect_page(handle->buffer);
2836		handle->buffer = get_buffer(&orig_bm, &ca);
2837		if (IS_ERR(handle->buffer))
2838			return PTR_ERR(handle->buffer);
2839	}
2840	handle->sync_read = (handle->buffer == buffer);
2841	handle->cur++;
2842
2843	/* Zero pages were not included in the image; memset the buffer and move on. */
2844	if (handle->cur > nr_meta_pages + 1 &&
2845	    memory_bm_test_bit(&zero_bm, memory_bm_get_current(&orig_bm))) {
2846		memset(handle->buffer, 0, PAGE_SIZE);
2847		goto next;
2848	}
2849
2850	return PAGE_SIZE;
2851}
2852
2853/**
2854 * snapshot_write_finalize - Complete the loading of a hibernation image.
2855 *
2856 * Must be called after the last call to snapshot_write_next() in case the last
2857 * page in the image happens to be a highmem page and its contents should be
2858 * stored in highmem.  Additionally, it recycles bitmap memory that is no
2859 * longer needed.
2860 */
2861void snapshot_write_finalize(struct snapshot_handle *handle)
2862{
2863	copy_last_highmem_page();
2864	hibernate_restore_protect_page(handle->buffer);
2865	/* Do this only if the image has been loaded entirely */
2866	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages + nr_zero_pages) {
2867		memory_bm_recycle(&orig_bm);
2868		free_highmem_data();
2869	}
2870}
2871
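/*
 * snapshot_image_loaded - Check if the image has been loaded completely.
 *
 * Return a nonzero value only if some data pages were present in the image,
 * the last highmem page (if any) has been copied, and snapshot_write_next()
 * has been called enough times to cover all metadata, data and zero pages.
 */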
2872int snapshot_image_loaded(struct snapshot_handle *handle)
2873{
2874	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2875			handle->cur <= nr_meta_pages + nr_copy_pages + nr_zero_pages);
2876}
2877
2878#ifdef CONFIG_HIGHMEM
2879/* Assumes that @buf is ready and points to a "safe" page */
2880static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2881				       void *buf)
2882{
2883	void *kaddr1, *kaddr2;
2884
2885	kaddr1 = kmap_atomic(p1);
2886	kaddr2 = kmap_atomic(p2);
2887	copy_page(buf, kaddr1);
2888	copy_page(kaddr1, kaddr2);
2889	copy_page(kaddr2, buf);
2890	kunmap_atomic(kaddr2);
2891	kunmap_atomic(kaddr1);
2892}
2893
2894/**
2895 * restore_highmem - Put highmem image pages into their original locations.
2896 *
2897 * For each highmem page that was in use before hibernation and is included
2898 * in the image, and has also been allocated by the "restore" kernel, swap
2899 * its current contents with the previous (i.e. "before hibernation") ones.
2900 *
2901 * If the restore eventually fails, we can call this function once again and
2902 * restore the highmem state as seen by the restore kernel.
2903 */
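/*
 * Note that all swapping goes through the single "safe" bounce page allocated
 * below: for each PBE, swap_two_pages_data() saves the current contents of
 * the copy page in @buf, copies the original page over the copy page, and
 * then writes the saved contents back to the original page.
 */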
2904int restore_highmem(void)
2905{
2906	struct highmem_pbe *pbe = highmem_pblist;
2907	void *buf;
2908
2909	if (!pbe)
2910		return 0;
2911
2912	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2913	if (!buf)
2914		return -ENOMEM;
2915
2916	while (pbe) {
2917		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2918		pbe = pbe->next;
2919	}
2920	free_image_page(buf, PG_UNSAFE_CLEAR);
2921	return 0;
2922}
2923#endif /* CONFIG_HIGHMEM */