   1/*
   2 * linux/kernel/power/snapshot.c
   3 *
   4 * This file provides system snapshot/restore functionality for swsusp.
   5 *
   6 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
   7 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   8 *
   9 * This file is released under the GPLv2.
  10 *
  11 */
  12
  13#include <linux/version.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/suspend.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/spinlock.h>
  20#include <linux/kernel.h>
  21#include <linux/pm.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/bootmem.h>
  25#include <linux/syscalls.h>
  26#include <linux/console.h>
  27#include <linux/highmem.h>
  28#include <linux/list.h>
  29#include <linux/slab.h>
  30#include <linux/compiler.h>
  31#include <linux/ktime.h>
  32
  33#include <linux/uaccess.h>
  34#include <asm/mmu_context.h>
  35#include <asm/pgtable.h>
  36#include <asm/tlbflush.h>
  37#include <asm/io.h>
  38
  39#include "power.h"
  40
  41#ifdef CONFIG_DEBUG_RODATA
  42static bool hibernate_restore_protection;
  43static bool hibernate_restore_protection_active;
  44
  45void enable_restore_image_protection(void)
  46{
  47	hibernate_restore_protection = true;
  48}
  49
  50static inline void hibernate_restore_protection_begin(void)
  51{
  52	hibernate_restore_protection_active = hibernate_restore_protection;
  53}
  54
  55static inline void hibernate_restore_protection_end(void)
  56{
  57	hibernate_restore_protection_active = false;
  58}
  59
  60static inline void hibernate_restore_protect_page(void *page_address)
  61{
  62	if (hibernate_restore_protection_active)
  63		set_memory_ro((unsigned long)page_address, 1);
  64}
  65
  66static inline void hibernate_restore_unprotect_page(void *page_address)
  67{
  68	if (hibernate_restore_protection_active)
  69		set_memory_rw((unsigned long)page_address, 1);
  70}
  71#else
  72static inline void hibernate_restore_protection_begin(void) {}
  73static inline void hibernate_restore_protection_end(void) {}
  74static inline void hibernate_restore_protect_page(void *page_address) {}
  75static inline void hibernate_restore_unprotect_page(void *page_address) {}
  76#endif /* CONFIG_DEBUG_RODATA */
  77
  78static int swsusp_page_is_free(struct page *);
  79static void swsusp_set_page_forbidden(struct page *);
  80static void swsusp_unset_page_forbidden(struct page *);
  81
  82/*
  83 * Number of bytes to reserve for memory allocations made by device drivers
  84 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
  85 * cause image creation to fail (tunable via /sys/power/reserved_size).
  86 */
  87unsigned long reserved_size;
  88
  89void __init hibernate_reserved_size_init(void)
  90{
  91	reserved_size = SPARE_PAGES * PAGE_SIZE;
  92}
  93
  94/*
  95 * Preferred image size in bytes (tunable via /sys/power/image_size).
  96 * When it is set to N, swsusp will do its best to ensure the image
  97 * size will not exceed N bytes, but if that is impossible, it will
  98 * try to create the smallest image possible.
  99 */
 100unsigned long image_size;
 101
 102void __init hibernate_image_size_init(void)
 103{
 104	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
 105}
 106
 107/*
 108 * List of PBEs needed for restoring the pages that were allocated before
 109 * the suspend and included in the suspend image, but have also been
 110 * allocated by the "resume" kernel, so their contents cannot be written
 111 * directly to their "original" page frames.
 112 */
 113struct pbe *restore_pblist;
 114
 115/* struct linked_page is used to build chains of pages */
 116
 117#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
 118
 119struct linked_page {
 120	struct linked_page *next;
 121	char data[LINKED_PAGE_DATA_SIZE];
 122} __packed;
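
/*
 * Sizing note (assuming a typical 64-bit configuration with 4 KiB
 * pages): the 'next' pointer takes 8 bytes, so LINKED_PAGE_DATA_SIZE
 * works out to 4096 - 8 = 4088 bytes of payload per chain page.
 */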
 123
 124/*
  125 * List of "safe" pages (i.e. pages that were not used by the image kernel
 126 * before hibernation) that may be used as temporary storage for image kernel
 127 * memory contents.
 128 */
 129static struct linked_page *safe_pages_list;
 130
 131/* Pointer to an auxiliary buffer (1 page) */
 132static void *buffer;
 133
 134#define PG_ANY		0
 135#define PG_SAFE		1
 136#define PG_UNSAFE_CLEAR	1
 137#define PG_UNSAFE_KEEP	0
 138
 139static unsigned int allocated_unsafe_pages;
 140
 141/**
 142 * get_image_page - Allocate a page for a hibernation image.
 143 * @gfp_mask: GFP mask for the allocation.
  144 * @safe_needed: Get pages that were not used before hibernation (restore only).
 145 *
 146 * During image restoration, for storing the PBE list and the image data, we can
 147 * only use memory pages that do not conflict with the pages used before
 148 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 149 * using allocated_unsafe_pages.
 150 *
 151 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 152 * swsusp_free() can release it.
 153 */
 154static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 155{
 156	void *res;
 157
 158	res = (void *)get_zeroed_page(gfp_mask);
 159	if (safe_needed)
 160		while (res && swsusp_page_is_free(virt_to_page(res))) {
 161			/* The page is unsafe, mark it for swsusp_free() */
 162			swsusp_set_page_forbidden(virt_to_page(res));
 163			allocated_unsafe_pages++;
 164			res = (void *)get_zeroed_page(gfp_mask);
 165		}
 166	if (res) {
 167		swsusp_set_page_forbidden(virt_to_page(res));
 168		swsusp_set_page_free(virt_to_page(res));
 169	}
 170	return res;
 171}
 172
 173static void *__get_safe_page(gfp_t gfp_mask)
 174{
 175	if (safe_pages_list) {
 176		void *ret = safe_pages_list;
 177
 178		safe_pages_list = safe_pages_list->next;
 179		memset(ret, 0, PAGE_SIZE);
 180		return ret;
 181	}
 182	return get_image_page(gfp_mask, PG_SAFE);
 183}
 184
 185unsigned long get_safe_page(gfp_t gfp_mask)
 186{
 187	return (unsigned long)__get_safe_page(gfp_mask);
 188}
 189
 190static struct page *alloc_image_page(gfp_t gfp_mask)
 191{
 192	struct page *page;
 193
 194	page = alloc_page(gfp_mask);
 195	if (page) {
 196		swsusp_set_page_forbidden(page);
 197		swsusp_set_page_free(page);
 198	}
 199	return page;
 200}
 201
 202static void recycle_safe_page(void *page_address)
 203{
 204	struct linked_page *lp = page_address;
 205
 206	lp->next = safe_pages_list;
 207	safe_pages_list = lp;
 208}
 209
 210/**
 211 * free_image_page - Free a page allocated for hibernation image.
 212 * @addr: Address of the page to free.
 213 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 214 *
 215 * The page to free should have been allocated by get_image_page() (page flags
 216 * set by it are affected).
 217 */
 218static inline void free_image_page(void *addr, int clear_nosave_free)
 219{
 220	struct page *page;
 221
 222	BUG_ON(!virt_addr_valid(addr));
 223
 224	page = virt_to_page(addr);
 225
 226	swsusp_unset_page_forbidden(page);
 227	if (clear_nosave_free)
 228		swsusp_unset_page_free(page);
 229
 230	__free_page(page);
 231}
 232
 233static inline void free_list_of_pages(struct linked_page *list,
 234				      int clear_page_nosave)
 235{
 236	while (list) {
 237		struct linked_page *lp = list->next;
 238
 239		free_image_page(list, clear_page_nosave);
 240		list = lp;
 241	}
 242}
 243
 244/*
 245 * struct chain_allocator is used for allocating small objects out of
 246 * a linked list of pages called 'the chain'.
 247 *
  248 * The chain grows each time there is no room for a new object in
 249 * the current page.  The allocated objects cannot be freed individually.
 250 * It is only possible to free them all at once, by freeing the entire
 251 * chain.
 252 *
 253 * NOTE: The chain allocator may be inefficient if the allocated objects
 254 * are not much smaller than PAGE_SIZE.
 255 */
 256struct chain_allocator {
 257	struct linked_page *chain;	/* the chain */
 258	unsigned int used_space;	/* total size of objects allocated out
 259					   of the current page */
 260	gfp_t gfp_mask;		/* mask for allocating pages */
 261	int safe_needed;	/* if set, only "safe" pages are allocated */
 262};
 263
 264static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
 265		       int safe_needed)
 266{
 267	ca->chain = NULL;
 268	ca->used_space = LINKED_PAGE_DATA_SIZE;
 269	ca->gfp_mask = gfp_mask;
 270	ca->safe_needed = safe_needed;
 271}
 272
 273static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 274{
 275	void *ret;
 276
 277	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 278		struct linked_page *lp;
 279
 280		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
 281					get_image_page(ca->gfp_mask, PG_ANY);
 282		if (!lp)
 283			return NULL;
 284
 285		lp->next = ca->chain;
 286		ca->chain = lp;
 287		ca->used_space = 0;
 288	}
 289	ret = ca->chain->data + ca->used_space;
 290	ca->used_space += size;
 291	return ret;
 292}
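
/*
 * Illustrative use of the chain allocator (a sketch, not code taken
 * from elsewhere in this file).  Note that chain_init() primes
 * used_space to LINKED_PAGE_DATA_SIZE, so the very first chain_alloc()
 * call is always forced to grab a fresh page:
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	node = chain_alloc(&ca, sizeof(*node));
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 */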
 293
 294/**
 295 * Data types related to memory bitmaps.
 296 *
  297 * Memory bitmap is a structure consisting of many linked lists of
  298 * objects.  The main list's elements are of type struct zone_bitmap
  299 * and each of them corresponds to one zone.  For each zone bitmap
  300 * object there is a list of objects of type struct bm_block, each
  301 * representing one block of the bitmap in which information is stored.
 302 *
 303 * struct memory_bitmap contains a pointer to the main list of zone
 304 * bitmap objects, a struct bm_position used for browsing the bitmap,
 305 * and a pointer to the list of pages used for allocating all of the
 306 * zone bitmap objects and bitmap block objects.
 307 *
 308 * NOTE: It has to be possible to lay out the bitmap in memory
 309 * using only allocations of order 0.  Additionally, the bitmap is
  310 * designed to work with an arbitrary number of zones (this is over the
 311 * top for now, but let's avoid making unnecessary assumptions ;-).
 312 *
 313 * struct zone_bitmap contains a pointer to a list of bitmap block
 314 * objects and a pointer to the bitmap block object that has been
 315 * most recently used for setting bits.  Additionally, it contains the
 316 * PFNs that correspond to the start and end of the represented zone.
 317 *
 318 * struct bm_block contains a pointer to the memory page in which
  319 * information is stored (in the form of a block of the bitmap).
  320 * It also contains the PFNs that correspond to the start and end of
 321 * the represented memory area.
 322 *
 323 * The memory bitmap is organized as a radix tree to guarantee fast random
 324 * access to the bits. There is one radix tree for each zone (as returned
 325 * from create_mem_extents).
 326 *
 327 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 328 * two linked lists for the nodes of the tree, one for the inner nodes and
  329 * one for the leaf nodes. The linked leaf nodes are used for fast linear
  330 * access to the memory bitmap.
 331 *
 332 * The struct rtree_node represents one node of the radix tree.
 333 */
 334
 335#define BM_END_OF_MAP	(~0UL)
 336
 337#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
 338#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
 339#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
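
/*
 * Example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 * BM_BITS_PER_BLOCK == 32768, BM_BLOCK_SHIFT == 15 and
 * BM_BLOCK_MASK == 0x7fff, so one bitmap block covers 32768 page
 * frames, or 128 MiB of physical memory.
 */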
 340
 341/*
 342 * struct rtree_node is a wrapper struct to link the nodes
 343 * of the rtree together for easy linear iteration over
  344 * bits and easy freeing.
 345 */
 346struct rtree_node {
 347	struct list_head list;
 348	unsigned long *data;
 349};
 350
 351/*
 352 * struct mem_zone_bm_rtree represents a bitmap used for one
 353 * populated memory zone.
 354 */
 355struct mem_zone_bm_rtree {
 356	struct list_head list;		/* Link Zones together         */
 357	struct list_head nodes;		/* Radix Tree inner nodes      */
 358	struct list_head leaves;	/* Radix Tree leaves           */
 359	unsigned long start_pfn;	/* Zone start page frame       */
 360	unsigned long end_pfn;		/* Zone end page frame + 1     */
 361	struct rtree_node *rtree;	/* Radix Tree Root             */
 362	int levels;			/* Number of Radix Tree Levels */
 363	unsigned int blocks;		/* Number of Bitmap Blocks     */
 364};
 365
  366/* struct bm_position is used for browsing memory bitmaps */
 367
 368struct bm_position {
 369	struct mem_zone_bm_rtree *zone;
 370	struct rtree_node *node;
 371	unsigned long node_pfn;
 372	int node_bit;
 373};
 374
 375struct memory_bitmap {
 376	struct list_head zones;
 377	struct linked_page *p_list;	/* list of pages used to store zone
 378					   bitmap objects and bitmap block
 379					   objects */
 380	struct bm_position cur;	/* most recently used bit position */
 381};
 382
 383/* Functions that operate on memory bitmaps */
 384
 385#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
 386#if BITS_PER_LONG == 32
 387#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
 388#else
 389#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
 390#endif
 391#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
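
/*
 * Example (assuming a 64-bit kernel with 4 KiB pages):
 * BM_ENTRIES_PER_LEVEL == 512 and BM_RTREE_LEVEL_SHIFT == 9, so every
 * inner node holds 512 slots and each additional tree level multiplies
 * the number of addressable bitmap blocks by 512.
 */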
 392
 393/**
 394 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 395 *
 396 * This function is used to allocate inner nodes as well as the
  397 * leaf nodes of the radix tree. It also adds the node to the
 398 * corresponding linked list passed in by the *list parameter.
 399 */
 400static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
 401					   struct chain_allocator *ca,
 402					   struct list_head *list)
 403{
 404	struct rtree_node *node;
 405
 406	node = chain_alloc(ca, sizeof(struct rtree_node));
 407	if (!node)
 408		return NULL;
 409
 410	node->data = get_image_page(gfp_mask, safe_needed);
 411	if (!node->data)
 412		return NULL;
 413
 414	list_add_tail(&node->list, list);
 415
 416	return node;
 417}
 418
 419/**
  420 * add_rtree_block - Add a new leaf node to the radix tree.
  421 *
  422 * The leaf nodes need to be allocated in order to keep the leaves
 423 * linked list in order. This is guaranteed by the zone->blocks
 424 * counter.
 425 */
 426static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
 427			   int safe_needed, struct chain_allocator *ca)
 428{
 429	struct rtree_node *node, *block, **dst;
 430	unsigned int levels_needed, block_nr;
 431	int i;
 432
 433	block_nr = zone->blocks;
 434	levels_needed = 0;
 435
 436	/* How many levels do we need for this block nr? */
 437	while (block_nr) {
 438		levels_needed += 1;
 439		block_nr >>= BM_RTREE_LEVEL_SHIFT;
 440	}
 441
 442	/* Make sure the rtree has enough levels */
 443	for (i = zone->levels; i < levels_needed; i++) {
 444		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 445					&zone->nodes);
 446		if (!node)
 447			return -ENOMEM;
 448
 449		node->data[0] = (unsigned long)zone->rtree;
 450		zone->rtree = node;
 451		zone->levels += 1;
 452	}
 453
 454	/* Allocate new block */
 455	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
 456	if (!block)
 457		return -ENOMEM;
 458
 459	/* Now walk the rtree to insert the block */
 460	node = zone->rtree;
 461	dst = &zone->rtree;
 462	block_nr = zone->blocks;
 463	for (i = zone->levels; i > 0; i--) {
 464		int index;
 465
 466		if (!node) {
 467			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 468						&zone->nodes);
 469			if (!node)
 470				return -ENOMEM;
 471			*dst = node;
 472		}
 473
 474		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 475		index &= BM_RTREE_LEVEL_MASK;
 476		dst = (struct rtree_node **)&((*dst)->data[index]);
 477		node = *dst;
 478	}
 479
 480	zone->blocks += 1;
 481	*dst = block;
 482
 483	return 0;
 484}
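
/*
 * Index walk example (64-bit, 4 KiB pages, BM_RTREE_LEVEL_SHIFT == 9):
 * inserting block_nr 1000 into a two-level tree uses index
 * 1000 >> 9 == 1 at the top level and 1000 & BM_RTREE_LEVEL_MASK == 488
 * at the leaf level.
 */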
 485
 486static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 487			       int clear_nosave_free);
 488
 489/**
 490 * create_zone_bm_rtree - Create a radix tree for one zone.
 491 *
  492 * Allocates the mem_zone_bm_rtree structure and initializes it.
  493 * This function also allocates and builds the radix tree for the
 494 * zone.
 495 */
 496static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
 497						      int safe_needed,
 498						      struct chain_allocator *ca,
 499						      unsigned long start,
 500						      unsigned long end)
 501{
 502	struct mem_zone_bm_rtree *zone;
 503	unsigned int i, nr_blocks;
 504	unsigned long pages;
 505
 506	pages = end - start;
 507	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
 508	if (!zone)
 509		return NULL;
 510
 511	INIT_LIST_HEAD(&zone->nodes);
 512	INIT_LIST_HEAD(&zone->leaves);
 513	zone->start_pfn = start;
 514	zone->end_pfn = end;
 515	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 516
 517	for (i = 0; i < nr_blocks; i++) {
 518		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
 519			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
 520			return NULL;
 521		}
 522	}
 523
 524	return zone;
 525}
 526
 527/**
 528 * free_zone_bm_rtree - Free the memory of the radix tree.
 529 *
 530 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 531 * structure itself is not freed here nor are the rtree_node
 532 * structs.
 533 */
 534static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 535			       int clear_nosave_free)
 536{
 537	struct rtree_node *node;
 538
 539	list_for_each_entry(node, &zone->nodes, list)
 540		free_image_page(node->data, clear_nosave_free);
 541
 542	list_for_each_entry(node, &zone->leaves, list)
 543		free_image_page(node->data, clear_nosave_free);
 544}
 545
 546static void memory_bm_position_reset(struct memory_bitmap *bm)
 547{
 548	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
 549				  list);
 550	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 551				  struct rtree_node, list);
 552	bm->cur.node_pfn = 0;
 553	bm->cur.node_bit = 0;
 554}
 555
 556static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 557
 558struct mem_extent {
 559	struct list_head hook;
 560	unsigned long start;
 561	unsigned long end;
 562};
 563
 564/**
 565 * free_mem_extents - Free a list of memory extents.
 566 * @list: List of extents to free.
 567 */
 568static void free_mem_extents(struct list_head *list)
 569{
 570	struct mem_extent *ext, *aux;
 571
 572	list_for_each_entry_safe(ext, aux, list, hook) {
 573		list_del(&ext->hook);
 574		kfree(ext);
 575	}
 576}
 577
 578/**
 579 * create_mem_extents - Create a list of memory extents.
 580 * @list: List to put the extents into.
 581 * @gfp_mask: Mask to use for memory allocations.
 582 *
 583 * The extents represent contiguous ranges of PFNs.
 584 */
 585static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 586{
 587	struct zone *zone;
 588
 589	INIT_LIST_HEAD(list);
 590
 591	for_each_populated_zone(zone) {
 592		unsigned long zone_start, zone_end;
 593		struct mem_extent *ext, *cur, *aux;
 594
 595		zone_start = zone->zone_start_pfn;
 596		zone_end = zone_end_pfn(zone);
 597
 598		list_for_each_entry(ext, list, hook)
 599			if (zone_start <= ext->end)
 600				break;
 601
 602		if (&ext->hook == list || zone_end < ext->start) {
 603			/* New extent is necessary */
 604			struct mem_extent *new_ext;
 605
 606			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
 607			if (!new_ext) {
 608				free_mem_extents(list);
 609				return -ENOMEM;
 610			}
 611			new_ext->start = zone_start;
 612			new_ext->end = zone_end;
 613			list_add_tail(&new_ext->hook, &ext->hook);
 614			continue;
 615		}
 616
 617		/* Merge this zone's range of PFNs with the existing one */
 618		if (zone_start < ext->start)
 619			ext->start = zone_start;
 620		if (zone_end > ext->end)
 621			ext->end = zone_end;
 622
 623		/* More merging may be possible */
 624		cur = ext;
 625		list_for_each_entry_safe_continue(cur, aux, list, hook) {
 626			if (zone_end < cur->start)
 627				break;
 628			if (zone_end < cur->end)
 629				ext->end = cur->end;
 630			list_del(&cur->hook);
 631			kfree(cur);
 632		}
 633	}
 634
 635	return 0;
 636}
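
/*
 * Merge example: if one zone spans PFNs [0, 1000) and the next spans
 * [900, 2000), the loop above coalesces them into a single extent
 * covering [0, 2000); adjacent ranges (zone_start == ext->end) are
 * merged the same way.
 */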
 637
 638/**
 639 * memory_bm_create - Allocate memory for a memory bitmap.
 640 */
 641static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
 642			    int safe_needed)
 643{
 644	struct chain_allocator ca;
 645	struct list_head mem_extents;
 646	struct mem_extent *ext;
 647	int error;
 648
 649	chain_init(&ca, gfp_mask, safe_needed);
 650	INIT_LIST_HEAD(&bm->zones);
 651
 652	error = create_mem_extents(&mem_extents, gfp_mask);
 653	if (error)
 654		return error;
 655
 656	list_for_each_entry(ext, &mem_extents, hook) {
 657		struct mem_zone_bm_rtree *zone;
 658
 659		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
 660					    ext->start, ext->end);
 661		if (!zone) {
 662			error = -ENOMEM;
 663			goto Error;
 664		}
 665		list_add_tail(&zone->list, &bm->zones);
 666	}
 667
 668	bm->p_list = ca.chain;
 669	memory_bm_position_reset(bm);
 670 Exit:
 671	free_mem_extents(&mem_extents);
 672	return error;
 673
 674 Error:
 675	bm->p_list = ca.chain;
 676	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 677	goto Exit;
 678}
 679
 680/**
 681 * memory_bm_free - Free memory occupied by the memory bitmap.
 682 * @bm: Memory bitmap.
 683 */
 684static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 685{
 686	struct mem_zone_bm_rtree *zone;
 687
 688	list_for_each_entry(zone, &bm->zones, list)
 689		free_zone_bm_rtree(zone, clear_nosave_free);
 690
 691	free_list_of_pages(bm->p_list, clear_nosave_free);
 692
 693	INIT_LIST_HEAD(&bm->zones);
 694}
 695
 696/**
 697 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 698 *
 699 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
  700 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 701 *
 702 * Walk the radix tree to find the page containing the bit that represents @pfn
 703 * and return the position of the bit in @addr and @bit_nr.
 704 */
 705static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 706			      void **addr, unsigned int *bit_nr)
 707{
 708	struct mem_zone_bm_rtree *curr, *zone;
 709	struct rtree_node *node;
 710	int i, block_nr;
 711
 712	zone = bm->cur.zone;
 713
 714	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
 715		goto zone_found;
 716
 717	zone = NULL;
 718
 719	/* Find the right zone */
 720	list_for_each_entry(curr, &bm->zones, list) {
 721		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
 722			zone = curr;
 723			break;
 724		}
 725	}
 726
 727	if (!zone)
 728		return -EFAULT;
 729
 730zone_found:
 731	/*
 732	 * We have found the zone. Now walk the radix tree to find the leaf node
 733	 * for our PFN.
 734	 */
 735	node = bm->cur.node;
 736	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
 737		goto node_found;
 738
 739	node      = zone->rtree;
 740	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
 741
 742	for (i = zone->levels; i > 0; i--) {
 743		int index;
 744
 745		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 746		index &= BM_RTREE_LEVEL_MASK;
 747		BUG_ON(node->data[index] == 0);
 748		node = (struct rtree_node *)node->data[index];
 749	}
 750
 751node_found:
 752	/* Update last position */
 753	bm->cur.zone = zone;
 754	bm->cur.node = node;
 755	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
 756
 757	/* Set return values */
 758	*addr = node->data;
 759	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 760
 761	return 0;
 762}
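
/*
 * Lookup example (4 KiB pages): for pfn == zone->start_pfn + 40000,
 * block_nr == 40000 >> 15 == 1, so the walk above descends to the
 * second leaf, and *bit_nr == 40000 & BM_BLOCK_MASK == 7232.
 */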
 763
 764static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 765{
 766	void *addr;
 767	unsigned int bit;
 768	int error;
 769
 770	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 771	BUG_ON(error);
 772	set_bit(bit, addr);
 773}
 774
 775static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 776{
 777	void *addr;
 778	unsigned int bit;
 779	int error;
 780
 781	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 782	if (!error)
 783		set_bit(bit, addr);
 784
 785	return error;
 786}
 787
 788static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 789{
 790	void *addr;
 791	unsigned int bit;
 792	int error;
 793
 794	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 795	BUG_ON(error);
 796	clear_bit(bit, addr);
 797}
 798
 799static void memory_bm_clear_current(struct memory_bitmap *bm)
 800{
 801	int bit;
 802
 803	bit = max(bm->cur.node_bit - 1, 0);
 804	clear_bit(bit, bm->cur.node->data);
 805}
 806
 807static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 808{
 809	void *addr;
 810	unsigned int bit;
 811	int error;
 812
 813	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 814	BUG_ON(error);
 815	return test_bit(bit, addr);
 816}
 817
 818static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 819{
 820	void *addr;
 821	unsigned int bit;
 822
 823	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 824}
 825
 826/*
 827 * rtree_next_node - Jump to the next leaf node.
 828 *
 829 * Set the position to the beginning of the next node in the
 830 * memory bitmap. This is either the next node in the current
 831 * zone's radix tree or the first node in the radix tree of the
 832 * next zone.
 833 *
 834 * Return true if there is a next node, false otherwise.
 835 */
 836static bool rtree_next_node(struct memory_bitmap *bm)
 837{
 838	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
 839		bm->cur.node = list_entry(bm->cur.node->list.next,
 840					  struct rtree_node, list);
 841		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 842		bm->cur.node_bit  = 0;
 843		touch_softlockup_watchdog();
 844		return true;
 845	}
 846
  847	/* No more nodes, go to the next zone */
 848	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
 849		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 850				  struct mem_zone_bm_rtree, list);
 851		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 852					  struct rtree_node, list);
 853		bm->cur.node_pfn = 0;
 854		bm->cur.node_bit = 0;
 855		return true;
 856	}
 857
 858	/* No more zones */
 859	return false;
 860}
 861
 862/**
 863 * memory_bm_rtree_next_pfn - Find the next set bit in a memory bitmap.
 864 * @bm: Memory bitmap.
 865 *
 866 * Starting from the last returned position this function searches for the next
 867 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 868 * set, BM_END_OF_MAP is returned.
 869 *
 870 * It is required to run memory_bm_position_reset() before the first call to
 871 * this function for the given memory bitmap.
 872 */
 873static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 874{
 875	unsigned long bits, pfn, pages;
 876	int bit;
 877
 878	do {
 879		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
 880		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
 881		bit	  = find_next_bit(bm->cur.node->data, bits,
 882					  bm->cur.node_bit);
 883		if (bit < bits) {
 884			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
 885			bm->cur.node_bit = bit + 1;
 886			return pfn;
 887		}
 888	} while (rtree_next_node(bm));
 889
 890	return BM_END_OF_MAP;
 891}
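
/*
 * Typical iteration over all bits set in a bitmap ('handle' below is a
 * placeholder; clear_free_pages() further down uses this pattern):
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		handle(pfn);
 */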
 892
 893/*
 894 * This structure represents a range of page frames the contents of which
 895 * should not be saved during hibernation.
 896 */
 897struct nosave_region {
 898	struct list_head list;
 899	unsigned long start_pfn;
 900	unsigned long end_pfn;
 901};
 902
 903static LIST_HEAD(nosave_regions);
 904
 905static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
 906{
 907	struct rtree_node *node;
 908
 909	list_for_each_entry(node, &zone->nodes, list)
 910		recycle_safe_page(node->data);
 911
 912	list_for_each_entry(node, &zone->leaves, list)
 913		recycle_safe_page(node->data);
 914}
 915
 916static void memory_bm_recycle(struct memory_bitmap *bm)
 917{
 918	struct mem_zone_bm_rtree *zone;
 919	struct linked_page *p_list;
 920
 921	list_for_each_entry(zone, &bm->zones, list)
 922		recycle_zone_bm_rtree(zone);
 923
 924	p_list = bm->p_list;
 925	while (p_list) {
 926		struct linked_page *lp = p_list;
 927
 928		p_list = lp->next;
 929		recycle_safe_page(lp);
 930	}
 931}
 932
 933/**
 934 * register_nosave_region - Register a region of unsaveable memory.
 935 *
 936 * Register a range of page frames the contents of which should not be saved
 937 * during hibernation (to be used in the early initialization code).
 938 */
 939void __init __register_nosave_region(unsigned long start_pfn,
 940				     unsigned long end_pfn, int use_kmalloc)
 941{
 942	struct nosave_region *region;
 943
 944	if (start_pfn >= end_pfn)
 945		return;
 946
 947	if (!list_empty(&nosave_regions)) {
 948		/* Try to extend the previous region (they should be sorted) */
 949		region = list_entry(nosave_regions.prev,
 950					struct nosave_region, list);
 951		if (region->end_pfn == start_pfn) {
 952			region->end_pfn = end_pfn;
 953			goto Report;
 954		}
 955	}
 956	if (use_kmalloc) {
 957		/* During init, this shouldn't fail */
 958		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
 959		BUG_ON(!region);
 960	} else {
 961		/* This allocation cannot fail */
 962		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
 963	}
 964	region->start_pfn = start_pfn;
 965	region->end_pfn = end_pfn;
 966	list_add_tail(&region->list, &nosave_regions);
 967 Report:
 968	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
 969		(unsigned long long) start_pfn << PAGE_SHIFT,
 970		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 971}
 972
 973/*
 974 * Set bits in this map correspond to the page frames the contents of which
 975 * should not be saved during the suspend.
 976 */
 977static struct memory_bitmap *forbidden_pages_map;
 978
 979/* Set bits in this map correspond to free page frames. */
 980static struct memory_bitmap *free_pages_map;
 981
 982/*
 983 * Each page frame allocated for creating the image is marked by setting the
 984 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 985 */
 986
 987void swsusp_set_page_free(struct page *page)
 988{
 989	if (free_pages_map)
 990		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
 991}
 992
 993static int swsusp_page_is_free(struct page *page)
 994{
 995	return free_pages_map ?
 996		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
 997}
 998
 999void swsusp_unset_page_free(struct page *page)
1000{
1001	if (free_pages_map)
1002		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1003}
1004
1005static void swsusp_set_page_forbidden(struct page *page)
1006{
1007	if (forbidden_pages_map)
1008		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1009}
1010
1011int swsusp_page_is_forbidden(struct page *page)
1012{
1013	return forbidden_pages_map ?
1014		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1015}
1016
1017static void swsusp_unset_page_forbidden(struct page *page)
1018{
1019	if (forbidden_pages_map)
1020		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1021}
1022
1023/**
1024 * mark_nosave_pages - Mark pages that should not be saved.
1025 * @bm: Memory bitmap.
1026 *
1027 * Set the bits in @bm that correspond to the page frames the contents of which
1028 * should not be saved.
1029 */
1030static void mark_nosave_pages(struct memory_bitmap *bm)
1031{
1032	struct nosave_region *region;
1033
1034	if (list_empty(&nosave_regions))
1035		return;
1036
1037	list_for_each_entry(region, &nosave_regions, list) {
1038		unsigned long pfn;
1039
1040		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
1041			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1042			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1043				- 1);
1044
1045		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1046			if (pfn_valid(pfn)) {
1047				/*
1048				 * It is safe to ignore the result of
1049				 * mem_bm_set_bit_check() here, since we won't
1050				 * touch the PFNs for which the error is
1051				 * returned anyway.
1052				 */
1053				mem_bm_set_bit_check(bm, pfn);
1054			}
1055	}
1056}
1057
1058/**
1059 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1060 *
1061 * Create bitmaps needed for marking page frames that should not be saved and
1062 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1063 * only modified if everything goes well, because we don't want the bits to be
1064 * touched before both bitmaps are set up.
1065 */
1066int create_basic_memory_bitmaps(void)
1067{
1068	struct memory_bitmap *bm1, *bm2;
1069	int error = 0;
1070
1071	if (forbidden_pages_map && free_pages_map)
1072		return 0;
1073	else
1074		BUG_ON(forbidden_pages_map || free_pages_map);
1075
1076	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1077	if (!bm1)
1078		return -ENOMEM;
1079
1080	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1081	if (error)
1082		goto Free_first_object;
1083
1084	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1085	if (!bm2)
1086		goto Free_first_bitmap;
1087
1088	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1089	if (error)
1090		goto Free_second_object;
1091
1092	forbidden_pages_map = bm1;
1093	free_pages_map = bm2;
1094	mark_nosave_pages(forbidden_pages_map);
1095
1096	pr_debug("PM: Basic memory bitmaps created\n");
1097
1098	return 0;
1099
1100 Free_second_object:
1101	kfree(bm2);
1102 Free_first_bitmap:
 1103	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1104 Free_first_object:
1105	kfree(bm1);
1106	return -ENOMEM;
1107}
1108
1109/**
1110 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1111 *
1112 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1113 * auxiliary pointers are necessary so that the bitmaps themselves are not
1114 * referred to while they are being freed.
1115 */
1116void free_basic_memory_bitmaps(void)
1117{
1118	struct memory_bitmap *bm1, *bm2;
1119
1120	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1121		return;
1122
1123	bm1 = forbidden_pages_map;
1124	bm2 = free_pages_map;
1125	forbidden_pages_map = NULL;
1126	free_pages_map = NULL;
1127	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1128	kfree(bm1);
1129	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1130	kfree(bm2);
1131
1132	pr_debug("PM: Basic memory bitmaps freed\n");
1133}
1134
1135void clear_free_pages(void)
1136{
1137#ifdef CONFIG_PAGE_POISONING_ZERO
1138	struct memory_bitmap *bm = free_pages_map;
1139	unsigned long pfn;
1140
1141	if (WARN_ON(!(free_pages_map)))
1142		return;
1143
1144	memory_bm_position_reset(bm);
1145	pfn = memory_bm_next_pfn(bm);
1146	while (pfn != BM_END_OF_MAP) {
1147		if (pfn_valid(pfn))
1148			clear_highpage(pfn_to_page(pfn));
1149
1150		pfn = memory_bm_next_pfn(bm);
1151	}
1152	memory_bm_position_reset(bm);
1153	pr_info("PM: free pages cleared after restore\n");
 1154#endif /* CONFIG_PAGE_POISONING_ZERO */
1155}
1156
1157/**
1158 * snapshot_additional_pages - Estimate the number of extra pages needed.
1159 * @zone: Memory zone to carry out the computation for.
1160 *
1161 * Estimate the number of additional pages needed for setting up a hibernation
1162 * image data structures for @zone (usually, the returned value is greater than
1163 * the exact number).
1164 */
1165unsigned int snapshot_additional_pages(struct zone *zone)
1166{
1167	unsigned int rtree, nodes;
1168
1169	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1170	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1171			      LINKED_PAGE_DATA_SIZE);
1172	while (nodes > 1) {
1173		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1174		rtree += nodes;
1175	}
1176
1177	return 2 * rtree;
1178}
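
/*
 * Worked example (64-bit, 4 KiB pages, sizeof(struct rtree_node) == 24):
 * a zone spanning 4 GiB has 2^20 page frames and thus needs
 * DIV_ROUND_UP(2^20, 32768) == 32 bitmap blocks.  The rtree_node
 * structs fit in one extra chain page and the 32 leaves are covered by
 * a single inner node, so rtree == 32 + 1 + 1 == 34.  The result is
 * doubled because two bitmaps (orig_bm and copy_bm) are created.
 */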
1179
1180#ifdef CONFIG_HIGHMEM
1181/**
1182 * count_free_highmem_pages - Compute the total number of free highmem pages.
1183 *
1184 * The returned number is system-wide.
1185 */
1186static unsigned int count_free_highmem_pages(void)
1187{
1188	struct zone *zone;
1189	unsigned int cnt = 0;
1190
1191	for_each_populated_zone(zone)
1192		if (is_highmem(zone))
1193			cnt += zone_page_state(zone, NR_FREE_PAGES);
1194
1195	return cnt;
1196}
1197
1198/**
1199 * saveable_highmem_page - Check if a highmem page is saveable.
1200 *
1201 * Determine whether a highmem page should be included in a hibernation image.
1202 *
 1203 * We should save the page if it isn't marked Nosave or NosaveFree,
 1204 * isn't Reserved, and isn't part of a free chunk of pages.
1205 */
1206static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1207{
1208	struct page *page;
1209
1210	if (!pfn_valid(pfn))
1211		return NULL;
1212
1213	page = pfn_to_page(pfn);
1214	if (page_zone(page) != zone)
1215		return NULL;
1216
1217	BUG_ON(!PageHighMem(page));
1218
1219	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
1220	    PageReserved(page))
1221		return NULL;
1222
1223	if (page_is_guard(page))
1224		return NULL;
1225
1226	return page;
1227}
1228
1229/**
1230 * count_highmem_pages - Compute the total number of saveable highmem pages.
1231 */
1232static unsigned int count_highmem_pages(void)
1233{
1234	struct zone *zone;
1235	unsigned int n = 0;
1236
1237	for_each_populated_zone(zone) {
1238		unsigned long pfn, max_zone_pfn;
1239
1240		if (!is_highmem(zone))
1241			continue;
1242
1243		mark_free_pages(zone);
1244		max_zone_pfn = zone_end_pfn(zone);
1245		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1246			if (saveable_highmem_page(zone, pfn))
1247				n++;
1248	}
1249	return n;
1250}
1251#else
1252static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1253{
1254	return NULL;
1255}
1256#endif /* CONFIG_HIGHMEM */
1257
1258/**
1259 * saveable_page - Check if the given page is saveable.
1260 *
1261 * Determine whether a non-highmem page should be included in a hibernation
1262 * image.
1263 *
 1264 * We should save the page if it isn't Nosave, is not in the range
 1265 * of pages statically defined as 'unsaveable', and isn't part of
 1266 * a free chunk of pages.
1267 */
1268static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1269{
1270	struct page *page;
1271
1272	if (!pfn_valid(pfn))
1273		return NULL;
1274
1275	page = pfn_to_page(pfn);
1276	if (page_zone(page) != zone)
1277		return NULL;
1278
1279	BUG_ON(PageHighMem(page));
1280
1281	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1282		return NULL;
1283
1284	if (PageReserved(page)
1285	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1286		return NULL;
1287
1288	if (page_is_guard(page))
1289		return NULL;
1290
1291	return page;
1292}
1293
1294/**
1295 * count_data_pages - Compute the total number of saveable non-highmem pages.
1296 */
1297static unsigned int count_data_pages(void)
1298{
1299	struct zone *zone;
1300	unsigned long pfn, max_zone_pfn;
1301	unsigned int n = 0;
1302
1303	for_each_populated_zone(zone) {
1304		if (is_highmem(zone))
1305			continue;
1306
1307		mark_free_pages(zone);
1308		max_zone_pfn = zone_end_pfn(zone);
1309		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1310			if (saveable_page(zone, pfn))
1311				n++;
1312	}
1313	return n;
1314}
1315
1316/*
 1317 * This is needed because copy_page and memcpy are not usable for copying
1318 * task structs.
1319 */
1320static inline void do_copy_page(long *dst, long *src)
1321{
1322	int n;
1323
1324	for (n = PAGE_SIZE / sizeof(long); n; n--)
1325		*dst++ = *src++;
1326}
1327
1328/**
1329 * safe_copy_page - Copy a page in a safe way.
1330 *
1331 * Check if the page we are going to copy is marked as present in the kernel
 1332 * page tables (this is always the case if CONFIG_DEBUG_PAGEALLOC is not set,
 1333 * in which case kernel_page_present() always returns 'true').
1334 */
1335static void safe_copy_page(void *dst, struct page *s_page)
1336{
1337	if (kernel_page_present(s_page)) {
1338		do_copy_page(dst, page_address(s_page));
1339	} else {
1340		kernel_map_pages(s_page, 1, 1);
1341		do_copy_page(dst, page_address(s_page));
1342		kernel_map_pages(s_page, 1, 0);
1343	}
1344}
1345
1346#ifdef CONFIG_HIGHMEM
1347static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1348{
1349	return is_highmem(zone) ?
1350		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1351}
1352
1353static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1354{
1355	struct page *s_page, *d_page;
1356	void *src, *dst;
1357
1358	s_page = pfn_to_page(src_pfn);
1359	d_page = pfn_to_page(dst_pfn);
1360	if (PageHighMem(s_page)) {
1361		src = kmap_atomic(s_page);
1362		dst = kmap_atomic(d_page);
1363		do_copy_page(dst, src);
1364		kunmap_atomic(dst);
1365		kunmap_atomic(src);
1366	} else {
1367		if (PageHighMem(d_page)) {
1368			/*
1369			 * The page pointed to by src may contain some kernel
1370			 * data modified by kmap_atomic()
1371			 */
1372			safe_copy_page(buffer, s_page);
1373			dst = kmap_atomic(d_page);
1374			copy_page(dst, buffer);
1375			kunmap_atomic(dst);
1376		} else {
1377			safe_copy_page(page_address(d_page), s_page);
1378		}
1379	}
1380}
1381#else
1382#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1383
1384static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1385{
1386	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1387				pfn_to_page(src_pfn));
1388}
1389#endif /* CONFIG_HIGHMEM */
1390
1391static void copy_data_pages(struct memory_bitmap *copy_bm,
1392			    struct memory_bitmap *orig_bm)
1393{
1394	struct zone *zone;
1395	unsigned long pfn;
1396
1397	for_each_populated_zone(zone) {
1398		unsigned long max_zone_pfn;
1399
1400		mark_free_pages(zone);
1401		max_zone_pfn = zone_end_pfn(zone);
1402		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1403			if (page_is_saveable(zone, pfn))
1404				memory_bm_set_bit(orig_bm, pfn);
1405	}
1406	memory_bm_position_reset(orig_bm);
1407	memory_bm_position_reset(copy_bm);
 1408	for (;;) {
1409		pfn = memory_bm_next_pfn(orig_bm);
1410		if (unlikely(pfn == BM_END_OF_MAP))
1411			break;
1412		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1413	}
1414}
1415
1416/* Total number of image pages */
1417static unsigned int nr_copy_pages;
1418/* Number of pages needed for saving the original pfns of the image pages */
1419static unsigned int nr_meta_pages;
1420/*
1421 * Numbers of normal and highmem page frames allocated for hibernation image
1422 * before suspending devices.
1423 */
1424unsigned int alloc_normal, alloc_highmem;
1425/*
1426 * Memory bitmap used for marking saveable pages (during hibernation) or
1427 * hibernation image pages (during restore)
1428 */
1429static struct memory_bitmap orig_bm;
1430/*
1431 * Memory bitmap used during hibernation for marking allocated page frames that
1432 * will contain copies of saveable pages.  During restore it is initially used
1433 * for marking hibernation image pages, but then the set bits from it are
1434 * duplicated in @orig_bm and it is released.  On highmem systems it is next
1435 * used for marking "safe" highmem pages, but it has to be reinitialized for
1436 * this purpose.
1437 */
1438static struct memory_bitmap copy_bm;
1439
1440/**
1441 * swsusp_free - Free pages allocated for hibernation image.
1442 *
 1443 * Image pages are allocated before snapshot creation, so they need to be
1444 * released after resume.
1445 */
1446void swsusp_free(void)
1447{
1448	unsigned long fb_pfn, fr_pfn;
1449
1450	if (!forbidden_pages_map || !free_pages_map)
1451		goto out;
1452
1453	memory_bm_position_reset(forbidden_pages_map);
1454	memory_bm_position_reset(free_pages_map);
1455
1456loop:
1457	fr_pfn = memory_bm_next_pfn(free_pages_map);
1458	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1459
1460	/*
1461	 * Find the next bit set in both bitmaps. This is guaranteed to
1462	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1463	 */
1464	do {
1465		if (fb_pfn < fr_pfn)
1466			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1467		if (fr_pfn < fb_pfn)
1468			fr_pfn = memory_bm_next_pfn(free_pages_map);
1469	} while (fb_pfn != fr_pfn);
1470
1471	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1472		struct page *page = pfn_to_page(fr_pfn);
1473
1474		memory_bm_clear_current(forbidden_pages_map);
1475		memory_bm_clear_current(free_pages_map);
1476		hibernate_restore_unprotect_page(page_address(page));
1477		__free_page(page);
1478		goto loop;
1479	}
1480
1481out:
1482	nr_copy_pages = 0;
1483	nr_meta_pages = 0;
1484	restore_pblist = NULL;
1485	buffer = NULL;
1486	alloc_normal = 0;
1487	alloc_highmem = 0;
1488	hibernate_restore_protection_end();
1489}
1490
1491/* Helper functions used for the shrinking of memory. */
1492
1493#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1494
1495/**
1496 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1497 * @nr_pages: Number of page frames to allocate.
1498 * @mask: GFP flags to use for the allocation.
1499 *
1500 * Return value: Number of page frames actually allocated
1501 */
1502static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1503{
1504	unsigned long nr_alloc = 0;
1505
1506	while (nr_pages > 0) {
1507		struct page *page;
1508
1509		page = alloc_image_page(mask);
1510		if (!page)
1511			break;
1512		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1513		if (PageHighMem(page))
1514			alloc_highmem++;
1515		else
1516			alloc_normal++;
1517		nr_pages--;
1518		nr_alloc++;
1519	}
1520
1521	return nr_alloc;
1522}
1523
1524static unsigned long preallocate_image_memory(unsigned long nr_pages,
1525					      unsigned long avail_normal)
1526{
1527	unsigned long alloc;
1528
1529	if (avail_normal <= alloc_normal)
1530		return 0;
1531
1532	alloc = avail_normal - alloc_normal;
1533	if (nr_pages < alloc)
1534		alloc = nr_pages;
1535
1536	return preallocate_image_pages(alloc, GFP_IMAGE);
1537}
1538
1539#ifdef CONFIG_HIGHMEM
1540static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1541{
1542	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1543}
1544
1545/**
1546 *  __fraction - Compute (an approximation of) x * (multiplier / base).
1547 */
1548static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1549{
1550	x *= multiplier;
1551	do_div(x, base);
1552	return (unsigned long)x;
1553}
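
/*
 * do_div() is used instead of the '/' operator because the kernel
 * avoids the compiler's 64-bit division helpers on 32-bit
 * architectures; it divides x in place and evaluates to the remainder.
 */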
1554
1555static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1556						  unsigned long highmem,
1557						  unsigned long total)
1558{
1559	unsigned long alloc = __fraction(nr_pages, highmem, total);
1560
1561	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1562}
1563#else /* CONFIG_HIGHMEM */
1564static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1565{
1566	return 0;
1567}
1568
1569static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1570							 unsigned long highmem,
1571							 unsigned long total)
1572{
1573	return 0;
1574}
1575#endif /* CONFIG_HIGHMEM */
1576
1577/**
1578 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1579 */
1580static unsigned long free_unnecessary_pages(void)
1581{
1582	unsigned long save, to_free_normal, to_free_highmem, free;
1583
1584	save = count_data_pages();
1585	if (alloc_normal >= save) {
1586		to_free_normal = alloc_normal - save;
1587		save = 0;
1588	} else {
1589		to_free_normal = 0;
1590		save -= alloc_normal;
1591	}
1592	save += count_highmem_pages();
1593	if (alloc_highmem >= save) {
1594		to_free_highmem = alloc_highmem - save;
1595	} else {
1596		to_free_highmem = 0;
1597		save -= alloc_highmem;
1598		if (to_free_normal > save)
1599			to_free_normal -= save;
1600		else
1601			to_free_normal = 0;
1602	}
1603	free = to_free_normal + to_free_highmem;
1604
1605	memory_bm_position_reset(&copy_bm);
1606
1607	while (to_free_normal > 0 || to_free_highmem > 0) {
1608		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1609		struct page *page = pfn_to_page(pfn);
1610
1611		if (PageHighMem(page)) {
1612			if (!to_free_highmem)
1613				continue;
1614			to_free_highmem--;
1615			alloc_highmem--;
1616		} else {
1617			if (!to_free_normal)
1618				continue;
1619			to_free_normal--;
1620			alloc_normal--;
1621		}
1622		memory_bm_clear_bit(&copy_bm, pfn);
1623		swsusp_unset_page_forbidden(page);
1624		swsusp_unset_page_free(page);
1625		__free_page(page);
1626	}
1627
1628	return free;
1629}
1630
1631/**
1632 * minimum_image_size - Estimate the minimum acceptable size of an image.
1633 * @saveable: Number of saveable pages in the system.
1634 *
1635 * We want to avoid attempting to free too much memory too hard, so estimate the
1636 * minimum acceptable size of a hibernation image to use as the lower limit for
1637 * preallocating memory.
1638 *
1639 * We assume that the minimum image size should be proportional to
1640 *
1641 * [number of saveable pages] - [number of pages that can be freed in theory]
1642 *
1643 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1644 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1645 * minus mapped file pages.
1646 */
1647static unsigned long minimum_image_size(unsigned long saveable)
1648{
1649	unsigned long size;
1650
1651	size = global_page_state(NR_SLAB_RECLAIMABLE)
1652		+ global_node_page_state(NR_ACTIVE_ANON)
1653		+ global_node_page_state(NR_INACTIVE_ANON)
1654		+ global_node_page_state(NR_ACTIVE_FILE)
1655		+ global_node_page_state(NR_INACTIVE_FILE)
1656		- global_node_page_state(NR_FILE_MAPPED);
1657
1658	return saveable <= size ? 0 : saveable - size;
1659}
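
/*
 * Hypothetical example: with 800000 saveable pages and 300000 pages
 * freeable in theory, the lower limit becomes 500000 pages; if the
 * freeable estimate is at least 'saveable', the limit is simply 0.
 */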
1660
1661/**
1662 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1663 *
1664 * To create a hibernation image it is necessary to make a copy of every page
1665 * frame in use.  We also need a number of page frames to be free during
1666 * hibernation for allocations made while saving the image and for device
1667 * drivers, in case they need to allocate memory from their hibernation
1668 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
 1669 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
 1670 * /sys/power/reserved_size), respectively).  To make this happen, we compute the
1671 * total number of available page frames and allocate at least
1672 *
1673 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1674 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1675 *
1676 * of them, which corresponds to the maximum size of a hibernation image.
1677 *
1678 * If image_size is set below the number following from the above formula,
1679 * the preallocation of memory is continued until the total number of saveable
1680 * pages in the system is below the requested image size or the minimum
1681 * acceptable image size returned by minimum_image_size(), whichever is greater.
1682 */
1683int hibernate_preallocate_memory(void)
1684{
1685	struct zone *zone;
1686	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1687	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1688	ktime_t start, stop;
1689	int error;
1690
1691	printk(KERN_INFO "PM: Preallocating image memory... ");
1692	start = ktime_get();
1693
1694	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1695	if (error)
1696		goto err_out;
1697
1698	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1699	if (error)
1700		goto err_out;
1701
1702	alloc_normal = 0;
1703	alloc_highmem = 0;
1704
1705	/* Count the number of saveable data pages. */
1706	save_highmem = count_highmem_pages();
1707	saveable = count_data_pages();
1708
1709	/*
1710	 * Compute the total number of page frames we can use (count) and the
1711	 * number of pages needed for image metadata (size).
1712	 */
1713	count = saveable;
1714	saveable += save_highmem;
1715	highmem = save_highmem;
1716	size = 0;
1717	for_each_populated_zone(zone) {
1718		size += snapshot_additional_pages(zone);
1719		if (is_highmem(zone))
1720			highmem += zone_page_state(zone, NR_FREE_PAGES);
1721		else
1722			count += zone_page_state(zone, NR_FREE_PAGES);
1723	}
1724	avail_normal = count;
1725	count += highmem;
1726	count -= totalreserve_pages;
1727
1728	/* Add number of pages required for page keys (s390 only). */
1729	size += page_key_additional_pages(saveable);
1730
1731	/* Compute the maximum number of saveable pages to leave in memory. */
1732	max_size = (count - (size + PAGES_FOR_IO)) / 2
1733			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1734	/* Compute the desired number of image pages specified by image_size. */
1735	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1736	if (size > max_size)
1737		size = max_size;
1738	/*
1739	 * If the desired number of image pages is at least as large as the
1740	 * current number of saveable pages in memory, allocate page frames for
1741	 * the image and we're done.
1742	 */
1743	if (size >= saveable) {
1744		pages = preallocate_image_highmem(save_highmem);
1745		pages += preallocate_image_memory(saveable - pages, avail_normal);
1746		goto out;
1747	}
1748
1749	/* Estimate the minimum size of the image. */
1750	pages = minimum_image_size(saveable);
1751	/*
1752	 * To avoid excessive pressure on the normal zone, leave room in it to
1753	 * accommodate an image of the minimum size (unless it's already too
1754	 * small, in which case don't preallocate pages from it at all).
1755	 */
1756	if (avail_normal > pages)
1757		avail_normal -= pages;
1758	else
1759		avail_normal = 0;
1760	if (size < pages)
1761		size = min_t(unsigned long, pages, max_size);
1762
1763	/*
1764	 * Let the memory management subsystem know that we're going to need a
1765	 * large number of page frames to allocate and make it free some memory.
1766	 * NOTE: If this is not done, performance will be hurt badly in some
1767	 * test cases.
1768	 */
1769	shrink_all_memory(saveable - size);
1770
1771	/*
1772	 * The number of saveable pages in memory was too high, so apply some
1773	 * pressure to decrease it.  First, make room for the largest possible
1774	 * image and fail if that doesn't work.  Next, try to decrease the size
1775	 * of the image as much as indicated by 'size' using allocations from
1776	 * highmem and non-highmem zones separately.
1777	 */
1778	pages_highmem = preallocate_image_highmem(highmem / 2);
1779	alloc = count - max_size;
1780	if (alloc > pages_highmem)
1781		alloc -= pages_highmem;
1782	else
1783		alloc = 0;
1784	pages = preallocate_image_memory(alloc, avail_normal);
1785	if (pages < alloc) {
1786		/* We have exhausted non-highmem pages, try highmem. */
1787		alloc -= pages;
1788		pages += pages_highmem;
1789		pages_highmem = preallocate_image_highmem(alloc);
1790		if (pages_highmem < alloc)
1791			goto err_out;
1792		pages += pages_highmem;
1793		/*
1794		 * size is the desired number of saveable pages to leave in
1795		 * memory, so try to preallocate (all memory - size) pages.
1796		 */
1797		alloc = (count - pages) - size;
1798		pages += preallocate_image_highmem(alloc);
1799	} else {
1800		/*
1801		 * There are approximately max_size saveable pages at this point
1802		 * and we want to reduce this number down to size.
1803		 */
1804		alloc = max_size - size;
1805		size = preallocate_highmem_fraction(alloc, highmem, count);
1806		pages_highmem += size;
1807		alloc -= size;
1808		size = preallocate_image_memory(alloc, avail_normal);
1809		pages_highmem += preallocate_image_highmem(alloc - size);
1810		pages += pages_highmem + size;
1811	}
1812
1813	/*
1814	 * We only need as many page frames for the image as there are saveable
1815	 * pages in memory, but we have allocated more.  Release the excessive
1816	 * ones now.
1817	 */
1818	pages -= free_unnecessary_pages();
1819
1820 out:
1821	stop = ktime_get();
1822	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1823	swsusp_show_speed(start, stop, pages, "Allocated");
1824
1825	return 0;
1826
1827 err_out:
1828	printk(KERN_CONT "\n");
1829	swsusp_free();
1830	return -ENOMEM;
1831}
1832
1833#ifdef CONFIG_HIGHMEM
1834/**
1835 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1836 *
1837 * Compute the number of non-highmem pages that will be necessary for creating
1838 * copies of highmem pages.
1839 */
1840static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1841{
1842	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1843
1844	if (free_highmem >= nr_highmem)
1845		nr_highmem = 0;
1846	else
1847		nr_highmem -= free_highmem;
1848
1849	return nr_highmem;
1850}
1851#else
1852static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1853#endif /* CONFIG_HIGHMEM */
1854
1855/**
1856 * enough_free_mem - Check if there is enough free memory for the image.
1857 */
1858static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1859{
1860	struct zone *zone;
1861	unsigned int free = alloc_normal;
1862
1863	for_each_populated_zone(zone)
1864		if (!is_highmem(zone))
1865			free += zone_page_state(zone, NR_FREE_PAGES);
1866
1867	nr_pages += count_pages_for_highmem(nr_highmem);
1868	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1869		nr_pages, PAGES_FOR_IO, free);
1870
1871	return free > nr_pages + PAGES_FOR_IO;
1872}
1873
1874#ifdef CONFIG_HIGHMEM
1875/**
1876 * get_highmem_buffer - Allocate a buffer for highmem pages.
1877 *
1878 * If there are some highmem pages in the hibernation image, we may need a
1879 * buffer to copy them and/or load their data.
1880 */
1881static inline int get_highmem_buffer(int safe_needed)
1882{
1883	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1884	return buffer ? 0 : -ENOMEM;
1885}
1886
1887/**
1888 * alloc_highmem_pages - Allocate some highmem pages for the image.
1889 *
1890 * Try to allocate as many pages as needed, but if the number of free highmem
1891 * pages is less than that, allocate them all.
1892 */
1893static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1894					       unsigned int nr_highmem)
1895{
1896	unsigned int to_alloc = count_free_highmem_pages();
1897
1898	if (to_alloc > nr_highmem)
1899		to_alloc = nr_highmem;
1900
1901	nr_highmem -= to_alloc;
1902	while (to_alloc-- > 0) {
1903		struct page *page;
1904
1905		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1906		memory_bm_set_bit(bm, page_to_pfn(page));
1907	}
1908	return nr_highmem;
1909}
1910#else
1911static inline int get_highmem_buffer(int safe_needed) { return 0; }
1912
1913static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1914					       unsigned int n) { return 0; }
1915#endif /* CONFIG_HIGHMEM */
1916
1917/**
1918 * swsusp_alloc - Allocate memory for hibernation image.
1919 *
1920 * We first try to allocate as many highmem pages as there are
1921 * saveable highmem pages in the system.  If that fails, we allocate
1922 * non-highmem pages for the copies of the remaining highmem ones.
1923 *
1924 * In this approach it is likely that the copies of highmem pages will
1925 * also be located in high memory, because of the way in which
1926 * copy_data_pages() works.
1927 */
1928static int swsusp_alloc(struct memory_bitmap *orig_bm,
1929			struct memory_bitmap *copy_bm,
1930			unsigned int nr_pages, unsigned int nr_highmem)
1931{
1932	if (nr_highmem > 0) {
1933		if (get_highmem_buffer(PG_ANY))
1934			goto err_out;
1935		if (nr_highmem > alloc_highmem) {
1936			nr_highmem -= alloc_highmem;
1937			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1938		}
1939	}
1940	if (nr_pages > alloc_normal) {
1941		nr_pages -= alloc_normal;
1942		while (nr_pages-- > 0) {
1943			struct page *page;
1944
1945			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1946			if (!page)
1947				goto err_out;
1948			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1949		}
1950	}
1951
1952	return 0;
1953
1954 err_out:
1955	swsusp_free();
1956	return -ENOMEM;
1957}
1958
1959asmlinkage __visible int swsusp_save(void)
1960{
1961	unsigned int nr_pages, nr_highmem;
1962
1963	printk(KERN_INFO "PM: Creating hibernation image:\n");
1964
1965	drain_local_pages(NULL);
1966	nr_pages = count_data_pages();
1967	nr_highmem = count_highmem_pages();
1968	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1969
1970	if (!enough_free_mem(nr_pages, nr_highmem)) {
1971		printk(KERN_ERR "PM: Not enough free memory\n");
1972		return -ENOMEM;
1973	}
1974
1975	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1976		printk(KERN_ERR "PM: Memory allocation failed\n");
1977		return -ENOMEM;
1978	}
1979
1980	/*
1981	 * While allocating the suspend pagedir, new cold pages may appear.
1982	 * Kill them.
1983	 */
1984	drain_local_pages(NULL);
1985	copy_data_pages(&copy_bm, &orig_bm);
1986
1987	/*
1988	 * End of critical section. From now on, we can write to memory,
1989	 * but we should not touch disk. In particular, this means we must _not_
1990	 * touch swap space! Except, of course, that we must write out our image.
1991	 */
1992
1993	nr_pages += nr_highmem;
1994	nr_copy_pages = nr_pages;
1995	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1996
1997	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1998		nr_pages);
1999
2000	return 0;
2001}
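/*
 * A worked example of the nr_meta_pages arithmetic above, assuming
 * 4 KiB pages and 8-byte longs: recording the PFNs of 131072 copied
 * pages takes 131072 * 8 bytes, so nr_meta_pages =
 * DIV_ROUND_UP(131072 * 8, 4096) = 256 meta pages.
 */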
2002
2003#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2004static int init_header_complete(struct swsusp_info *info)
2005{
2006	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2007	info->version_code = LINUX_VERSION_CODE;
2008	return 0;
2009}
2010
2011static char *check_image_kernel(struct swsusp_info *info)
2012{
2013	if (info->version_code != LINUX_VERSION_CODE)
2014		return "kernel version";
2015	if (strcmp(info->uts.sysname, init_utsname()->sysname))
2016		return "system type";
2017	if (strcmp(info->uts.release, init_utsname()->release))
2018		return "kernel release";
2019	if (strcmp(info->uts.version, init_utsname()->version))
2020		return "version";
2021	if (strcmp(info->uts.machine, init_utsname()->machine))
2022		return "machine";
2023	return NULL;
2024}
2025#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2026
2027unsigned long snapshot_get_image_size(void)
2028{
2029	return nr_copy_pages + nr_meta_pages + 1;
2030}
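/*
 * Continuing the example above (4 KiB pages): an image with 131072 data
 * pages and 256 meta pages is reported as 131072 + 256 + 1 = 131329
 * pages in total; the extra page holds the swsusp_info header filled in
 * by init_header() below.
 */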
2031
2032static int init_header(struct swsusp_info *info)
2033{
2034	memset(info, 0, sizeof(struct swsusp_info));
2035	info->num_physpages = get_num_physpages();
2036	info->image_pages = nr_copy_pages;
2037	info->pages = snapshot_get_image_size();
2038	info->size = info->pages;
2039	info->size <<= PAGE_SHIFT;
2040	return init_header_complete(info);
2041}
2042
2043/**
2044 * pack_pfns - Prepare PFNs for saving.
2045 * @bm: Memory bitmap.
2046 * @buf: Memory buffer to store the PFNs in.
2047 *
2048 * PFNs corresponding to set bits in @bm are stored in the area of memory
2049 * pointed to by @buf (1 page at a time).
2050 */
2051static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2052{
2053	int j;
2054
2055	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2056		buf[j] = memory_bm_next_pfn(bm);
2057		if (unlikely(buf[j] == BM_END_OF_MAP))
2058			break;
2059		/* Save page key for data page (s390 only). */
2060		page_key_read(buf + j);
2061	}
2062}
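/*
 * Each meta page filled by pack_pfns() carries PAGE_SIZE / sizeof(long)
 * PFNs (512 with 4 KiB pages and 8-byte longs).  If the bitmap runs out
 * mid-page, everything after the BM_END_OF_MAP marker is simply the
 * zeroes left by clear_page() in snapshot_read_next() below.
 */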
2063
2064/**
2065 * snapshot_read_next - Get the address to read the next image page from.
2066 * @handle: Snapshot handle to be used for the reading.
2067 *
2068 * On the first call, @handle should point to a zeroed snapshot_handle
2069 * structure.  It is then populated, and a pointer to it should be
2070 * passed to this function on every subsequent call.
2071 *
2072 * On success, the function returns a positive number.  Then, the caller
2073 * is allowed to read up to the returned number of bytes from the memory
2074 * location computed by the data_of() macro.
2075 *
2076 * The function returns 0 to indicate the end of the data stream, and a
2077 * negative number on error.  In the latter case, the structure
2078 * pointed to by @handle is not updated and should not be used any more.
2079 */
2080int snapshot_read_next(struct snapshot_handle *handle)
2081{
2082	if (handle->cur > nr_meta_pages + nr_copy_pages)
2083		return 0;
2084
2085	if (!buffer) {
2086		/* This makes the buffer be freed by swsusp_free() */
2087		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2088		if (!buffer)
2089			return -ENOMEM;
2090	}
2091	if (!handle->cur) {
2092		int error;
2093
2094		error = init_header((struct swsusp_info *)buffer);
2095		if (error)
2096			return error;
2097		handle->buffer = buffer;
2098		memory_bm_position_reset(&orig_bm);
2099		memory_bm_position_reset(&copy_bm);
2100	} else if (handle->cur <= nr_meta_pages) {
2101		clear_page(buffer);
2102		pack_pfns(buffer, &orig_bm);
2103	} else {
2104		struct page *page;
2105
2106		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2107		if (PageHighMem(page)) {
2108			/*
2109			 * Highmem pages are copied to the buffer,
2110			 * because we can't return with a kmapped
2111			 * highmem page (we may not be called again).
2112			 */
2113			void *kaddr;
2114
2115			kaddr = kmap_atomic(page);
2116			copy_page(buffer, kaddr);
2117			kunmap_atomic(kaddr);
2118			handle->buffer = buffer;
2119		} else {
2120			handle->buffer = page_address(page);
2121		}
2122	}
2123	handle->cur++;
2124	return PAGE_SIZE;
2125}
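/*
 * A minimal sketch of a caller loop for snapshot_read_next(); the
 * function name example_read_image() and the storage step are
 * hypothetical, and error handling is reduced to returning the code.
 */
static int example_read_image(void)
{
	struct snapshot_handle handle;
	int ret;

	memset(&handle, 0, sizeof(handle));
	while ((ret = snapshot_read_next(&handle)) > 0) {
		/* write 'ret' bytes from data_of(handle) to the image file */
	}
	return ret;	/* 0 at the end of the data stream, negative on error */
}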
2126
2127static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2128				    struct memory_bitmap *src)
2129{
2130	unsigned long pfn;
2131
2132	memory_bm_position_reset(src);
2133	pfn = memory_bm_next_pfn(src);
2134	while (pfn != BM_END_OF_MAP) {
2135		memory_bm_set_bit(dst, pfn);
2136		pfn = memory_bm_next_pfn(src);
2137	}
2138}
2139
2140/**
2141 * mark_unsafe_pages - Mark pages that were used before hibernation.
2142 *
2143 * Mark the pages that cannot be used for storing the image during restoration,
2144 * because they conflict with the pages that had been used before hibernation.
2145 */
2146static void mark_unsafe_pages(struct memory_bitmap *bm)
2147{
2148	unsigned long pfn;
2149
2150	/* Clear the "free"/"unsafe" bit for all PFNs */
2151	memory_bm_position_reset(free_pages_map);
2152	pfn = memory_bm_next_pfn(free_pages_map);
2153	while (pfn != BM_END_OF_MAP) {
2154		memory_bm_clear_current(free_pages_map);
2155		pfn = memory_bm_next_pfn(free_pages_map);
2156	}
2157
2158	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2159	duplicate_memory_bitmap(free_pages_map, bm);
2160
2161	allocated_unsafe_pages = 0;
2162}
2163
2164static int check_header(struct swsusp_info *info)
2165{
2166	char *reason;
2167
2168	reason = check_image_kernel(info);
2169	if (!reason && info->num_physpages != get_num_physpages())
2170		reason = "memory size";
2171	if (reason) {
2172		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2173		return -EPERM;
2174	}
2175	return 0;
2176}
2177
2178/**
2179 * load_header - Check the image header and copy the data from it.
2180 */
2181static int load_header(struct swsusp_info *info)
2182{
2183	int error;
2184
2185	restore_pblist = NULL;
2186	error = check_header(info);
2187	if (!error) {
2188		nr_copy_pages = info->image_pages;
2189		nr_meta_pages = info->pages - info->image_pages - 1;
2190	}
2191	return error;
2192}
2193
2194/**
2195 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2196 * @bm: Memory bitmap.
2197 * @buf: Area of memory containing the PFNs.
2198 *
2199 * For each element of the array pointed to by @buf (1 page at a time), set the
2200 * corresponding bit in @bm.
2201 */
2202static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2203{
2204	int j;
2205
2206	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2207		if (unlikely(buf[j] == BM_END_OF_MAP))
2208			break;
2209
2210		/* Extract and buffer page key for data page (s390 only). */
2211		page_key_memorize(buf + j);
2212
2213		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2214			memory_bm_set_bit(bm, buf[j]);
2215		else
2216			return -EFAULT;
2217	}
2218
2219	return 0;
2220}
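/*
 * unpack_orig_pfns() is the restore-side mirror of pack_pfns(): instead
 * of draining PFNs out of a bitmap it replays them into one, and it
 * fails with -EFAULT when a stored PFN is invalid or not present in the
 * bitmap, which guards the restore path against a corrupted image.
 */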
2221
2222#ifdef CONFIG_HIGHMEM
2223/*
2224 * struct highmem_pbe is used for creating the list of highmem pages that
2225 * should be restored atomically during the resume from disk, because the page
2226 * frames they have occupied before the suspend are in use.
2227 */
2228struct highmem_pbe {
2229	struct page *copy_page;	/* data is here now */
2230	struct page *orig_page;	/* data was here before the suspend */
2231	struct highmem_pbe *next;
2232};
2233
2234/*
2235 * List of highmem PBEs needed for restoring the highmem pages that were
2236 * allocated before the suspend and included in the suspend image, but have
2237 * also been allocated by the "resume" kernel, so their contents cannot be
2238 * written directly to their "original" page frames.
2239 */
2240static struct highmem_pbe *highmem_pblist;
2241
2242/**
2243 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2244 * @bm: Memory bitmap.
2245 *
2246 * The bits in @bm that correspond to image pages are assumed to be set.
2247 */
2248static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2249{
2250	unsigned long pfn;
2251	unsigned int cnt = 0;
2252
2253	memory_bm_position_reset(bm);
2254	pfn = memory_bm_next_pfn(bm);
2255	while (pfn != BM_END_OF_MAP) {
2256		if (PageHighMem(pfn_to_page(pfn)))
2257			cnt++;
2258
2259		pfn = memory_bm_next_pfn(bm);
2260	}
2261	return cnt;
2262}
2263
2264static unsigned int safe_highmem_pages;
2265
2266static struct memory_bitmap *safe_highmem_bm;
2267
2268/**
2269 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2270 * @bm: Pointer to an uninitialized memory bitmap structure.
2271 * @nr_highmem_p: Pointer to the number of highmem image pages.
2272 *
2273 * Try to allocate as many highmem pages as there are highmem image pages
2274 * (@nr_highmem_p points to the variable containing the number of highmem image
2275 * pages).  The pages that are "safe" (i.e. will not be overwritten when the
2276 * hibernation image is restored entirely) have the corresponding bits set in
2277 * @bm (it must be uninitialized).
2278 *
2279 * NOTE: This function should not be called if there are no highmem image pages.
2280 */
2281static int prepare_highmem_image(struct memory_bitmap *bm,
2282				 unsigned int *nr_highmem_p)
2283{
2284	unsigned int to_alloc;
2285
2286	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2287		return -ENOMEM;
2288
2289	if (get_highmem_buffer(PG_SAFE))
2290		return -ENOMEM;
2291
2292	to_alloc = count_free_highmem_pages();
2293	if (to_alloc > *nr_highmem_p)
2294		to_alloc = *nr_highmem_p;
2295	else
2296		*nr_highmem_p = to_alloc;
2297
2298	safe_highmem_pages = 0;
2299	while (to_alloc-- > 0) {
2300		struct page *page;
2301
2302		page = alloc_page(__GFP_HIGHMEM);
2303		if (!swsusp_page_is_free(page)) {
2304			/* The page is "safe", set its bit in the bitmap */
2305			memory_bm_set_bit(bm, page_to_pfn(page));
2306			safe_highmem_pages++;
2307		}
2308		/* Mark the page as allocated */
2309		swsusp_set_page_forbidden(page);
2310		swsusp_set_page_free(page);
2311	}
2312	memory_bm_position_reset(bm);
2313	safe_highmem_bm = bm;
2314	return 0;
2315}
2316
2317static struct page *last_highmem_page;
2318
2319/**
2320 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2321 *
2322 * For a given highmem image page get a buffer that snapshot_write_next() should
2323 * return to its caller to write to.
2324 *
2325 * If the page is to be saved to its "original" page frame or a copy of
2326 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2327 * the copy of the page is to be made in normal memory, so the address of
2328 * the copy is returned.
2329 *
2330 * If @buffer is returned, the caller of snapshot_write_next() will write
2331 * the page's contents to @buffer, so they will have to be copied to the
2332 * right location on the next call to snapshot_write_next(), which is done
2333 * with the help of copy_last_highmem_page().  For this purpose, if
2334 * @buffer is returned, @last_highmem_page is set to the page to which
2335 * the data will have to be copied from @buffer.
2336 */
2337static void *get_highmem_page_buffer(struct page *page,
2338				     struct chain_allocator *ca)
2339{
2340	struct highmem_pbe *pbe;
2341	void *kaddr;
2342
2343	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2344		/*
2345		 * We have allocated the "original" page frame and we can
2346		 * use it directly to store the loaded page.
2347		 */
2348		last_highmem_page = page;
2349		return buffer;
2350	}
2351	/*
2352	 * The "original" page frame has not been allocated and we have to
2353	 * use a "safe" page frame to store the loaded page.
2354	 */
2355	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2356	if (!pbe) {
2357		swsusp_free();
2358		return ERR_PTR(-ENOMEM);
2359	}
2360	pbe->orig_page = page;
2361	if (safe_highmem_pages > 0) {
2362		struct page *tmp;
2363
2364		/* Copy of the page will be stored in high memory */
2365		kaddr = buffer;
2366		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2367		safe_highmem_pages--;
2368		last_highmem_page = tmp;
2369		pbe->copy_page = tmp;
2370	} else {
2371		/* Copy of the page will be stored in normal memory */
2372		kaddr = safe_pages_list;
2373		safe_pages_list = safe_pages_list->next;
2374		pbe->copy_page = virt_to_page(kaddr);
2375	}
2376	pbe->next = highmem_pblist;
2377	highmem_pblist = pbe;
2378	return kaddr;
2379}
2380
2381/**
2382 * copy_last_highmem_page - Copy the most recent highmem image page.
2383 *
2384 * Copy the contents of a highmem image page from @buffer, where the caller of
2385 * snapshot_write_next() has stored them, to the right location represented by
2386 * @last_highmem_page.
2387 */
2388static void copy_last_highmem_page(void)
2389{
2390	if (last_highmem_page) {
2391		void *dst;
2392
2393		dst = kmap_atomic(last_highmem_page);
2394		copy_page(dst, buffer);
2395		kunmap_atomic(dst);
2396		last_highmem_page = NULL;
2397	}
2398}
2399
2400static inline int last_highmem_page_copied(void)
2401{
2402	return !last_highmem_page;
2403}
2404
2405static inline void free_highmem_data(void)
2406{
2407	if (safe_highmem_bm)
2408		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2409
2410	if (buffer)
2411		free_image_page(buffer, PG_UNSAFE_CLEAR);
2412}
2413#else
2414static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2415
2416static inline int prepare_highmem_image(struct memory_bitmap *bm,
2417					unsigned int *nr_highmem_p) { return 0; }
2418
2419static inline void *get_highmem_page_buffer(struct page *page,
2420					    struct chain_allocator *ca)
2421{
2422	return ERR_PTR(-EINVAL);
2423}
2424
2425static inline void copy_last_highmem_page(void) {}
2426static inline int last_highmem_page_copied(void) { return 1; }
2427static inline void free_highmem_data(void) {}
2428#endif /* CONFIG_HIGHMEM */
2429
2430#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
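/*
 * For example, assuming 4 KiB pages and 64-bit pointers:
 * LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes and sizeof(struct pbe)
 * is 24 bytes, so each linked page holds 4088 / 24 = 170 PBEs.
 */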
2431
2432/**
2433 * prepare_image - Make room for loading hibernation image.
2434 * @new_bm: Uninitialized memory bitmap structure.
2435 * @bm: Memory bitmap with unsafe pages marked.
2436 *
2437 * Use @bm to mark the pages that will be overwritten in the process of
2438 * restoring the system memory state from the suspend image ("unsafe" pages)
2439 * and allocate memory for the image.
2440 *
2441 * The idea is to allocate a new memory bitmap first and then allocate
2442 * as many pages as needed for image data, but without specifying what those
2443 * pages will be used for just yet.  Instead, we mark them all as allocated and
2444 * create a list of "safe" pages to be used later.  On systems with high
2445 * memory a list of "safe" highmem pages is created too.
2446 */
2447static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2448{
2449	unsigned int nr_pages, nr_highmem;
2450	struct linked_page *lp;
2451	int error;
2452
2453	/* If there is no highmem, the buffer will not be necessary */
2454	free_image_page(buffer, PG_UNSAFE_CLEAR);
2455	buffer = NULL;
2456
2457	nr_highmem = count_highmem_image_pages(bm);
2458	mark_unsafe_pages(bm);
2459
2460	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2461	if (error)
2462		goto Free;
2463
2464	duplicate_memory_bitmap(new_bm, bm);
2465	memory_bm_free(bm, PG_UNSAFE_KEEP);
2466	if (nr_highmem > 0) {
2467		error = prepare_highmem_image(bm, &nr_highmem);
2468		if (error)
2469			goto Free;
2470	}
2471	/*
2472	 * Reserve some safe pages for potential later use.
2473	 *
2474	 * NOTE: This way we make sure there will be enough safe pages for the
2475	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2476	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2477	 *
2478	 * Moreover, nr_copy_pages cannot be less than allocated_unsafe_pages.
2479	 */
2480	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2481	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2482	while (nr_pages > 0) {
2483		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2484		if (!lp) {
2485			error = -ENOMEM;
2486			goto Free;
2487		}
2488		lp->next = safe_pages_list;
2489		safe_pages_list = lp;
2490		nr_pages--;
2491	}
2492	/* Preallocate memory for the image */
2493	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2494	while (nr_pages > 0) {
2495		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2496		if (!lp) {
2497			error = -ENOMEM;
2498			goto Free;
2499		}
2500		if (!swsusp_page_is_free(virt_to_page(lp))) {
2501			/* The page is "safe", add it to the list */
2502			lp->next = safe_pages_list;
2503			safe_pages_list = lp;
2504		}
2505		/* Mark the page as allocated */
2506		swsusp_set_page_forbidden(virt_to_page(lp));
2507		swsusp_set_page_free(virt_to_page(lp));
2508		nr_pages--;
2509	}
2510	return 0;
2511
2512 Free:
2513	swsusp_free();
2514	return error;
2515}
2516
2517/**
2518 * get_buffer - Get the address to store the next image data page.
2519 *
2520 * Get the address that snapshot_write_next() should return to its caller to
2521 * write to.
2522 */
2523static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2524{
2525	struct pbe *pbe;
2526	struct page *page;
2527	unsigned long pfn = memory_bm_next_pfn(bm);
2528
2529	if (pfn == BM_END_OF_MAP)
2530		return ERR_PTR(-EFAULT);
2531
2532	page = pfn_to_page(pfn);
2533	if (PageHighMem(page))
2534		return get_highmem_page_buffer(page, ca);
2535
2536	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2537		/*
2538		 * We have allocated the "original" page frame and we can
2539		 * use it directly to store the loaded page.
2540		 */
2541		return page_address(page);
2542
2543	/*
2544	 * The "original" page frame has not been allocated and we have to
2545	 * use a "safe" page frame to store the loaded page.
2546	 */
2547	pbe = chain_alloc(ca, sizeof(struct pbe));
2548	if (!pbe) {
2549		swsusp_free();
2550		return ERR_PTR(-ENOMEM);
2551	}
2552	pbe->orig_address = page_address(page);
2553	pbe->address = safe_pages_list;
2554	safe_pages_list = safe_pages_list->next;
2555	pbe->next = restore_pblist;
2556	restore_pblist = pbe;
2557	return pbe->address;
2558}
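/*
 * In short, get_buffer() has three possible outcomes: the page's
 * "original" frame itself when that frame could be allocated up front,
 * a buffer from get_highmem_page_buffer() for highmem pages, or a
 * "safe" page recorded in a struct pbe so that the data can be moved
 * into its final frame during the atomic restore.
 */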
2559
2560/**
2561 * snapshot_write_next - Get the address to store the next image page.
2562 * @handle: Snapshot handle structure to guide the writing.
2563 *
2564 * On the first call, @handle should point to a zeroed snapshot_handle
2565 * structure.  It is then populated, and a pointer to it should be
2566 * passed to this function on every subsequent call.
2567 *
2568 * On success, the function returns a positive number.  Then, the caller
2569 * is allowed to write up to the returned number of bytes to the memory
2570 * location computed by the data_of() macro.
2571 *
2572 * The function returns 0 to indicate the "end of file" condition.  Negative
2573 * numbers are returned on errors, in which case the structure pointed to by
2574 * @handle is not updated and should not be used any more.
2575 */
2576int snapshot_write_next(struct snapshot_handle *handle)
2577{
2578	static struct chain_allocator ca;
2579	int error = 0;
2580
2581	/* Check if we have already loaded the entire image */
2582	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2583		return 0;
2584
2585	handle->sync_read = 1;
2586
2587	if (!handle->cur) {
2588		if (!buffer)
2589			/* This makes the buffer be freed by swsusp_free() */
2590			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2591
2592		if (!buffer)
2593			return -ENOMEM;
2594
2595		handle->buffer = buffer;
2596	} else if (handle->cur == 1) {
2597		error = load_header(buffer);
2598		if (error)
2599			return error;
2600
2601		safe_pages_list = NULL;
2602
2603		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2604		if (error)
2605			return error;
2606
2607		/* Allocate buffer for page keys. */
2608		error = page_key_alloc(nr_copy_pages);
2609		if (error)
2610			return error;
2611
2612		hibernate_restore_protection_begin();
2613	} else if (handle->cur <= nr_meta_pages + 1) {
2614		error = unpack_orig_pfns(buffer, &copy_bm);
2615		if (error)
2616			return error;
2617
2618		if (handle->cur == nr_meta_pages + 1) {
2619			error = prepare_image(&orig_bm, &copy_bm);
2620			if (error)
2621				return error;
2622
2623			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2624			memory_bm_position_reset(&orig_bm);
2625			restore_pblist = NULL;
2626			handle->buffer = get_buffer(&orig_bm, &ca);
2627			handle->sync_read = 0;
2628			if (IS_ERR(handle->buffer))
2629				return PTR_ERR(handle->buffer);
2630		}
2631	} else {
2632		copy_last_highmem_page();
2633		/* Restore page key for data page (s390 only). */
2634		page_key_write(handle->buffer);
2635		hibernate_restore_protect_page(handle->buffer);
2636		handle->buffer = get_buffer(&orig_bm, &ca);
2637		if (IS_ERR(handle->buffer))
2638			return PTR_ERR(handle->buffer);
2639		if (handle->buffer != buffer)
2640			handle->sync_read = 0;
2641	}
2642	handle->cur++;
2643	return PAGE_SIZE;
2644}
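/*
 * A minimal sketch of a loader loop for snapshot_write_next(); the
 * function name example_load_image() and the storage step are
 * hypothetical.
 */
static int example_load_image(void)
{
	struct snapshot_handle handle;
	int ret;

	memset(&handle, 0, sizeof(handle));
	while ((ret = snapshot_write_next(&handle)) > 0) {
		/* read 'ret' bytes from the image file into data_of(handle) */
	}
	if (ret)
		return ret;
	snapshot_write_finalize(&handle);
	return snapshot_image_loaded(&handle) ? 0 : -ENODATA;
}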
2645
2646/**
2647 * snapshot_write_finalize - Complete the loading of a hibernation image.
2648 *
2649 * Must be called after the last call to snapshot_write_next() in case the last
2650 * page in the image happens to be a highmem page and its contents should be
2651 * stored in highmem.  Additionally, it recycles bitmap memory that's not
2652 * necessary any more.
2653 */
2654void snapshot_write_finalize(struct snapshot_handle *handle)
2655{
2656	copy_last_highmem_page();
2657	/* Restore page key for data page (s390 only). */
2658	page_key_write(handle->buffer);
2659	page_key_free();
2660	hibernate_restore_protect_page(handle->buffer);
2661	/* Do that only if we have loaded the image entirely */
2662	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2663		memory_bm_recycle(&orig_bm);
2664		free_highmem_data();
2665	}
2666}
2667
2668int snapshot_image_loaded(struct snapshot_handle *handle)
2669{
2670	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2671			handle->cur <= nr_meta_pages + nr_copy_pages);
2672}
2673
2674#ifdef CONFIG_HIGHMEM
2675/* Assumes that @buf is ready and points to a "safe" page */
2676static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2677				       void *buf)
2678{
2679	void *kaddr1, *kaddr2;
2680
2681	kaddr1 = kmap_atomic(p1);
2682	kaddr2 = kmap_atomic(p2);
2683	copy_page(buf, kaddr1);
2684	copy_page(kaddr1, kaddr2);
2685	copy_page(kaddr2, buf);
2686	kunmap_atomic(kaddr2);
2687	kunmap_atomic(kaddr1);
2688}
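/*
 * This is the classic three-copy swap through a bounce buffer; @buf has
 * to be a "safe" page precisely because it temporarily holds the
 * contents of @p1 while the two pages are being exchanged.
 */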
2689
2690/**
2691 * restore_highmem - Put highmem image pages into their original locations.
2692 *
2693 * For each highmem page that was in use before hibernation and is included in
2694 * the image, and also has been allocated by the "restore" kernel, swap its
2695 * current contents with the previous (ie. "before hibernation") ones.
2696 *
2697 * If the restore eventually fails, we can call this function once again and
2698 * restore the highmem state as seen by the restore kernel.
2699 */
2700int restore_highmem(void)
2701{
2702	struct highmem_pbe *pbe = highmem_pblist;
2703	void *buf;
2704
2705	if (!pbe)
2706		return 0;
2707
2708	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2709	if (!buf)
2710		return -ENOMEM;
2711
2712	while (pbe) {
2713		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2714		pbe = pbe->next;
2715	}
2716	free_image_page(buf, PG_UNSAFE_CLEAR);
2717	return 0;
2718}
2719#endif /* CONFIG_HIGHMEM */
v3.15
   1/*
   2 * linux/kernel/power/snapshot.c
   3 *
   4 * This file provides system snapshot/restore functionality for swsusp.
   5 *
   6 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
   7 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   8 *
   9 * This file is released under the GPLv2.
  10 *
  11 */
  12
  13#include <linux/version.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/suspend.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/spinlock.h>
  20#include <linux/kernel.h>
  21#include <linux/pm.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/bootmem.h>
  25#include <linux/syscalls.h>
  26#include <linux/console.h>
  27#include <linux/highmem.h>
  28#include <linux/list.h>
  29#include <linux/slab.h>
  30#include <linux/compiler.h>
 
  31
  32#include <asm/uaccess.h>
  33#include <asm/mmu_context.h>
  34#include <asm/pgtable.h>
  35#include <asm/tlbflush.h>
  36#include <asm/io.h>
  37
  38#include "power.h"
  39
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  40static int swsusp_page_is_free(struct page *);
  41static void swsusp_set_page_forbidden(struct page *);
  42static void swsusp_unset_page_forbidden(struct page *);
  43
  44/*
  45 * Number of bytes to reserve for memory allocations made by device drivers
  46 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
  47 * cause image creation to fail (tunable via /sys/power/reserved_size).
  48 */
  49unsigned long reserved_size;
  50
  51void __init hibernate_reserved_size_init(void)
  52{
  53	reserved_size = SPARE_PAGES * PAGE_SIZE;
  54}
  55
  56/*
  57 * Preferred image size in bytes (tunable via /sys/power/image_size).
  58 * When it is set to N, swsusp will do its best to ensure the image
  59 * size will not exceed N bytes, but if that is impossible, it will
  60 * try to create the smallest image possible.
  61 */
  62unsigned long image_size;
  63
  64void __init hibernate_image_size_init(void)
  65{
  66	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
  67}
  68
  69/* List of PBEs needed for restoring the pages that were allocated before
 
  70 * the suspend and included in the suspend image, but have also been
  71 * allocated by the "resume" kernel, so their contents cannot be written
  72 * directly to their "original" page frames.
  73 */
  74struct pbe *restore_pblist;
  75
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  76/* Pointer to an auxiliary buffer (1 page) */
  77static void *buffer;
  78
  79/**
  80 *	@safe_needed - on resume, for storing the PBE list and the image,
  81 *	we can only use memory pages that do not conflict with the pages
  82 *	used before suspend.  The unsafe pages have PageNosaveFree set
  83 *	and we count them using unsafe_pages.
  84 *
  85 *	Each allocated image page is marked as PageNosave and PageNosaveFree
  86 *	so that swsusp_free() can release it.
  87 */
  88
  89#define PG_ANY		0
  90#define PG_SAFE		1
  91#define PG_UNSAFE_CLEAR	1
  92#define PG_UNSAFE_KEEP	0
  93
  94static unsigned int allocated_unsafe_pages;
  95
 
 
 
 
 
 
 
 
 
 
 
 
 
  96static void *get_image_page(gfp_t gfp_mask, int safe_needed)
  97{
  98	void *res;
  99
 100	res = (void *)get_zeroed_page(gfp_mask);
 101	if (safe_needed)
 102		while (res && swsusp_page_is_free(virt_to_page(res))) {
 103			/* The page is unsafe, mark it for swsusp_free() */
 104			swsusp_set_page_forbidden(virt_to_page(res));
 105			allocated_unsafe_pages++;
 106			res = (void *)get_zeroed_page(gfp_mask);
 107		}
 108	if (res) {
 109		swsusp_set_page_forbidden(virt_to_page(res));
 110		swsusp_set_page_free(virt_to_page(res));
 111	}
 112	return res;
 113}
 114
 
 
 
 
 
 
 
 
 
 
 
 
 115unsigned long get_safe_page(gfp_t gfp_mask)
 116{
 117	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
 118}
 119
 120static struct page *alloc_image_page(gfp_t gfp_mask)
 121{
 122	struct page *page;
 123
 124	page = alloc_page(gfp_mask);
 125	if (page) {
 126		swsusp_set_page_forbidden(page);
 127		swsusp_set_page_free(page);
 128	}
 129	return page;
 130}
 131
 
 
 
 
 
 
 
 
 132/**
 133 *	free_image_page - free page represented by @addr, allocated with
 134 *	get_image_page (page flags set by it must be cleared)
 
 
 
 
 135 */
 136
 137static inline void free_image_page(void *addr, int clear_nosave_free)
 138{
 139	struct page *page;
 140
 141	BUG_ON(!virt_addr_valid(addr));
 142
 143	page = virt_to_page(addr);
 144
 145	swsusp_unset_page_forbidden(page);
 146	if (clear_nosave_free)
 147		swsusp_unset_page_free(page);
 148
 149	__free_page(page);
 150}
 151
 152/* struct linked_page is used to build chains of pages */
 153
 154#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
 155
 156struct linked_page {
 157	struct linked_page *next;
 158	char data[LINKED_PAGE_DATA_SIZE];
 159} __packed;
 160
 161static inline void
 162free_list_of_pages(struct linked_page *list, int clear_page_nosave)
 163{
 164	while (list) {
 165		struct linked_page *lp = list->next;
 166
 167		free_image_page(list, clear_page_nosave);
 168		list = lp;
 169	}
 170}
 171
 172/**
 173  *	struct chain_allocator is used for allocating small objects out of
 174  *	a linked list of pages called 'the chain'.
 175  *
 176  *	The chain grows each time when there is no room for a new object in
 177  *	the current page.  The allocated objects cannot be freed individually.
 178  *	It is only possible to free them all at once, by freeing the entire
 179  *	chain.
 180  *
 181  *	NOTE: The chain allocator may be inefficient if the allocated objects
 182  *	are not much smaller than PAGE_SIZE.
 183  */
 184
 185struct chain_allocator {
 186	struct linked_page *chain;	/* the chain */
 187	unsigned int used_space;	/* total size of objects allocated out
 188					 * of the current page
 189					 */
 190	gfp_t gfp_mask;		/* mask for allocating pages */
 191	int safe_needed;	/* if set, only "safe" pages are allocated */
 192};
 193
 194static void
 195chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
 196{
 197	ca->chain = NULL;
 198	ca->used_space = LINKED_PAGE_DATA_SIZE;
 199	ca->gfp_mask = gfp_mask;
 200	ca->safe_needed = safe_needed;
 201}
 202
 203static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 204{
 205	void *ret;
 206
 207	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 208		struct linked_page *lp;
 209
 210		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
 
 211		if (!lp)
 212			return NULL;
 213
 214		lp->next = ca->chain;
 215		ca->chain = lp;
 216		ca->used_space = 0;
 217	}
 218	ret = ca->chain->data + ca->used_space;
 219	ca->used_space += size;
 220	return ret;
 221}
 222
 223/**
 224 *	Data types related to memory bitmaps.
 225 *
 226 *	Memory bitmap is a structure consiting of many linked lists of
 227 *	objects.  The main list's elements are of type struct zone_bitmap
 228 *	and each of them corresonds to one zone.  For each zone bitmap
 229 *	object there is a list of objects of type struct bm_block that
 230 *	represent each blocks of bitmap in which information is stored.
 231 *
 232 *	struct memory_bitmap contains a pointer to the main list of zone
 233 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 234 *	and a pointer to the list of pages used for allocating all of the
 235 *	zone bitmap objects and bitmap block objects.
 236 *
 237 *	NOTE: It has to be possible to lay out the bitmap in memory
 238 *	using only allocations of order 0.  Additionally, the bitmap is
 239 *	designed to work with arbitrary number of zones (this is over the
 240 *	top for now, but let's avoid making unnecessary assumptions ;-).
 241 *
 242 *	struct zone_bitmap contains a pointer to a list of bitmap block
 243 *	objects and a pointer to the bitmap block object that has been
 244 *	most recently used for setting bits.  Additionally, it contains the
 245 *	pfns that correspond to the start and end of the represented zone.
 246 *
 247 *	struct bm_block contains a pointer to the memory page in which
 248 *	information is stored (in the form of a block of bitmap)
 249 *	It also contains the pfns that correspond to the start and end of
 250 *	the represented memory area.
 
 
 
 
 
 
 
 
 
 
 
 251 */
 252
 253#define BM_END_OF_MAP	(~0UL)
 254
 255#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
 
 
 256
 257struct bm_block {
 258	struct list_head hook;	/* hook into a list of bitmap blocks */
 259	unsigned long start_pfn;	/* pfn represented by the first bit */
 260	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
 261	unsigned long *data;	/* bitmap representing pages */
 
 
 
 262};
 263
 264static inline unsigned long bm_block_bits(struct bm_block *bb)
 265{
 266	return bb->end_pfn - bb->start_pfn;
 267}
 
 
 
 
 
 
 
 
 
 
 268
 269/* strcut bm_position is used for browsing memory bitmaps */
 270
 271struct bm_position {
 272	struct bm_block *block;
 273	int bit;
 
 
 274};
 275
 276struct memory_bitmap {
 277	struct list_head blocks;	/* list of bitmap blocks */
 278	struct linked_page *p_list;	/* list of pages used to store zone
 279					 * bitmap objects and bitmap block
 280					 * objects
 281					 */
 282	struct bm_position cur;	/* most recently used bit position */
 283};
 284
 285/* Functions that operate on memory bitmaps */
 286
 287static void memory_bm_position_reset(struct memory_bitmap *bm)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 288{
 289	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
 290	bm->cur.bit = 0;
 
 
 
 
 
 
 
 
 
 
 
 291}
 292
 293static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 294
 295/**
 296 *	create_bm_block_list - create a list of block bitmap objects
 297 *	@pages - number of pages to track
 298 *	@list - list to put the allocated blocks into
 299 *	@ca - chain allocator to be used for allocating memory
 300 */
 301static int create_bm_block_list(unsigned long pages,
 302				struct list_head *list,
 303				struct chain_allocator *ca)
 304{
 305	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 
 
 306
 307	while (nr_blocks-- > 0) {
 308		struct bm_block *bb;
 309
 310		bb = chain_alloc(ca, sizeof(struct bm_block));
 311		if (!bb)
 
 
 
 
 
 
 
 
 
 312			return -ENOMEM;
 313		list_add(&bb->hook, list);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 314	}
 315
 
 
 
 316	return 0;
 317}
 318
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 319struct mem_extent {
 320	struct list_head hook;
 321	unsigned long start;
 322	unsigned long end;
 323};
 324
 325/**
 326 *	free_mem_extents - free a list of memory extents
 327 *	@list - list of extents to empty
 328 */
 329static void free_mem_extents(struct list_head *list)
 330{
 331	struct mem_extent *ext, *aux;
 332
 333	list_for_each_entry_safe(ext, aux, list, hook) {
 334		list_del(&ext->hook);
 335		kfree(ext);
 336	}
 337}
 338
 339/**
 340 *	create_mem_extents - create a list of memory extents representing
 341 *	                     contiguous ranges of PFNs
 342 *	@list - list to put the extents into
 343 *	@gfp_mask - mask to use for memory allocations
 
 344 */
 345static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 346{
 347	struct zone *zone;
 348
 349	INIT_LIST_HEAD(list);
 350
 351	for_each_populated_zone(zone) {
 352		unsigned long zone_start, zone_end;
 353		struct mem_extent *ext, *cur, *aux;
 354
 355		zone_start = zone->zone_start_pfn;
 356		zone_end = zone_end_pfn(zone);
 357
 358		list_for_each_entry(ext, list, hook)
 359			if (zone_start <= ext->end)
 360				break;
 361
 362		if (&ext->hook == list || zone_end < ext->start) {
 363			/* New extent is necessary */
 364			struct mem_extent *new_ext;
 365
 366			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
 367			if (!new_ext) {
 368				free_mem_extents(list);
 369				return -ENOMEM;
 370			}
 371			new_ext->start = zone_start;
 372			new_ext->end = zone_end;
 373			list_add_tail(&new_ext->hook, &ext->hook);
 374			continue;
 375		}
 376
 377		/* Merge this zone's range of PFNs with the existing one */
 378		if (zone_start < ext->start)
 379			ext->start = zone_start;
 380		if (zone_end > ext->end)
 381			ext->end = zone_end;
 382
 383		/* More merging may be possible */
 384		cur = ext;
 385		list_for_each_entry_safe_continue(cur, aux, list, hook) {
 386			if (zone_end < cur->start)
 387				break;
 388			if (zone_end < cur->end)
 389				ext->end = cur->end;
 390			list_del(&cur->hook);
 391			kfree(cur);
 392		}
 393	}
 394
 395	return 0;
 396}
 397
 398/**
 399  *	memory_bm_create - allocate memory for a memory bitmap
 400  */
 401static int
 402memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 403{
 404	struct chain_allocator ca;
 405	struct list_head mem_extents;
 406	struct mem_extent *ext;
 407	int error;
 408
 409	chain_init(&ca, gfp_mask, safe_needed);
 410	INIT_LIST_HEAD(&bm->blocks);
 411
 412	error = create_mem_extents(&mem_extents, gfp_mask);
 413	if (error)
 414		return error;
 415
 416	list_for_each_entry(ext, &mem_extents, hook) {
 417		struct bm_block *bb;
 418		unsigned long pfn = ext->start;
 419		unsigned long pages = ext->end - ext->start;
 420
 421		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
 422
 423		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
 424		if (error)
 425			goto Error;
 426
 427		list_for_each_entry_continue(bb, &bm->blocks, hook) {
 428			bb->data = get_image_page(gfp_mask, safe_needed);
 429			if (!bb->data) {
 430				error = -ENOMEM;
 431				goto Error;
 432			}
 433
 434			bb->start_pfn = pfn;
 435			if (pages >= BM_BITS_PER_BLOCK) {
 436				pfn += BM_BITS_PER_BLOCK;
 437				pages -= BM_BITS_PER_BLOCK;
 438			} else {
 439				/* This is executed only once in the loop */
 440				pfn += pages;
 441			}
 442			bb->end_pfn = pfn;
 443		}
 
 444	}
 445
 446	bm->p_list = ca.chain;
 447	memory_bm_position_reset(bm);
 448 Exit:
 449	free_mem_extents(&mem_extents);
 450	return error;
 451
 452 Error:
 453	bm->p_list = ca.chain;
 454	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 455	goto Exit;
 456}
 457
 458/**
 459  *	memory_bm_free - free memory occupied by the memory bitmap @bm
 460  */
 
 461static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 462{
 463	struct bm_block *bb;
 464
 465	list_for_each_entry(bb, &bm->blocks, hook)
 466		if (bb->data)
 467			free_image_page(bb->data, clear_nosave_free);
 468
 469	free_list_of_pages(bm->p_list, clear_nosave_free);
 470
 471	INIT_LIST_HEAD(&bm->blocks);
 472}
 473
 474/**
 475 *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 476 *	to given pfn.  The cur_zone_bm member of @bm and the cur_block member
 477 *	of @bm->cur_zone_bm are updated.
 
 
 
 
 478 */
 479static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 480				void **addr, unsigned int *bit_nr)
 481{
 482	struct bm_block *bb;
 
 
 
 
 483
 484	/*
 485	 * Check if the pfn corresponds to the current bitmap block and find
 486	 * the block where it fits if this is not the case.
 487	 */
 488	bb = bm->cur.block;
 489	if (pfn < bb->start_pfn)
 490		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
 491			if (pfn >= bb->start_pfn)
 492				break;
 493
 494	if (pfn >= bb->end_pfn)
 495		list_for_each_entry_continue(bb, &bm->blocks, hook)
 496			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
 497				break;
 
 
 
 498
 499	if (&bb->hook == &bm->blocks)
 500		return -EFAULT;
 501
 502	/* The block has been found */
 503	bm->cur.block = bb;
 504	pfn -= bb->start_pfn;
 505	bm->cur.bit = pfn + 1;
 506	*bit_nr = pfn;
 507	*addr = bb->data;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 508	return 0;
 509}
 510
 511static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 512{
 513	void *addr;
 514	unsigned int bit;
 515	int error;
 516
 517	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 518	BUG_ON(error);
 519	set_bit(bit, addr);
 520}
 521
 522static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 523{
 524	void *addr;
 525	unsigned int bit;
 526	int error;
 527
 528	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 529	if (!error)
 530		set_bit(bit, addr);
 
 531	return error;
 532}
 533
 534static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 535{
 536	void *addr;
 537	unsigned int bit;
 538	int error;
 539
 540	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 541	BUG_ON(error);
 542	clear_bit(bit, addr);
 543}
 544
 
 
 
 
 
 
 
 
 545static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 546{
 547	void *addr;
 548	unsigned int bit;
 549	int error;
 550
 551	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 552	BUG_ON(error);
 553	return test_bit(bit, addr);
 554}
 555
 556static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 557{
 558	void *addr;
 559	unsigned int bit;
 560
 561	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 562}
 563
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 564/**
 565 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 566 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 567 *	returned.
 
 
 
 568 *
 569 *	It is required to run memory_bm_position_reset() before the first call to
 570 *	this function.
 571 */
 572
 573static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 574{
 575	struct bm_block *bb;
 576	int bit;
 577
 578	bb = bm->cur.block;
 579	do {
 580		bit = bm->cur.bit;
 581		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
 582		if (bit < bm_block_bits(bb))
 583			goto Return_pfn;
 584
 585		bb = list_entry(bb->hook.next, struct bm_block, hook);
 586		bm->cur.block = bb;
 587		bm->cur.bit = 0;
 588	} while (&bb->hook != &bm->blocks);
 
 589
 590	memory_bm_position_reset(bm);
 591	return BM_END_OF_MAP;
 592
 593 Return_pfn:
 594	bm->cur.bit = bit + 1;
 595	return bb->start_pfn + bit;
 596}
 597
 598/**
 599 *	This structure represents a range of page frames the contents of which
 600 *	should not be saved during the suspend.
 601 */
 602
 603struct nosave_region {
 604	struct list_head list;
 605	unsigned long start_pfn;
 606	unsigned long end_pfn;
 607};
 608
 609static LIST_HEAD(nosave_regions);
 610
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 611/**
 612 *	register_nosave_region - register a range of page frames the contents
 613 *	of which should not be saved during the suspend (to be used in the early
 614 *	initialization code)
 
 615 */
 616
 617void __init
 618__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
 619			 int use_kmalloc)
 620{
 621	struct nosave_region *region;
 622
 623	if (start_pfn >= end_pfn)
 624		return;
 625
 626	if (!list_empty(&nosave_regions)) {
 627		/* Try to extend the previous region (they should be sorted) */
 628		region = list_entry(nosave_regions.prev,
 629					struct nosave_region, list);
 630		if (region->end_pfn == start_pfn) {
 631			region->end_pfn = end_pfn;
 632			goto Report;
 633		}
 634	}
 635	if (use_kmalloc) {
 636		/* during init, this shouldn't fail */
 637		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
 638		BUG_ON(!region);
 639	} else
 640		/* This allocation cannot fail */
 641		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
 
 642	region->start_pfn = start_pfn;
 643	region->end_pfn = end_pfn;
 644	list_add_tail(&region->list, &nosave_regions);
 645 Report:
 646	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
 647		(unsigned long long) start_pfn << PAGE_SHIFT,
 648		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 649}
 650
 651/*
 652 * Set bits in this map correspond to the page frames the contents of which
 653 * should not be saved during the suspend.
 654 */
 655static struct memory_bitmap *forbidden_pages_map;
 656
 657/* Set bits in this map correspond to free page frames. */
 658static struct memory_bitmap *free_pages_map;
 659
 660/*
 661 * Each page frame allocated for creating the image is marked by setting the
 662 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 663 */
 664
 665void swsusp_set_page_free(struct page *page)
 666{
 667	if (free_pages_map)
 668		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
 669}
 670
 671static int swsusp_page_is_free(struct page *page)
 672{
 673	return free_pages_map ?
 674		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
 675}
 676
 677void swsusp_unset_page_free(struct page *page)
 678{
 679	if (free_pages_map)
 680		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
 681}
 682
 683static void swsusp_set_page_forbidden(struct page *page)
 684{
 685	if (forbidden_pages_map)
 686		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
 687}
 688
 689int swsusp_page_is_forbidden(struct page *page)
 690{
 691	return forbidden_pages_map ?
 692		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
 693}
 694
 695static void swsusp_unset_page_forbidden(struct page *page)
 696{
 697	if (forbidden_pages_map)
 698		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
 699}
 700
 701/**
 702 *	mark_nosave_pages - set bits corresponding to the page frames the
 703 *	contents of which should not be saved in a given bitmap.
 
 
 
 704 */
 705
 706static void mark_nosave_pages(struct memory_bitmap *bm)
 707{
 708	struct nosave_region *region;
 709
 710	if (list_empty(&nosave_regions))
 711		return;
 712
 713	list_for_each_entry(region, &nosave_regions, list) {
 714		unsigned long pfn;
 715
 716		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
 717			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
 718			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
 719				- 1);
 720
 721		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
 722			if (pfn_valid(pfn)) {
 723				/*
 724				 * It is safe to ignore the result of
 725				 * mem_bm_set_bit_check() here, since we won't
 726				 * touch the PFNs for which the error is
 727				 * returned anyway.
 728				 */
 729				mem_bm_set_bit_check(bm, pfn);
 730			}
 731	}
 732}
 733
 734/**
 735 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 736 *	frames that should not be saved and free page frames.  The pointers
 737 *	forbidden_pages_map and free_pages_map are only modified if everything
 738 *	goes well, because we don't want the bits to be used before both bitmaps
 739 *	are set up.
 
 740 */
 741
 742int create_basic_memory_bitmaps(void)
 743{
 744	struct memory_bitmap *bm1, *bm2;
 745	int error = 0;
 746
 747	if (forbidden_pages_map && free_pages_map)
 748		return 0;
 749	else
 750		BUG_ON(forbidden_pages_map || free_pages_map);
 751
 752	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 753	if (!bm1)
 754		return -ENOMEM;
 755
 756	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
 757	if (error)
 758		goto Free_first_object;
 759
 760	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 761	if (!bm2)
 762		goto Free_first_bitmap;
 763
 764	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
 765	if (error)
 766		goto Free_second_object;
 767
 768	forbidden_pages_map = bm1;
 769	free_pages_map = bm2;
 770	mark_nosave_pages(forbidden_pages_map);
 771
 772	pr_debug("PM: Basic memory bitmaps created\n");
 773
 774	return 0;
 775
 776 Free_second_object:
 777	kfree(bm2);
 778 Free_first_bitmap:
 779 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 780 Free_first_object:
 781	kfree(bm1);
 782	return -ENOMEM;
 783}
 784
 785/**
 786 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 787 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 788 *	so that the bitmaps themselves are not referred to while they are being
 789 *	freed.
 
 790 */
 791
 792void free_basic_memory_bitmaps(void)
 793{
 794	struct memory_bitmap *bm1, *bm2;
 795
 796	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
 797		return;
 798
 799	bm1 = forbidden_pages_map;
 800	bm2 = free_pages_map;
 801	forbidden_pages_map = NULL;
 802	free_pages_map = NULL;
 803	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 804	kfree(bm1);
 805	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
 806	kfree(bm2);
 807
 808	pr_debug("PM: Basic memory bitmaps freed\n");
 809}
 810
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 811/**
 812 *	snapshot_additional_pages - estimate the number of additional pages
 813 *	be needed for setting up the suspend image data structures for given
 814 *	zone (usually the returned value is greater than the exact number)
 
 
 
 815 */
 816
 817unsigned int snapshot_additional_pages(struct zone *zone)
 818{
 819	unsigned int res;
 
 
 
 
 
 
 
 
 820
 821	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
 822	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
 823			    LINKED_PAGE_DATA_SIZE);
 824	return 2 * res;
 825}
 826
 827#ifdef CONFIG_HIGHMEM
 828/**
 829 *	count_free_highmem_pages - compute the total number of free highmem
 830 *	pages, system-wide.
 
 831 */
 832
 833static unsigned int count_free_highmem_pages(void)
 834{
 835	struct zone *zone;
 836	unsigned int cnt = 0;
 837
 838	for_each_populated_zone(zone)
 839		if (is_highmem(zone))
 840			cnt += zone_page_state(zone, NR_FREE_PAGES);
 841
 842	return cnt;
 843}
 844
 845/**
 846 *	saveable_highmem_page - Determine whether a highmem page should be
 847 *	included in the suspend image.
 848 *
 849 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 850 *	and it isn't a part of a free chunk of pages.
 
 
 851 */
 852static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 853{
 854	struct page *page;
 855
 856	if (!pfn_valid(pfn))
 857		return NULL;
 858
 859	page = pfn_to_page(pfn);
 860	if (page_zone(page) != zone)
 861		return NULL;
 862
 863	BUG_ON(!PageHighMem(page));
 864
 865	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
 866	    PageReserved(page))
 867		return NULL;
 868
 869	if (page_is_guard(page))
 870		return NULL;
 871
 872	return page;
 873}
 874
 875/**
 876 *	count_highmem_pages - compute the total number of saveable highmem
 877 *	pages.
 878 */
 879
 880static unsigned int count_highmem_pages(void)
 881{
 882	struct zone *zone;
 883	unsigned int n = 0;
 884
 885	for_each_populated_zone(zone) {
 886		unsigned long pfn, max_zone_pfn;
 887
 888		if (!is_highmem(zone))
 889			continue;
 890
 891		mark_free_pages(zone);
 892		max_zone_pfn = zone_end_pfn(zone);
 893		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 894			if (saveable_highmem_page(zone, pfn))
 895				n++;
 896	}
 897	return n;
 898}
 899#else
 900static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
 901{
 902	return NULL;
 903}
 904#endif /* CONFIG_HIGHMEM */
 905
 906/**
 907 *	saveable_page - Determine whether a non-highmem page should be included
 908 *	in the suspend image.
 
 
 909 *
 910 *	We should save the page if it isn't Nosave, and is not in the range
 911 *	of pages statically defined as 'unsaveable', and it isn't a part of
 912 *	a free chunk of pages.
 913 */
 914static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 915{
 916	struct page *page;
 917
 918	if (!pfn_valid(pfn))
 919		return NULL;
 920
 921	page = pfn_to_page(pfn);
 922	if (page_zone(page) != zone)
 923		return NULL;
 924
 925	BUG_ON(PageHighMem(page));
 926
 927	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
 928		return NULL;
 929
 930	if (PageReserved(page)
 931	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
 932		return NULL;
 933
 934	if (page_is_guard(page))
 935		return NULL;
 936
 937	return page;
 938}
 939
 940/**
 941 *	count_data_pages - compute the total number of saveable non-highmem
 942 *	pages.
 943 */
 944
 945static unsigned int count_data_pages(void)
 946{
 947	struct zone *zone;
 948	unsigned long pfn, max_zone_pfn;
 949	unsigned int n = 0;
 950
 951	for_each_populated_zone(zone) {
 952		if (is_highmem(zone))
 953			continue;
 954
 955		mark_free_pages(zone);
 956		max_zone_pfn = zone_end_pfn(zone);
 957		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 958			if (saveable_page(zone, pfn))
 959				n++;
 960	}
 961	return n;
 962}
 963
 964/* This is needed, because copy_page and memcpy are not usable for copying
 
 965 * task structs.
 966 */
 967static inline void do_copy_page(long *dst, long *src)
 968{
 969	int n;
 970
 971	for (n = PAGE_SIZE / sizeof(long); n; n--)
 972		*dst++ = *src++;
 973}
 974
 975
 976/**
 977 *	safe_copy_page - check if the page we are going to copy is marked as
 978 *		present in the kernel page tables (this always is the case if
 979 *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
 980 *		kernel_page_present() always returns 'true').
 
 981 */
 982static void safe_copy_page(void *dst, struct page *s_page)
 983{
 984	if (kernel_page_present(s_page)) {
 985		do_copy_page(dst, page_address(s_page));
 986	} else {
 987		kernel_map_pages(s_page, 1, 1);
 988		do_copy_page(dst, page_address(s_page));
 989		kernel_map_pages(s_page, 1, 0);
 990	}
 991}
 992
 993
 994#ifdef CONFIG_HIGHMEM
 995static inline struct page *
 996page_is_saveable(struct zone *zone, unsigned long pfn)
 997{
 998	return is_highmem(zone) ?
 999		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1000}
1001
1002static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1003{
1004	struct page *s_page, *d_page;
1005	void *src, *dst;
1006
1007	s_page = pfn_to_page(src_pfn);
1008	d_page = pfn_to_page(dst_pfn);
1009	if (PageHighMem(s_page)) {
1010		src = kmap_atomic(s_page);
1011		dst = kmap_atomic(d_page);
1012		do_copy_page(dst, src);
1013		kunmap_atomic(dst);
1014		kunmap_atomic(src);
1015	} else {
1016		if (PageHighMem(d_page)) {
1017			/* The page pointed to by src may contain some kernel
1018			 * data modified by kmap_atomic().
1019			 */
1020			safe_copy_page(buffer, s_page);
1021			dst = kmap_atomic(d_page);
1022			copy_page(dst, buffer);
1023			kunmap_atomic(dst);
1024		} else {
1025			safe_copy_page(page_address(d_page), s_page);
1026		}
1027	}
1028}
1029#else
1030#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1031
1032static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1033{
1034	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1035				pfn_to_page(src_pfn));
1036}
1037#endif /* CONFIG_HIGHMEM */
1038
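/**
 *	copy_data_pages - mark every saveable page in @orig_bm, then walk
 *	@orig_bm and @copy_bm in lockstep, copying each saveable page into
 *	the page frame that was preallocated for it in @copy_bm.
 */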
1039static void
1040copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
1041{
1042	struct zone *zone;
1043	unsigned long pfn;
1044
1045	for_each_populated_zone(zone) {
1046		unsigned long max_zone_pfn;
1047
1048		mark_free_pages(zone);
1049		max_zone_pfn = zone_end_pfn(zone);
1050		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1051			if (page_is_saveable(zone, pfn))
1052				memory_bm_set_bit(orig_bm, pfn);
1053	}
1054	memory_bm_position_reset(orig_bm);
1055	memory_bm_position_reset(copy_bm);
1056	for (;;) {
1057		pfn = memory_bm_next_pfn(orig_bm);
1058		if (unlikely(pfn == BM_END_OF_MAP))
1059			break;
1060		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1061	}
1062}
1063
1064/* Total number of image pages */
1065static unsigned int nr_copy_pages;
1066/* Number of pages needed for saving the original pfns of the image pages */
1067static unsigned int nr_meta_pages;
1068/*
1069 * Numbers of normal and highmem page frames allocated for hibernation image
1070 * before suspending devices.
1071 */
1072unsigned int alloc_normal, alloc_highmem;
1073/*
1074 * Memory bitmap used for marking saveable pages (during hibernation) or
1075 * hibernation image pages (during restore)
1076 */
1077static struct memory_bitmap orig_bm;
1078/*
1079 * Memory bitmap used during hibernation for marking allocated page frames that
1080 * will contain copies of saveable pages.  During restore it is initially used
1081 * for marking hibernation image pages, but then the set bits from it are
1082 * duplicated in @orig_bm and it is released.  On highmem systems it is next
1083 * used for marking "safe" highmem pages, but it has to be reinitialized for
1084 * this purpose.
1085 */
1086static struct memory_bitmap copy_bm;
1087
1088/**
1089 *	swsusp_free - free pages allocated for the suspend.
1090 *
1091 *	Suspend pages are allocated before the atomic copy is made, so we
1092 *	need to release them after the resume.
1093 */
1094
1095void swsusp_free(void)
1096{
1097	struct zone *zone;
1098	unsigned long pfn, max_zone_pfn;
1099
1100	for_each_populated_zone(zone) {
1101		max_zone_pfn = zone_end_pfn(zone);
1102		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1103			if (pfn_valid(pfn)) {
1104				struct page *page = pfn_to_page(pfn);
1105
1106				if (swsusp_page_is_forbidden(page) &&
1107				    swsusp_page_is_free(page)) {
1108					swsusp_unset_page_forbidden(page);
1109					swsusp_unset_page_free(page);
1110					__free_page(page);
1111				}
1112			}
1113	}
1114	nr_copy_pages = 0;
1115	nr_meta_pages = 0;
1116	restore_pblist = NULL;
1117	buffer = NULL;
1118	alloc_normal = 0;
1119	alloc_highmem = 0;
1120}
1121
1122/* Helper functions used for the shrinking of memory. */
1123
1124#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1125
1126/**
1127 * preallocate_image_pages - Allocate a number of pages for hibernation image
1128 * @nr_pages: Number of page frames to allocate.
1129 * @mask: GFP flags to use for the allocation.
1130 *
1131 * Return value: Number of page frames actually allocated
1132 */
1133static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1134{
1135	unsigned long nr_alloc = 0;
1136
1137	while (nr_pages > 0) {
1138		struct page *page;
1139
1140		page = alloc_image_page(mask);
1141		if (!page)
1142			break;
1143		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1144		if (PageHighMem(page))
1145			alloc_highmem++;
1146		else
1147			alloc_normal++;
1148		nr_pages--;
1149		nr_alloc++;
1150	}
1151
1152	return nr_alloc;
1153}
1154
1155static unsigned long preallocate_image_memory(unsigned long nr_pages,
1156					      unsigned long avail_normal)
1157{
1158	unsigned long alloc;
1159
1160	if (avail_normal <= alloc_normal)
1161		return 0;
1162
1163	alloc = avail_normal - alloc_normal;
1164	if (nr_pages < alloc)
1165		alloc = nr_pages;
1166
1167	return preallocate_image_pages(alloc, GFP_IMAGE);
1168}
1169
1170#ifdef CONFIG_HIGHMEM
1171static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1172{
1173	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1174}
1175
1176/**
1177 *  __fraction - Compute (an approximation of) x * (multiplier / base)
1178 */
1179static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1180{
1181	x *= multiplier;
1182	do_div(x, base);
1183	return (unsigned long)x;
1184}
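/*
 * Note: do_div() is used above because a plain 64-bit division is not
 * available on all 32-bit architectures.  As a worked example,
 * __fraction(1000, 3, 8) evaluates to 1000 * 3 / 8 = 375.
 */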
1185
1186static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1187						unsigned long highmem,
1188						unsigned long total)
1189{
1190	unsigned long alloc = __fraction(nr_pages, highmem, total);
1191
1192	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1193}
1194#else /* CONFIG_HIGHMEM */
1195static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1196{
1197	return 0;
1198}
1199
1200static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1201						unsigned long highmem,
1202						unsigned long total)
1203{
1204	return 0;
1205}
1206#endif /* CONFIG_HIGHMEM */
1207
1208/**
1209 * free_unnecessary_pages - Release preallocated pages not needed for the image
1210 */
1211static void free_unnecessary_pages(void)
1212{
1213	unsigned long save, to_free_normal, to_free_highmem;
1214
1215	save = count_data_pages();
1216	if (alloc_normal >= save) {
1217		to_free_normal = alloc_normal - save;
1218		save = 0;
1219	} else {
1220		to_free_normal = 0;
1221		save -= alloc_normal;
1222	}
1223	save += count_highmem_pages();
1224	if (alloc_highmem >= save) {
1225		to_free_highmem = alloc_highmem - save;
1226	} else {
1227		to_free_highmem = 0;
1228		save -= alloc_highmem;
1229		if (to_free_normal > save)
1230			to_free_normal -= save;
1231		else
1232			to_free_normal = 0;
1233	}
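	/*
	 * Worked example (made-up numbers): with 1000 saveable normal pages
	 * and alloc_normal == 1200, 200 normal pages are nominally excess.
	 * If there are 500 saveable highmem pages but alloc_highmem == 300,
	 * the highmem preallocation is 200 pages short, so the "excess"
	 * normal pages are kept to cover the shortfall and to_free_normal
	 * ends up 0.
	 */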
1234
1235	memory_bm_position_reset(&copy_bm);
1236
1237	while (to_free_normal > 0 || to_free_highmem > 0) {
1238		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1239		struct page *page = pfn_to_page(pfn);
1240
1241		if (PageHighMem(page)) {
1242			if (!to_free_highmem)
1243				continue;
1244			to_free_highmem--;
1245			alloc_highmem--;
1246		} else {
1247			if (!to_free_normal)
1248				continue;
1249			to_free_normal--;
1250			alloc_normal--;
1251		}
1252		memory_bm_clear_bit(&copy_bm, pfn);
1253		swsusp_unset_page_forbidden(page);
1254		swsusp_unset_page_free(page);
1255		__free_page(page);
1256	}
1257}
1258
1259/**
1260 * minimum_image_size - Estimate the minimum acceptable size of an image
1261 * @saveable: Number of saveable pages in the system.
1262 *
1263 * We want to avoid attempting to free too much memory too hard, so estimate the
1264 * minimum acceptable size of a hibernation image to use as the lower limit for
1265 * preallocating memory.
1266 *
1267 * We assume that the minimum image size should be proportional to
1268 *
1269 * [number of saveable pages] - [number of pages that can be freed in theory]
1270 *
1271 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1272 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1273 * minus mapped file pages.
1274 */
1275static unsigned long minimum_image_size(unsigned long saveable)
1276{
1277	unsigned long size;
1278
1279	size = global_page_state(NR_SLAB_RECLAIMABLE)
1280		+ global_page_state(NR_ACTIVE_ANON)
1281		+ global_page_state(NR_INACTIVE_ANON)
1282		+ global_page_state(NR_ACTIVE_FILE)
1283		+ global_page_state(NR_INACTIVE_FILE)
1284		- global_page_state(NR_FILE_MAPPED);
1285
1286	return saveable <= size ? 0 : saveable - size;
1287}
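/*
 * Illustration (made-up numbers): with 300000 saveable pages and the
 * freeable terms summing to 200000 pages, the estimated minimum image
 * size is 100000 pages.  If the freeable total meets or exceeds the
 * saveable count, 0 is returned and no lower limit is imposed.
 */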
1288
1289/**
1290 * hibernate_preallocate_memory - Preallocate memory for hibernation image
1291 *
1292 * To create a hibernation image it is necessary to make a copy of every page
1293 * frame in use.  We also need a number of page frames to be free during
1294 * hibernation for allocations made while saving the image and for device
1295 * drivers, in case they need to allocate memory from their hibernation
1296 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1297 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1298 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1299 * total number of available page frames and allocate at least
1300 *
1301 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1302 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1303 *
1304 * of them, which corresponds to the maximum size of a hibernation image.
1305 *
1306 * If image_size is set below the number following from the above formula,
1307 * the preallocation of memory is continued until the total number of saveable
1308 * pages in the system is below the requested image size or the minimum
1309 * acceptable image size returned by minimum_image_size(), whichever is greater.
1310 */
1311int hibernate_preallocate_memory(void)
1312{
1313	struct zone *zone;
1314	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1315	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1316	struct timeval start, stop;
1317	int error;
1318
1319	printk(KERN_INFO "PM: Preallocating image memory... ");
1320	do_gettimeofday(&start);
1321
1322	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1323	if (error)
1324		goto err_out;
1325
1326	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1327	if (error)
1328		goto err_out;
1329
1330	alloc_normal = 0;
1331	alloc_highmem = 0;
1332
1333	/* Count the number of saveable data pages. */
1334	save_highmem = count_highmem_pages();
1335	saveable = count_data_pages();
1336
1337	/*
1338	 * Compute the total number of page frames we can use (count) and the
1339	 * number of pages needed for image metadata (size).
1340	 */
1341	count = saveable;
1342	saveable += save_highmem;
1343	highmem = save_highmem;
1344	size = 0;
1345	for_each_populated_zone(zone) {
1346		size += snapshot_additional_pages(zone);
1347		if (is_highmem(zone))
1348			highmem += zone_page_state(zone, NR_FREE_PAGES);
1349		else
1350			count += zone_page_state(zone, NR_FREE_PAGES);
1351	}
1352	avail_normal = count;
1353	count += highmem;
1354	count -= totalreserve_pages;
1355
1356	/* Add number of pages required for page keys (s390 only). */
1357	size += page_key_additional_pages(saveable);
1358
1359	/* Compute the maximum number of saveable pages to leave in memory. */
1360	max_size = (count - (size + PAGES_FOR_IO)) / 2
1361			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
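	/*
	 * Worked example (hypothetical numbers): with count == 2097152
	 * page frames (8 GiB of 4 KiB pages), size == 10000 metadata pages,
	 * PAGES_FOR_IO == 1024 and reserved_size == 1 MiB (256 pages),
	 * max_size == (2097152 - 11024) / 2 - 512 == 1042552 saveable
	 * pages that may be left in memory.
	 */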
1362	/* Compute the desired number of image pages specified by image_size. */
1363	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1364	if (size > max_size)
1365		size = max_size;
1366	/*
1367	 * If the desired number of image pages is at least as large as the
1368	 * current number of saveable pages in memory, allocate page frames for
1369	 * the image and we're done.
1370	 */
1371	if (size >= saveable) {
1372		pages = preallocate_image_highmem(save_highmem);
1373		pages += preallocate_image_memory(saveable - pages, avail_normal);
1374		goto out;
1375	}
1376
1377	/* Estimate the minimum size of the image. */
1378	pages = minimum_image_size(saveable);
1379	/*
1380	 * To avoid excessive pressure on the normal zone, leave room in it to
1381	 * accommodate an image of the minimum size (unless it's already too
1382	 * small, in which case don't preallocate pages from it at all).
1383	 */
1384	if (avail_normal > pages)
1385		avail_normal -= pages;
1386	else
1387		avail_normal = 0;
1388	if (size < pages)
1389		size = min_t(unsigned long, pages, max_size);
1390
1391	/*
1392	 * Let the memory management subsystem know that we're going to need a
1393	 * large number of page frames to allocate and make it free some memory.
1394	 * NOTE: If this is not done, performance will be hurt badly in some
1395	 * test cases.
1396	 */
1397	shrink_all_memory(saveable - size);
1398
1399	/*
1400	 * The number of saveable pages in memory was too high, so apply some
1401	 * pressure to decrease it.  First, make room for the largest possible
1402	 * image and fail if that doesn't work.  Next, try to decrease the size
1403	 * of the image as much as indicated by 'size' using allocations from
1404	 * highmem and non-highmem zones separately.
1405	 */
1406	pages_highmem = preallocate_image_highmem(highmem / 2);
1407	alloc = count - max_size;
1408	if (alloc > pages_highmem)
1409		alloc -= pages_highmem;
1410	else
1411		alloc = 0;
1412	pages = preallocate_image_memory(alloc, avail_normal);
1413	if (pages < alloc) {
1414		/* We have exhausted non-highmem pages, try highmem. */
1415		alloc -= pages;
1416		pages += pages_highmem;
1417		pages_highmem = preallocate_image_highmem(alloc);
1418		if (pages_highmem < alloc)
1419			goto err_out;
1420		pages += pages_highmem;
1421		/*
1422		 * size is the desired number of saveable pages to leave in
1423		 * memory, so try to preallocate (all memory - size) pages.
1424		 */
1425		alloc = (count - pages) - size;
1426		pages += preallocate_image_highmem(alloc);
1427	} else {
1428		/*
1429		 * There are approximately max_size saveable pages at this point
1430		 * and we want to reduce this number down to size.
1431		 */
1432		alloc = max_size - size;
1433		size = preallocate_highmem_fraction(alloc, highmem, count);
1434		pages_highmem += size;
1435		alloc -= size;
1436		size = preallocate_image_memory(alloc, avail_normal);
1437		pages_highmem += preallocate_image_highmem(alloc - size);
1438		pages += pages_highmem + size;
1439	}
1440
1441	/*
1442	 * We only need as many page frames for the image as there are saveable
1443	 * pages in memory, but we have allocated more.  Release the excessive
1444	 * ones now.
1445	 */
1446	free_unnecessary_pages();
1447
1448 out:
1449	do_gettimeofday(&stop);
1450	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1451	swsusp_show_speed(&start, &stop, pages, "Allocated");
1452
1453	return 0;
1454
1455 err_out:
1456	printk(KERN_CONT "\n");
1457	swsusp_free();
1458	return -ENOMEM;
1459}
1460
1461#ifdef CONFIG_HIGHMEM
1462/**
1463 *	count_pages_for_highmem - compute the number of non-highmem pages
1464 *	that will be necessary for creating copies of highmem pages.
1465 */
1466
1467static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1468{
1469	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1470
1471	if (free_highmem >= nr_highmem)
1472		nr_highmem = 0;
1473	else
1474		nr_highmem -= free_highmem;
1475
1476	return nr_highmem;
1477}
1478#else
1479static unsigned int
1480count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1481#endif /* CONFIG_HIGHMEM */
1482
1483/**
1484 *	enough_free_mem - Make sure we have enough free memory for the
1485 *	snapshot image.
1486 */
1487
1488static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1489{
1490	struct zone *zone;
1491	unsigned int free = alloc_normal;
1492
1493	for_each_populated_zone(zone)
1494		if (!is_highmem(zone))
1495			free += zone_page_state(zone, NR_FREE_PAGES);
1496
1497	nr_pages += count_pages_for_highmem(nr_highmem);
1498	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1499		nr_pages, PAGES_FOR_IO, free);
1500
1501	return free > nr_pages + PAGES_FOR_IO;
1502}
1503
1504#ifdef CONFIG_HIGHMEM
1505/**
1506 *	get_highmem_buffer - if there are some highmem pages in the suspend
1507 *	image, we may need the buffer to copy them and/or load their data.
1508 */
1509
1510static inline int get_highmem_buffer(int safe_needed)
1511{
1512	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1513	return buffer ? 0 : -ENOMEM;
1514}
1515
1516/**
1517 *	alloc_highmem_pages - allocate some highmem pages for the image.
1518 *	Try to allocate as many pages as needed, but if the number of free
1519 *	highmem pages is smaller than that, allocate them all.
1520 */
1521
1522static inline unsigned int
1523alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1524{
1525	unsigned int to_alloc = count_free_highmem_pages();
1526
1527	if (to_alloc > nr_highmem)
1528		to_alloc = nr_highmem;
1529
1530	nr_highmem -= to_alloc;
1531	while (to_alloc-- > 0) {
1532		struct page *page;
1533
1534		page = alloc_image_page(__GFP_HIGHMEM);
1535		memory_bm_set_bit(bm, page_to_pfn(page));
1536	}
1537	return nr_highmem;
1538}
1539#else
1540static inline int get_highmem_buffer(int safe_needed) { return 0; }
1541
1542static inline unsigned int
1543alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1544#endif /* CONFIG_HIGHMEM */
1545
1546/**
1547 *	swsusp_alloc - allocate memory for the suspend image
1548 *
1549 *	We first try to allocate as many highmem pages as there are
1550 *	saveable highmem pages in the system.  If that fails, we allocate
1551 *	non-highmem pages for the copies of the remaining highmem ones.
1552 *
1553 *	In this approach it is likely that the copies of highmem pages will
1554 *	also be located in the high memory, because of the way in which
1555 *	copy_data_pages() works.
1556 */
1557
1558static int
1559swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1560		unsigned int nr_pages, unsigned int nr_highmem)
1561{
1562	if (nr_highmem > 0) {
1563		if (get_highmem_buffer(PG_ANY))
1564			goto err_out;
1565		if (nr_highmem > alloc_highmem) {
1566			nr_highmem -= alloc_highmem;
1567			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1568		}
1569	}
1570	if (nr_pages > alloc_normal) {
1571		nr_pages -= alloc_normal;
1572		while (nr_pages-- > 0) {
1573			struct page *page;
1574
1575			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1576			if (!page)
1577				goto err_out;
1578			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1579		}
1580	}
1581
1582	return 0;
1583
1584 err_out:
1585	swsusp_free();
1586	return -ENOMEM;
1587}
1588
1589asmlinkage __visible int swsusp_save(void)
1590{
1591	unsigned int nr_pages, nr_highmem;
1592
1593	printk(KERN_INFO "PM: Creating hibernation image:\n");
1594
1595	drain_local_pages(NULL);
1596	nr_pages = count_data_pages();
1597	nr_highmem = count_highmem_pages();
1598	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1599
1600	if (!enough_free_mem(nr_pages, nr_highmem)) {
1601		printk(KERN_ERR "PM: Not enough free memory\n");
1602		return -ENOMEM;
1603	}
1604
1605	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1606		printk(KERN_ERR "PM: Memory allocation failed\n");
1607		return -ENOMEM;
1608	}
1609
1610	/* During the allocation of the suspend pagedir, new cold pages may
1611	 * appear.  Kill them.
1612	 */
1613	drain_local_pages(NULL);
1614	copy_data_pages(&copy_bm, &orig_bm);
1615
1616	/*
1617	 * End of critical section. From now on, we can write to memory,
1618	 * but we should not touch disk. This specially means we must _not_
1619	 * touch swap space! Except we must write out our image of course.
1620	 */
1621
1622	nr_pages += nr_highmem;
1623	nr_copy_pages = nr_pages;
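	/*
	 * One metadata page holds PAGE_SIZE / sizeof(long) pfns; assuming
	 * 4 KiB pages and 8-byte longs, that is 512 pfns per page, so e.g.
	 * 100000 copied pages need DIV_ROUND_UP(100000, 512) == 196
	 * metadata pages.
	 */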
1624	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1625
1626	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1627		nr_pages);
1628
1629	return 0;
1630}
1631
1632#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1633static int init_header_complete(struct swsusp_info *info)
1634{
1635	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1636	info->version_code = LINUX_VERSION_CODE;
1637	return 0;
1638}
1639
1640static char *check_image_kernel(struct swsusp_info *info)
1641{
1642	if (info->version_code != LINUX_VERSION_CODE)
1643		return "kernel version";
1644	if (strcmp(info->uts.sysname, init_utsname()->sysname))
1645		return "system type";
1646	if (strcmp(info->uts.release, init_utsname()->release))
1647		return "kernel release";
1648	if (strcmp(info->uts.version, init_utsname()->version))
1649		return "version";
1650	if (strcmp(info->uts.machine, init_utsname()->machine))
1651		return "machine";
1652	return NULL;
1653}
1654#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1655
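/**
 *	snapshot_get_image_size - number of pages occupied by the image:
 *	the copied data pages, the metadata pages holding their pfns, plus
 *	one page for the image header (hence the "+ 1" below).
 */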
1656unsigned long snapshot_get_image_size(void)
1657{
1658	return nr_copy_pages + nr_meta_pages + 1;
1659}
1660
1661static int init_header(struct swsusp_info *info)
1662{
1663	memset(info, 0, sizeof(struct swsusp_info));
1664	info->num_physpages = get_num_physpages();
1665	info->image_pages = nr_copy_pages;
1666	info->pages = snapshot_get_image_size();
1667	info->size = info->pages;
1668	info->size <<= PAGE_SHIFT;
1669	return init_header_complete(info);
1670}
1671
1672/**
1673 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1674 *	are stored in the array @buf[] (1 page at a time)
1675 */
1676
1677static inline void
1678pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1679{
1680	int j;
1681
1682	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1683		buf[j] = memory_bm_next_pfn(bm);
1684		if (unlikely(buf[j] == BM_END_OF_MAP))
1685			break;
1686		/* Save page key for data page (s390 only). */
1687		page_key_read(buf + j);
1688	}
1689}
1690
1691/**
1692 *	snapshot_read_next - used for reading the system memory snapshot.
1693 *
1694 *	On the first call to it @handle should point to a zeroed
1695 *	snapshot_handle structure.  The structure gets updated and a pointer
1696 *	to it should be passed to this function on every subsequent call.
1697 *
1698 *	On success the function returns a positive number.  Then, the caller
1699 *	is allowed to read up to the returned number of bytes from the memory
1700 *	location computed by the data_of() macro.
1701 *
1702 *	The function returns 0 to indicate the end-of-data-stream condition,
1703 *	and a negative number is returned on error.  In such cases the
1704 *	structure pointed to by @handle is not updated and should not be used
1705 *	any more.
1706 */
1707
1708int snapshot_read_next(struct snapshot_handle *handle)
1709{
1710	if (handle->cur > nr_meta_pages + nr_copy_pages)
1711		return 0;
1712
1713	if (!buffer) {
1714		/* This makes the buffer be freed by swsusp_free() */
1715		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1716		if (!buffer)
1717			return -ENOMEM;
1718	}
1719	if (!handle->cur) {
1720		int error;
1721
1722		error = init_header((struct swsusp_info *)buffer);
1723		if (error)
1724			return error;
1725		handle->buffer = buffer;
1726		memory_bm_position_reset(&orig_bm);
1727		memory_bm_position_reset(&copy_bm);
1728	} else if (handle->cur <= nr_meta_pages) {
1729		clear_page(buffer);
1730		pack_pfns(buffer, &orig_bm);
1731	} else {
1732		struct page *page;
1733
1734		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1735		if (PageHighMem(page)) {
1736			/* Highmem pages are copied to the buffer,
1737			 * because we can't return with a kmapped
1738			 * highmem page (we may not be called again).
1739			 */
1740			void *kaddr;
1741
1742			kaddr = kmap_atomic(page);
1743			copy_page(buffer, kaddr);
1744			kunmap_atomic(kaddr);
1745			handle->buffer = buffer;
1746		} else {
1747			handle->buffer = page_address(page);
1748		}
1749	}
1750	handle->cur++;
1751	return PAGE_SIZE;
1752}
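/*
 * A minimal sketch of the expected calling convention, where write_out()
 * stands for whatever sink the caller uses (it is not a function from
 * this file):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_read_next(&handle)) > 0) {
 *		ret = write_out(data_of(handle), ret);
 *		if (ret)
 *			break;
 *	}
 *
 * ret == 0 after the loop means the entire data stream was consumed.
 */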
1753
1754/**
1755 *	mark_unsafe_pages - mark the pages that cannot be used for storing
1756 *	the image during resume, because they conflict with the pages that
1757 *	had been used before suspend
1758 */
1759
1760static int mark_unsafe_pages(struct memory_bitmap *bm)
1761{
1762	struct zone *zone;
1763	unsigned long pfn, max_zone_pfn;
1764
1765	/* Clear page flags */
1766	for_each_populated_zone(zone) {
1767		max_zone_pfn = zone_end_pfn(zone);
1768		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1769			if (pfn_valid(pfn))
1770				swsusp_unset_page_free(pfn_to_page(pfn));
1771	}
1772
1773	/* Mark pages that correspond to the "original" pfns as "unsafe" */
1774	memory_bm_position_reset(bm);
1775	do {
1776		pfn = memory_bm_next_pfn(bm);
1777		if (likely(pfn != BM_END_OF_MAP)) {
1778			if (likely(pfn_valid(pfn)))
1779				swsusp_set_page_free(pfn_to_page(pfn));
1780			else
1781				return -EFAULT;
1782		}
1783	} while (pfn != BM_END_OF_MAP);
1784
1785	allocated_unsafe_pages = 0;
1786
1787	return 0;
1788}
1789
1790static void
1791duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1792{
1793	unsigned long pfn;
1794
1795	memory_bm_position_reset(src);
1796	pfn = memory_bm_next_pfn(src);
1797	while (pfn != BM_END_OF_MAP) {
1798		memory_bm_set_bit(dst, pfn);
1799		pfn = memory_bm_next_pfn(src);
1800	}
1801}
1802
1803static int check_header(struct swsusp_info *info)
1804{
1805	char *reason;
1806
1807	reason = check_image_kernel(info);
1808	if (!reason && info->num_physpages != get_num_physpages())
1809		reason = "memory size";
1810	if (reason) {
1811		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1812		return -EPERM;
1813	}
1814	return 0;
1815}
1816
1817/**
1818 *	load_header - check the image header and copy data from it
1819 */
1820
1821static int
1822load_header(struct swsusp_info *info)
1823{
1824	int error;
1825
1826	restore_pblist = NULL;
1827	error = check_header(info);
1828	if (!error) {
1829		nr_copy_pages = info->image_pages;
1830		nr_meta_pages = info->pages - info->image_pages - 1;
1831	}
1832	return error;
1833}
1834
1835/**
1836 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1837 *	the corresponding bit in the memory bitmap @bm
1838 */
1839static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1840{
1841	int j;
1842
1843	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1844		if (unlikely(buf[j] == BM_END_OF_MAP))
1845			break;
1846
1847		/* Extract and buffer page key for data page (s390 only). */
1848		page_key_memorize(buf + j);
1849
1850		if (memory_bm_pfn_present(bm, buf[j]))
1851			memory_bm_set_bit(bm, buf[j]);
1852		else
1853			return -EFAULT;
1854	}
1855
1856	return 0;
1857}
1858
1859/* List of "safe" pages that may be used to store data loaded from the suspend
1860 * image
1861 */
1862static struct linked_page *safe_pages_list;
1863
1864#ifdef CONFIG_HIGHMEM
1865/* struct highmem_pbe is used for creating the list of highmem pages that
1866 * should be restored atomically during the resume from disk, because the page
1867 * frames they have occupied before the suspend are in use.
1868 */
1869struct highmem_pbe {
1870	struct page *copy_page;	/* data is here now */
1871	struct page *orig_page;	/* data was here before the suspend */
1872	struct highmem_pbe *next;
1873};
1874
1875/* List of highmem PBEs needed for restoring the highmem pages that were
1876 * allocated before the suspend and included in the suspend image, but have
1877 * also been allocated by the "resume" kernel, so their contents cannot be
1878 * written directly to their "original" page frames.
1879 */
1880static struct highmem_pbe *highmem_pblist;
1881
1882/**
1883 *	count_highmem_image_pages - compute the number of highmem pages in the
1884 *	suspend image.  The bits in the memory bitmap @bm that correspond to the
1885 *	image pages are assumed to be set.
1886 */
1887
1888static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1889{
1890	unsigned long pfn;
1891	unsigned int cnt = 0;
1892
1893	memory_bm_position_reset(bm);
1894	pfn = memory_bm_next_pfn(bm);
1895	while (pfn != BM_END_OF_MAP) {
1896		if (PageHighMem(pfn_to_page(pfn)))
1897			cnt++;
1898
1899		pfn = memory_bm_next_pfn(bm);
1900	}
1901	return cnt;
1902}
1903
1904/**
1905 *	prepare_highmem_image - try to allocate as many highmem pages as
1906 *	there are highmem image pages (@nr_highmem_p points to the variable
1907 *	containing the number of highmem image pages).  The pages that are
1908 *	"safe" (i.e. will not be overwritten when the suspend image is
1909 *	restored) have the corresponding bits set in @bm (it must be
1910 *	uninitialized).
1911 *
1912 *	NOTE: This function should not be called if there are no highmem
1913 *	image pages.
1914 */
1915
1916static unsigned int safe_highmem_pages;
1917
1918static struct memory_bitmap *safe_highmem_bm;
1919
1920static int
1921prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1922{
1923	unsigned int to_alloc;
1924
1925	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1926		return -ENOMEM;
1927
1928	if (get_highmem_buffer(PG_SAFE))
1929		return -ENOMEM;
1930
1931	to_alloc = count_free_highmem_pages();
1932	if (to_alloc > *nr_highmem_p)
1933		to_alloc = *nr_highmem_p;
1934	else
1935		*nr_highmem_p = to_alloc;
1936
1937	safe_highmem_pages = 0;
1938	while (to_alloc-- > 0) {
1939		struct page *page;
1940
1941		page = alloc_page(__GFP_HIGHMEM);
1942		if (!swsusp_page_is_free(page)) {
1943			/* The page is "safe", set its bit in the bitmap */
1944			memory_bm_set_bit(bm, page_to_pfn(page));
1945			safe_highmem_pages++;
1946		}
1947		/* Mark the page as allocated */
1948		swsusp_set_page_forbidden(page);
1949		swsusp_set_page_free(page);
1950	}
1951	memory_bm_position_reset(bm);
1952	safe_highmem_bm = bm;
1953	return 0;
1954}
1955
1956/**
1957 *	get_highmem_page_buffer - for given highmem image page find the buffer
1958 *	that suspend_write_next() should set for its caller to write to.
1959 *
1960 *	If the page is to be saved to its "original" page frame or a copy of
1961 *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
1962 *	the copy of the page is to be made in normal memory, so the address of
1963 *	the copy is returned.
1964 *
1965 *	If @buffer is returned, the caller of suspend_write_next() will write
1966 *	the page's contents to @buffer, so they will have to be copied to the
1967 *	right location on the next call to suspend_write_next() and it is done
1968 *	with the help of copy_last_highmem_page().  For this purpose, if
1969 *	@buffer is returned, @last_highmem page is set to the page to which
1970 *	the data will have to be copied from @buffer.
1971 */
1972
1973static struct page *last_highmem_page;
1974
1975static void *
1976get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1977{
1978	struct highmem_pbe *pbe;
1979	void *kaddr;
1980
1981	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
1982		/* We have allocated the "original" page frame and we can
1983		 * use it directly to store the loaded page.
1984		 */
1985		last_highmem_page = page;
1986		return buffer;
1987	}
1988	/* The "original" page frame has not been allocated and we have to
1989	 * use a "safe" page frame to store the loaded page.
1990	 */
1991	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1992	if (!pbe) {
1993		swsusp_free();
1994		return ERR_PTR(-ENOMEM);
1995	}
1996	pbe->orig_page = page;
1997	if (safe_highmem_pages > 0) {
1998		struct page *tmp;
1999
2000		/* Copy of the page will be stored in high memory */
2001		kaddr = buffer;
2002		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2003		safe_highmem_pages--;
2004		last_highmem_page = tmp;
2005		pbe->copy_page = tmp;
2006	} else {
2007		/* Copy of the page will be stored in normal memory */
2008		kaddr = safe_pages_list;
2009		safe_pages_list = safe_pages_list->next;
2010		pbe->copy_page = virt_to_page(kaddr);
2011	}
2012	pbe->next = highmem_pblist;
2013	highmem_pblist = pbe;
2014	return kaddr;
2015}
2016
2017/**
2018 *	copy_last_highmem_page - copy the contents of a highmem image from
2019 *	@buffer, where the caller of snapshot_write_next() has placed them,
2020 *	to the right location represented by @last_highmem_page.
2021 */
2022
2023static void copy_last_highmem_page(void)
2024{
2025	if (last_highmem_page) {
2026		void *dst;
2027
2028		dst = kmap_atomic(last_highmem_page);
2029		copy_page(dst, buffer);
2030		kunmap_atomic(dst);
2031		last_highmem_page = NULL;
2032	}
2033}
2034
2035static inline int last_highmem_page_copied(void)
2036{
2037	return !last_highmem_page;
2038}
2039
2040static inline void free_highmem_data(void)
2041{
2042	if (safe_highmem_bm)
2043		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2044
2045	if (buffer)
2046		free_image_page(buffer, PG_UNSAFE_CLEAR);
2047}
2048#else
2049static inline int get_safe_write_buffer(void) { return 0; }
2050
2051static unsigned int
2052count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2053
2054static inline int
2055prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2056{
2057	return 0;
2058}
2059
2060static inline void *
2061get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2062{
2063	return ERR_PTR(-EINVAL);
2064}
2065
2066static inline void copy_last_highmem_page(void) {}
2067static inline int last_highmem_page_copied(void) { return 1; }
2068static inline void free_highmem_data(void) {}
2069#endif /* CONFIG_HIGHMEM */
2070
2071/**
2072 *	prepare_image - use the memory bitmap @bm to mark the pages that will
2073 *	be overwritten in the process of restoring the system memory state
2074 *	from the suspend image ("unsafe" pages) and allocate memory for the
2075 *	image.
2076 *
2077 *	The idea is to allocate a new memory bitmap first and then allocate
2078 *	as many pages as needed for the image data, but not to assign these
2079 *	pages to specific tasks initially.  Instead, we just mark them as
2080 *	allocated and create a list of "safe" pages that will be used
2081 *	later.  On systems with high memory a list of "safe" highmem pages is
2082 *	also created.
2083 */
2084
2085#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
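/*
 * For scale: assuming 4 KiB pages, 8-byte pointers and the three-pointer
 * struct pbe from <linux/suspend.h>, LINKED_PAGE_DATA_SIZE is
 * 4096 - 8 == 4088 bytes, so a single linked page holds
 * 4088 / 24 == 170 PBEs.
 */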
2086
2087static int
2088prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2089{
2090	unsigned int nr_pages, nr_highmem;
2091	struct linked_page *sp_list, *lp;
2092	int error;
2093
2094	/* If there is no highmem, the buffer will not be necessary */
2095	free_image_page(buffer, PG_UNSAFE_CLEAR);
2096	buffer = NULL;
2097
2098	nr_highmem = count_highmem_image_pages(bm);
2099	error = mark_unsafe_pages(bm);
2100	if (error)
2101		goto Free;
2102
2103	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2104	if (error)
2105		goto Free;
2106
2107	duplicate_memory_bitmap(new_bm, bm);
2108	memory_bm_free(bm, PG_UNSAFE_KEEP);
2109	if (nr_highmem > 0) {
2110		error = prepare_highmem_image(bm, &nr_highmem);
2111		if (error)
2112			goto Free;
2113	}
2114	/* Reserve some safe pages for potential later use.
2115	 *
2116	 * NOTE: This way we make sure there will be enough safe pages for the
2117	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2118	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2119	 */
2120	sp_list = NULL;
2121	/* nr_copy_pages cannot be lesser than allocated_unsafe_pages */
2122	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2123	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2124	while (nr_pages > 0) {
2125		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2126		if (!lp) {
2127			error = -ENOMEM;
2128			goto Free;
2129		}
2130		lp->next = sp_list;
2131		sp_list = lp;
2132		nr_pages--;
2133	}
2134	/* Preallocate memory for the image */
2135	safe_pages_list = NULL;
2136	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2137	while (nr_pages > 0) {
2138		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2139		if (!lp) {
2140			error = -ENOMEM;
2141			goto Free;
2142		}
2143		if (!swsusp_page_is_free(virt_to_page(lp))) {
2144			/* The page is "safe", add it to the list */
2145			lp->next = safe_pages_list;
2146			safe_pages_list = lp;
2147		}
2148		/* Mark the page as allocated */
2149		swsusp_set_page_forbidden(virt_to_page(lp));
2150		swsusp_set_page_free(virt_to_page(lp));
2151		nr_pages--;
2152	}
2153	/* Free the reserved safe pages so that chain_alloc() can use them */
2154	while (sp_list) {
2155		lp = sp_list->next;
2156		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2157		sp_list = lp;
2158	}
2159	return 0;
2160
2161 Free:
2162	swsusp_free();
2163	return error;
2164}
2165
2166/**
2167 *	get_buffer - compute the address that snapshot_write_next() should
2168 *	set for its caller to write to.
2169 */
2170
2171static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2172{
2173	struct pbe *pbe;
2174	struct page *page;
2175	unsigned long pfn = memory_bm_next_pfn(bm);
2176
2177	if (pfn == BM_END_OF_MAP)
2178		return ERR_PTR(-EFAULT);
2179
2180	page = pfn_to_page(pfn);
2181	if (PageHighMem(page))
2182		return get_highmem_page_buffer(page, ca);
2183
2184	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2185		/* We have allocated the "original" page frame and we can
2186		 * use it directly to store the loaded page.
2187		 */
2188		return page_address(page);
2189
2190	/* The "original" page frame has not been allocated and we have to
2191	 * use a "safe" page frame to store the loaded page.
2192	 */
2193	pbe = chain_alloc(ca, sizeof(struct pbe));
2194	if (!pbe) {
2195		swsusp_free();
2196		return ERR_PTR(-ENOMEM);
2197	}
2198	pbe->orig_address = page_address(page);
2199	pbe->address = safe_pages_list;
2200	safe_pages_list = safe_pages_list->next;
2201	pbe->next = restore_pblist;
2202	restore_pblist = pbe;
2203	return pbe->address;
2204}
2205
2206/**
2207 *	snapshot_write_next - used for writing the system memory snapshot.
2208 *
2209 *	On the first call to it @handle should point to a zeroed
2210 *	snapshot_handle structure.  The structure gets updated and a pointer
2211 *	to it should be passed to this function on every subsequent call.
2212 *
2213 *	On success the function returns a positive number.  Then, the caller
2214 *	is allowed to write up to the returned number of bytes to the memory
2215 *	location computed by the data_of() macro.
2216 *
2217 *	The function returns 0 to indicate the "end of file" condition,
2218 *	and a negative number is returned on error.  In such cases the
2219 *	structure pointed to by @handle is not updated and should not be used
2220 *	any more.
2221 */
2222
2223int snapshot_write_next(struct snapshot_handle *handle)
2224{
2225	static struct chain_allocator ca;
2226	int error = 0;
2227
2228	/* Check if we have already loaded the entire image */
2229	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2230		return 0;
2231
2232	handle->sync_read = 1;
2233
2234	if (!handle->cur) {
2235		if (!buffer)
2236			/* This makes the buffer be freed by swsusp_free() */
2237			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2238
2239		if (!buffer)
2240			return -ENOMEM;
2241
2242		handle->buffer = buffer;
2243	} else if (handle->cur == 1) {
2244		error = load_header(buffer);
2245		if (error)
2246			return error;
2247
2248		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2249		if (error)
2250			return error;
2251
2252		/* Allocate buffer for page keys. */
2253		error = page_key_alloc(nr_copy_pages);
2254		if (error)
2255			return error;
2256
2257	} else if (handle->cur <= nr_meta_pages + 1) {
2258		error = unpack_orig_pfns(buffer, &copy_bm);
2259		if (error)
2260			return error;
2261
2262		if (handle->cur == nr_meta_pages + 1) {
2263			error = prepare_image(&orig_bm, &copy_bm);
2264			if (error)
2265				return error;
2266
2267			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2268			memory_bm_position_reset(&orig_bm);
2269			restore_pblist = NULL;
2270			handle->buffer = get_buffer(&orig_bm, &ca);
2271			handle->sync_read = 0;
2272			if (IS_ERR(handle->buffer))
2273				return PTR_ERR(handle->buffer);
2274		}
2275	} else {
2276		copy_last_highmem_page();
2277		/* Restore page key for data page (s390 only). */
2278		page_key_write(handle->buffer);
2279		handle->buffer = get_buffer(&orig_bm, &ca);
2280		if (IS_ERR(handle->buffer))
2281			return PTR_ERR(handle->buffer);
2282		if (handle->buffer != buffer)
2283			handle->sync_read = 0;
2284	}
2285	handle->cur++;
2286	return PAGE_SIZE;
2287}
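/*
 * The write side mirrors the read side.  As a sketch (read_in() is a
 * placeholder for the caller's data source, not a function from this
 * file):
 *
 *	struct snapshot_handle handle = {};
 *	int ret;
 *
 *	while ((ret = snapshot_write_next(&handle)) > 0) {
 *		ret = read_in(data_of(handle), ret);
 *		if (ret)
 *			break;
 *	}
 *	if (ret >= 0)
 *		snapshot_write_finalize(&handle);
 *
 * snapshot_image_loaded() can then be used to verify that the entire
 * image was transferred.
 */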
2288
2289/**
2290 *	snapshot_write_finalize - must be called after the last call to
2291 *	snapshot_write_next() in case the last page in the image happens
2292 *	to be a highmem page and its contents should be stored in the
2293 *	highmem.  Additionally, it releases the memory that will not be
2294 *	used any more.
2295 */
2296
2297void snapshot_write_finalize(struct snapshot_handle *handle)
2298{
2299	copy_last_highmem_page();
2300	/* Restore page key for data page (s390 only). */
2301	page_key_write(handle->buffer);
2302	page_key_free();
2303	/* Free only if we have loaded the image entirely */
2304	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2305		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2306		free_highmem_data();
2307	}
2308}
2309
2310int snapshot_image_loaded(struct snapshot_handle *handle)
2311{
2312	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2313			handle->cur <= nr_meta_pages + nr_copy_pages);
2314}
2315
2316#ifdef CONFIG_HIGHMEM
2317/* Assumes that @buf is ready and points to a "safe" page */
2318static inline void
2319swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2320{
2321	void *kaddr1, *kaddr2;
2322
2323	kaddr1 = kmap_atomic(p1);
2324	kaddr2 = kmap_atomic(p2);
2325	copy_page(buf, kaddr1);
2326	copy_page(kaddr1, kaddr2);
2327	copy_page(kaddr2, buf);
2328	kunmap_atomic(kaddr2);
2329	kunmap_atomic(kaddr1);
2330}
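/*
 * swap_two_pages_data() is the classic three-move swap through a bounce
 * buffer: @buf takes a copy of @p1, @p1 takes @p2, and @p2 takes the
 * saved copy of @p1.  The buffer must be a "safe" page precisely so that
 * it is not itself part of the image being restored.
 */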
2331
2332/**
2333 *	restore_highmem - for each highmem page that was allocated before
2334 *	the suspend and included in the suspend image, and also has been
2335 *	allocated by the "resume" kernel, swap its current (i.e. "before
2336 *	resume") contents with the previous (i.e. "before suspend") one.
2337 *
2338 *	If the resume eventually fails, we can call this function once
2339 *	again and restore the "before resume" highmem state.
2340 */
2341
2342int restore_highmem(void)
2343{
2344	struct highmem_pbe *pbe = highmem_pblist;
2345	void *buf;
2346
2347	if (!pbe)
2348		return 0;
2349
2350	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2351	if (!buf)
2352		return -ENOMEM;
2353
2354	while (pbe) {
2355		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2356		pbe = pbe->next;
2357	}
2358	free_image_page(buf, PG_UNSAFE_CLEAR);
2359	return 0;
2360}
2361#endif /* CONFIG_HIGHMEM */