   1/*
   2 * linux/kernel/power/snapshot.c
   3 *
   4 * This file provides system snapshot/restore functionality for swsusp.
   5 *
   6 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
   7 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   8 *
   9 * This file is released under the GPLv2.
  10 *
  11 */
  12
  13#include <linux/version.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/suspend.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/spinlock.h>
  20#include <linux/kernel.h>
  21#include <linux/pm.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/bootmem.h>
  25#include <linux/syscalls.h>
  26#include <linux/console.h>
  27#include <linux/highmem.h>
  28#include <linux/list.h>
  29#include <linux/slab.h>
  30
  31#include <asm/uaccess.h>
  32#include <asm/mmu_context.h>
  33#include <asm/pgtable.h>
  34#include <asm/tlbflush.h>
  35#include <asm/io.h>
  36
  37#include "power.h"
  38
  39static int swsusp_page_is_free(struct page *);
  40static void swsusp_set_page_forbidden(struct page *);
  41static void swsusp_unset_page_forbidden(struct page *);
  42
  43/*
  44 * Number of bytes to reserve for memory allocations made by device drivers
  45 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
  46 * cause image creation to fail (tunable via /sys/power/reserved_size).
  47 */
  48unsigned long reserved_size;
  49
  50void __init hibernate_reserved_size_init(void)
  51{
  52	reserved_size = SPARE_PAGES * PAGE_SIZE;
  53}
  54
  55/*
  56 * Preferred image size in bytes (tunable via /sys/power/image_size).
  57 * When it is set to N, swsusp will do its best to ensure the image
  58 * size will not exceed N bytes, but if that is impossible, it will
  59 * try to create the smallest image possible.
  60 */
  61unsigned long image_size;
  62
  63void __init hibernate_image_size_init(void)
  64{
  65	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
  66}
  67
  68/* List of PBEs needed for restoring the pages that were allocated before
  69 * the suspend and included in the suspend image, but have also been
  70 * allocated by the "resume" kernel, so their contents cannot be written
  71 * directly to their "original" page frames.
  72 */
  73struct pbe *restore_pblist;
  74
  75/* Pointer to an auxiliary buffer (1 page) */
  76static void *buffer;
  77
  78/**
  79 *	@safe_needed - on resume, for storing the PBE list and the image,
  80 *	we can only use memory pages that do not conflict with the pages
  81 *	used before suspend.  The unsafe pages have PageNosaveFree set
  82 *	and we count them using unsafe_pages.
  83 *
  84 *	Each allocated image page is marked as PageNosave and PageNosaveFree
  85 *	so that swsusp_free() can release it.
  86 */
  87
  88#define PG_ANY		0
  89#define PG_SAFE		1
  90#define PG_UNSAFE_CLEAR	1
  91#define PG_UNSAFE_KEEP	0
  92
  93static unsigned int allocated_unsafe_pages;
  94
  95static void *get_image_page(gfp_t gfp_mask, int safe_needed)
  96{
  97	void *res;
  98
  99	res = (void *)get_zeroed_page(gfp_mask);
 100	if (safe_needed)
 101		while (res && swsusp_page_is_free(virt_to_page(res))) {
 102			/* The page is unsafe, mark it for swsusp_free() */
 103			swsusp_set_page_forbidden(virt_to_page(res));
 104			allocated_unsafe_pages++;
 105			res = (void *)get_zeroed_page(gfp_mask);
 106		}
 107	if (res) {
 108		swsusp_set_page_forbidden(virt_to_page(res));
 109		swsusp_set_page_free(virt_to_page(res));
 110	}
 111	return res;
 112}
 113
 114unsigned long get_safe_page(gfp_t gfp_mask)
 115{
 116	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
 117}
 118
 119static struct page *alloc_image_page(gfp_t gfp_mask)
 120{
 121	struct page *page;
 122
 123	page = alloc_page(gfp_mask);
 124	if (page) {
 125		swsusp_set_page_forbidden(page);
 126		swsusp_set_page_free(page);
 127	}
 128	return page;
 129}
 130
 131/**
 132 *	free_image_page - free page represented by @addr, allocated with
 133 *	get_image_page (page flags set by it must be cleared)
 134 */
 135
 136static inline void free_image_page(void *addr, int clear_nosave_free)
 137{
 138	struct page *page;
 139
 140	BUG_ON(!virt_addr_valid(addr));
 141
 142	page = virt_to_page(addr);
 143
 144	swsusp_unset_page_forbidden(page);
 145	if (clear_nosave_free)
 146		swsusp_unset_page_free(page);
 147
 148	__free_page(page);
 149}
 150
 151/* struct linked_page is used to build chains of pages */
 152
 153#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
 154
 155struct linked_page {
 156	struct linked_page *next;
 157	char data[LINKED_PAGE_DATA_SIZE];
 158} __attribute__((packed));
 159
 160static inline void
 161free_list_of_pages(struct linked_page *list, int clear_page_nosave)
 162{
 163	while (list) {
 164		struct linked_page *lp = list->next;
 165
 166		free_image_page(list, clear_page_nosave);
 167		list = lp;
 168	}
 169}
 170
 171/**
 172  *	struct chain_allocator is used for allocating small objects out of
 173  *	a linked list of pages called 'the chain'.
 174  *
 175  *	The chain grows each time there is no room for a new object in
 176  *	the current page.  The allocated objects cannot be freed individually.
 177  *	It is only possible to free them all at once, by freeing the entire
 178  *	chain.
 179  *
 180  *	NOTE: The chain allocator may be inefficient if the allocated objects
 181  *	are not much smaller than PAGE_SIZE.
 182  */
 183
 184struct chain_allocator {
 185	struct linked_page *chain;	/* the chain */
 186	unsigned int used_space;	/* total size of objects allocated out
 187					 * of the current page
 188					 */
 189	gfp_t gfp_mask;		/* mask for allocating pages */
 190	int safe_needed;	/* if set, only "safe" pages are allocated */
 191};
 192
 193static void
 194chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
 195{
 196	ca->chain = NULL;
 197	ca->used_space = LINKED_PAGE_DATA_SIZE;
 198	ca->gfp_mask = gfp_mask;
 199	ca->safe_needed = safe_needed;
 200}
 201
 202static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 203{
 204	void *ret;
 205
 206	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 207		struct linked_page *lp;
 208
 209		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
 210		if (!lp)
 211			return NULL;
 212
 213		lp->next = ca->chain;
 214		ca->chain = lp;
 215		ca->used_space = 0;
 216	}
 217	ret = ca->chain->data + ca->used_space;
 218	ca->used_space += size;
 219	return ret;
 220}
 221
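    /*
     * Usage sketch (illustrative only; create_bm_block_list() below is the
     * real in-tree user).  On 64-bit with 4 KB pages each linked_page holds
     * LINKED_PAGE_DATA_SIZE = 4096 - 8 = 4088 bytes of objects, and the
     * whole chain is released in one go with free_list_of_pages():
     *
     *	struct chain_allocator ca;
     *	struct bm_block *bb;
     *
     *	chain_init(&ca, GFP_KERNEL, PG_ANY);
     *	bb = chain_alloc(&ca, sizeof(struct bm_block));
     *	if (!bb)
     *		return -ENOMEM;
     */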
 222/**
 223 *	Data types related to memory bitmaps.
 224 *
 225 *	Memory bitmap is a structure consisting of many linked lists of
 226 *	objects.  The main list's elements are of type struct zone_bitmap
 227 *	and each of them corresponds to one zone.  For each zone bitmap
 228 *	object there is a list of objects of type struct bm_block that
 229 *	represent the blocks of the bitmap in which information is stored.
 230 *
 231 *	struct memory_bitmap contains a pointer to the main list of zone
 232 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 233 *	and a pointer to the list of pages used for allocating all of the
 234 *	zone bitmap objects and bitmap block objects.
 235 *
 236 *	NOTE: It has to be possible to lay out the bitmap in memory
 237 *	using only allocations of order 0.  Additionally, the bitmap is
 238 *	designed to work with an arbitrary number of zones (this is over the
 239 *	top for now, but let's avoid making unnecessary assumptions ;-).
 240 *
 241 *	struct zone_bitmap contains a pointer to a list of bitmap block
 242 *	objects and a pointer to the bitmap block object that has been
 243 *	most recently used for setting bits.  Additionally, it contains the
 244 *	pfns that correspond to the start and end of the represented zone.
 245 *
 246 *	struct bm_block contains a pointer to the memory page in which
 247 *	information is stored (in the form of a block of bitmap)
 248 *	It also contains the pfns that correspond to the start and end of
 249 *	the represented memory area.
 250 */
 251
 252#define BM_END_OF_MAP	(~0UL)
 253
 254#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
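    /*
     * With 4 KB pages BM_BITS_PER_BLOCK is 4096 * 8 = 32768 bits, so a
     * single bm_block tracks 32768 page frames, i.e. 128 MB of memory.
     */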
 255
 256struct bm_block {
 257	struct list_head hook;	/* hook into a list of bitmap blocks */
 258	unsigned long start_pfn;	/* pfn represented by the first bit */
 259	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
 260	unsigned long *data;	/* bitmap representing pages */
 261};
 262
 263static inline unsigned long bm_block_bits(struct bm_block *bb)
 264{
 265	return bb->end_pfn - bb->start_pfn;
 266}
 267
 268/* struct bm_position is used for browsing memory bitmaps */
 269
 270struct bm_position {
 271	struct bm_block *block;
 272	int bit;
 273};
 274
 275struct memory_bitmap {
 276	struct list_head blocks;	/* list of bitmap blocks */
 277	struct linked_page *p_list;	/* list of pages used to store zone
 278					 * bitmap objects and bitmap block
 279					 * objects
 280					 */
 281	struct bm_position cur;	/* most recently used bit position */
 282};
 283
 284/* Functions that operate on memory bitmaps */
 285
 286static void memory_bm_position_reset(struct memory_bitmap *bm)
 287{
 288	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
 289	bm->cur.bit = 0;
 290}
 291
 292static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 293
 294/**
 295 *	create_bm_block_list - create a list of block bitmap objects
 296 *	@pages - number of pages to track
 297 *	@list - list to put the allocated blocks into
 298 *	@ca - chain allocator to be used for allocating memory
 299 */
 300static int create_bm_block_list(unsigned long pages,
 301				struct list_head *list,
 302				struct chain_allocator *ca)
 303{
 304	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 305
 306	while (nr_blocks-- > 0) {
 307		struct bm_block *bb;
 308
 309		bb = chain_alloc(ca, sizeof(struct bm_block));
 310		if (!bb)
 311			return -ENOMEM;
 312		list_add(&bb->hook, list);
 313	}
 314
 315	return 0;
 316}
 317
 318struct mem_extent {
 319	struct list_head hook;
 320	unsigned long start;
 321	unsigned long end;
 322};
 323
 324/**
 325 *	free_mem_extents - free a list of memory extents
 326 *	@list - list of extents to empty
 327 */
 328static void free_mem_extents(struct list_head *list)
 329{
 330	struct mem_extent *ext, *aux;
 331
 332	list_for_each_entry_safe(ext, aux, list, hook) {
 333		list_del(&ext->hook);
 334		kfree(ext);
 335	}
 336}
 337
 338/**
 339 *	create_mem_extents - create a list of memory extents representing
 340 *	                     contiguous ranges of PFNs
 341 *	@list - list to put the extents into
 342 *	@gfp_mask - mask to use for memory allocations
 343 */
 344static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 345{
 346	struct zone *zone;
 347
 348	INIT_LIST_HEAD(list);
 349
 350	for_each_populated_zone(zone) {
 351		unsigned long zone_start, zone_end;
 352		struct mem_extent *ext, *cur, *aux;
 353
 354		zone_start = zone->zone_start_pfn;
 355		zone_end = zone->zone_start_pfn + zone->spanned_pages;
 356
 357		list_for_each_entry(ext, list, hook)
 358			if (zone_start <= ext->end)
 359				break;
 360
 361		if (&ext->hook == list || zone_end < ext->start) {
 362			/* New extent is necessary */
 363			struct mem_extent *new_ext;
 364
 365			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
 366			if (!new_ext) {
 367				free_mem_extents(list);
 368				return -ENOMEM;
 369			}
 370			new_ext->start = zone_start;
 371			new_ext->end = zone_end;
 372			list_add_tail(&new_ext->hook, &ext->hook);
 373			continue;
 374		}
 375
 376		/* Merge this zone's range of PFNs with the existing one */
 377		if (zone_start < ext->start)
 378			ext->start = zone_start;
 379		if (zone_end > ext->end)
 380			ext->end = zone_end;
 381
 382		/* More merging may be possible */
 383		cur = ext;
 384		list_for_each_entry_safe_continue(cur, aux, list, hook) {
 385			if (zone_end < cur->start)
 386				break;
 387			if (zone_end < cur->end)
 388				ext->end = cur->end;
 389			list_del(&cur->hook);
 390			kfree(cur);
 391		}
 392	}
 393
 394	return 0;
 395}
 396
 397/**
 398  *	memory_bm_create - allocate memory for a memory bitmap
 399  */
 400static int
 401memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 402{
 403	struct chain_allocator ca;
 404	struct list_head mem_extents;
 405	struct mem_extent *ext;
 406	int error;
 407
 408	chain_init(&ca, gfp_mask, safe_needed);
 409	INIT_LIST_HEAD(&bm->blocks);
 410
 411	error = create_mem_extents(&mem_extents, gfp_mask);
 412	if (error)
 413		return error;
 414
 415	list_for_each_entry(ext, &mem_extents, hook) {
 416		struct bm_block *bb;
 417		unsigned long pfn = ext->start;
 418		unsigned long pages = ext->end - ext->start;
 419
 420		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
 421
 422		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
 423		if (error)
 424			goto Error;
 425
 426		list_for_each_entry_continue(bb, &bm->blocks, hook) {
 427			bb->data = get_image_page(gfp_mask, safe_needed);
 428			if (!bb->data) {
 429				error = -ENOMEM;
 430				goto Error;
 431			}
 432
 433			bb->start_pfn = pfn;
 434			if (pages >= BM_BITS_PER_BLOCK) {
 435				pfn += BM_BITS_PER_BLOCK;
 436				pages -= BM_BITS_PER_BLOCK;
 437			} else {
 438				/* This is executed only once in the loop */
 439				pfn += pages;
 440			}
 441			bb->end_pfn = pfn;
 442		}
 443	}
 444
 445	bm->p_list = ca.chain;
 446	memory_bm_position_reset(bm);
 447 Exit:
 448	free_mem_extents(&mem_extents);
 449	return error;
 450
 451 Error:
 452	bm->p_list = ca.chain;
 453	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 454	goto Exit;
 455}
 456
 457/**
 458  *	memory_bm_free - free memory occupied by the memory bitmap @bm
 459  */
 460static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 461{
 462	struct bm_block *bb;
 463
 464	list_for_each_entry(bb, &bm->blocks, hook)
 465		if (bb->data)
 466			free_image_page(bb->data, clear_nosave_free);
 467
 468	free_list_of_pages(bm->p_list, clear_nosave_free);
 469
 470	INIT_LIST_HEAD(&bm->blocks);
 471}
 472
 473/**
 474 *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
 475 *	to given pfn.  The cur.block and cur.bit members of @bm are
 476 *	updated as a side effect.
 477 */
 478static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 479				void **addr, unsigned int *bit_nr)
 480{
 481	struct bm_block *bb;
 482
 483	/*
 484	 * Check if the pfn corresponds to the current bitmap block and find
 485	 * the block where it fits if this is not the case.
 486	 */
 487	bb = bm->cur.block;
 488	if (pfn < bb->start_pfn)
 489		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
 490			if (pfn >= bb->start_pfn)
 491				break;
 492
 493	if (pfn >= bb->end_pfn)
 494		list_for_each_entry_continue(bb, &bm->blocks, hook)
 495			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
 496				break;
 497
 498	if (&bb->hook == &bm->blocks)
 499		return -EFAULT;
 500
 501	/* The block has been found */
 502	bm->cur.block = bb;
 503	pfn -= bb->start_pfn;
 504	bm->cur.bit = pfn + 1;
 505	*bit_nr = pfn;
 506	*addr = bb->data;
 507	return 0;
 508}
 509
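    /*
     * Worked example (illustrative numbers): for a block with
     * start_pfn == 32768 and end_pfn == 65536, looking up pfn 40000 sets
     * *addr to bb->data and *bit_nr to 40000 - 32768 == 7232, leaving
     * bm->cur.bit at 7233 so that memory_bm_next_pfn() resumes after it.
     */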
 510static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 511{
 512	void *addr;
 513	unsigned int bit;
 514	int error;
 515
 516	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 517	BUG_ON(error);
 518	set_bit(bit, addr);
 519}
 520
 521static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 522{
 523	void *addr;
 524	unsigned int bit;
 525	int error;
 526
 527	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 528	if (!error)
 529		set_bit(bit, addr);
 530	return error;
 531}
 532
 533static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 534{
 535	void *addr;
 536	unsigned int bit;
 537	int error;
 538
 539	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 540	BUG_ON(error);
 541	clear_bit(bit, addr);
 542}
 543
 544static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 545{
 546	void *addr;
 547	unsigned int bit;
 548	int error;
 549
 550	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 551	BUG_ON(error);
 552	return test_bit(bit, addr);
 553}
 554
 555static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 556{
 557	void *addr;
 558	unsigned int bit;
 559
 560	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 561}
 562
 563/**
 564 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 565 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 566 *	returned.
 567 *
 568 *	It is required to run memory_bm_position_reset() before the first call to
 569 *	this function.
 570 */
 571
 572static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 573{
 574	struct bm_block *bb;
 575	int bit;
 576
 577	bb = bm->cur.block;
 578	do {
 579		bit = bm->cur.bit;
 580		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
 581		if (bit < bm_block_bits(bb))
 582			goto Return_pfn;
 583
 584		bb = list_entry(bb->hook.next, struct bm_block, hook);
 585		bm->cur.block = bb;
 586		bm->cur.bit = 0;
 587	} while (&bb->hook != &bm->blocks);
 588
 589	memory_bm_position_reset(bm);
 590	return BM_END_OF_MAP;
 591
 592 Return_pfn:
 593	bm->cur.bit = bit + 1;
 594	return bb->start_pfn + bit;
 595}
 596
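    /*
     * Typical iteration over the set bits, mirroring what copy_data_pages()
     * and duplicate_memory_bitmap() below do:
     *
     *	unsigned long pfn;
     *
     *	memory_bm_position_reset(bm);
     *	for (;;) {
     *		pfn = memory_bm_next_pfn(bm);
     *		if (pfn == BM_END_OF_MAP)
     *			break;
     *		...operate on pfn_to_page(pfn)...
     *	}
     */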
 597/**
 598 *	This structure represents a range of page frames the contents of which
 599 *	should not be saved during the suspend.
 600 */
 601
 602struct nosave_region {
 603	struct list_head list;
 604	unsigned long start_pfn;
 605	unsigned long end_pfn;
 606};
 607
 608static LIST_HEAD(nosave_regions);
 609
 610/**
 611 *	register_nosave_region - register a range of page frames the contents
 612 *	of which should not be saved during the suspend (to be used in the early
 613 *	initialization code)
 614 */
 615
 616void __init
 617__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
 618			 int use_kmalloc)
 619{
 620	struct nosave_region *region;
 621
 622	if (start_pfn >= end_pfn)
 623		return;
 624
 625	if (!list_empty(&nosave_regions)) {
 626		/* Try to extend the previous region (they should be sorted) */
 627		region = list_entry(nosave_regions.prev,
 628					struct nosave_region, list);
 629		if (region->end_pfn == start_pfn) {
 630			region->end_pfn = end_pfn;
 631			goto Report;
 632		}
 633	}
 634	if (use_kmalloc) {
 635		/* during init, this shouldn't fail */
 636		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
 637		BUG_ON(!region);
 638	} else
 639		/* This allocation cannot fail */
 640		region = alloc_bootmem(sizeof(struct nosave_region));
 641	region->start_pfn = start_pfn;
 642	region->end_pfn = end_pfn;
 643	list_add_tail(&region->list, &nosave_regions);
 644 Report:
 645	printk(KERN_INFO "PM: Registered nosave memory: %016lx - %016lx\n",
 646		start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
 647}
 648
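    /*
     * Early platform code reaches this through the register_nosave_region()
     * wrappers declared in <linux/suspend.h>, which select bootmem or
     * kmalloc via the use_kmalloc argument.  A hypothetical caller hiding a
     * firmware range [start, end) of physical addresses might do:
     *
     *	register_nosave_region(PFN_DOWN(start), PFN_UP(end));
     */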
 649/*
 650 * Set bits in this map correspond to the page frames the contents of which
 651 * should not be saved during the suspend.
 652 */
 653static struct memory_bitmap *forbidden_pages_map;
 654
 655/* Set bits in this map correspond to free page frames. */
 656static struct memory_bitmap *free_pages_map;
 657
 658/*
 659 * Each page frame allocated for creating the image is marked by setting the
 660 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 661 */
 662
 663void swsusp_set_page_free(struct page *page)
 664{
 665	if (free_pages_map)
 666		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
 667}
 668
 669static int swsusp_page_is_free(struct page *page)
 670{
 671	return free_pages_map ?
 672		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
 673}
 674
 675void swsusp_unset_page_free(struct page *page)
 676{
 677	if (free_pages_map)
 678		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
 679}
 680
 681static void swsusp_set_page_forbidden(struct page *page)
 682{
 683	if (forbidden_pages_map)
 684		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
 685}
 686
 687int swsusp_page_is_forbidden(struct page *page)
 688{
 689	return forbidden_pages_map ?
 690		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
 691}
 692
 693static void swsusp_unset_page_forbidden(struct page *page)
 694{
 695	if (forbidden_pages_map)
 696		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
 697}
 698
 699/**
 700 *	mark_nosave_pages - in the given bitmap @bm, set the bits that
 701 *	correspond to the page frames whose contents should not be saved
 702 */
 703
 704static void mark_nosave_pages(struct memory_bitmap *bm)
 705{
 706	struct nosave_region *region;
 707
 708	if (list_empty(&nosave_regions))
 709		return;
 710
 711	list_for_each_entry(region, &nosave_regions, list) {
 712		unsigned long pfn;
 713
 714		pr_debug("PM: Marking nosave pages: %016lx - %016lx\n",
 715				region->start_pfn << PAGE_SHIFT,
 716				region->end_pfn << PAGE_SHIFT);
 717
 718		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
 719			if (pfn_valid(pfn)) {
 720				/*
 721				 * It is safe to ignore the result of
 722				 * mem_bm_set_bit_check() here, since we won't
 723				 * touch the PFNs for which the error is
 724				 * returned anyway.
 725				 */
 726				mem_bm_set_bit_check(bm, pfn);
 727			}
 728	}
 729}
 730
 731/**
 732 *	create_basic_memory_bitmaps - create bitmaps needed for marking page
 733 *	frames that should not be saved and free page frames.  The pointers
 734 *	forbidden_pages_map and free_pages_map are only modified if everything
 735 *	goes well, because we don't want the bits to be used before both bitmaps
 736 *	are set up.
 737 */
 738
 739int create_basic_memory_bitmaps(void)
 740{
 741	struct memory_bitmap *bm1, *bm2;
 742	int error = 0;
 743
 744	BUG_ON(forbidden_pages_map || free_pages_map);
 745
 746	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 747	if (!bm1)
 748		return -ENOMEM;
 749
 750	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
 751	if (error)
 752		goto Free_first_object;
 753
 754	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
 755	if (!bm2)
 756		goto Free_first_bitmap;
 757
 758	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
 759	if (error)
 760		goto Free_second_object;
 761
 762	forbidden_pages_map = bm1;
 763	free_pages_map = bm2;
 764	mark_nosave_pages(forbidden_pages_map);
 765
 766	pr_debug("PM: Basic memory bitmaps created\n");
 767
 768	return 0;
 769
 770 Free_second_object:
 771	kfree(bm2);
 772 Free_first_bitmap:
 773 	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 774 Free_first_object:
 775	kfree(bm1);
 776	return -ENOMEM;
 777}
 778
 779/**
 780 *	free_basic_memory_bitmaps - free memory bitmaps allocated by
 781 *	create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 782 *	so that the bitmaps themselves are not referred to while they are being
 783 *	freed.
 784 */
 785
 786void free_basic_memory_bitmaps(void)
 787{
 788	struct memory_bitmap *bm1, *bm2;
 789
 790	BUG_ON(!(forbidden_pages_map && free_pages_map));
 791
 792	bm1 = forbidden_pages_map;
 793	bm2 = free_pages_map;
 794	forbidden_pages_map = NULL;
 795	free_pages_map = NULL;
 796	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 797	kfree(bm1);
 798	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
 799	kfree(bm2);
 800
 801	pr_debug("PM: Basic memory bitmaps freed\n");
 802}
 803
 804/**
 805 *	snapshot_additional_pages - estimate the number of additional pages
 806 *	needed for setting up the suspend image data structures for a given
 807 *	zone (usually the returned value is greater than the exact number)
 808 */
 809
 810unsigned int snapshot_additional_pages(struct zone *zone)
 811{
 812	unsigned int res;
 813
 814	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
 815	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
 816	return 2 * res;
 817}
 818
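    /*
     * Worked example (illustrative): a zone spanning 262144 page frames
     * (1 GB with 4 KB pages) needs DIV_ROUND_UP(262144, 32768) = 8 bitmap
     * blocks plus DIV_ROUND_UP(8 * sizeof(struct bm_block), PAGE_SIZE) = 1
     * page for the bm_block objects; doubling that for the two bitmaps
     * (orig_bm and copy_bm) gives 2 * 9 = 18 pages.
     */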
 819#ifdef CONFIG_HIGHMEM
 820/**
 821 *	count_free_highmem_pages - compute the total number of free highmem
 822 *	pages, system-wide.
 823 */
 824
 825static unsigned int count_free_highmem_pages(void)
 826{
 827	struct zone *zone;
 828	unsigned int cnt = 0;
 829
 830	for_each_populated_zone(zone)
 831		if (is_highmem(zone))
 832			cnt += zone_page_state(zone, NR_FREE_PAGES);
 833
 834	return cnt;
 835}
 836
 837/**
 838 *	saveable_highmem_page - Determine whether a highmem page should be
 839 *	included in the suspend image.
 840 *
 841 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 842 *	and it isn't a part of a free chunk of pages.
 843 */
 844static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
 845{
 846	struct page *page;
 847
 848	if (!pfn_valid(pfn))
 849		return NULL;
 850
 851	page = pfn_to_page(pfn);
 852	if (page_zone(page) != zone)
 853		return NULL;
 854
 855	BUG_ON(!PageHighMem(page));
 856
 857	if (swsusp_page_is_forbidden(page) ||  swsusp_page_is_free(page) ||
 858	    PageReserved(page))
 859		return NULL;
 860
 861	return page;
 862}
 863
 864/**
 865 *	count_highmem_pages - compute the total number of saveable highmem
 866 *	pages.
 867 */
 868
 869static unsigned int count_highmem_pages(void)
 870{
 871	struct zone *zone;
 872	unsigned int n = 0;
 873
 874	for_each_populated_zone(zone) {
 875		unsigned long pfn, max_zone_pfn;
 876
 877		if (!is_highmem(zone))
 878			continue;
 879
 880		mark_free_pages(zone);
 881		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 882		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 883			if (saveable_highmem_page(zone, pfn))
 884				n++;
 885	}
 886	return n;
 887}
 888#else
 889static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
 890{
 891	return NULL;
 892}
 893#endif /* CONFIG_HIGHMEM */
 894
 895/**
 896 *	saveable_page - Determine whether a non-highmem page should be included
 897 *	in the suspend image.
 898 *
 899 *	We should save the page if it isn't Nosave, and is not in the range
 900 *	of pages statically defined as 'unsaveable', and it isn't a part of
 901 *	a free chunk of pages.
 902 */
 903static struct page *saveable_page(struct zone *zone, unsigned long pfn)
 904{
 905	struct page *page;
 906
 907	if (!pfn_valid(pfn))
 908		return NULL;
 909
 910	page = pfn_to_page(pfn);
 911	if (page_zone(page) != zone)
 912		return NULL;
 913
 914	BUG_ON(PageHighMem(page));
 915
 916	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
 917		return NULL;
 918
 919	if (PageReserved(page)
 920	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
 921		return NULL;
 922
 923	return page;
 924}
 925
 926/**
 927 *	count_data_pages - compute the total number of saveable non-highmem
 928 *	pages.
 929 */
 930
 931static unsigned int count_data_pages(void)
 932{
 933	struct zone *zone;
 934	unsigned long pfn, max_zone_pfn;
 935	unsigned int n = 0;
 936
 937	for_each_populated_zone(zone) {
 938		if (is_highmem(zone))
 939			continue;
 940
 941		mark_free_pages(zone);
 942		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
 943		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 944			if (saveable_page(zone, pfn))
 945				n++;
 946	}
 947	return n;
 948}
 949
 950/* This is needed, because copy_page and memcpy are not usable for copying
 951 * task structs.
 952 */
 953static inline void do_copy_page(long *dst, long *src)
 954{
 955	int n;
 956
 957	for (n = PAGE_SIZE / sizeof(long); n; n--)
 958		*dst++ = *src++;
 959}
 960
 961
 962/**
 963 *	safe_copy_page - check if the page we are going to copy is marked as
 964 *		present in the kernel page tables (this always is the case if
 965 *		CONFIG_DEBUG_PAGEALLOC is not set and in that case
 966 *		kernel_page_present() always returns 'true').
 967 */
 968static void safe_copy_page(void *dst, struct page *s_page)
 969{
 970	if (kernel_page_present(s_page)) {
 971		do_copy_page(dst, page_address(s_page));
 972	} else {
 973		kernel_map_pages(s_page, 1, 1);
 974		do_copy_page(dst, page_address(s_page));
 975		kernel_map_pages(s_page, 1, 0);
 976	}
 977}
 978
 979
 980#ifdef CONFIG_HIGHMEM
 981static inline struct page *
 982page_is_saveable(struct zone *zone, unsigned long pfn)
 983{
 984	return is_highmem(zone) ?
 985		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
 986}
 987
 988static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
 989{
 990	struct page *s_page, *d_page;
 991	void *src, *dst;
 992
 993	s_page = pfn_to_page(src_pfn);
 994	d_page = pfn_to_page(dst_pfn);
 995	if (PageHighMem(s_page)) {
 996		src = kmap_atomic(s_page, KM_USER0);
 997		dst = kmap_atomic(d_page, KM_USER1);
 998		do_copy_page(dst, src);
 999		kunmap_atomic(dst, KM_USER1);
1000		kunmap_atomic(src, KM_USER0);
1001	} else {
1002		if (PageHighMem(d_page)) {
1003			/* Page pointed to by src may contain some kernel
1004			 * data modified by kmap_atomic()
1005			 */
1006			safe_copy_page(buffer, s_page);
1007			dst = kmap_atomic(d_page, KM_USER0);
1008			copy_page(dst, buffer);
1009			kunmap_atomic(dst, KM_USER0);
1010		} else {
1011			safe_copy_page(page_address(d_page), s_page);
1012		}
1013	}
1014}
1015#else
1016#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1017
1018static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1019{
1020	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1021				pfn_to_page(src_pfn));
1022}
1023#endif /* CONFIG_HIGHMEM */
1024
1025static void
1026copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
1027{
1028	struct zone *zone;
1029	unsigned long pfn;
1030
1031	for_each_populated_zone(zone) {
1032		unsigned long max_zone_pfn;
1033
1034		mark_free_pages(zone);
1035		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1036		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1037			if (page_is_saveable(zone, pfn))
1038				memory_bm_set_bit(orig_bm, pfn);
1039	}
1040	memory_bm_position_reset(orig_bm);
1041	memory_bm_position_reset(copy_bm);
1042	for(;;) {
1043		pfn = memory_bm_next_pfn(orig_bm);
1044		if (unlikely(pfn == BM_END_OF_MAP))
1045			break;
1046		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1047	}
1048}
1049
1050/* Total number of image pages */
1051static unsigned int nr_copy_pages;
1052/* Number of pages needed for saving the original pfns of the image pages */
1053static unsigned int nr_meta_pages;
1054/*
1055 * Numbers of normal and highmem page frames allocated for hibernation image
1056 * before suspending devices.
1057 */
1058unsigned int alloc_normal, alloc_highmem;
1059/*
1060 * Memory bitmap used for marking saveable pages (during hibernation) or
1061 * hibernation image pages (during restore)
1062 */
1063static struct memory_bitmap orig_bm;
1064/*
1065 * Memory bitmap used during hibernation for marking allocated page frames that
1066 * will contain copies of saveable pages.  During restore it is initially used
1067 * for marking hibernation image pages, but then the set bits from it are
1068 * duplicated in @orig_bm and it is released.  On highmem systems it is next
1069 * used for marking "safe" highmem pages, but it has to be reinitialized for
1070 * this purpose.
1071 */
1072static struct memory_bitmap copy_bm;
1073
1074/**
1075 *	swsusp_free - free pages allocated for the suspend.
1076 *
1077 *	Suspend pages are allocated before the atomic copy is made, so we
1078 *	need to release them after the resume.
1079 */
1080
1081void swsusp_free(void)
1082{
1083	struct zone *zone;
1084	unsigned long pfn, max_zone_pfn;
1085
1086	for_each_populated_zone(zone) {
1087		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1088		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1089			if (pfn_valid(pfn)) {
1090				struct page *page = pfn_to_page(pfn);
1091
1092				if (swsusp_page_is_forbidden(page) &&
1093				    swsusp_page_is_free(page)) {
1094					swsusp_unset_page_forbidden(page);
1095					swsusp_unset_page_free(page);
1096					__free_page(page);
1097				}
1098			}
1099	}
1100	nr_copy_pages = 0;
1101	nr_meta_pages = 0;
1102	restore_pblist = NULL;
1103	buffer = NULL;
1104	alloc_normal = 0;
1105	alloc_highmem = 0;
1106}
1107
1108/* Helper functions used for the shrinking of memory. */
1109
1110#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1111
1112/**
1113 * preallocate_image_pages - Allocate a number of pages for hibernation image
1114 * @nr_pages: Number of page frames to allocate.
1115 * @mask: GFP flags to use for the allocation.
1116 *
1117 * Return value: Number of page frames actually allocated
1118 */
1119static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1120{
1121	unsigned long nr_alloc = 0;
1122
1123	while (nr_pages > 0) {
1124		struct page *page;
1125
1126		page = alloc_image_page(mask);
1127		if (!page)
1128			break;
1129		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1130		if (PageHighMem(page))
1131			alloc_highmem++;
1132		else
1133			alloc_normal++;
1134		nr_pages--;
1135		nr_alloc++;
1136	}
1137
1138	return nr_alloc;
1139}
1140
1141static unsigned long preallocate_image_memory(unsigned long nr_pages,
1142					      unsigned long avail_normal)
1143{
1144	unsigned long alloc;
1145
1146	if (avail_normal <= alloc_normal)
1147		return 0;
1148
1149	alloc = avail_normal - alloc_normal;
1150	if (nr_pages < alloc)
1151		alloc = nr_pages;
1152
1153	return preallocate_image_pages(alloc, GFP_IMAGE);
1154}
1155
1156#ifdef CONFIG_HIGHMEM
1157static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1158{
1159	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1160}
1161
1162/**
1163 *  __fraction - Compute (an approximation of) x * (multiplier / base)
1164 */
1165static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1166{
1167	x *= multiplier;
1168	do_div(x, base);
1169	return (unsigned long)x;
1170}
1171
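    /*
     * E.g. __fraction(1000, 3, 8) == 375.  Doing the multiplication in
     * 64 bits keeps nr_pages * highmem from overflowing on 32-bit systems.
     */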
1172static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1173						unsigned long highmem,
1174						unsigned long total)
1175{
1176	unsigned long alloc = __fraction(nr_pages, highmem, total);
1177
1178	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1179}
1180#else /* CONFIG_HIGHMEM */
1181static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1182{
1183	return 0;
1184}
1185
1186static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1187						unsigned long highmem,
1188						unsigned long total)
1189{
1190	return 0;
1191}
1192#endif /* CONFIG_HIGHMEM */
1193
1194/**
1195 * free_unnecessary_pages - Release preallocated pages not needed for the image
1196 */
1197static void free_unnecessary_pages(void)
1198{
1199	unsigned long save, to_free_normal, to_free_highmem;
1200
1201	save = count_data_pages();
1202	if (alloc_normal >= save) {
1203		to_free_normal = alloc_normal - save;
1204		save = 0;
1205	} else {
1206		to_free_normal = 0;
1207		save -= alloc_normal;
1208	}
1209	save += count_highmem_pages();
1210	if (alloc_highmem >= save) {
1211		to_free_highmem = alloc_highmem - save;
1212	} else {
1213		to_free_highmem = 0;
1214		save -= alloc_highmem;
1215		if (to_free_normal > save)
1216			to_free_normal -= save;
1217		else
1218			to_free_normal = 0;
1219	}
1220
1221	memory_bm_position_reset(&copy_bm);
1222
1223	while (to_free_normal > 0 || to_free_highmem > 0) {
1224		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1225		struct page *page = pfn_to_page(pfn);
1226
1227		if (PageHighMem(page)) {
1228			if (!to_free_highmem)
1229				continue;
1230			to_free_highmem--;
1231			alloc_highmem--;
1232		} else {
1233			if (!to_free_normal)
1234				continue;
1235			to_free_normal--;
1236			alloc_normal--;
1237		}
1238		memory_bm_clear_bit(&copy_bm, pfn);
1239		swsusp_unset_page_forbidden(page);
1240		swsusp_unset_page_free(page);
1241		__free_page(page);
1242	}
1243}
1244
1245/**
1246 * minimum_image_size - Estimate the minimum acceptable size of an image
1247 * @saveable: Number of saveable pages in the system.
1248 *
1249 * We want to avoid attempting to free too much memory too hard, so estimate the
1250 * minimum acceptable size of a hibernation image to use as the lower limit for
1251 * preallocating memory.
1252 *
1253 * We assume that the minimum image size should be proportional to
1254 *
1255 * [number of saveable pages] - [number of pages that can be freed in theory]
1256 *
1257 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1258 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1259 * minus mapped file pages.
1260 */
1261static unsigned long minimum_image_size(unsigned long saveable)
1262{
1263	unsigned long size;
1264
1265	size = global_page_state(NR_SLAB_RECLAIMABLE)
1266		+ global_page_state(NR_ACTIVE_ANON)
1267		+ global_page_state(NR_INACTIVE_ANON)
1268		+ global_page_state(NR_ACTIVE_FILE)
1269		+ global_page_state(NR_INACTIVE_FILE)
1270		- global_page_state(NR_FILE_MAPPED);
1271
1272	return saveable <= size ? 0 : saveable - size;
1273}
1274
1275/**
1276 * hibernate_preallocate_memory - Preallocate memory for hibernation image
1277 *
1278 * To create a hibernation image it is necessary to make a copy of every page
1279 * frame in use.  We also need a number of page frames to be free during
1280 * hibernation for allocations made while saving the image and for device
1281 * drivers, in case they need to allocate memory from their hibernation
1282 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1283 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1284 * /sys/power/reserved_size, respectively).  To make this happen, we compute the
1285 * total number of available page frames and allocate at least
1286 *
1287 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1288 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1289 *
1290 * of them, which corresponds to the maximum size of a hibernation image.
1291 *
1292 * If image_size is set below the number following from the above formula,
1293 * the preallocation of memory is continued until the total number of saveable
1294 * pages in the system is below the requested image size or the minimum
1295 * acceptable image size returned by minimum_image_size(), whichever is greater.
1296 */
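    /*
     * Worked example (illustrative numbers): with count = 1000000 usable
     * page frames, size = 1000 metadata pages, PAGES_FOR_IO taken as 1024
     * and reserved_size = 1 MB (256 pages of 4 KB):
     *
     *	max_size = (1000000 - (1000 + 1024)) / 2 - 2 * 256 = 498476
     *
     * so at least 1000000 - 498476 = 501524 page frames are preallocated;
     * setting image_size lower only makes the preallocation go further.
     */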
1297int hibernate_preallocate_memory(void)
1298{
1299	struct zone *zone;
1300	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1301	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1302	struct timeval start, stop;
1303	int error;
1304
1305	printk(KERN_INFO "PM: Preallocating image memory... ");
1306	do_gettimeofday(&start);
1307
1308	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1309	if (error)
1310		goto err_out;
1311
1312	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1313	if (error)
1314		goto err_out;
1315
1316	alloc_normal = 0;
1317	alloc_highmem = 0;
1318
1319	/* Count the number of saveable data pages. */
1320	save_highmem = count_highmem_pages();
1321	saveable = count_data_pages();
1322
1323	/*
1324	 * Compute the total number of page frames we can use (count) and the
1325	 * number of pages needed for image metadata (size).
1326	 */
1327	count = saveable;
1328	saveable += save_highmem;
1329	highmem = save_highmem;
1330	size = 0;
1331	for_each_populated_zone(zone) {
1332		size += snapshot_additional_pages(zone);
1333		if (is_highmem(zone))
1334			highmem += zone_page_state(zone, NR_FREE_PAGES);
1335		else
1336			count += zone_page_state(zone, NR_FREE_PAGES);
1337	}
1338	avail_normal = count;
1339	count += highmem;
1340	count -= totalreserve_pages;
1341
1342	/* Compute the maximum number of saveable pages to leave in memory. */
1343	max_size = (count - (size + PAGES_FOR_IO)) / 2
1344			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1345	/* Compute the desired number of image pages specified by image_size. */
1346	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1347	if (size > max_size)
1348		size = max_size;
1349	/*
1350	 * If the desired number of image pages is at least as large as the
1351	 * current number of saveable pages in memory, allocate page frames for
1352	 * the image and we're done.
1353	 */
1354	if (size >= saveable) {
1355		pages = preallocate_image_highmem(save_highmem);
1356		pages += preallocate_image_memory(saveable - pages, avail_normal);
1357		goto out;
1358	}
1359
1360	/* Estimate the minimum size of the image. */
1361	pages = minimum_image_size(saveable);
1362	/*
1363	 * To avoid excessive pressure on the normal zone, leave room in it to
1364	 * accommodate an image of the minimum size (unless it's already too
1365	 * small, in which case don't preallocate pages from it at all).
1366	 */
1367	if (avail_normal > pages)
1368		avail_normal -= pages;
1369	else
1370		avail_normal = 0;
1371	if (size < pages)
1372		size = min_t(unsigned long, pages, max_size);
1373
1374	/*
1375	 * Let the memory management subsystem know that we're going to need a
1376	 * large number of page frames to allocate and make it free some memory.
1377	 * NOTE: If this is not done, performance will be hurt badly in some
1378	 * test cases.
1379	 */
1380	shrink_all_memory(saveable - size);
1381
1382	/*
1383	 * The number of saveable pages in memory was too high, so apply some
1384	 * pressure to decrease it.  First, make room for the largest possible
1385	 * image and fail if that doesn't work.  Next, try to decrease the size
1386	 * of the image as much as indicated by 'size' using allocations from
1387	 * highmem and non-highmem zones separately.
1388	 */
1389	pages_highmem = preallocate_image_highmem(highmem / 2);
1390	alloc = (count - max_size) - pages_highmem;
1391	pages = preallocate_image_memory(alloc, avail_normal);
1392	if (pages < alloc) {
1393		/* We have exhausted non-highmem pages, try highmem. */
1394		alloc -= pages;
1395		pages += pages_highmem;
1396		pages_highmem = preallocate_image_highmem(alloc);
1397		if (pages_highmem < alloc)
1398			goto err_out;
1399		pages += pages_highmem;
1400		/*
1401		 * size is the desired number of saveable pages to leave in
1402		 * memory, so try to preallocate (all memory - size) pages.
1403		 */
1404		alloc = (count - pages) - size;
1405		pages += preallocate_image_highmem(alloc);
1406	} else {
1407		/*
1408		 * There are approximately max_size saveable pages at this point
1409		 * and we want to reduce this number down to size.
1410		 */
1411		alloc = max_size - size;
1412		size = preallocate_highmem_fraction(alloc, highmem, count);
1413		pages_highmem += size;
1414		alloc -= size;
1415		size = preallocate_image_memory(alloc, avail_normal);
1416		pages_highmem += preallocate_image_highmem(alloc - size);
1417		pages += pages_highmem + size;
1418	}
1419
1420	/*
1421	 * We only need as many page frames for the image as there are saveable
1422	 * pages in memory, but we have allocated more.  Release the excessive
1423	 * ones now.
1424	 */
1425	free_unnecessary_pages();
1426
1427 out:
1428	do_gettimeofday(&stop);
1429	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1430	swsusp_show_speed(&start, &stop, pages, "Allocated");
1431
1432	return 0;
1433
1434 err_out:
1435	printk(KERN_CONT "\n");
1436	swsusp_free();
1437	return -ENOMEM;
1438}
1439
1440#ifdef CONFIG_HIGHMEM
1441/**
1442  *	count_pages_for_highmem - compute the number of non-highmem pages
1443  *	that will be necessary for creating copies of highmem pages.
1444  */
1445
1446static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1447{
1448	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1449
1450	if (free_highmem >= nr_highmem)
1451		nr_highmem = 0;
1452	else
1453		nr_highmem -= free_highmem;
1454
1455	return nr_highmem;
1456}
1457#else
1458static unsigned int
1459count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1460#endif /* CONFIG_HIGHMEM */
1461
1462/**
1463 *	enough_free_mem - Make sure we have enough free memory for the
1464 *	snapshot image.
1465 */
1466
1467static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1468{
1469	struct zone *zone;
1470	unsigned int free = alloc_normal;
1471
1472	for_each_populated_zone(zone)
1473		if (!is_highmem(zone))
1474			free += zone_page_state(zone, NR_FREE_PAGES);
1475
1476	nr_pages += count_pages_for_highmem(nr_highmem);
1477	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1478		nr_pages, PAGES_FOR_IO, free);
1479
1480	return free > nr_pages + PAGES_FOR_IO;
1481}
1482
1483#ifdef CONFIG_HIGHMEM
1484/**
1485 *	get_highmem_buffer - if there are some highmem pages in the suspend
1486 *	image, we may need the buffer to copy them and/or load their data.
1487 */
1488
1489static inline int get_highmem_buffer(int safe_needed)
1490{
1491	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1492	return buffer ? 0 : -ENOMEM;
1493}
1494
1495/**
1496 *	alloc_highmem_image_pages - allocate some highmem pages for the image.
1497 *	Try to allocate as many pages as needed, but if the number of free
1498 *	highmem pages is less than that, allocate them all.
1499 */
1500
1501static inline unsigned int
1502alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
1503{
1504	unsigned int to_alloc = count_free_highmem_pages();
1505
1506	if (to_alloc > nr_highmem)
1507		to_alloc = nr_highmem;
1508
1509	nr_highmem -= to_alloc;
1510	while (to_alloc-- > 0) {
1511		struct page *page;
1512
1513		page = alloc_image_page(__GFP_HIGHMEM);
1514		memory_bm_set_bit(bm, page_to_pfn(page));
1515	}
1516	return nr_highmem;
1517}
1518#else
1519static inline int get_highmem_buffer(int safe_needed) { return 0; }
1520
1521static inline unsigned int
1522alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
1523#endif /* CONFIG_HIGHMEM */
1524
1525/**
1526 *	swsusp_alloc - allocate memory for the suspend image
1527 *
1528 *	We first try to allocate as many highmem pages as there are
1529 *	saveable highmem pages in the system.  If that fails, we allocate
1530 *	non-highmem pages for the copies of the remaining highmem ones.
1531 *
1532 *	In this approach it is likely that the copies of highmem pages will
1533 *	also be located in the high memory, because of the way in which
1534 *	copy_data_pages() works.
1535 */
1536
1537static int
1538swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1539		unsigned int nr_pages, unsigned int nr_highmem)
1540{
1541	if (nr_highmem > 0) {
1542		if (get_highmem_buffer(PG_ANY))
1543			goto err_out;
1544		if (nr_highmem > alloc_highmem) {
1545			nr_highmem -= alloc_highmem;
1546			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1547		}
1548	}
1549	if (nr_pages > alloc_normal) {
1550		nr_pages -= alloc_normal;
1551		while (nr_pages-- > 0) {
1552			struct page *page;
1553
1554			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1555			if (!page)
1556				goto err_out;
1557			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1558		}
1559	}
1560
1561	return 0;
1562
1563 err_out:
1564	swsusp_free();
1565	return -ENOMEM;
1566}
1567
1568asmlinkage int swsusp_save(void)
1569{
1570	unsigned int nr_pages, nr_highmem;
1571
1572	printk(KERN_INFO "PM: Creating hibernation image:\n");
1573
1574	drain_local_pages(NULL);
1575	nr_pages = count_data_pages();
1576	nr_highmem = count_highmem_pages();
1577	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1578
1579	if (!enough_free_mem(nr_pages, nr_highmem)) {
1580		printk(KERN_ERR "PM: Not enough free memory\n");
1581		return -ENOMEM;
1582	}
1583
1584	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1585		printk(KERN_ERR "PM: Memory allocation failed\n");
1586		return -ENOMEM;
1587	}
1588
1589	/* While allocating the suspend pagedir, new cold pages may appear.
1590	 * Kill them.
1591	 */
1592	drain_local_pages(NULL);
1593	copy_data_pages(&copy_bm, &orig_bm);
1594
1595	/*
1596	 * End of critical section. From now on, we can write to memory,
1597	 * but we should not touch disk. This specially means we must _not_
1598	 * touch swap space! Except we must write out our image of course.
1599	 */
1600
1601	nr_pages += nr_highmem;
1602	nr_copy_pages = nr_pages;
1603	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1604
1605	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
1606		nr_pages);
1607
1608	return 0;
1609}
1610
1611#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1612static int init_header_complete(struct swsusp_info *info)
1613{
1614	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1615	info->version_code = LINUX_VERSION_CODE;
1616	return 0;
1617}
1618
1619static char *check_image_kernel(struct swsusp_info *info)
1620{
1621	if (info->version_code != LINUX_VERSION_CODE)
1622		return "kernel version";
1623	if (strcmp(info->uts.sysname,init_utsname()->sysname))
1624		return "system type";
1625	if (strcmp(info->uts.release,init_utsname()->release))
1626		return "kernel release";
1627	if (strcmp(info->uts.version,init_utsname()->version))
1628		return "version";
1629	if (strcmp(info->uts.machine,init_utsname()->machine))
1630		return "machine";
1631	return NULL;
1632}
1633#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1634
1635unsigned long snapshot_get_image_size(void)
1636{
1637	return nr_copy_pages + nr_meta_pages + 1;
1638}
1639
1640static int init_header(struct swsusp_info *info)
1641{
1642	memset(info, 0, sizeof(struct swsusp_info));
1643	info->num_physpages = num_physpages;
1644	info->image_pages = nr_copy_pages;
1645	info->pages = snapshot_get_image_size();
1646	info->size = info->pages;
1647	info->size <<= PAGE_SHIFT;
1648	return init_header_complete(info);
1649}
1650
1651/**
1652 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1653 *	are stored in the array @buf[] (1 page at a time)
1654 */
1655
1656static inline void
1657pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1658{
1659	int j;
1660
1661	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1662		buf[j] = memory_bm_next_pfn(bm);
1663		if (unlikely(buf[j] == BM_END_OF_MAP))
1664			break;
1665	}
1666}
1667
1668/**
1669 *	snapshot_read_next - used for reading the system memory snapshot.
1670 *
1671 *	On the first call to it @handle should point to a zeroed
1672 *	snapshot_handle structure.  The structure gets updated and a pointer
1673 *	to it should be passed to this function every next time.
1674 *
1675 *	On success the function returns a positive number.  Then, the caller
1676 *	is allowed to read up to the returned number of bytes from the memory
1677 *	location computed by the data_of() macro.
1678 *
1679 *	The function returns 0 to indicate the end of data stream condition,
1680 *	and a negative number is returned on error.  In such cases the
1681 *	structure pointed to by @handle is not updated and should not be used
1682 *	any more.
1683 */
1684
1685int snapshot_read_next(struct snapshot_handle *handle)
1686{
1687	if (handle->cur > nr_meta_pages + nr_copy_pages)
1688		return 0;
1689
1690	if (!buffer) {
1691		/* This makes the buffer be freed by swsusp_free() */
1692		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1693		if (!buffer)
1694			return -ENOMEM;
1695	}
1696	if (!handle->cur) {
1697		int error;
1698
1699		error = init_header((struct swsusp_info *)buffer);
1700		if (error)
1701			return error;
1702		handle->buffer = buffer;
1703		memory_bm_position_reset(&orig_bm);
1704		memory_bm_position_reset(&copy_bm);
1705	} else if (handle->cur <= nr_meta_pages) {
1706		clear_page(buffer);
1707		pack_pfns(buffer, &orig_bm);
1708	} else {
1709		struct page *page;
1710
1711		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1712		if (PageHighMem(page)) {
1713			/* Highmem pages are copied to the buffer,
1714			 * because we can't return with a kmapped
1715			 * highmem page (we may not be called again).
1716			 */
1717			void *kaddr;
1718
1719			kaddr = kmap_atomic(page, KM_USER0);
1720			copy_page(buffer, kaddr);
1721			kunmap_atomic(kaddr, KM_USER0);
1722			handle->buffer = buffer;
1723		} else {
1724			handle->buffer = page_address(page);
1725		}
1726	}
1727	handle->cur++;
1728	return PAGE_SIZE;
1729}
1730
1731/**
1732 *	mark_unsafe_pages - mark the pages that cannot be used for storing
1733 *	the image during resume, because they conflict with the pages that
1734 *	had been used before suspend
1735 */
1736
1737static int mark_unsafe_pages(struct memory_bitmap *bm)
1738{
1739	struct zone *zone;
1740	unsigned long pfn, max_zone_pfn;
1741
1742	/* Clear page flags */
1743	for_each_populated_zone(zone) {
1744		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
1745		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1746			if (pfn_valid(pfn))
1747				swsusp_unset_page_free(pfn_to_page(pfn));
1748	}
1749
1750	/* Mark pages that correspond to the "original" pfns as "unsafe" */
1751	memory_bm_position_reset(bm);
1752	do {
1753		pfn = memory_bm_next_pfn(bm);
1754		if (likely(pfn != BM_END_OF_MAP)) {
1755			if (likely(pfn_valid(pfn)))
1756				swsusp_set_page_free(pfn_to_page(pfn));
1757			else
1758				return -EFAULT;
1759		}
1760	} while (pfn != BM_END_OF_MAP);
1761
1762	allocated_unsafe_pages = 0;
1763
1764	return 0;
1765}
1766
1767static void
1768duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1769{
1770	unsigned long pfn;
1771
1772	memory_bm_position_reset(src);
1773	pfn = memory_bm_next_pfn(src);
1774	while (pfn != BM_END_OF_MAP) {
1775		memory_bm_set_bit(dst, pfn);
1776		pfn = memory_bm_next_pfn(src);
1777	}
1778}
1779
1780static int check_header(struct swsusp_info *info)
1781{
1782	char *reason;
1783
1784	reason = check_image_kernel(info);
1785	if (!reason && info->num_physpages != num_physpages)
1786		reason = "memory size";
1787	if (reason) {
1788		printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
1789		return -EPERM;
1790	}
1791	return 0;
1792}
1793
1794/**
1795 *	load_header - check the image header and copy data from it
1796 */
1797
1798static int
1799load_header(struct swsusp_info *info)
1800{
1801	int error;
1802
1803	restore_pblist = NULL;
1804	error = check_header(info);
1805	if (!error) {
1806		nr_copy_pages = info->image_pages;
1807		nr_meta_pages = info->pages - info->image_pages - 1;
1808	}
1809	return error;
1810}
1811
1812/**
1813 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
1814 *	the corresponding bit in the memory bitmap @bm
1815 */
1816static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
1817{
1818	int j;
1819
1820	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1821		if (unlikely(buf[j] == BM_END_OF_MAP))
1822			break;
1823
1824		if (memory_bm_pfn_present(bm, buf[j]))
1825			memory_bm_set_bit(bm, buf[j]);
1826		else
1827			return -EFAULT;
1828	}
1829
1830	return 0;
1831}
1832
1833/* List of "safe" pages that may be used to store data loaded from the suspend
1834 * image
1835 */
1836static struct linked_page *safe_pages_list;
1837
1838#ifdef CONFIG_HIGHMEM
1839/* struct highmem_pbe is used for creating the list of highmem pages that
1840 * should be restored atomically during the resume from disk, because the page
1841 * frames they have occupied before the suspend are in use.
1842 */
1843struct highmem_pbe {
1844	struct page *copy_page;	/* data is here now */
1845	struct page *orig_page;	/* data was here before the suspend */
1846	struct highmem_pbe *next;
1847};
1848
1849/* List of highmem PBEs needed for restoring the highmem pages that were
1850 * allocated before the suspend and included in the suspend image, but have
1851 * also been allocated by the "resume" kernel, so their contents cannot be
1852 * written directly to their "original" page frames.
1853 */
1854static struct highmem_pbe *highmem_pblist;
1855
1856/**
1857 *	count_highmem_image_pages - compute the number of highmem pages in the
1858 *	suspend image.  The bits in the memory bitmap @bm that correspond to the
1859 *	image pages are assumed to be set.
1860 */
1861
1862static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
1863{
1864	unsigned long pfn;
1865	unsigned int cnt = 0;
1866
1867	memory_bm_position_reset(bm);
1868	pfn = memory_bm_next_pfn(bm);
1869	while (pfn != BM_END_OF_MAP) {
1870		if (PageHighMem(pfn_to_page(pfn)))
1871			cnt++;
1872
1873		pfn = memory_bm_next_pfn(bm);
1874	}
1875	return cnt;
1876}
1877
1878/**
1879 *	prepare_highmem_image - try to allocate as many highmem pages as
1880 *	there are highmem image pages (@nr_highmem_p points to the variable
1881 *	containing the number of highmem image pages).  The pages that are
1882 *	"safe" (ie. will not be overwritten when the suspend image is
1883 *	restored) have the corresponding bits set in @bm (it must be
1884 *	uninitialized).
1885 *
1886 *	NOTE: This function should not be called if there are no highmem
1887 *	image pages.
1888 */
1889
1890static unsigned int safe_highmem_pages;
1891
1892static struct memory_bitmap *safe_highmem_bm;
1893
1894static int
1895prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
1896{
1897	unsigned int to_alloc;
1898
1899	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
1900		return -ENOMEM;
1901
1902	if (get_highmem_buffer(PG_SAFE))
1903		return -ENOMEM;
1904
1905	to_alloc = count_free_highmem_pages();
1906	if (to_alloc > *nr_highmem_p)
1907		to_alloc = *nr_highmem_p;
1908	else
1909		*nr_highmem_p = to_alloc;
1910
1911	safe_highmem_pages = 0;
1912	while (to_alloc-- > 0) {
1913		struct page *page;
1914
1915		page = alloc_page(__GFP_HIGHMEM);
1916		if (!swsusp_page_is_free(page)) {
1917			/* The page is "safe", set its bit in the bitmap */
1918			memory_bm_set_bit(bm, page_to_pfn(page));
1919			safe_highmem_pages++;
1920		}
1921		/* Mark the page as allocated */
1922		swsusp_set_page_forbidden(page);
1923		swsusp_set_page_free(page);
1924	}
1925	memory_bm_position_reset(bm);
1926	safe_highmem_bm = bm;
1927	return 0;
1928}
1929
1930/**
1931 *	get_highmem_page_buffer - for a given highmem image page, find the buffer
1932 *	that snapshot_write_next() should set for its caller to write to.
1933 *
1934 *	If the page is to be saved to its "original" page frame or a copy of
1935 *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
1936 *	the copy of the page is to be made in normal memory, so the address of
1937 *	the copy is returned.
1938 *
1939 *	If @buffer is returned, the caller of snapshot_write_next() will write
1940 *	the page's contents to @buffer, so they will have to be copied to the
1941 *	right location on the next call to snapshot_write_next() and it is done
1942 *	with the help of copy_last_highmem_page().  For this purpose, if
1943 *	@buffer is returned, @last_highmem_page is set to the page to which
1944 *	the data will have to be copied from @buffer.
1945 */
1946
1947static struct page *last_highmem_page;
1948
1949static void *
1950get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
1951{
1952	struct highmem_pbe *pbe;
1953	void *kaddr;
1954
1955	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
1956		/* We have allocated the "original" page frame and we can
1957		 * use it directly to store the loaded page.
1958		 */
1959		last_highmem_page = page;
1960		return buffer;
1961	}
1962	/* The "original" page frame has not been allocated and we have to
1963	 * use a "safe" page frame to store the loaded page.
1964	 */
1965	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
1966	if (!pbe) {
1967		swsusp_free();
1968		return ERR_PTR(-ENOMEM);
1969	}
1970	pbe->orig_page = page;
1971	if (safe_highmem_pages > 0) {
1972		struct page *tmp;
1973
1974		/* Copy of the page will be stored in high memory */
1975		kaddr = buffer;
1976		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
1977		safe_highmem_pages--;
1978		last_highmem_page = tmp;
1979		pbe->copy_page = tmp;
1980	} else {
1981		/* Copy of the page will be stored in normal memory */
1982		kaddr = safe_pages_list;
1983		safe_pages_list = safe_pages_list->next;
1984		pbe->copy_page = virt_to_page(kaddr);
1985	}
1986	pbe->next = highmem_pblist;
1987	highmem_pblist = pbe;
1988	return kaddr;
1989}
1990
1991/**
1992 *	copy_last_highmem_page - copy the contents of a highmem image from
1993 *	@buffer, where the caller of snapshot_write_next() has placed them,
1994 *	to the right location represented by @last_highmem_page.
1995 */
1996
1997static void copy_last_highmem_page(void)
1998{
1999	if (last_highmem_page) {
2000		void *dst;
2001
2002		dst = kmap_atomic(last_highmem_page, KM_USER0);
2003		copy_page(dst, buffer);
2004		kunmap_atomic(dst, KM_USER0);
2005		last_highmem_page = NULL;
2006	}
2007}
2008
2009static inline int last_highmem_page_copied(void)
2010{
2011	return !last_highmem_page;
2012}
2013
2014static inline void free_highmem_data(void)
2015{
2016	if (safe_highmem_bm)
2017		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2018
2019	if (buffer)
2020		free_image_page(buffer, PG_UNSAFE_CLEAR);
2021}
2022#else
2023static inline int get_safe_write_buffer(void) { return 0; }
2024
2025static unsigned int
2026count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2027
2028static inline int
2029prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2030{
2031	return 0;
2032}
2033
2034static inline void *
2035get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2036{
2037	return ERR_PTR(-EINVAL);
2038}
2039
2040static inline void copy_last_highmem_page(void) {}
2041static inline int last_highmem_page_copied(void) { return 1; }
2042static inline void free_highmem_data(void) {}
2043#endif /* CONFIG_HIGHMEM */
2044
2045/**
2046 *	prepare_image - use the memory bitmap @bm to mark the pages that will
2047 *	be overwritten in the process of restoring the system memory state
2048 *	from the suspend image ("unsafe" pages) and allocate memory for the
2049 *	image.
2050 *
2051 *	The idea is to allocate a new memory bitmap first and then allocate
2052 *	as many pages as needed for the image data, but not to assign these
2053 *	pages to specific tasks initially.  Instead, we just mark them as
2054 *	allocated and create a list of "safe" pages that will be used
2055 *	later.  On systems with high memory a list of "safe" highmem pages is
2056 *	also created.
2057 */
2058
2059#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2060
2061static int
2062prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2063{
2064	unsigned int nr_pages, nr_highmem;
2065	struct linked_page *sp_list, *lp;
2066	int error;
2067
2068	/* If there is no highmem, the buffer will not be necessary */
2069	free_image_page(buffer, PG_UNSAFE_CLEAR);
2070	buffer = NULL;
2071
2072	nr_highmem = count_highmem_image_pages(bm);
2073	error = mark_unsafe_pages(bm);
2074	if (error)
2075		goto Free;
2076
2077	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2078	if (error)
2079		goto Free;
2080
2081	duplicate_memory_bitmap(new_bm, bm);
2082	memory_bm_free(bm, PG_UNSAFE_KEEP);
2083	if (nr_highmem > 0) {
2084		error = prepare_highmem_image(bm, &nr_highmem);
2085		if (error)
2086			goto Free;
2087	}
2088	/* Reserve some safe pages for potential later use.
2089	 *
2090	 * NOTE: This way we make sure there will be enough safe pages for the
2091	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2092	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2093	 */
2094	sp_list = NULL;
2095	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
2096	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2097	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2098	while (nr_pages > 0) {
2099		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2100		if (!lp) {
2101			error = -ENOMEM;
2102			goto Free;
2103		}
2104		lp->next = sp_list;
2105		sp_list = lp;
2106		nr_pages--;
2107	}
2108	/* Preallocate memory for the image */
2109	safe_pages_list = NULL;
2110	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2111	while (nr_pages > 0) {
2112		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2113		if (!lp) {
2114			error = -ENOMEM;
2115			goto Free;
2116		}
2117		if (!swsusp_page_is_free(virt_to_page(lp))) {
2118			/* The page is "safe", add it to the list */
2119			lp->next = safe_pages_list;
2120			safe_pages_list = lp;
2121		}
2122		/* Mark the page as allocated */
2123		swsusp_set_page_forbidden(virt_to_page(lp));
2124		swsusp_set_page_free(virt_to_page(lp));
2125		nr_pages--;
2126	}
2127	/* Free the reserved safe pages so that chain_alloc() can use them */
2128	while (sp_list) {
2129		lp = sp_list->next;
2130		free_image_page(sp_list, PG_UNSAFE_CLEAR);
2131		sp_list = lp;
2132	}
2133	return 0;
2134
2135 Free:
2136	swsusp_free();
2137	return error;
2138}
2139
2140/**
2141 *	get_buffer - compute the address that snapshot_write_next() should
2142 *	set for its caller to write to.
2143 */
2144
2145static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2146{
2147	struct pbe *pbe;
2148	struct page *page;
2149	unsigned long pfn = memory_bm_next_pfn(bm);
2150
2151	if (pfn == BM_END_OF_MAP)
2152		return ERR_PTR(-EFAULT);
2153
2154	page = pfn_to_page(pfn);
2155	if (PageHighMem(page))
2156		return get_highmem_page_buffer(page, ca);
2157
2158	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2159		/* We have allocated the "original" page frame and we can
2160		 * use it directly to store the loaded page.
2161		 */
2162		return page_address(page);
2163
2164	/* The "original" page frame has not been allocated and we have to
2165	 * use a "safe" page frame to store the loaded page.
2166	 */
2167	pbe = chain_alloc(ca, sizeof(struct pbe));
2168	if (!pbe) {
2169		swsusp_free();
2170		return ERR_PTR(-ENOMEM);
2171	}
2172	pbe->orig_address = page_address(page);
2173	pbe->address = safe_pages_list;
2174	safe_pages_list = safe_pages_list->next;
2175	pbe->next = restore_pblist;
2176	restore_pblist = pbe;
2177	return pbe->address;
2178}
2179
2180/**
2181 *	snapshot_write_next - used for writing the system memory snapshot.
2182 *
2183 *	On the first call to it, @handle should point to a zeroed
2184 *	snapshot_handle structure.  The structure gets updated and a pointer
2185 *	to it should be passed to this function on every subsequent call.
2186 *
2187 *	On success the function returns a positive number.  Then, the caller
2188 *	is allowed to write up to the returned number of bytes to the memory
2189 *	location computed by the data_of() macro.
2190 *
2191 *	The function returns 0 to indicate the "end of file" condition,
2192 *	and a negative number is returned on error.  In such cases the
2193 *	structure pointed to by @handle is not updated and should not be used
2194 *	any more.
2195 */
2196
2197int snapshot_write_next(struct snapshot_handle *handle)
2198{
2199	static struct chain_allocator ca;
2200	int error = 0;
2201
2202	/* Check if we have already loaded the entire image */
2203	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2204		return 0;
2205
2206	handle->sync_read = 1;
2207
2208	if (!handle->cur) {
2209		if (!buffer)
2210			/* This makes the buffer be freed by swsusp_free() */
2211			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2212
2213		if (!buffer)
2214			return -ENOMEM;
2215
2216		handle->buffer = buffer;
2217	} else if (handle->cur == 1) {
2218		error = load_header(buffer);
2219		if (error)
2220			return error;
2221
2222		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2223		if (error)
2224			return error;
2225
2226	} else if (handle->cur <= nr_meta_pages + 1) {
2227		error = unpack_orig_pfns(buffer, &copy_bm);
2228		if (error)
2229			return error;
2230
2231		if (handle->cur == nr_meta_pages + 1) {
2232			error = prepare_image(&orig_bm, &copy_bm);
2233			if (error)
2234				return error;
2235
2236			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2237			memory_bm_position_reset(&orig_bm);
2238			restore_pblist = NULL;
2239			handle->buffer = get_buffer(&orig_bm, &ca);
2240			handle->sync_read = 0;
2241			if (IS_ERR(handle->buffer))
2242				return PTR_ERR(handle->buffer);
2243		}
2244	} else {
2245		copy_last_highmem_page();
2246		handle->buffer = get_buffer(&orig_bm, &ca);
2247		if (IS_ERR(handle->buffer))
2248			return PTR_ERR(handle->buffer);
2249		if (handle->buffer != buffer)
2250			handle->sync_read = 0;
2251	}
2252	handle->cur++;
2253	return PAGE_SIZE;
2254}
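
/*
 * Illustrative caller loop for snapshot_write_next() (a sketch, not part of
 * the original file).  load_image_chunk() is a hypothetical stand-in for
 * whatever actually reads image data, e.g. the swap reading code; real
 * callers also invoke snapshot_write_finalize() once the loop ends.
 */
#if 0	/* example only */
static int example_load_image(struct snapshot_handle *handle)
{
	int ret;

	for (;;) {
		ret = snapshot_write_next(handle);
		if (ret <= 0)
			return ret;	/* 0 means "image complete" */
		/* read the next ret (== PAGE_SIZE) bytes of image data */
		ret = load_image_chunk(data_of(*handle), ret);
		if (ret)
			return ret;
	}
}
#endif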
2255
2256/**
2257 *	snapshot_write_finalize - must be called after the last call to
2258 *	snapshot_write_next() in case the last page in the image happens
2259 *	to be a highmem page and its contents should be stored in the
2260 *	highmem.  Additionally, it releases the memory that will not be
2261 *	used any more.
2262 */
2263
2264void snapshot_write_finalize(struct snapshot_handle *handle)
2265{
2266	copy_last_highmem_page();
2267	/* Free only if we have loaded the image entirely */
2268	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2269		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2270		free_highmem_data();
2271	}
2272}
2273
2274int snapshot_image_loaded(struct snapshot_handle *handle)
2275{
2276	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2277			handle->cur <= nr_meta_pages + nr_copy_pages);
2278}
2279
2280#ifdef CONFIG_HIGHMEM
2281/* Assumes that @buf is ready and points to a "safe" page */
2282static inline void
2283swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2284{
2285	void *kaddr1, *kaddr2;
2286
2287	kaddr1 = kmap_atomic(p1, KM_USER0);
2288	kaddr2 = kmap_atomic(p2, KM_USER1);
2289	copy_page(buf, kaddr1);
2290	copy_page(kaddr1, kaddr2);
2291	copy_page(kaddr2, buf);
2292	kunmap_atomic(kaddr2, KM_USER1);
2293	kunmap_atomic(kaddr1, KM_USER0);
2294}
2295
2296/**
2297 *	restore_highmem - for each highmem page that was allocated before
2298 *	the suspend and included in the suspend image, and also has been
2299 *	allocated by the "resume" kernel, swap its current (ie. "before
2300 *	resume") contents with the previous (ie. "before suspend") one.
2301 *
2302 *	If the resume eventually fails, we can call this function once
2303 *	again and restore the "before resume" highmem state.
2304 */
2305
2306int restore_highmem(void)
2307{
2308	struct highmem_pbe *pbe = highmem_pblist;
2309	void *buf;
2310
2311	if (!pbe)
2312		return 0;
2313
2314	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2315	if (!buf)
2316		return -ENOMEM;
2317
2318	while (pbe) {
2319		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2320		pbe = pbe->next;
2321	}
2322	free_image_page(buf, PG_UNSAFE_CLEAR);
2323	return 0;
2324}
2325#endif /* CONFIG_HIGHMEM */
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/power/snapshot.c
   4 *
   5 * This file provides system snapshot/restore functionality for swsusp.
   6 *
   7 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
   8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 */
  10
  11#define pr_fmt(fmt) "PM: " fmt
  12
  13#include <linux/version.h>
  14#include <linux/module.h>
  15#include <linux/mm.h>
  16#include <linux/suspend.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/spinlock.h>
  20#include <linux/kernel.h>
  21#include <linux/pm.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/memblock.h>
  25#include <linux/nmi.h>
  26#include <linux/syscalls.h>
  27#include <linux/console.h>
  28#include <linux/highmem.h>
  29#include <linux/list.h>
  30#include <linux/slab.h>
  31#include <linux/compiler.h>
  32#include <linux/ktime.h>
  33#include <linux/set_memory.h>
  34
  35#include <linux/uaccess.h>
  36#include <asm/mmu_context.h>
  37#include <asm/pgtable.h>
  38#include <asm/tlbflush.h>
  39#include <asm/io.h>
  40
  41#include "power.h"
  42
  43#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_ARCH_HAS_SET_MEMORY)
  44static bool hibernate_restore_protection;
  45static bool hibernate_restore_protection_active;
  46
  47void enable_restore_image_protection(void)
  48{
  49	hibernate_restore_protection = true;
  50}
  51
  52static inline void hibernate_restore_protection_begin(void)
  53{
  54	hibernate_restore_protection_active = hibernate_restore_protection;
  55}
  56
  57static inline void hibernate_restore_protection_end(void)
  58{
  59	hibernate_restore_protection_active = false;
  60}
  61
  62static inline void hibernate_restore_protect_page(void *page_address)
  63{
  64	if (hibernate_restore_protection_active)
  65		set_memory_ro((unsigned long)page_address, 1);
  66}
  67
  68static inline void hibernate_restore_unprotect_page(void *page_address)
  69{
  70	if (hibernate_restore_protection_active)
  71		set_memory_rw((unsigned long)page_address, 1);
  72}
  73#else
  74static inline void hibernate_restore_protection_begin(void) {}
  75static inline void hibernate_restore_protection_end(void) {}
  76static inline void hibernate_restore_protect_page(void *page_address) {}
  77static inline void hibernate_restore_unprotect_page(void *page_address) {}
  78#endif /* CONFIG_STRICT_KERNEL_RWX  && CONFIG_ARCH_HAS_SET_MEMORY */
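
/*
 * Minimal sketch (not part of the original file) of how the switch above is
 * meant to be used: setup code calls enable_restore_image_protection() once,
 * e.g. from a kernel command-line handler, and every page loaded during
 * restore is then mapped read-only until the protection is ended.  The
 * option name below is illustrative only.
 */
#if 0	/* example only */
static int __init example_hibernate_setup(char *str)
{
	if (!strcmp(str, "protect_image"))
		enable_restore_image_protection();
	return 1;
}
__setup("example_hibernate=", example_hibernate_setup);
#endif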
  79
  80static int swsusp_page_is_free(struct page *);
  81static void swsusp_set_page_forbidden(struct page *);
  82static void swsusp_unset_page_forbidden(struct page *);
  83
  84/*
  85 * Number of bytes to reserve for memory allocations made by device drivers
  86 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
  87 * cause image creation to fail (tunable via /sys/power/reserved_size).
  88 */
  89unsigned long reserved_size;
  90
  91void __init hibernate_reserved_size_init(void)
  92{
  93	reserved_size = SPARE_PAGES * PAGE_SIZE;
  94}
  95
  96/*
  97 * Preferred image size in bytes (tunable via /sys/power/image_size).
  98 * When it is set to N, swsusp will do its best to ensure the image
  99 * size will not exceed N bytes, but if that is impossible, it will
 100 * try to create the smallest image possible.
 101 */
 102unsigned long image_size;
 103
 104void __init hibernate_image_size_init(void)
 105{
 106	image_size = ((totalram_pages() * 2) / 5) * PAGE_SIZE;
 107}
 108
 109/*
 110 * List of PBEs needed for restoring the pages that were allocated before
 111 * the suspend and included in the suspend image, but have also been
 112 * allocated by the "resume" kernel, so their contents cannot be written
 113 * directly to their "original" page frames.
 114 */
 115struct pbe *restore_pblist;
 116
 117/* struct linked_page is used to build chains of pages */
 118
 119#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))
 120
 121struct linked_page {
 122	struct linked_page *next;
 123	char data[LINKED_PAGE_DATA_SIZE];
 124} __packed;
 125
 126/*
 127 * List of "safe" pages (ie. pages that were not used by the image kernel
 128 * before hibernation) that may be used as temporary storage for image kernel
 129 * memory contents.
 130 */
 131static struct linked_page *safe_pages_list;
 132
 133/* Pointer to an auxiliary buffer (1 page) */
 134static void *buffer;
 135
 136#define PG_ANY		0
 137#define PG_SAFE		1
 138#define PG_UNSAFE_CLEAR	1
 139#define PG_UNSAFE_KEEP	0
 140
 141static unsigned int allocated_unsafe_pages;
 142
 143/**
 144 * get_image_page - Allocate a page for a hibernation image.
 145 * @gfp_mask: GFP mask for the allocation.
 146 * @safe_needed: Get pages that were not used before hibernation (restore only)
 147 *
 148 * During image restoration, for storing the PBE list and the image data, we can
 149 * only use memory pages that do not conflict with the pages used before
 150 * hibernation.  The "unsafe" pages have PageNosaveFree set and we count them
 151 * using allocated_unsafe_pages.
 152 *
 153 * Each allocated image page is marked as PageNosave and PageNosaveFree so that
 154 * swsusp_free() can release it.
 155 */
 156static void *get_image_page(gfp_t gfp_mask, int safe_needed)
 157{
 158	void *res;
 159
 160	res = (void *)get_zeroed_page(gfp_mask);
 161	if (safe_needed)
 162		while (res && swsusp_page_is_free(virt_to_page(res))) {
 163			/* The page is unsafe, mark it for swsusp_free() */
 164			swsusp_set_page_forbidden(virt_to_page(res));
 165			allocated_unsafe_pages++;
 166			res = (void *)get_zeroed_page(gfp_mask);
 167		}
 168	if (res) {
 169		swsusp_set_page_forbidden(virt_to_page(res));
 170		swsusp_set_page_free(virt_to_page(res));
 171	}
 172	return res;
 173}
 174
 175static void *__get_safe_page(gfp_t gfp_mask)
 176{
 177	if (safe_pages_list) {
 178		void *ret = safe_pages_list;
 179
 180		safe_pages_list = safe_pages_list->next;
 181		memset(ret, 0, PAGE_SIZE);
 182		return ret;
 183	}
 184	return get_image_page(gfp_mask, PG_SAFE);
 185}
 186
 187unsigned long get_safe_page(gfp_t gfp_mask)
 188{
 189	return (unsigned long)__get_safe_page(gfp_mask);
 190}
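
/*
 * Illustrative use of get_safe_page() (a sketch, not part of the original
 * file): code running while the image is being restored can use it to get
 * scratch pages, e.g. for temporary page tables, that are guaranteed not
 * to collide with any page frame the image data will be written to.
 */
#if 0	/* example only */
static void *example_alloc_restore_scratch(void)
{
	/* the returned page is already zeroed */
	return (void *)get_safe_page(GFP_ATOMIC);
}
#endif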
 191
 192static struct page *alloc_image_page(gfp_t gfp_mask)
 193{
 194	struct page *page;
 195
 196	page = alloc_page(gfp_mask);
 197	if (page) {
 198		swsusp_set_page_forbidden(page);
 199		swsusp_set_page_free(page);
 200	}
 201	return page;
 202}
 203
 204static void recycle_safe_page(void *page_address)
 205{
 206	struct linked_page *lp = page_address;
 207
 208	lp->next = safe_pages_list;
 209	safe_pages_list = lp;
 210}
 211
 212/**
 213 * free_image_page - Free a page allocated for hibernation image.
 214 * @addr: Address of the page to free.
 215 * @clear_nosave_free: If set, clear the PageNosaveFree bit for the page.
 216 *
 217 * The page to free should have been allocated by get_image_page() (page flags
 218 * set by it are affected).
 219 */
 220static inline void free_image_page(void *addr, int clear_nosave_free)
 221{
 222	struct page *page;
 223
 224	BUG_ON(!virt_addr_valid(addr));
 225
 226	page = virt_to_page(addr);
 227
 228	swsusp_unset_page_forbidden(page);
 229	if (clear_nosave_free)
 230		swsusp_unset_page_free(page);
 231
 232	__free_page(page);
 233}
 234
 235static inline void free_list_of_pages(struct linked_page *list,
 236				      int clear_page_nosave)
 237{
 238	while (list) {
 239		struct linked_page *lp = list->next;
 240
 241		free_image_page(list, clear_page_nosave);
 242		list = lp;
 243	}
 244}
 245
 246/*
 247 * struct chain_allocator is used for allocating small objects out of
 248 * a linked list of pages called 'the chain'.
 249 *
 250 * The chain grows each time there is no room for a new object in
 251 * the current page.  The allocated objects cannot be freed individually.
 252 * It is only possible to free them all at once, by freeing the entire
 253 * chain.
 254 *
 255 * NOTE: The chain allocator may be inefficient if the allocated objects
 256 * are not much smaller than PAGE_SIZE.
 257 */
 258struct chain_allocator {
 259	struct linked_page *chain;	/* the chain */
 260	unsigned int used_space;	/* total size of objects allocated out
 261					   of the current page */
 262	gfp_t gfp_mask;		/* mask for allocating pages */
 263	int safe_needed;	/* if set, only "safe" pages are allocated */
 264};
 265
 266static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
 267		       int safe_needed)
 268{
 269	ca->chain = NULL;
 270	ca->used_space = LINKED_PAGE_DATA_SIZE;
 271	ca->gfp_mask = gfp_mask;
 272	ca->safe_needed = safe_needed;
 273}
 274
 275static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
 276{
 277	void *ret;
 278
 279	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
 280		struct linked_page *lp;
 281
 282		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
 283					get_image_page(ca->gfp_mask, PG_ANY);
 284		if (!lp)
 285			return NULL;
 286
 287		lp->next = ca->chain;
 288		ca->chain = lp;
 289		ca->used_space = 0;
 290	}
 291	ret = ca->chain->data + ca->used_space;
 292	ca->used_space += size;
 293	return ret;
 294}
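
/*
 * Illustrative chain allocator usage (a sketch, not part of the original
 * file): many small objects are carved out of chained pages and then
 * released in one go by freeing the whole chain.
 */
#if 0	/* example only */
static void example_chain_usage(void)
{
	struct chain_allocator ca;
	struct pbe *pbe;
	int i;

	chain_init(&ca, GFP_KERNEL, PG_ANY);
	for (i = 0; i < 128; i++) {
		pbe = chain_alloc(&ca, sizeof(struct pbe));
		if (!pbe)
			break;		/* out of memory */
	}
	/* individual objects cannot be freed, only the entire chain */
	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);
}
#endif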
 295
 296/**
 297 * Data types related to memory bitmaps.
 298 *
 299 * Memory bitmap is a structure consisting of many linked lists of
 300 * objects.  The main list's elements are of type struct zone_bitmap
 301 * and each of them corresponds to one zone.  For each zone bitmap
 302 * object there is a list of objects of type struct bm_block that
 303 * represent the blocks of the bitmap in which information is stored.
 304 *
 305 * struct memory_bitmap contains a pointer to the main list of zone
 306 * bitmap objects, a struct bm_position used for browsing the bitmap,
 307 * and a pointer to the list of pages used for allocating all of the
 308 * zone bitmap objects and bitmap block objects.
 309 *
 310 * NOTE: It has to be possible to lay out the bitmap in memory
 311 * using only allocations of order 0.  Additionally, the bitmap is
 312 * designed to work with arbitrary number of zones (this is over the
 313 * top for now, but let's avoid making unnecessary assumptions ;-).
 314 *
 315 * struct zone_bitmap contains a pointer to a list of bitmap block
 316 * objects and a pointer to the bitmap block object that has been
 317 * most recently used for setting bits.  Additionally, it contains the
 318 * PFNs that correspond to the start and end of the represented zone.
 319 *
 320 * struct bm_block contains a pointer to the memory page in which
 321 * information is stored (in the form of a block of bitmap).
 322 * It also contains the pfns that correspond to the start and end of
 323 * the represented memory area.
 324 *
 325 * The memory bitmap is organized as a radix tree to guarantee fast random
 326 * access to the bits. There is one radix tree for each zone (as returned
 327 * from create_mem_extents).
 328 *
 329 * One radix tree is represented by one struct mem_zone_bm_rtree. There are
 330 * two linked lists for the nodes of the tree, one for the inner nodes and
 331 * one for the leave nodes. The linked leave nodes are used for fast linear
 332 * access of the memory bitmap.
 333 *
 334 * The struct rtree_node represents one node of the radix tree.
 335 */
 336
 337#define BM_END_OF_MAP	(~0UL)
 338
 339#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
 340#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
 341#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
 342
 343/*
 344 * struct rtree_node is a wrapper struct to link the nodes
 345 * of the rtree together for easy linear iteration over
 346 * bits and easy freeing
 347 */
 348struct rtree_node {
 349	struct list_head list;
 350	unsigned long *data;
 351};
 352
 353/*
 354 * struct mem_zone_bm_rtree represents a bitmap used for one
 355 * populated memory zone.
 356 */
 357struct mem_zone_bm_rtree {
 358	struct list_head list;		/* Link Zones together         */
 359	struct list_head nodes;		/* Radix Tree inner nodes      */
 360	struct list_head leaves;	/* Radix Tree leaves           */
 361	unsigned long start_pfn;	/* Zone start page frame       */
 362	unsigned long end_pfn;		/* Zone end page frame + 1     */
 363	struct rtree_node *rtree;	/* Radix Tree Root             */
 364	int levels;			/* Number of Radix Tree Levels */
 365	unsigned int blocks;		/* Number of Bitmap Blocks     */
 366};
 367
 368/* struct bm_position is used for browsing memory bitmaps */
 369
 370struct bm_position {
 371	struct mem_zone_bm_rtree *zone;
 372	struct rtree_node *node;
 373	unsigned long node_pfn;
 374	int node_bit;
 375};
 376
 377struct memory_bitmap {
 378	struct list_head zones;
 379	struct linked_page *p_list;	/* list of pages used to store zone
 380					   bitmap objects and bitmap block
 381					   objects */
 382	struct bm_position cur;	/* most recently used bit position */
 383};
 384
 385/* Functions that operate on memory bitmaps */
 386
 387#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
 388#if BITS_PER_LONG == 32
 389#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
 390#else
 391#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
 392#endif
 393#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
 394
 395/**
 396 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 397 *
 398 * This function is used to allocate inner nodes as well as the
 399 * leave nodes of the radix tree. It also adds the node to the
 400 * corresponding linked list passed in by the *list parameter.
 401 */
 402static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
 403					   struct chain_allocator *ca,
 404					   struct list_head *list)
 405{
 406	struct rtree_node *node;
 407
 408	node = chain_alloc(ca, sizeof(struct rtree_node));
 409	if (!node)
 410		return NULL;
 411
 412	node->data = get_image_page(gfp_mask, safe_needed);
 413	if (!node->data)
 414		return NULL;
 415
 416	list_add_tail(&node->list, list);
 417
 418	return node;
 419}
 420
 421/**
 422 * add_rtree_block - Add a new leaf node to the radix tree.
 423 *
 424 * The leaf nodes need to be allocated in order to keep the leaves
 425 * linked list in order. This is guaranteed by the zone->blocks
 426 * counter.
 427 */
 428static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
 429			   int safe_needed, struct chain_allocator *ca)
 430{
 431	struct rtree_node *node, *block, **dst;
 432	unsigned int levels_needed, block_nr;
 433	int i;
 434
 435	block_nr = zone->blocks;
 436	levels_needed = 0;
 437
 438	/* How many levels do we need for this block nr? */
 439	while (block_nr) {
 440		levels_needed += 1;
 441		block_nr >>= BM_RTREE_LEVEL_SHIFT;
 442	}
 443
 444	/* Make sure the rtree has enough levels */
 445	for (i = zone->levels; i < levels_needed; i++) {
 446		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 447					&zone->nodes);
 448		if (!node)
 449			return -ENOMEM;
 450
 451		node->data[0] = (unsigned long)zone->rtree;
 452		zone->rtree = node;
 453		zone->levels += 1;
 454	}
 455
 456	/* Allocate new block */
 457	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
 458	if (!block)
 459		return -ENOMEM;
 460
 461	/* Now walk the rtree to insert the block */
 462	node = zone->rtree;
 463	dst = &zone->rtree;
 464	block_nr = zone->blocks;
 465	for (i = zone->levels; i > 0; i--) {
 466		int index;
 467
 468		if (!node) {
 469			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
 470						&zone->nodes);
 471			if (!node)
 472				return -ENOMEM;
 473			*dst = node;
 474		}
 475
 476		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 477		index &= BM_RTREE_LEVEL_MASK;
 478		dst = (struct rtree_node **)&((*dst)->data[index]);
 479		node = *dst;
 480	}
 481
 482	zone->blocks += 1;
 483	*dst = block;
 484
 485	return 0;
 486}
 487
 488static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 489			       int clear_nosave_free);
 490
 491/**
 492 * create_zone_bm_rtree - Create a radix tree for one zone.
 493 *
 494 * Allocates the mem_zone_bm_rtree structure and initializes it.
 495 * This function also allocates and builds the radix tree for the
 496 * zone.
 497 */
 498static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
 499						      int safe_needed,
 500						      struct chain_allocator *ca,
 501						      unsigned long start,
 502						      unsigned long end)
 503{
 504	struct mem_zone_bm_rtree *zone;
 505	unsigned int i, nr_blocks;
 506	unsigned long pages;
 507
 508	pages = end - start;
 509	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
 510	if (!zone)
 511		return NULL;
 512
 513	INIT_LIST_HEAD(&zone->nodes);
 514	INIT_LIST_HEAD(&zone->leaves);
 515	zone->start_pfn = start;
 516	zone->end_pfn = end;
 517	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
 518
 519	for (i = 0; i < nr_blocks; i++) {
 520		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
 521			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
 522			return NULL;
 523		}
 524	}
 525
 526	return zone;
 527}
 528
 529/**
 530 * free_zone_bm_rtree - Free the memory of the radix tree.
 531 *
 532 * Free all node pages of the radix tree. The mem_zone_bm_rtree
 533 * structure itself is not freed here nor are the rtree_node
 534 * structs.
 535 */
 536static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
 537			       int clear_nosave_free)
 538{
 539	struct rtree_node *node;
 540
 541	list_for_each_entry(node, &zone->nodes, list)
 542		free_image_page(node->data, clear_nosave_free);
 543
 544	list_for_each_entry(node, &zone->leaves, list)
 545		free_image_page(node->data, clear_nosave_free);
 546}
 547
 548static void memory_bm_position_reset(struct memory_bitmap *bm)
 549{
 550	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
 551				  list);
 552	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 553				  struct rtree_node, list);
 554	bm->cur.node_pfn = 0;
 555	bm->cur.node_bit = 0;
 556}
 557
 558static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
 559
 560struct mem_extent {
 561	struct list_head hook;
 562	unsigned long start;
 563	unsigned long end;
 564};
 565
 566/**
 567 * free_mem_extents - Free a list of memory extents.
 568 * @list: List of extents to free.
 569 */
 570static void free_mem_extents(struct list_head *list)
 571{
 572	struct mem_extent *ext, *aux;
 573
 574	list_for_each_entry_safe(ext, aux, list, hook) {
 575		list_del(&ext->hook);
 576		kfree(ext);
 577	}
 578}
 579
 580/**
 581 * create_mem_extents - Create a list of memory extents.
 582 * @list: List to put the extents into.
 583 * @gfp_mask: Mask to use for memory allocations.
 584 *
 585 * The extents represent contiguous ranges of PFNs.
 586 */
 587static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
 588{
 589	struct zone *zone;
 590
 591	INIT_LIST_HEAD(list);
 592
 593	for_each_populated_zone(zone) {
 594		unsigned long zone_start, zone_end;
 595		struct mem_extent *ext, *cur, *aux;
 596
 597		zone_start = zone->zone_start_pfn;
 598		zone_end = zone_end_pfn(zone);
 599
 600		list_for_each_entry(ext, list, hook)
 601			if (zone_start <= ext->end)
 602				break;
 603
 604		if (&ext->hook == list || zone_end < ext->start) {
 605			/* New extent is necessary */
 606			struct mem_extent *new_ext;
 607
 608			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
 609			if (!new_ext) {
 610				free_mem_extents(list);
 611				return -ENOMEM;
 612			}
 613			new_ext->start = zone_start;
 614			new_ext->end = zone_end;
 615			list_add_tail(&new_ext->hook, &ext->hook);
 616			continue;
 617		}
 618
 619		/* Merge this zone's range of PFNs with the existing one */
 620		if (zone_start < ext->start)
 621			ext->start = zone_start;
 622		if (zone_end > ext->end)
 623			ext->end = zone_end;
 624
 625		/* More merging may be possible */
 626		cur = ext;
 627		list_for_each_entry_safe_continue(cur, aux, list, hook) {
 628			if (zone_end < cur->start)
 629				break;
 630			if (zone_end < cur->end)
 631				ext->end = cur->end;
 632			list_del(&cur->hook);
 633			kfree(cur);
 634		}
 635	}
 636
 637	return 0;
 638}
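
/*
 * Worked example (not part of the original file): with populated zones
 * spanning PFNs [0, 4096) and [2048, 8192), the first zone creates the
 * extent [0, 4096) and the second one overlaps it, so the ranges are
 * merged and the resulting list holds the single extent [0, 8192).
 */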
 639
 640/**
 641 * memory_bm_create - Allocate memory for a memory bitmap.
 642 */
 643static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
 644			    int safe_needed)
 645{
 646	struct chain_allocator ca;
 647	struct list_head mem_extents;
 648	struct mem_extent *ext;
 649	int error;
 650
 651	chain_init(&ca, gfp_mask, safe_needed);
 652	INIT_LIST_HEAD(&bm->zones);
 653
 654	error = create_mem_extents(&mem_extents, gfp_mask);
 655	if (error)
 656		return error;
 657
 658	list_for_each_entry(ext, &mem_extents, hook) {
 659		struct mem_zone_bm_rtree *zone;
 660
 661		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
 662					    ext->start, ext->end);
 663		if (!zone) {
 664			error = -ENOMEM;
 665			goto Error;
 666		}
 667		list_add_tail(&zone->list, &bm->zones);
 668	}
 669
 670	bm->p_list = ca.chain;
 671	memory_bm_position_reset(bm);
 672 Exit:
 673	free_mem_extents(&mem_extents);
 674	return error;
 675
 676 Error:
 677	bm->p_list = ca.chain;
 678	memory_bm_free(bm, PG_UNSAFE_CLEAR);
 679	goto Exit;
 680}
 681
 682/**
 683 * memory_bm_free - Free memory occupied by the memory bitmap.
 684 * @bm: Memory bitmap.
 685 */
 686static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 687{
 688	struct mem_zone_bm_rtree *zone;
 689
 690	list_for_each_entry(zone, &bm->zones, list)
 691		free_zone_bm_rtree(zone, clear_nosave_free);
 692
 693	free_list_of_pages(bm->p_list, clear_nosave_free);
 694
 695	INIT_LIST_HEAD(&bm->zones);
 696}
 697
 698/**
 699 * memory_bm_find_bit - Find the bit for a given PFN in a memory bitmap.
 700 *
 701 * Find the bit in memory bitmap @bm that corresponds to the given PFN.
 702 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 703 *
 704 * Walk the radix tree to find the page containing the bit that represents @pfn
 705 * and return the position of the bit in @addr and @bit_nr.
 706 */
 707static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 708			      void **addr, unsigned int *bit_nr)
 709{
 710	struct mem_zone_bm_rtree *curr, *zone;
 711	struct rtree_node *node;
 712	int i, block_nr;
 713
 714	zone = bm->cur.zone;
 715
 716	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
 717		goto zone_found;
 718
 719	zone = NULL;
 720
 721	/* Find the right zone */
 722	list_for_each_entry(curr, &bm->zones, list) {
 723		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
 724			zone = curr;
 725			break;
 726		}
 727	}
 728
 729	if (!zone)
 730		return -EFAULT;
 731
 732zone_found:
 733	/*
 734	 * We have found the zone. Now walk the radix tree to find the leaf node
 735	 * for our PFN.
 736	 */
 737	node = bm->cur.node;
 738	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
 739		goto node_found;
 740
 741	node      = zone->rtree;
 742	block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
 743
 744	for (i = zone->levels; i > 0; i--) {
 745		int index;
 746
 747		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
 748		index &= BM_RTREE_LEVEL_MASK;
 749		BUG_ON(node->data[index] == 0);
 750		node = (struct rtree_node *)node->data[index];
 751	}
 752
 753node_found:
 754	/* Update last position */
 755	bm->cur.zone = zone;
 756	bm->cur.node = node;
 757	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
 758
 759	/* Set return values */
 760	*addr = node->data;
 761	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 762
 763	return 0;
 764}
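
/*
 * Worked example of the lookup arithmetic above (a sketch, not part of the
 * original file), for a 64-bit kernel with 4 KiB pages: BM_BLOCK_SHIFT is
 * 12 + 3 = 15, so one leaf page covers 32768 PFNs, and BM_RTREE_LEVEL_SHIFT
 * is 12 - 3 = 9, so each inner node holds 512 slots.
 */
#if 0	/* example only */
static void example_decompose_pfn(struct mem_zone_bm_rtree *zone,
				  unsigned long pfn)
{
	int block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
	int bit_nr   = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
	int slot     = block_nr & BM_RTREE_LEVEL_MASK;

	pr_info("leaf block %d, bit %d, lowest-level slot %d\n",
		block_nr, bit_nr, slot);
}
#endif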
 765
 766static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
 767{
 768	void *addr;
 769	unsigned int bit;
 770	int error;
 771
 772	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 773	BUG_ON(error);
 774	set_bit(bit, addr);
 775}
 776
 777static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 778{
 779	void *addr;
 780	unsigned int bit;
 781	int error;
 782
 783	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 784	if (!error)
 785		set_bit(bit, addr);
 786
 787	return error;
 788}
 789
 790static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 791{
 792	void *addr;
 793	unsigned int bit;
 794	int error;
 795
 796	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 797	BUG_ON(error);
 798	clear_bit(bit, addr);
 799}
 800
 801static void memory_bm_clear_current(struct memory_bitmap *bm)
 802{
 803	int bit;
 804
 805	bit = max(bm->cur.node_bit - 1, 0);
 806	clear_bit(bit, bm->cur.node->data);
 807}
 808
 809static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 810{
 811	void *addr;
 812	unsigned int bit;
 813	int error;
 814
 815	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 816	BUG_ON(error);
 817	return test_bit(bit, addr);
 818}
 819
 820static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 821{
 822	void *addr;
 823	unsigned int bit;
 824
 825	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 826}
 827
 828/*
 829 * rtree_next_node - Jump to the next leaf node.
 830 *
 831 * Set the position to the beginning of the next node in the
 832 * memory bitmap. This is either the next node in the current
 833 * zone's radix tree or the first node in the radix tree of the
 834 * next zone.
 835 *
 836 * Return true if there is a next node, false otherwise.
 837 */
 838static bool rtree_next_node(struct memory_bitmap *bm)
 839{
 840	if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
 841		bm->cur.node = list_entry(bm->cur.node->list.next,
 842					  struct rtree_node, list);
 843		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
 844		bm->cur.node_bit  = 0;
 845		touch_softlockup_watchdog();
 846		return true;
 847	}
 848
 849	/* No more nodes, goto next zone */
 850	if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
 851		bm->cur.zone = list_entry(bm->cur.zone->list.next,
 852				  struct mem_zone_bm_rtree, list);
 853		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
 854					  struct rtree_node, list);
 855		bm->cur.node_pfn = 0;
 856		bm->cur.node_bit = 0;
 857		return true;
 858	}
 859
 860	/* No more zones */
 861	return false;
 862}
 863
 864/**
 865 * memory_bm_next_pfn - Find the next set bit in a memory bitmap.
 866 * @bm: Memory bitmap.
 867 *
 868 * Starting from the last returned position this function searches for the next
 869 * set bit in @bm and returns the PFN represented by it.  If no more bits are
 870 * set, BM_END_OF_MAP is returned.
 871 *
 872 * It is required to run memory_bm_position_reset() before the first call to
 873 * this function for the given memory bitmap.
 874 */
 875static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 876{
 877	unsigned long bits, pfn, pages;
 878	int bit;
 879
 880	do {
 881		pages	  = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
 882		bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
 883		bit	  = find_next_bit(bm->cur.node->data, bits,
 884					  bm->cur.node_bit);
 885		if (bit < bits) {
 886			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
 887			bm->cur.node_bit = bit + 1;
 888			return pfn;
 889		}
 890	} while (rtree_next_node(bm));
 891
 892	return BM_END_OF_MAP;
 893}
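
/*
 * Illustrative iteration over all bits set in a bitmap (a sketch, not part
 * of the original file).  Note the mandatory memory_bm_position_reset()
 * before the first memory_bm_next_pfn() call.
 */
#if 0	/* example only */
static unsigned int example_count_set_bits(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int n = 0;

	memory_bm_position_reset(bm);
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm))
		n++;

	return n;
}
#endif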
 894
 895/*
 896 * This structure represents a range of page frames the contents of which
 897 * should not be saved during hibernation.
 898 */
 899struct nosave_region {
 900	struct list_head list;
 901	unsigned long start_pfn;
 902	unsigned long end_pfn;
 903};
 904
 905static LIST_HEAD(nosave_regions);
 906
 907static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
 908{
 909	struct rtree_node *node;
 910
 911	list_for_each_entry(node, &zone->nodes, list)
 912		recycle_safe_page(node->data);
 913
 914	list_for_each_entry(node, &zone->leaves, list)
 915		recycle_safe_page(node->data);
 916}
 917
 918static void memory_bm_recycle(struct memory_bitmap *bm)
 919{
 920	struct mem_zone_bm_rtree *zone;
 921	struct linked_page *p_list;
 922
 923	list_for_each_entry(zone, &bm->zones, list)
 924		recycle_zone_bm_rtree(zone);
 925
 926	p_list = bm->p_list;
 927	while (p_list) {
 928		struct linked_page *lp = p_list;
 929
 930		p_list = lp->next;
 931		recycle_safe_page(lp);
 932	}
 933}
 934
 935/**
 936 * register_nosave_region - Register a region of unsaveable memory.
 937 *
 938 * Register a range of page frames the contents of which should not be saved
 939 * during hibernation (to be used in the early initialization code).
 940 */
 941void __init __register_nosave_region(unsigned long start_pfn,
 942				     unsigned long end_pfn, int use_kmalloc)
 943{
 944	struct nosave_region *region;
 945
 946	if (start_pfn >= end_pfn)
 947		return;
 948
 949	if (!list_empty(&nosave_regions)) {
 950		/* Try to extend the previous region (they should be sorted) */
 951		region = list_entry(nosave_regions.prev,
 952					struct nosave_region, list);
 953		if (region->end_pfn == start_pfn) {
 954			region->end_pfn = end_pfn;
 955			goto Report;
 956		}
 957	}
 958	if (use_kmalloc) {
 959		/* During init, this shouldn't fail */
 960		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
 961		BUG_ON(!region);
 962	} else {
 963		/* This allocation cannot fail */
 964		region = memblock_alloc(sizeof(struct nosave_region),
 965					SMP_CACHE_BYTES);
 966		if (!region)
 967			panic("%s: Failed to allocate %zu bytes\n", __func__,
 968			      sizeof(struct nosave_region));
 969	}
 970	region->start_pfn = start_pfn;
 971	region->end_pfn = end_pfn;
 972	list_add_tail(&region->list, &nosave_regions);
 973 Report:
 974	pr_info("Registered nosave memory: [mem %#010llx-%#010llx]\n",
 975		(unsigned long long) start_pfn << PAGE_SHIFT,
 976		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 977}
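
/*
 * Illustrative registration (a sketch, not part of the original file):
 * early platform code excluding a firmware-owned range from the image via
 * the register_nosave_region() wrapper declared in <linux/suspend.h>.
 * The address range below is hypothetical.
 */
#if 0	/* example only */
static void __init example_reserve_firmware_range(void)
{
	register_nosave_region(PFN_DOWN(0x000a0000), PFN_UP(0x00100000));
}
#endif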
 978
 979/*
 980 * Set bits in this map correspond to the page frames the contents of which
 981 * should not be saved during the suspend.
 982 */
 983static struct memory_bitmap *forbidden_pages_map;
 984
 985/* Set bits in this map correspond to free page frames. */
 986static struct memory_bitmap *free_pages_map;
 987
 988/*
 989 * Each page frame allocated for creating the image is marked by setting the
 990 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
 991 */
 992
 993void swsusp_set_page_free(struct page *page)
 994{
 995	if (free_pages_map)
 996		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
 997}
 998
 999static int swsusp_page_is_free(struct page *page)
1000{
1001	return free_pages_map ?
1002		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
1003}
1004
1005void swsusp_unset_page_free(struct page *page)
1006{
1007	if (free_pages_map)
1008		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
1009}
1010
1011static void swsusp_set_page_forbidden(struct page *page)
1012{
1013	if (forbidden_pages_map)
1014		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
1015}
1016
1017int swsusp_page_is_forbidden(struct page *page)
1018{
1019	return forbidden_pages_map ?
1020		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
1021}
1022
1023static void swsusp_unset_page_forbidden(struct page *page)
1024{
1025	if (forbidden_pages_map)
1026		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
1027}
1028
1029/**
1030 * mark_nosave_pages - Mark pages that should not be saved.
1031 * @bm: Memory bitmap.
1032 *
1033 * Set the bits in @bm that correspond to the page frames the contents of which
1034 * should not be saved.
1035 */
1036static void mark_nosave_pages(struct memory_bitmap *bm)
1037{
1038	struct nosave_region *region;
1039
1040	if (list_empty(&nosave_regions))
1041		return;
1042
1043	list_for_each_entry(region, &nosave_regions, list) {
1044		unsigned long pfn;
1045
1046		pr_debug("Marking nosave pages: [mem %#010llx-%#010llx]\n",
1047			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
1048			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
1049				- 1);
1050
1051		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1052			if (pfn_valid(pfn)) {
1053				/*
1054				 * It is safe to ignore the result of
1055				 * mem_bm_set_bit_check() here, since we won't
1056				 * touch the PFNs for which the error is
1057				 * returned anyway.
1058				 */
1059				mem_bm_set_bit_check(bm, pfn);
1060			}
1061	}
1062}
1063
1064/**
1065 * create_basic_memory_bitmaps - Create bitmaps to hold basic page information.
1066 *
1067 * Create bitmaps needed for marking page frames that should not be saved and
1068 * free page frames.  The forbidden_pages_map and free_pages_map pointers are
1069 * only modified if everything goes well, because we don't want the bits to be
1070 * touched before both bitmaps are set up.
1071 */
1072int create_basic_memory_bitmaps(void)
1073{
1074	struct memory_bitmap *bm1, *bm2;
1075	int error = 0;
1076
1077	if (forbidden_pages_map && free_pages_map)
1078		return 0;
1079	else
1080		BUG_ON(forbidden_pages_map || free_pages_map);
1081
1082	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1083	if (!bm1)
1084		return -ENOMEM;
1085
1086	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1087	if (error)
1088		goto Free_first_object;
1089
1090	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1091	if (!bm2)
1092		goto Free_first_bitmap;
1093
1094	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1095	if (error)
1096		goto Free_second_object;
1097
1098	forbidden_pages_map = bm1;
1099	free_pages_map = bm2;
1100	mark_nosave_pages(forbidden_pages_map);
1101
1102	pr_debug("Basic memory bitmaps created\n");
1103
1104	return 0;
1105
1106 Free_second_object:
1107	kfree(bm2);
1108 Free_first_bitmap:
1109	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1110 Free_first_object:
1111	kfree(bm1);
1112	return -ENOMEM;
1113}
1114
1115/**
1116 * free_basic_memory_bitmaps - Free memory bitmaps holding basic information.
1117 *
1118 * Free memory bitmaps allocated by create_basic_memory_bitmaps().  The
1119 * auxiliary pointers are necessary so that the bitmaps themselves are not
1120 * referred to while they are being freed.
1121 */
1122void free_basic_memory_bitmaps(void)
1123{
1124	struct memory_bitmap *bm1, *bm2;
1125
1126	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1127		return;
1128
1129	bm1 = forbidden_pages_map;
1130	bm2 = free_pages_map;
1131	forbidden_pages_map = NULL;
1132	free_pages_map = NULL;
1133	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1134	kfree(bm1);
1135	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1136	kfree(bm2);
1137
1138	pr_debug("Basic memory bitmaps freed\n");
1139}
1140
1141void clear_free_pages(void)
1142{
1143#ifdef CONFIG_PAGE_POISONING_ZERO
1144	struct memory_bitmap *bm = free_pages_map;
1145	unsigned long pfn;
1146
1147	if (WARN_ON(!(free_pages_map)))
1148		return;
1149
1150	memory_bm_position_reset(bm);
1151	pfn = memory_bm_next_pfn(bm);
1152	while (pfn != BM_END_OF_MAP) {
1153		if (pfn_valid(pfn))
1154			clear_highpage(pfn_to_page(pfn));
1155
1156		pfn = memory_bm_next_pfn(bm);
1157	}
1158	memory_bm_position_reset(bm);
1159	pr_info("free pages cleared after restore\n");
1160#endif /* PAGE_POISONING_ZERO */
1161}
1162
1163/**
1164 * snapshot_additional_pages - Estimate the number of extra pages needed.
1165 * @zone: Memory zone to carry out the computation for.
1166 *
1167 * Estimate the number of additional pages needed for setting up a hibernation
1168 * image data structures for @zone (usually, the returned value is greater than
1169 * the exact number).
1170 */
1171unsigned int snapshot_additional_pages(struct zone *zone)
1172{
1173	unsigned int rtree, nodes;
1174
1175	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1176	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1177			      LINKED_PAGE_DATA_SIZE);
1178	while (nodes > 1) {
1179		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1180		rtree += nodes;
1181	}
1182
1183	return 2 * rtree;
1184}
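
/*
 * Worked example (not part of the original file), assuming a 64-bit kernel
 * with 4 KiB pages: a zone spanning 4 GiB has spanned_pages = 1048576, so
 * rtree = nodes = DIV_ROUND_UP(1048576, 32768) = 32 leaf pages.  The 32
 * struct rtree_node wrappers (24 bytes each) fit in a single linked page
 * (+1), and one more tree level collapses the 32 slots into one inner node
 * (+1), giving rtree = 34 and a return value of 2 * 34 = 68 pages, the
 * factor of two presumably covering the two bitmaps (orig_bm and copy_bm)
 * created while making the image.
 */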
1185
1186#ifdef CONFIG_HIGHMEM
1187/**
1188 * count_free_highmem_pages - Compute the total number of free highmem pages.
1189 *
1190 * The returned number is system-wide.
1191 */
1192static unsigned int count_free_highmem_pages(void)
1193{
1194	struct zone *zone;
1195	unsigned int cnt = 0;
1196
1197	for_each_populated_zone(zone)
1198		if (is_highmem(zone))
1199			cnt += zone_page_state(zone, NR_FREE_PAGES);
1200
1201	return cnt;
1202}
1203
1204/**
1205 * saveable_highmem_page - Check if a highmem page is saveable.
1206 *
1207 * Determine whether a highmem page should be included in a hibernation image.
1208 *
1209 * We should save the page if it isn't Nosave or NosaveFree, or Reserved,
1210 * and it isn't part of a free chunk of pages.
1211 */
1212static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1213{
1214	struct page *page;
1215
1216	if (!pfn_valid(pfn))
1217		return NULL;
1218
1219	page = pfn_to_online_page(pfn);
1220	if (!page || page_zone(page) != zone)
1221		return NULL;
1222
1223	BUG_ON(!PageHighMem(page));
1224
1225	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1226		return NULL;
1227
1228	if (PageReserved(page) || PageOffline(page))
1229		return NULL;
1230
1231	if (page_is_guard(page))
1232		return NULL;
1233
1234	return page;
1235}
1236
1237/**
1238 * count_highmem_pages - Compute the total number of saveable highmem pages.
1239 */
1240static unsigned int count_highmem_pages(void)
1241{
1242	struct zone *zone;
1243	unsigned int n = 0;
1244
1245	for_each_populated_zone(zone) {
1246		unsigned long pfn, max_zone_pfn;
1247
1248		if (!is_highmem(zone))
1249			continue;
1250
1251		mark_free_pages(zone);
1252		max_zone_pfn = zone_end_pfn(zone);
1253		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1254			if (saveable_highmem_page(zone, pfn))
1255				n++;
1256	}
1257	return n;
1258}
1259#else
1260static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1261{
1262	return NULL;
1263}
1264#endif /* CONFIG_HIGHMEM */
1265
1266/**
1267 * saveable_page - Check if the given page is saveable.
1268 *
1269 * Determine whether a non-highmem page should be included in a hibernation
1270 * image.
1271 *
1272 * We should save the page if it isn't Nosave, and is not in the range
1273 * of pages statically defined as 'unsaveable', and it isn't part of
1274 * a free chunk of pages.
1275 */
1276static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1277{
1278	struct page *page;
1279
1280	if (!pfn_valid(pfn))
1281		return NULL;
1282
1283	page = pfn_to_online_page(pfn);
1284	if (!page || page_zone(page) != zone)
1285		return NULL;
1286
1287	BUG_ON(PageHighMem(page));
1288
1289	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1290		return NULL;
1291
1292	if (PageOffline(page))
1293		return NULL;
1294
1295	if (PageReserved(page)
1296	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1297		return NULL;
1298
1299	if (page_is_guard(page))
1300		return NULL;
1301
1302	return page;
1303}
1304
1305/**
1306 * count_data_pages - Compute the total number of saveable non-highmem pages.
1307 */
1308static unsigned int count_data_pages(void)
1309{
1310	struct zone *zone;
1311	unsigned long pfn, max_zone_pfn;
1312	unsigned int n = 0;
1313
1314	for_each_populated_zone(zone) {
1315		if (is_highmem(zone))
1316			continue;
1317
1318		mark_free_pages(zone);
1319		max_zone_pfn = zone_end_pfn(zone);
1320		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1321			if (saveable_page(zone, pfn))
1322				n++;
1323	}
1324	return n;
1325}
1326
1327/*
1328 * This is needed, because copy_page and memcpy are not usable for copying
1329 * task structs.
1330 */
1331static inline void do_copy_page(long *dst, long *src)
1332{
1333	int n;
1334
1335	for (n = PAGE_SIZE / sizeof(long); n; n--)
1336		*dst++ = *src++;
1337}
1338
1338
1339/**
1340 * safe_copy_page - Copy a page in a safe way.
1341 *
1342 * Check if the page we are going to copy is marked as present in the kernel
1343 * page tables. This is always the case if CONFIG_DEBUG_PAGEALLOC or
1344 * CONFIG_ARCH_HAS_SET_DIRECT_MAP is not set. In that case kernel_page_present()
1345 * always returns 'true'.
1346 */
1347static void safe_copy_page(void *dst, struct page *s_page)
1348{
1349	if (kernel_page_present(s_page)) {
1350		do_copy_page(dst, page_address(s_page));
1351	} else {
1352		kernel_map_pages(s_page, 1, 1);
1353		do_copy_page(dst, page_address(s_page));
1354		kernel_map_pages(s_page, 1, 0);
1355	}
1356}
1357
1358#ifdef CONFIG_HIGHMEM
1359static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1360{
1361	return is_highmem(zone) ?
1362		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1363}
1364
1365static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1366{
1367	struct page *s_page, *d_page;
1368	void *src, *dst;
1369
1370	s_page = pfn_to_page(src_pfn);
1371	d_page = pfn_to_page(dst_pfn);
1372	if (PageHighMem(s_page)) {
1373		src = kmap_atomic(s_page);
1374		dst = kmap_atomic(d_page);
1375		do_copy_page(dst, src);
1376		kunmap_atomic(dst);
1377		kunmap_atomic(src);
1378	} else {
1379		if (PageHighMem(d_page)) {
1380			/*
1381			 * The page pointed to by src may contain some kernel
1382			 * data modified by kmap_atomic()
1383			 */
1384			safe_copy_page(buffer, s_page);
1385			dst = kmap_atomic(d_page);
1386			copy_page(dst, buffer);
1387			kunmap_atomic(dst);
1388		} else {
1389			safe_copy_page(page_address(d_page), s_page);
1390		}
1391	}
1392}
1393#else
1394#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)
1395
1396static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1397{
1398	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1399				pfn_to_page(src_pfn));
1400}
1401#endif /* CONFIG_HIGHMEM */
1402
1403static void copy_data_pages(struct memory_bitmap *copy_bm,
1404			    struct memory_bitmap *orig_bm)
1405{
1406	struct zone *zone;
1407	unsigned long pfn;
1408
1409	for_each_populated_zone(zone) {
1410		unsigned long max_zone_pfn;
1411
1412		mark_free_pages(zone);
1413		max_zone_pfn = zone_end_pfn(zone);
1414		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1415			if (page_is_saveable(zone, pfn))
1416				memory_bm_set_bit(orig_bm, pfn);
1417	}
1418	memory_bm_position_reset(orig_bm);
1419	memory_bm_position_reset(copy_bm);
1420	for(;;) {
1421		pfn = memory_bm_next_pfn(orig_bm);
1422		if (unlikely(pfn == BM_END_OF_MAP))
1423			break;
1424		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1425	}
1426}
1427
1428/* Total number of image pages */
1429static unsigned int nr_copy_pages;
1430/* Number of pages needed for saving the original pfns of the image pages */
1431static unsigned int nr_meta_pages;
1432/*
1433 * Numbers of normal and highmem page frames allocated for hibernation image
1434 * before suspending devices.
1435 */
1436static unsigned int alloc_normal, alloc_highmem;
1437/*
1438 * Memory bitmap used for marking saveable pages (during hibernation) or
1439 * hibernation image pages (during restore)
1440 */
1441static struct memory_bitmap orig_bm;
1442/*
1443 * Memory bitmap used during hibernation for marking allocated page frames that
1444 * will contain copies of saveable pages.  During restore it is initially used
1445 * for marking hibernation image pages, but then the set bits from it are
1446 * duplicated in @orig_bm and it is released.  On highmem systems it is next
1447 * used for marking "safe" highmem pages, but it has to be reinitialized for
1448 * this purpose.
1449 */
1450static struct memory_bitmap copy_bm;
1451
1452/**
1453 * swsusp_free - Free pages allocated for hibernation image.
1454 *
1455 * Image pages are allocated before snapshot creation, so they need to be
1456 * released after resume.
1457 */
1458void swsusp_free(void)
1459{
1460	unsigned long fb_pfn, fr_pfn;
1461
1462	if (!forbidden_pages_map || !free_pages_map)
1463		goto out;
1464
1465	memory_bm_position_reset(forbidden_pages_map);
1466	memory_bm_position_reset(free_pages_map);
1467
1468loop:
1469	fr_pfn = memory_bm_next_pfn(free_pages_map);
1470	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1471
1472	/*
1473	 * Find the next bit set in both bitmaps: the loop is guaranteed to
1474	 * terminate because both streams eventually reach BM_END_OF_MAP.
1475	 */
1476	do {
1477		if (fb_pfn < fr_pfn)
1478			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1479		if (fr_pfn < fb_pfn)
1480			fr_pfn = memory_bm_next_pfn(free_pages_map);
1481	} while (fb_pfn != fr_pfn);
1482
1483	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1484		struct page *page = pfn_to_page(fr_pfn);
1485
1486		memory_bm_clear_current(forbidden_pages_map);
1487		memory_bm_clear_current(free_pages_map);
1488		hibernate_restore_unprotect_page(page_address(page));
1489		__free_page(page);
1490		goto loop;
1491	}
1492
1493out:
1494	nr_copy_pages = 0;
1495	nr_meta_pages = 0;
1496	restore_pblist = NULL;
1497	buffer = NULL;
1498	alloc_normal = 0;
1499	alloc_highmem = 0;
1500	hibernate_restore_protection_end();
1501}
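
/*
 * A minimal sketch of the intersection walk above, with two sorted,
 * ULONG_MAX-terminated arrays standing in for the "forbidden" and "free"
 * bitmaps (ULONG_MAX plays the role of BM_END_OF_MAP).  The helper below
 * is hypothetical and for illustration only; each pointer increment
 * corresponds to one memory_bm_next_pfn() call.
 */
static unsigned long next_common_pfn(const unsigned long *fb,
				     const unsigned long *fr)
{
	/* Advance whichever stream is behind until both report one PFN. */
	while (*fb != *fr) {
		if (*fb < *fr)
			fb++;	/* ~ memory_bm_next_pfn(forbidden_pages_map) */
		else
			fr++;	/* ~ memory_bm_next_pfn(free_pages_map) */
	}
	/* The common PFN, or ULONG_MAX if the streams only meet at the end. */
	return *fb;
}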
1502
1503/* Helper functions used for the shrinking of memory. */
1504
1505#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)
1506
1507/**
1508 * preallocate_image_pages - Allocate a number of pages for hibernation image.
1509 * @nr_pages: Number of page frames to allocate.
1510 * @mask: GFP flags to use for the allocation.
1511 *
1512 * Return value: Number of page frames actually allocated
1513 */
1514static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1515{
1516	unsigned long nr_alloc = 0;
1517
1518	while (nr_pages > 0) {
1519		struct page *page;
1520
1521		page = alloc_image_page(mask);
1522		if (!page)
1523			break;
1524		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1525		if (PageHighMem(page))
1526			alloc_highmem++;
1527		else
1528			alloc_normal++;
1529		nr_pages--;
1530		nr_alloc++;
1531	}
1532
1533	return nr_alloc;
1534}
1535
1536static unsigned long preallocate_image_memory(unsigned long nr_pages,
1537					      unsigned long avail_normal)
1538{
1539	unsigned long alloc;
1540
1541	if (avail_normal <= alloc_normal)
1542		return 0;
1543
1544	alloc = avail_normal - alloc_normal;
1545	if (nr_pages < alloc)
1546		alloc = nr_pages;
1547
1548	return preallocate_image_pages(alloc, GFP_IMAGE);
1549}
1550
1551#ifdef CONFIG_HIGHMEM
1552static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1553{
1554	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1555}
1556
1557/**
1558 *  __fraction - Compute (an approximation of) x * (multiplier / base).
1559 */
1560static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1561{
1562	x *= multiplier;
1563	do_div(x, base);
1564	return (unsigned long)x;
1565}
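
/*
 * Worked example (hypothetical numbers): to spread an allocation of
 * x = 1000 pages across zones in proportion to highmem = 3 of every
 * total = 4 pages, __fraction(1000, 3, 4) computes 1000 * 3 = 3000 and
 * then 3000 / 4 = 750 via do_div(), so 750 pages come from highmem.
 * The multiplication is done in 64 bits to avoid overflow before the
 * division.
 */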
1566
1567static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1568						  unsigned long highmem,
1569						  unsigned long total)
1570{
1571	unsigned long alloc = __fraction(nr_pages, highmem, total);
1572
1573	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1574}
1575#else /* CONFIG_HIGHMEM */
1576static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1577{
1578	return 0;
1579}
1580
1581static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1582							 unsigned long highmem,
1583							 unsigned long total)
1584{
1585	return 0;
1586}
1587#endif /* CONFIG_HIGHMEM */
1588
1589/**
1590 * free_unnecessary_pages - Release preallocated pages not needed for the image.
1591 */
1592static unsigned long free_unnecessary_pages(void)
1593{
1594	unsigned long save, to_free_normal, to_free_highmem, free;
1595
1596	save = count_data_pages();
1597	if (alloc_normal >= save) {
1598		to_free_normal = alloc_normal - save;
1599		save = 0;
1600	} else {
1601		to_free_normal = 0;
1602		save -= alloc_normal;
1603	}
1604	save += count_highmem_pages();
1605	if (alloc_highmem >= save) {
1606		to_free_highmem = alloc_highmem - save;
1607	} else {
1608		to_free_highmem = 0;
1609		save -= alloc_highmem;
1610		if (to_free_normal > save)
1611			to_free_normal -= save;
1612		else
1613			to_free_normal = 0;
1614	}
1615	free = to_free_normal + to_free_highmem;
1616
1617	memory_bm_position_reset(&copy_bm);
1618
1619	while (to_free_normal > 0 || to_free_highmem > 0) {
1620		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1621		struct page *page = pfn_to_page(pfn);
1622
1623		if (PageHighMem(page)) {
1624			if (!to_free_highmem)
1625				continue;
1626			to_free_highmem--;
1627			alloc_highmem--;
1628		} else {
1629			if (!to_free_normal)
1630				continue;
1631			to_free_normal--;
1632			alloc_normal--;
1633		}
1634		memory_bm_clear_bit(&copy_bm, pfn);
1635		swsusp_unset_page_forbidden(page);
1636		swsusp_unset_page_free(page);
1637		__free_page(page);
1638	}
1639
1640	return free;
1641}
1642
1643/**
1644 * minimum_image_size - Estimate the minimum acceptable size of an image.
1645 * @saveable: Number of saveable pages in the system.
1646 *
1647 * We want to avoid trying too hard to free too much memory, so estimate the
1648 * minimum acceptable size of a hibernation image to use as the lower limit for
1649 * preallocating memory.
1650 *
1651 * We assume that the minimum image size should be proportional to
1652 *
1653 * [number of saveable pages] - [number of pages that can be freed in theory]
1654 *
1655 * where the second term is the sum of (1) reclaimable slab pages, (2) active
1656 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages.
1657 */
1658static unsigned long minimum_image_size(unsigned long saveable)
1659{
1660	unsigned long size;
1661
1662	size = global_node_page_state(NR_SLAB_RECLAIMABLE)
1663		+ global_node_page_state(NR_ACTIVE_ANON)
1664		+ global_node_page_state(NR_INACTIVE_ANON)
1665		+ global_node_page_state(NR_ACTIVE_FILE)
1666		+ global_node_page_state(NR_INACTIVE_FILE);
1667
1668	return saveable <= size ? 0 : saveable - size;
1669}
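
/*
 * Worked example (hypothetical counts): with saveable = 200000 pages and
 * 30000 reclaimable slab + 50000 anonymous + 60000 file pages, size =
 * 140000 and the estimated minimum image is 200000 - 140000 = 60000
 * pages.  If the freeable sum meets or exceeds saveable, the estimate is
 * 0 and preallocation is bounded only by image_size.
 */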
1670
1671/**
1672 * hibernate_preallocate_memory - Preallocate memory for hibernation image.
1673 *
1674 * To create a hibernation image it is necessary to make a copy of every page
1675 * frame in use.  We also need a number of page frames to be free during
1676 * hibernation for allocations made while saving the image and for device
1677 * drivers, in case they need to allocate memory from their hibernation
1678 * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1679 * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1680 * /sys/power/reserved_size), respectively).  To make this happen, we compute the
1681 * total number of available page frames and allocate at least
1682 *
1683 * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1684 *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1685 *
1686 * of them, which corresponds to the maximum size of a hibernation image.
1687 *
1688 * If image_size is set below the number following from the above formula,
1689 * the preallocation of memory is continued until the total number of saveable
1690 * pages in the system is below the requested image size or the minimum
1691 * acceptable image size returned by minimum_image_size(), whichever is greater.
1692 */
1693int hibernate_preallocate_memory(void)
1694{
1695	struct zone *zone;
1696	unsigned long saveable, size, max_size, count, highmem, pages = 0;
1697	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1698	ktime_t start, stop;
1699	int error;
1700
1701	pr_info("Preallocating image memory... ");
1702	start = ktime_get();
1703
1704	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1705	if (error)
1706		goto err_out;
1707
1708	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1709	if (error)
1710		goto err_out;
1711
1712	alloc_normal = 0;
1713	alloc_highmem = 0;
1714
1715	/* Count the number of saveable data pages. */
1716	save_highmem = count_highmem_pages();
1717	saveable = count_data_pages();
1718
1719	/*
1720	 * Compute the total number of page frames we can use (count) and the
1721	 * number of pages needed for image metadata (size).
1722	 */
1723	count = saveable;
1724	saveable += save_highmem;
1725	highmem = save_highmem;
1726	size = 0;
1727	for_each_populated_zone(zone) {
1728		size += snapshot_additional_pages(zone);
1729		if (is_highmem(zone))
1730			highmem += zone_page_state(zone, NR_FREE_PAGES);
1731		else
1732			count += zone_page_state(zone, NR_FREE_PAGES);
1733	}
1734	avail_normal = count;
1735	count += highmem;
1736	count -= totalreserve_pages;
1737
1738	/* Add number of pages required for page keys (s390 only). */
1739	size += page_key_additional_pages(saveable);
1740
1741	/* Compute the maximum number of saveable pages to leave in memory. */
1742	max_size = (count - (size + PAGES_FOR_IO)) / 2
1743			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1744	/* Compute the desired number of image pages specified by image_size. */
1745	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1746	if (size > max_size)
1747		size = max_size;
1748	/*
1749	 * If the desired number of image pages is at least as large as the
1750	 * current number of saveable pages in memory, allocate page frames for
1751	 * the image and we're done.
1752	 */
1753	if (size >= saveable) {
1754		pages = preallocate_image_highmem(save_highmem);
1755		pages += preallocate_image_memory(saveable - pages, avail_normal);
1756		goto out;
1757	}
1758
1759	/* Estimate the minimum size of the image. */
1760	pages = minimum_image_size(saveable);
1761	/*
1762	 * To avoid excessive pressure on the normal zone, leave room in it to
1763	 * accommodate an image of the minimum size (unless it's already too
1764	 * small, in which case don't preallocate pages from it at all).
1765	 */
1766	if (avail_normal > pages)
1767		avail_normal -= pages;
1768	else
1769		avail_normal = 0;
1770	if (size < pages)
1771		size = min_t(unsigned long, pages, max_size);
1772
1773	/*
1774	 * Let the memory management subsystem know that we're going to need a
1775	 * large number of page frames to allocate and make it free some memory.
1776	 * NOTE: If this is not done, performance will be hurt badly in some
1777	 * test cases.
1778	 */
1779	shrink_all_memory(saveable - size);
1780
1781	/*
1782	 * The number of saveable pages in memory was too high, so apply some
1783	 * pressure to decrease it.  First, make room for the largest possible
1784	 * image and fail if that doesn't work.  Next, try to decrease the size
1785	 * of the image as much as indicated by 'size' using allocations from
1786	 * highmem and non-highmem zones separately.
1787	 */
1788	pages_highmem = preallocate_image_highmem(highmem / 2);
1789	alloc = count - max_size;
1790	if (alloc > pages_highmem)
1791		alloc -= pages_highmem;
1792	else
1793		alloc = 0;
1794	pages = preallocate_image_memory(alloc, avail_normal);
1795	if (pages < alloc) {
1796		/* We have exhausted non-highmem pages, try highmem. */
1797		alloc -= pages;
1798		pages += pages_highmem;
1799		pages_highmem = preallocate_image_highmem(alloc);
1800		if (pages_highmem < alloc)
1801			goto err_out;
1802		pages += pages_highmem;
1803		/*
1804		 * size is the desired number of saveable pages to leave in
1805		 * memory, so try to preallocate (all memory - size) pages.
1806		 */
1807		alloc = (count - pages) - size;
1808		pages += preallocate_image_highmem(alloc);
1809	} else {
1810		/*
1811		 * There are approximately max_size saveable pages at this point
1812		 * and we want to reduce this number down to size.
1813		 */
1814		alloc = max_size - size;
1815		size = preallocate_highmem_fraction(alloc, highmem, count);
1816		pages_highmem += size;
1817		alloc -= size;
1818		size = preallocate_image_memory(alloc, avail_normal);
1819		pages_highmem += preallocate_image_highmem(alloc - size);
1820		pages += pages_highmem + size;
1821	}
1822
1823	/*
1824	 * We only need as many page frames for the image as there are saveable
1825	 * pages in memory, but we have allocated more.  Release the excessive
1826	 * ones now.
1827	 */
1828	pages -= free_unnecessary_pages();
1829
1830 out:
1831	stop = ktime_get();
1832	pr_cont("done (allocated %lu pages)\n", pages);
1833	swsusp_show_speed(start, stop, pages, "Allocated");
1834
1835	return 0;
1836
1837 err_out:
1838	pr_cont("\n");
1839	swsusp_free();
1840	return -ENOMEM;
1841}
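
/*
 * Worked example for the max_size computation above (hypothetical
 * numbers): on a machine with count = 250000 usable page frames,
 * size = 500 metadata pages, PAGES_FOR_IO = 1024 and reserved_size of
 * 1 MiB (256 pages with 4 KiB pages):
 *
 *	max_size = (250000 - (500 + 1024)) / 2 - 2 * 256
 *	         = 248476 / 2 - 512 = 123726
 *
 * so at most ~124k saveable pages may be left in memory, and a smaller
 * image_size can only lower that bound, never raise it.
 */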
1842
1843#ifdef CONFIG_HIGHMEM
1844/**
1845 * count_pages_for_highmem - Count non-highmem pages needed for copying highmem.
1846 *
1847 * Compute the number of non-highmem pages that will be necessary for creating
1848 * copies of highmem pages.
1849 */
1850static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1851{
1852	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1853
1854	if (free_highmem >= nr_highmem)
1855		nr_highmem = 0;
1856	else
1857		nr_highmem -= free_highmem;
1858
1859	return nr_highmem;
1860}
1861#else
1862static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1863#endif /* CONFIG_HIGHMEM */
1864
1865/**
1866 * enough_free_mem - Check if there is enough free memory for the image.
1867 */
1868static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1869{
1870	struct zone *zone;
1871	unsigned int free = alloc_normal;
1872
1873	for_each_populated_zone(zone)
1874		if (!is_highmem(zone))
1875			free += zone_page_state(zone, NR_FREE_PAGES);
1876
1877	nr_pages += count_pages_for_highmem(nr_highmem);
1878	pr_debug("Normal pages needed: %u + %u, available pages: %u\n",
1879		 nr_pages, PAGES_FOR_IO, free);
1880
1881	return free > nr_pages + PAGES_FOR_IO;
1882}
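
/*
 * Example reading of the check above (hypothetical numbers): if 50000
 * normal pages must be copied, 10000 highmem pages must be copied, and
 * only 8000 free highmem page frames exist, count_pages_for_highmem()
 * reports 2000, so the test requires free > 50000 + 2000 + PAGES_FOR_IO
 * (strictly more, since the comparison is '>' rather than '>=').
 */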
1883
1884#ifdef CONFIG_HIGHMEM
1885/**
1886 * get_highmem_buffer - Allocate a buffer for highmem pages.
1887 *
1888 * If there are some highmem pages in the hibernation image, we may need a
1889 * buffer to copy them and/or load their data.
1890 */
1891static inline int get_highmem_buffer(int safe_needed)
1892{
1893	buffer = get_image_page(GFP_ATOMIC, safe_needed);
1894	return buffer ? 0 : -ENOMEM;
1895}
1896
1897/**
1898 * alloc_highmem_pages - Allocate some highmem pages for the image.
1899 *
1900 * Try to allocate as many pages as needed, but if the number of free highmem
1901 * pages is less than that, allocate them all.
1902 */
1903static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1904					       unsigned int nr_highmem)
1905{
1906	unsigned int to_alloc = count_free_highmem_pages();
1907
1908	if (to_alloc > nr_highmem)
1909		to_alloc = nr_highmem;
1910
1911	nr_highmem -= to_alloc;
1912	while (to_alloc-- > 0) {
1913		struct page *page;
1914
1915		page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1916		memory_bm_set_bit(bm, page_to_pfn(page));
1917	}
1918	return nr_highmem;
1919}
1920#else
1921static inline int get_highmem_buffer(int safe_needed) { return 0; }
1922
1923static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1924					       unsigned int n) { return 0; }
1925#endif /* CONFIG_HIGHMEM */
1926
1927/**
1928 * swsusp_alloc - Allocate memory for hibernation image.
1929 *
1930 * We first try to allocate as many highmem pages as there are
1931 * saveable highmem pages in the system.  If that fails, we allocate
1932 * non-highmem pages for the copies of the remaining highmem ones.
1933 *
1934 * In this approach it is likely that the copies of highmem pages will
1935 * also be located in the high memory, because of the way in which
1936 * copy_data_pages() works.
1937 */
1938static int swsusp_alloc(struct memory_bitmap *copy_bm,
1939			unsigned int nr_pages, unsigned int nr_highmem)
1940{
1941	if (nr_highmem > 0) {
1942		if (get_highmem_buffer(PG_ANY))
1943			goto err_out;
1944		if (nr_highmem > alloc_highmem) {
1945			nr_highmem -= alloc_highmem;
1946			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1947		}
1948	}
1949	if (nr_pages > alloc_normal) {
1950		nr_pages -= alloc_normal;
1951		while (nr_pages-- > 0) {
1952			struct page *page;
1953
1954			page = alloc_image_page(GFP_ATOMIC);
1955			if (!page)
1956				goto err_out;
1957			memory_bm_set_bit(copy_bm, page_to_pfn(page));
1958		}
1959	}
1960
1961	return 0;
1962
1963 err_out:
1964	swsusp_free();
1965	return -ENOMEM;
1966}
1967
1968asmlinkage __visible int swsusp_save(void)
1969{
1970	unsigned int nr_pages, nr_highmem;
1971
1972	pr_info("Creating hibernation image:\n");
1973
1974	drain_local_pages(NULL);
1975	nr_pages = count_data_pages();
1976	nr_highmem = count_highmem_pages();
1977	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
1978
1979	if (!enough_free_mem(nr_pages, nr_highmem)) {
1980		pr_err("Not enough free memory\n");
1981		return -ENOMEM;
1982	}
1983
1984	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
1985		pr_err("Memory allocation failed\n");
1986		return -ENOMEM;
1987	}
1988
1989	/*
1990	 * While allocating the suspend pagedir, new cold pages may appear.
1991	 * Kill them.
1992	 */
1993	drain_local_pages(NULL);
1994	copy_data_pages(&copy_bm, &orig_bm);
1995
1996	/*
1997	 * End of critical section.  From now on, we can write to memory,
1998	 * but we should not touch disk.  In particular, this means we must
1999	 * _not_ touch swap space, except of course to write out our image.
2000	 */
2001
2002	nr_pages += nr_highmem;
2003	nr_copy_pages = nr_pages;
2004	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
2005
2006	pr_info("Hibernation image created (%d pages copied)\n", nr_pages);
2007
2008	return 0;
2009}
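
/*
 * Worked example for nr_meta_pages (hypothetical numbers): each image
 * page contributes one sizeof(long) PFN entry to the metadata, so with
 * nr_pages = 100000, 8-byte longs and 4 KiB pages:
 *
 *	DIV_ROUND_UP(100000 * 8, 4096) = DIV_ROUND_UP(800000, 4096) = 196
 *
 * metadata pages precede the copied data pages in the image stream.
 */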
2010
2011#ifndef CONFIG_ARCH_HIBERNATION_HEADER
2012static int init_header_complete(struct swsusp_info *info)
2013{
2014	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
2015	info->version_code = LINUX_VERSION_CODE;
2016	return 0;
2017}
2018
2019static char *check_image_kernel(struct swsusp_info *info)
2020{
2021	if (info->version_code != LINUX_VERSION_CODE)
2022		return "kernel version";
2023	if (strcmp(info->uts.sysname,init_utsname()->sysname))
2024		return "system type";
2025	if (strcmp(info->uts.release,init_utsname()->release))
2026		return "kernel release";
2027	if (strcmp(info->uts.version,init_utsname()->version))
2028		return "version";
2029	if (strcmp(info->uts.machine,init_utsname()->machine))
2030		return "machine";
2031	return NULL;
2032}
2033#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
2034
2035unsigned long snapshot_get_image_size(void)
2036{
2037	return nr_copy_pages + nr_meta_pages + 1;
2038}
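
/*
 * Example (hypothetical numbers): an image with nr_copy_pages = 100000
 * and nr_meta_pages = 196 occupies 100000 + 196 + 1 = 100197 pages on
 * storage, the extra page being the swsusp_info header.
 */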
2039
2040static int init_header(struct swsusp_info *info)
2041{
2042	memset(info, 0, sizeof(struct swsusp_info));
2043	info->num_physpages = get_num_physpages();
2044	info->image_pages = nr_copy_pages;
2045	info->pages = snapshot_get_image_size();
2046	info->size = info->pages;
2047	info->size <<= PAGE_SHIFT;
2048	return init_header_complete(info);
2049}
2050
2051/**
2052 * pack_pfns - Prepare PFNs for saving.
2053 * @bm: Memory bitmap.
2054 * @buf: Memory buffer to store the PFNs in.
2055 *
2056 * PFNs corresponding to set bits in @bm are stored in the area of memory
2057 * pointed to by @buf (1 page at a time).
2058 */
2059static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
2060{
2061	int j;
2062
2063	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2064		buf[j] = memory_bm_next_pfn(bm);
2065		if (unlikely(buf[j] == BM_END_OF_MAP))
2066			break;
2067		/* Save page key for data page (s390 only). */
2068		page_key_read(buf + j);
2069	}
2070}
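
/*
 * Sketch of one metadata page produced by pack_pfns() (assuming 8-byte
 * longs and 4 KiB pages, i.e. 512 slots): the slots hold the PFNs of
 * consecutive image pages, and the page is terminated early by
 * BM_END_OF_MAP if fewer image pages remain, e.g.:
 *
 *	buf[0] = 0x1a00;	(first image page lived at PFN 0x1a00)
 *	buf[1] = 0x1a01;
 *	...
 *	buf[n] = BM_END_OF_MAP;	(only if the image ends in this page)
 *
 * unpack_orig_pfns(), further below, performs the inverse operation at
 * restore time.
 */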
2071
2072/**
2073 * snapshot_read_next - Get the address to read the next image page from.
2074 * @handle: Snapshot handle to be used for the reading.
2075 *
2076 * On the first call, @handle should point to a zeroed snapshot_handle
2077 * structure.  The structure gets populated then and a pointer to it should be
2078 * passed to this function on every subsequent call.
2079 *
2080 * On success, the function returns a positive number.  Then, the caller
2081 * is allowed to read up to the returned number of bytes from the memory
2082 * location computed by the data_of() macro.
2083 *
2084 * The function returns 0 to indicate the end of the data stream condition,
2085 * and negative numbers are returned on errors.  If that happens, the structure
2086 * pointed to by @handle is not updated and should not be used any more.
2087 */
2088int snapshot_read_next(struct snapshot_handle *handle)
2089{
2090	if (handle->cur > nr_meta_pages + nr_copy_pages)
2091		return 0;
2092
2093	if (!buffer) {
2094		/* This makes the buffer be freed by swsusp_free() */
2095		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2096		if (!buffer)
2097			return -ENOMEM;
2098	}
2099	if (!handle->cur) {
2100		int error;
2101
2102		error = init_header((struct swsusp_info *)buffer);
2103		if (error)
2104			return error;
2105		handle->buffer = buffer;
2106		memory_bm_position_reset(&orig_bm);
2107		memory_bm_position_reset(&copy_bm);
2108	} else if (handle->cur <= nr_meta_pages) {
2109		clear_page(buffer);
2110		pack_pfns(buffer, &orig_bm);
2111	} else {
2112		struct page *page;
2113
2114		page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2115		if (PageHighMem(page)) {
2116			/*
2117			 * Highmem pages are copied to the buffer,
2118			 * because we can't return with a kmapped
2119			 * highmem page (we may not be called again).
2120			 */
2121			void *kaddr;
2122
2123			kaddr = kmap_atomic(page);
2124			copy_page(buffer, kaddr);
2125			kunmap_atomic(kaddr);
2126			handle->buffer = buffer;
2127		} else {
2128			handle->buffer = page_address(page);
2129		}
2130	}
2131	handle->cur++;
2132	return PAGE_SIZE;
2133}
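
/*
 * A minimal sketch of how a caller is expected to drive this function
 * when saving the image; write_page_to_storage() is a hypothetical
 * stand-in for the swap or user-space transport actually used:
 *
 *	int error;
 *
 *	while ((error = snapshot_read_next(&handle)) > 0) {
 *		error = write_page_to_storage(data_of(handle));
 *		if (error)
 *			break;
 *	}
 *	(error == 0 here means the end of the image stream was reached)
 */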
2134
2135static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2136				    struct memory_bitmap *src)
2137{
2138	unsigned long pfn;
2139
2140	memory_bm_position_reset(src);
2141	pfn = memory_bm_next_pfn(src);
2142	while (pfn != BM_END_OF_MAP) {
2143		memory_bm_set_bit(dst, pfn);
2144		pfn = memory_bm_next_pfn(src);
2145	}
2146}
2147
2148/**
2149 * mark_unsafe_pages - Mark pages that were used before hibernation.
2150 *
2151 * Mark the pages that cannot be used for storing the image during restoration,
2152 * because they conflict with the pages that had been used before hibernation.
2153 */
2154static void mark_unsafe_pages(struct memory_bitmap *bm)
2155{
2156	unsigned long pfn;
2157
2158	/* Clear the "free"/"unsafe" bit for all PFNs */
2159	memory_bm_position_reset(free_pages_map);
2160	pfn = memory_bm_next_pfn(free_pages_map);
2161	while (pfn != BM_END_OF_MAP) {
2162		memory_bm_clear_current(free_pages_map);
2163		pfn = memory_bm_next_pfn(free_pages_map);
2164	}
2165
2166	/* Mark pages that correspond to the "original" PFNs as "unsafe" */
2167	duplicate_memory_bitmap(free_pages_map, bm);
2168
2169	allocated_unsafe_pages = 0;
2170}
2171
2172static int check_header(struct swsusp_info *info)
2173{
2174	char *reason;
2175
2176	reason = check_image_kernel(info);
2177	if (!reason && info->num_physpages != get_num_physpages())
2178		reason = "memory size";
2179	if (reason) {
2180		pr_err("Image mismatch: %s\n", reason);
2181		return -EPERM;
2182	}
2183	return 0;
2184}
2185
2186/**
2187 * load_header - Check the image header and copy the data from it.
2188 */
2189static int load_header(struct swsusp_info *info)
2190{
2191	int error;
2192
2193	restore_pblist = NULL;
2194	error = check_header(info);
2195	if (!error) {
2196		nr_copy_pages = info->image_pages;
2197		nr_meta_pages = info->pages - info->image_pages - 1;
2198	}
2199	return error;
2200}
2201
2202/**
2203 * unpack_orig_pfns - Set bits corresponding to given PFNs in a memory bitmap.
2204 * @bm: Memory bitmap.
2205 * @buf: Area of memory containing the PFNs.
2206 *
2207 * For each element of the array pointed to by @buf (1 page at a time), set the
2208 * corresponding bit in @bm.
2209 */
2210static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2211{
2212	int j;
2213
2214	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2215		if (unlikely(buf[j] == BM_END_OF_MAP))
2216			break;
2217
2218		/* Extract and buffer page key for data page (s390 only). */
2219		page_key_memorize(buf + j);
2220
2221		if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2222			memory_bm_set_bit(bm, buf[j]);
2223		else
2224			return -EFAULT;
2225	}
2226
2227	return 0;
2228}
2229
2230#ifdef CONFIG_HIGHMEM
2231/*
2232 * struct highmem_pbe is used for creating the list of highmem pages that
2233 * should be restored atomically during the resume from disk, because the page
2234 * frames they have occupied before the suspend are in use.
2235 */
2236struct highmem_pbe {
2237	struct page *copy_page;	/* data is here now */
2238	struct page *orig_page;	/* data was here before the suspend */
2239	struct highmem_pbe *next;
2240};
2241
2242/*
2243 * List of highmem PBEs needed for restoring the highmem pages that were
2244 * allocated before the suspend and included in the suspend image, but have
2245 * also been allocated by the "resume" kernel, so their contents cannot be
2246 * written directly to their "original" page frames.
2247 */
2248static struct highmem_pbe *highmem_pblist;
2249
2250/**
2251 * count_highmem_image_pages - Compute the number of highmem pages in the image.
2252 * @bm: Memory bitmap.
2253 *
2254 * The bits in @bm that correspond to image pages are assumed to be set.
2255 */
2256static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2257{
2258	unsigned long pfn;
2259	unsigned int cnt = 0;
2260
2261	memory_bm_position_reset(bm);
2262	pfn = memory_bm_next_pfn(bm);
2263	while (pfn != BM_END_OF_MAP) {
2264		if (PageHighMem(pfn_to_page(pfn)))
2265			cnt++;
2266
2267		pfn = memory_bm_next_pfn(bm);
2268	}
2269	return cnt;
2270}
2271
2272static unsigned int safe_highmem_pages;
2273
2274static struct memory_bitmap *safe_highmem_bm;
2275
2276/**
2277 * prepare_highmem_image - Allocate memory for loading highmem data from image.
2278 * @bm: Pointer to an uninitialized memory bitmap structure.
2279 * @nr_highmem_p: Pointer to the number of highmem image pages.
2280 *
2281 * Try to allocate as many highmem pages as there are highmem image pages
2282 * (@nr_highmem_p points to the variable containing the number of highmem image
2283 * pages).  The pages that are "safe" (ie. will not be overwritten when the
2284 * hibernation image is restored entirely) have the corresponding bits set in
2285 * @bm (it must be uninitialized).
2286 *
2287 * NOTE: This function should not be called if there are no highmem image pages.
2288 */
2289static int prepare_highmem_image(struct memory_bitmap *bm,
2290				 unsigned int *nr_highmem_p)
2291{
2292	unsigned int to_alloc;
2293
2294	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2295		return -ENOMEM;
2296
2297	if (get_highmem_buffer(PG_SAFE))
2298		return -ENOMEM;
2299
2300	to_alloc = count_free_highmem_pages();
2301	if (to_alloc > *nr_highmem_p)
2302		to_alloc = *nr_highmem_p;
2303	else
2304		*nr_highmem_p = to_alloc;
2305
2306	safe_highmem_pages = 0;
2307	while (to_alloc-- > 0) {
2308		struct page *page;
2309
2310		page = alloc_page(__GFP_HIGHMEM);
2311		if (!swsusp_page_is_free(page)) {
2312			/* The page is "safe", set its bit in the bitmap */
2313			memory_bm_set_bit(bm, page_to_pfn(page));
2314			safe_highmem_pages++;
2315		}
2316		/* Mark the page as allocated */
2317		swsusp_set_page_forbidden(page);
2318		swsusp_set_page_free(page);
2319	}
2320	memory_bm_position_reset(bm);
2321	safe_highmem_bm = bm;
2322	return 0;
2323}
2324
2325static struct page *last_highmem_page;
2326
2327/**
2328 * get_highmem_page_buffer - Prepare a buffer to store a highmem image page.
2329 *
2330 * For a given highmem image page get a buffer that suspend_write_next() should
2331 * return to its caller to write to.
2332 *
2333 * If the page is to be saved to its "original" page frame or a copy of
2334 * the page is to be made in the highmem, @buffer is returned.  Otherwise,
2335 * the copy of the page is to be made in normal memory, so the address of
2336 * the copy is returned.
2337 *
2338 * If @buffer is returned, the caller of suspend_write_next() will write
2339 * the page's contents to @buffer, so they will have to be copied to the
2340 * right location on the next call to suspend_write_next() and it is done
2341 * with the help of copy_last_highmem_page().  For this purpose, if
2342 * @buffer is returned, @last_highmem_page is set to the page to which
2343 * the data will have to be copied from @buffer.
2344 */
2345static void *get_highmem_page_buffer(struct page *page,
2346				     struct chain_allocator *ca)
2347{
2348	struct highmem_pbe *pbe;
2349	void *kaddr;
2350
2351	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2352		/*
2353		 * We have allocated the "original" page frame and we can
2354		 * use it directly to store the loaded page.
2355		 */
2356		last_highmem_page = page;
2357		return buffer;
2358	}
2359	/*
2360	 * The "original" page frame has not been allocated and we have to
2361	 * use a "safe" page frame to store the loaded page.
2362	 */
2363	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2364	if (!pbe) {
2365		swsusp_free();
2366		return ERR_PTR(-ENOMEM);
2367	}
2368	pbe->orig_page = page;
2369	if (safe_highmem_pages > 0) {
2370		struct page *tmp;
2371
2372		/* Copy of the page will be stored in high memory */
2373		kaddr = buffer;
2374		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2375		safe_highmem_pages--;
2376		last_highmem_page = tmp;
2377		pbe->copy_page = tmp;
2378	} else {
2379		/* Copy of the page will be stored in normal memory */
2380		kaddr = safe_pages_list;
2381		safe_pages_list = safe_pages_list->next;
2382		pbe->copy_page = virt_to_page(kaddr);
2383	}
2384	pbe->next = highmem_pblist;
2385	highmem_pblist = pbe;
2386	return kaddr;
2387}
2388
2389/**
2390 * copy_last_highmem_page - Copy the most recent highmem image page.
2391 *
2392 * Copy the contents of a highmem image from @buffer, where the caller of
2393 * snapshot_write_next() has stored them, to the right location represented by
2394 * @last_highmem_page.
2395 */
2396static void copy_last_highmem_page(void)
2397{
2398	if (last_highmem_page) {
2399		void *dst;
2400
2401		dst = kmap_atomic(last_highmem_page);
2402		copy_page(dst, buffer);
2403		kunmap_atomic(dst);
2404		last_highmem_page = NULL;
2405	}
2406}
2407
2408static inline int last_highmem_page_copied(void)
2409{
2410	return !last_highmem_page;
2411}
2412
2413static inline void free_highmem_data(void)
2414{
2415	if (safe_highmem_bm)
2416		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2417
2418	if (buffer)
2419		free_image_page(buffer, PG_UNSAFE_CLEAR);
2420}
2421#else
2422static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2423
2424static inline int prepare_highmem_image(struct memory_bitmap *bm,
2425					unsigned int *nr_highmem_p) { return 0; }
2426
2427static inline void *get_highmem_page_buffer(struct page *page,
2428					    struct chain_allocator *ca)
2429{
2430	return ERR_PTR(-EINVAL);
2431}
2432
2433static inline void copy_last_highmem_page(void) {}
2434static inline int last_highmem_page_copied(void) { return 1; }
2435static inline void free_highmem_data(void) {}
2436#endif /* CONFIG_HIGHMEM */
2437
2438#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
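
/*
 * Example (assuming 4 KiB pages and 8-byte pointers): a linked page
 * donates sizeof(void *) bytes to its 'next' field, leaving
 * LINKED_PAGE_DATA_SIZE = 4096 - 8 = 4088 bytes, and struct pbe holds
 * three pointers (24 bytes), so PBES_PER_LINKED_PAGE = 4088 / 24 = 170
 * PBEs fit in each linked page.
 */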
2439
2440/**
2441 * prepare_image - Make room for loading hibernation image.
2442 * @new_bm: Uninitialized memory bitmap structure.
2443 * @bm: Memory bitmap with unsafe pages marked.
2444 *
2445 * Use @bm to mark the pages that will be overwritten in the process of
2446 * restoring the system memory state from the suspend image ("unsafe" pages)
2447 * and allocate memory for the image.
2448 *
2449 * The idea is to allocate a new memory bitmap first and then allocate
2450 * as many pages as needed for image data, but without specifying what those
2451 * pages will be used for just yet.  Instead, we mark them all as allocated and
2452 * create a list of "safe" pages to be used later.  On systems with high
2453 * memory a list of "safe" highmem pages is created too.
2454 */
2455static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2456{
2457	unsigned int nr_pages, nr_highmem;
2458	struct linked_page *lp;
2459	int error;
2460
2461	/* If there is no highmem, the buffer will not be necessary */
2462	free_image_page(buffer, PG_UNSAFE_CLEAR);
2463	buffer = NULL;
2464
2465	nr_highmem = count_highmem_image_pages(bm);
2466	mark_unsafe_pages(bm);
2467
2468	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2469	if (error)
2470		goto Free;
2471
2472	duplicate_memory_bitmap(new_bm, bm);
2473	memory_bm_free(bm, PG_UNSAFE_KEEP);
2474	if (nr_highmem > 0) {
2475		error = prepare_highmem_image(bm, &nr_highmem);
2476		if (error)
2477			goto Free;
2478	}
2479	/*
2480	 * Reserve some safe pages for potential later use.
2481	 *
2482	 * NOTE: This way we make sure there will be enough safe pages for the
2483	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
2484	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2485	 *
2486	 * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2487	 */
2488	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2489	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2490	while (nr_pages > 0) {
2491		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2492		if (!lp) {
2493			error = -ENOMEM;
2494			goto Free;
2495		}
2496		lp->next = safe_pages_list;
2497		safe_pages_list = lp;
2498		nr_pages--;
2499	}
2500	/* Preallocate memory for the image */
2501	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2502	while (nr_pages > 0) {
2503		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2504		if (!lp) {
2505			error = -ENOMEM;
2506			goto Free;
2507		}
2508		if (!swsusp_page_is_free(virt_to_page(lp))) {
2509			/* The page is "safe", add it to the list */
2510			lp->next = safe_pages_list;
2511			safe_pages_list = lp;
2512		}
2513		/* Mark the page as allocated */
2514		swsusp_set_page_forbidden(virt_to_page(lp));
2515		swsusp_set_page_free(virt_to_page(lp));
2516		nr_pages--;
2517	}
2518	return 0;
2519
2520 Free:
2521	swsusp_free();
2522	return error;
2523}
2524
2525/**
2526 * get_buffer - Get the address to store the next image data page.
2527 *
2528 * Get the address that snapshot_write_next() should return to its caller to
2529 * write to.
2530 */
2531static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2532{
2533	struct pbe *pbe;
2534	struct page *page;
2535	unsigned long pfn = memory_bm_next_pfn(bm);
2536
2537	if (pfn == BM_END_OF_MAP)
2538		return ERR_PTR(-EFAULT);
2539
2540	page = pfn_to_page(pfn);
2541	if (PageHighMem(page))
2542		return get_highmem_page_buffer(page, ca);
2543
2544	if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2545		/*
2546		 * We have allocated the "original" page frame and we can
2547		 * use it directly to store the loaded page.
2548		 */
2549		return page_address(page);
2550
2551	/*
2552	 * The "original" page frame has not been allocated and we have to
2553	 * use a "safe" page frame to store the loaded page.
2554	 */
2555	pbe = chain_alloc(ca, sizeof(struct pbe));
2556	if (!pbe) {
2557		swsusp_free();
2558		return ERR_PTR(-ENOMEM);
2559	}
2560	pbe->orig_address = page_address(page);
2561	pbe->address = safe_pages_list;
2562	safe_pages_list = safe_pages_list->next;
2563	pbe->next = restore_pblist;
2564	restore_pblist = pbe;
2565	return pbe->address;
2566}
2567
2568/**
2569 * snapshot_write_next - Get the address to store the next image page.
2570 * @handle: Snapshot handle structure to guide the writing.
2571 *
2572 * On the first call, @handle should point to a zeroed snapshot_handle
2573 * structure.  The structure gets populated then and a pointer to it should be
2574 * passed to this function on every subsequent call.
2575 *
2576 * On success, the function returns a positive number.  Then, the caller
2577 * is allowed to write up to the returned number of bytes to the memory
2578 * location computed by the data_of() macro.
2579 *
2580 * The function returns 0 to indicate the "end of file" condition.  Negative
2581 * numbers are returned on errors, in which case the structure pointed to by
2582 * @handle is not updated and should not be used any more.
2583 */
2584int snapshot_write_next(struct snapshot_handle *handle)
2585{
2586	static struct chain_allocator ca;
2587	int error = 0;
2588
2589	/* Check if we have already loaded the entire image */
2590	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2591		return 0;
2592
2593	handle->sync_read = 1;
2594
2595	if (!handle->cur) {
2596		if (!buffer)
2597			/* This makes the buffer be freed by swsusp_free() */
2598			buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2599
2600		if (!buffer)
2601			return -ENOMEM;
2602
2603		handle->buffer = buffer;
2604	} else if (handle->cur == 1) {
2605		error = load_header(buffer);
2606		if (error)
2607			return error;
2608
2609		safe_pages_list = NULL;
2610
2611		error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2612		if (error)
2613			return error;
2614
2615		/* Allocate buffer for page keys. */
2616		error = page_key_alloc(nr_copy_pages);
2617		if (error)
2618			return error;
2619
2620		hibernate_restore_protection_begin();
2621	} else if (handle->cur <= nr_meta_pages + 1) {
2622		error = unpack_orig_pfns(buffer, &copy_bm);
2623		if (error)
2624			return error;
2625
2626		if (handle->cur == nr_meta_pages + 1) {
2627			error = prepare_image(&orig_bm, &copy_bm);
2628			if (error)
2629				return error;
2630
2631			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2632			memory_bm_position_reset(&orig_bm);
2633			restore_pblist = NULL;
2634			handle->buffer = get_buffer(&orig_bm, &ca);
2635			handle->sync_read = 0;
2636			if (IS_ERR(handle->buffer))
2637				return PTR_ERR(handle->buffer);
2638		}
2639	} else {
2640		copy_last_highmem_page();
2641		/* Restore page key for data page (s390 only). */
2642		page_key_write(handle->buffer);
2643		hibernate_restore_protect_page(handle->buffer);
2644		handle->buffer = get_buffer(&orig_bm, &ca);
2645		if (IS_ERR(handle->buffer))
2646			return PTR_ERR(handle->buffer);
2647		if (handle->buffer != buffer)
2648			handle->sync_read = 0;
2649	}
2650	handle->cur++;
2651	return PAGE_SIZE;
2652}
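
/*
 * A minimal sketch of the restore-side loop around this function;
 * read_page_from_storage() is a hypothetical stand-in for the actual
 * transport:
 *
 *	int error;
 *
 *	while ((error = snapshot_write_next(&handle)) > 0) {
 *		error = read_page_from_storage(data_of(handle));
 *		if (error)
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (error >= 0 && !snapshot_image_loaded(&handle))
 *		error = -ENODATA;
 */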
2653
2654/**
2655 * snapshot_write_finalize - Complete the loading of a hibernation image.
2656 *
2657 * Must be called after the last call to snapshot_write_next() in case the last
2658 * page in the image happens to be a highmem page and its contents should be
2659 * stored in highmem.  Additionally, it recycles bitmap memory that's not
2660 * necessary any more.
2661 */
2662void snapshot_write_finalize(struct snapshot_handle *handle)
2663{
2664	copy_last_highmem_page();
2665	/* Restore page key for data page (s390 only). */
2666	page_key_write(handle->buffer);
2667	page_key_free();
2668	hibernate_restore_protect_page(handle->buffer);
2669	/* Do that only if we have loaded the image entirely */
2670	if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2671		memory_bm_recycle(&orig_bm);
2672		free_highmem_data();
2673	}
2674}
2675
2676int snapshot_image_loaded(struct snapshot_handle *handle)
2677{
2678	return !(!nr_copy_pages || !last_highmem_page_copied() ||
2679			handle->cur <= nr_meta_pages + nr_copy_pages);
2680}
2681
2682#ifdef CONFIG_HIGHMEM
2683/* Assumes that @buf is ready and points to a "safe" page */
2684static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2685				       void *buf)
2686{
2687	void *kaddr1, *kaddr2;
2688
2689	kaddr1 = kmap_atomic(p1);
2690	kaddr2 = kmap_atomic(p2);
2691	copy_page(buf, kaddr1);
2692	copy_page(kaddr1, kaddr2);
2693	copy_page(kaddr2, buf);
2694	kunmap_atomic(kaddr2);
2695	kunmap_atomic(kaddr1);
2696}
2697
2698/**
2699 * restore_highmem - Put highmem image pages into their original locations.
2700 *
2701 * For each highmem page that was in use before hibernation and is included in
2702 * the image, and also has been allocated by the "restore" kernel, swap its
2703 * current contents with the previous (ie. "before hibernation") ones.
2704 *
2705 * If the restore eventually fails, we can call this function once again and
2706 * restore the highmem state as seen by the restore kernel.
2707 */
2708int restore_highmem(void)
2709{
2710	struct highmem_pbe *pbe = highmem_pblist;
2711	void *buf;
2712
2713	if (!pbe)
2714		return 0;
2715
2716	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2717	if (!buf)
2718		return -ENOMEM;
2719
2720	while (pbe) {
2721		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2722		pbe = pbe->next;
2723	}
2724	free_image_page(buf, PG_UNSAFE_CLEAR);
2725	return 0;
2726}
2727#endif /* CONFIG_HIGHMEM */