Linux v5.9 - kernel/power/swap.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * linux/kernel/power/swap.c
   4 *
   5 * This file provides functions for reading the suspend image from
   6 * and writing it to a swap partition.
   7 *
   8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   9 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
  10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  11 */
  12
  13#define pr_fmt(fmt) "PM: " fmt
  14
  15#include <linux/module.h>
  16#include <linux/file.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/genhd.h>
  20#include <linux/device.h>
  21#include <linux/bio.h>
  22#include <linux/blkdev.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/pm.h>
  26#include <linux/slab.h>
  27#include <linux/lzo.h>
  28#include <linux/vmalloc.h>
  29#include <linux/cpumask.h>
  30#include <linux/atomic.h>
  31#include <linux/kthread.h>
  32#include <linux/crc32.h>
  33#include <linux/ktime.h>
  34
  35#include "power.h"
  36
  37#define HIBERNATE_SIG	"S1SUSPEND"
  38
  39/*
   40 * When reading an {un,}compressed image, we may restore pages in place, in
   41 * which case some architectures need these pages to be cleaned before they
   42 * can be executed. We don't know which pages these may be, so clean the lot.
  43 */
  44static bool clean_pages_on_read;
  45static bool clean_pages_on_decompress;
  46
  47/*
  48 *	The swap map is a data structure used for keeping track of each page
  49 *	written to a swap partition.  It consists of many swap_map_page
   50 *	structures, each containing an array of MAP_PAGE_ENTRIES swap entries.
  51 *	These structures are stored on the swap and linked together with the
  52 *	help of the .next_swap member.
  53 *
  54 *	The swap map is created during suspend.  The swap map pages are
  55 *	allocated and populated one at a time, so we only need one memory
  56 *	page to set up the entire structure.
  57 *
   58 *	During resume, we read all of the swap_map_page structures into a list.
  59 */
  60
  61#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
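      /*
       * For orientation (assuming 4 KiB pages and a 64-bit sector_t): one
       * page holds 512 sector_t values, so MAP_PAGE_ENTRIES is 511 -- the
       * remaining slot is the .next_swap link.  Each swap_map_page thus
       * indexes up to 511 image pages and chains to the next map page:
       *
       *   swsusp_header.image -> swap_map_page #0 -> swap_map_page #1 -> ...
       *   (entries[0..510] of each map page point at image data pages)
       */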
  62
  63/*
  64 * Number of free pages that are not high.
  65 */
  66static inline unsigned long low_free_pages(void)
  67{
  68	return nr_free_pages() - nr_free_highpages();
  69}
  70
  71/*
  72 * Number of pages required to be kept free while writing the image. Always
  73 * half of all available low pages before the writing starts.
  74 */
  75static inline unsigned long reqd_free_pages(void)
  76{
  77	return low_free_pages() / 2;
  78}
  79
  80struct swap_map_page {
  81	sector_t entries[MAP_PAGE_ENTRIES];
  82	sector_t next_swap;
  83};
  84
  85struct swap_map_page_list {
  86	struct swap_map_page *map;
  87	struct swap_map_page_list *next;
  88};
  89
  90/**
  91 *	The swap_map_handle structure is used for handling swap in
   92 *	a file-like way.
  93 */
  94
  95struct swap_map_handle {
  96	struct swap_map_page *cur;
  97	struct swap_map_page_list *maps;
  98	sector_t cur_swap;
  99	sector_t first_sector;
 100	unsigned int k;
 101	unsigned long reqd_free_pages;
 102	u32 crc32;
 103};
 104
 105struct swsusp_header {
 106	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
 107	              sizeof(u32)];
 108	u32	crc32;
 109	sector_t image;
 110	unsigned int flags;	/* Flags to pass to the "boot" kernel */
 111	char	orig_sig[10];
 112	char	sig[10];
 113} __packed;
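      /*
       * The reserved[] padding sizes this structure to exactly one page, so
       * sig[] lands in the last 10 bytes of the first page of the resume
       * device -- the same bytes that hold the swap signature ("SWAP-SPACE"
       * or "SWAPSPACE2").  mark_swapfiles() saves that signature in
       * orig_sig[] and overwrites sig[] with HIBERNATE_SIG; swsusp_check()
       * and swsusp_unmark() restore it.
       */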
 114
 115static struct swsusp_header *swsusp_header;
 116
 117/**
  118 *	The following functions are used for tracking the allocated
 119 *	swap pages, so that they can be freed in case of an error.
 120 */
 121
 122struct swsusp_extent {
 123	struct rb_node node;
 124	unsigned long start;
 125	unsigned long end;
 126};
 127
 128static struct rb_root swsusp_extents = RB_ROOT;
 129
 130static int swsusp_extents_insert(unsigned long swap_offset)
 131{
 132	struct rb_node **new = &(swsusp_extents.rb_node);
 133	struct rb_node *parent = NULL;
 134	struct swsusp_extent *ext;
 135
 136	/* Figure out where to put the new node */
 137	while (*new) {
 138		ext = rb_entry(*new, struct swsusp_extent, node);
 139		parent = *new;
 140		if (swap_offset < ext->start) {
 141			/* Try to merge */
 142			if (swap_offset == ext->start - 1) {
 143				ext->start--;
 144				return 0;
 145			}
 146			new = &((*new)->rb_left);
 147		} else if (swap_offset > ext->end) {
 148			/* Try to merge */
 149			if (swap_offset == ext->end + 1) {
 150				ext->end++;
 151				return 0;
 152			}
 153			new = &((*new)->rb_right);
 154		} else {
 155			/* It already is in the tree */
 156			return -EINVAL;
 157		}
 158	}
 159	/* Add the new node and rebalance the tree. */
 160	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
 161	if (!ext)
 162		return -ENOMEM;
 163
 164	ext->start = swap_offset;
 165	ext->end = swap_offset;
 166	rb_link_node(&ext->node, parent, new);
 167	rb_insert_color(&ext->node, &swsusp_extents);
 168	return 0;
 169}
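      /*
       * Example of the merge logic above: inserting offsets 10, 11, 12 in
       * that order grows a single extent [10, 12]; inserting 11 again hits
       * the "already is in the tree" branch and returns -EINVAL.  Merging
       * only ever extends an existing extent by one, so two extents that
       * later become adjacent stay separate -- harmless for the simple
       * alloc/free accounting done here.
       */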
 170
 171/**
 172 *	alloc_swapdev_block - allocate a swap page and register that it has
 173 *	been allocated, so that it can be freed in case of an error.
 174 */
 175
 176sector_t alloc_swapdev_block(int swap)
 177{
 178	unsigned long offset;
 179
 180	offset = swp_offset(get_swap_page_of_type(swap));
 181	if (offset) {
 182		if (swsusp_extents_insert(offset))
 183			swap_free(swp_entry(swap, offset));
 184		else
 185			return swapdev_block(swap, offset);
 186	}
 187	return 0;
 188}
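      /*
       * A zero return doubles as the failure indicator: offset 0 of a swap
       * area holds the swap header and is never handed out by the swap
       * allocator, so sector 0 can never be valid image storage.  Callers
       * such as write_page() rely on this and map a zero offset to -ENOSPC.
       */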
 189
 190/**
 191 *	free_all_swap_pages - free swap pages allocated for saving image data.
 192 *	It also frees the extents used to register which swap entries had been
 193 *	allocated.
 194 */
 195
 196void free_all_swap_pages(int swap)
 197{
 198	struct rb_node *node;
 199
 200	while ((node = swsusp_extents.rb_node)) {
 201		struct swsusp_extent *ext;
 202		unsigned long offset;
 203
 204		ext = rb_entry(node, struct swsusp_extent, node);
 205		rb_erase(node, &swsusp_extents);
 206		for (offset = ext->start; offset <= ext->end; offset++)
 207			swap_free(swp_entry(swap, offset));
 208
 209		kfree(ext);
 210	}
 211}
 212
 213int swsusp_swap_in_use(void)
 214{
 215	return (swsusp_extents.rb_node != NULL);
 216}
 217
 218/*
 219 * General things
 220 */
 221
 222static unsigned short root_swap = 0xffff;
 223static struct block_device *hib_resume_bdev;
 224
 225struct hib_bio_batch {
 226	atomic_t		count;
 227	wait_queue_head_t	wait;
 228	blk_status_t		error;
 229};
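      /*
       * A hib_bio_batch tracks a set of asynchronous bios: count is the
       * number still in flight, wait is where hib_wait_io() sleeps until
       * count reaches zero, and error latches the first failure seen by
       * hib_end_io() (subsequent errors are dropped).
       */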
 230
 231static void hib_init_batch(struct hib_bio_batch *hb)
 232{
 233	atomic_set(&hb->count, 0);
 234	init_waitqueue_head(&hb->wait);
 235	hb->error = BLK_STS_OK;
 236}
 237
 238static void hib_end_io(struct bio *bio)
 239{
 240	struct hib_bio_batch *hb = bio->bi_private;
 241	struct page *page = bio_first_page_all(bio);
 242
 243	if (bio->bi_status) {
 244		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
 245			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
 246			 (unsigned long long)bio->bi_iter.bi_sector);
 247	}
 248
 249	if (bio_data_dir(bio) == WRITE)
 250		put_page(page);
 251	else if (clean_pages_on_read)
 252		flush_icache_range((unsigned long)page_address(page),
 253				   (unsigned long)page_address(page) + PAGE_SIZE);
 254
 255	if (bio->bi_status && !hb->error)
 256		hb->error = bio->bi_status;
 257	if (atomic_dec_and_test(&hb->count))
 258		wake_up(&hb->wait);
 259
 260	bio_put(bio);
 261}
 262
 263static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
 264		struct hib_bio_batch *hb)
 265{
 266	struct page *page = virt_to_page(addr);
 267	struct bio *bio;
 268	int error = 0;
 269
 270	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
 271	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 272	bio_set_dev(bio, hib_resume_bdev);
 273	bio_set_op_attrs(bio, op, op_flags);
 274
 275	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 276		pr_err("Adding page to bio failed at %llu\n",
 277		       (unsigned long long)bio->bi_iter.bi_sector);
 278		bio_put(bio);
 279		return -EFAULT;
 280	}
 281
 282	if (hb) {
 283		bio->bi_end_io = hib_end_io;
 284		bio->bi_private = hb;
 285		atomic_inc(&hb->count);
 286		submit_bio(bio);
 287	} else {
 288		error = submit_bio_wait(bio);
 289		bio_put(bio);
 290	}
 291
 292	return error;
 293}
 294
 295static blk_status_t hib_wait_io(struct hib_bio_batch *hb)
 296{
 297	wait_event(hb->wait, atomic_read(&hb->count) == 0);
 298	return blk_status_to_errno(hb->error);
 299}
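      /*
       * Typical usage, as in save_image() below: submit many pages with
       * hib_submit_io(..., hb) without blocking, then call hib_wait_io()
       * once to drain the whole batch and pick up the first error, if any.
       */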
 300
 301/*
 302 * Saving part
 303 */
 304
 305static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 306{
 307	int error;
 308
 309	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
 310		      swsusp_header, NULL);
  311	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
  312	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
  313		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
 314		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 315		swsusp_header->image = handle->first_sector;
 316		swsusp_header->flags = flags;
 317		if (flags & SF_CRC32_MODE)
 318			swsusp_header->crc32 = handle->crc32;
 319		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
 320				      swsusp_resume_block, swsusp_header, NULL);
 321	} else {
 322		pr_err("Swap header not found!\n");
 323		error = -ENODEV;
 324	}
 325	return error;
 326}
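      /*
       * Once mark_swapfiles() succeeds, the on-disk header carries all the
       * boot kernel needs to find the image: image points at the first
       * swap_map_page, flags selects e.g. compression, and crc32 (with
       * SF_CRC32_MODE set) lets load_image_lzo() verify what it reads back.
       */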
 327
 328/**
 329 *	swsusp_swap_check - check if the resume device is a swap device
 330 *	and get its index (if so)
 331 *
  332 *	This is called before saving the image.
 333 */
 334static int swsusp_swap_check(void)
 335{
 336	int res;
 337
 338	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
 339			&hib_resume_bdev);
 340	if (res < 0)
 341		return res;
 342
 343	root_swap = res;
 344	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 345	if (res)
 346		return res;
 347
 348	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 349	if (res < 0)
 350		blkdev_put(hib_resume_bdev, FMODE_WRITE);
 351
 352	/*
 353	 * Update the resume device to the one actually used,
 354	 * so the test_resume mode can use it in case it is
 355	 * invoked from hibernate() to test the snapshot.
 356	 */
 357	swsusp_resume_device = hib_resume_bdev->bd_dev;
 358	return res;
 359}
 360
 361/**
 362 *	write_page - Write one page to given swap location.
 363 *	@buf:		Address we're writing.
 364 *	@offset:	Offset of the swap page we're writing to.
 365 *	@hb:		bio completion batch
 366 */
 367
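      /*
       * When hb is supplied, the write is asynchronous: buf is copied into a
       * freshly allocated bounce page, which hib_end_io() releases on
       * completion (the put_page() in its WRITE branch), so the caller may
       * reuse buf right away.  If no bounce page can be allocated even after
       * draining the batch with hib_wait_io(), fall back to writing buf
       * synchronously.
       */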
 368static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 369{
 370	void *src;
 371	int ret;
 372
 373	if (!offset)
 374		return -ENOSPC;
 375
 376	if (hb) {
 377		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
 378		                              __GFP_NORETRY);
 379		if (src) {
 380			copy_page(src, buf);
 381		} else {
 382			ret = hib_wait_io(hb); /* Free pages */
 383			if (ret)
 384				return ret;
 385			src = (void *)__get_free_page(GFP_NOIO |
 386			                              __GFP_NOWARN |
 387			                              __GFP_NORETRY);
 388			if (src) {
 389				copy_page(src, buf);
 390			} else {
 391				WARN_ON_ONCE(1);
 392				hb = NULL;	/* Go synchronous */
 393				src = buf;
 394			}
 395		}
 396	} else {
 397		src = buf;
 398	}
 399	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 400}
 401
 402static void release_swap_writer(struct swap_map_handle *handle)
 403{
 404	if (handle->cur)
 405		free_page((unsigned long)handle->cur);
 406	handle->cur = NULL;
 407}
 408
 409static int get_swap_writer(struct swap_map_handle *handle)
 410{
 411	int ret;
 412
 413	ret = swsusp_swap_check();
 414	if (ret) {
 415		if (ret != -ENOSPC)
 416			pr_err("Cannot find swap device, try swapon -a\n");
 417		return ret;
 418	}
 419	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
 420	if (!handle->cur) {
 421		ret = -ENOMEM;
 422		goto err_close;
 423	}
 424	handle->cur_swap = alloc_swapdev_block(root_swap);
 425	if (!handle->cur_swap) {
 426		ret = -ENOSPC;
 427		goto err_rel;
 428	}
 429	handle->k = 0;
 430	handle->reqd_free_pages = reqd_free_pages();
 431	handle->first_sector = handle->cur_swap;
 432	return 0;
 433err_rel:
 434	release_swap_writer(handle);
 435err_close:
 436	swsusp_close(FMODE_WRITE);
 437	return ret;
 438}
 439
 440static int swap_write_page(struct swap_map_handle *handle, void *buf,
 441		struct hib_bio_batch *hb)
 442{
 443	int error = 0;
 444	sector_t offset;
 445
 446	if (!handle->cur)
 447		return -EINVAL;
 448	offset = alloc_swapdev_block(root_swap);
 449	error = write_page(buf, offset, hb);
 450	if (error)
 451		return error;
 452	handle->cur->entries[handle->k++] = offset;
 453	if (handle->k >= MAP_PAGE_ENTRIES) {
 454		offset = alloc_swapdev_block(root_swap);
 455		if (!offset)
 456			return -ENOSPC;
 457		handle->cur->next_swap = offset;
 458		error = write_page(handle->cur, handle->cur_swap, hb);
 459		if (error)
 460			goto out;
 461		clear_page(handle->cur);
 462		handle->cur_swap = offset;
 463		handle->k = 0;
 464
 465		if (hb && low_free_pages() <= handle->reqd_free_pages) {
 466			error = hib_wait_io(hb);
 467			if (error)
 468				goto out;
 469			/*
 470			 * Recalculate the number of required free pages, to
 471			 * make sure we never take more than half.
 472			 */
 473			handle->reqd_free_pages = reqd_free_pages();
 474		}
 475	}
 476 out:
 477	return error;
 478}
 479
 480static int flush_swap_writer(struct swap_map_handle *handle)
 481{
 482	if (handle->cur && handle->cur_swap)
 483		return write_page(handle->cur, handle->cur_swap, NULL);
 484	else
 485		return -EINVAL;
 486}
 487
 488static int swap_writer_finish(struct swap_map_handle *handle,
 489		unsigned int flags, int error)
 490{
 491	if (!error) {
 492		flush_swap_writer(handle);
 493		pr_info("S");
 494		error = mark_swapfiles(handle, flags);
 495		pr_cont("|\n");
 496	}
 497
 498	if (error)
 499		free_all_swap_pages(root_swap);
 500	release_swap_writer(handle);
 501	swsusp_close(FMODE_WRITE);
 502
 503	return error;
 504}
 505
 506/* We need to remember how much compressed data we need to read. */
 507#define LZO_HEADER	sizeof(size_t)
 508
 509/* Number of pages/bytes we'll compress at one time. */
 510#define LZO_UNC_PAGES	32
 511#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)
 512
 513/* Number of pages/bytes we need for compressed data (worst case). */
 514#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
 515			             LZO_HEADER, PAGE_SIZE)
 516#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
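      /*
       * Worked out for 4 KiB pages and a 64-bit size_t: LZO_UNC_SIZE is
       * 32 * 4096 = 128 KiB per compression unit.  lzo1x_worst_compress(x)
       * is x + x/16 + 64 + 3, giving 139331 bytes worst case; adding the
       * 8-byte LZO_HEADER makes 139339 bytes, hence LZO_CMP_PAGES = 35.
       */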
 517
 518/* Maximum number of threads for compression/decompression. */
 519#define LZO_THREADS	3
 520
 521/* Minimum/maximum number of pages for read buffering. */
 522#define LZO_MIN_RD_PAGES	1024
 523#define LZO_MAX_RD_PAGES	8192
 524
 525
 526/**
 527 *	save_image - save the suspend image data
 528 */
 529
 530static int save_image(struct swap_map_handle *handle,
 531                      struct snapshot_handle *snapshot,
 532                      unsigned int nr_to_write)
 533{
 534	unsigned int m;
 535	int ret;
 536	int nr_pages;
 537	int err2;
 538	struct hib_bio_batch hb;
 539	ktime_t start;
 540	ktime_t stop;
 541
 542	hib_init_batch(&hb);
 543
 544	pr_info("Saving image data pages (%u pages)...\n",
 545		nr_to_write);
 546	m = nr_to_write / 10;
 547	if (!m)
 548		m = 1;
 549	nr_pages = 0;
 550	start = ktime_get();
 551	while (1) {
 552		ret = snapshot_read_next(snapshot);
 553		if (ret <= 0)
 554			break;
 555		ret = swap_write_page(handle, data_of(*snapshot), &hb);
 556		if (ret)
 557			break;
 558		if (!(nr_pages % m))
 559			pr_info("Image saving progress: %3d%%\n",
 560				nr_pages / m * 10);
 561		nr_pages++;
 562	}
 563	err2 = hib_wait_io(&hb);
 564	stop = ktime_get();
 565	if (!ret)
 566		ret = err2;
 567	if (!ret)
 568		pr_info("Image saving done\n");
 569	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 570	return ret;
 571}
 572
 573/**
 574 * Structure used for CRC32.
 575 */
 576struct crc_data {
 577	struct task_struct *thr;                  /* thread */
 578	atomic_t ready;                           /* ready to start flag */
 579	atomic_t stop;                            /* ready to stop flag */
 580	unsigned run_threads;                     /* nr current threads */
 581	wait_queue_head_t go;                     /* start crc update */
 582	wait_queue_head_t done;                   /* crc update done */
 583	u32 *crc32;                               /* points to handle's crc32 */
 584	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
 585	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
 586};
 587
 588/**
 589 * CRC32 update function that runs in its own thread.
 590 */
 591static int crc32_threadfn(void *data)
 592{
 593	struct crc_data *d = data;
 594	unsigned i;
 595
 596	while (1) {
 597		wait_event(d->go, atomic_read(&d->ready) ||
 598		                  kthread_should_stop());
 599		if (kthread_should_stop()) {
 600			d->thr = NULL;
 601			atomic_set(&d->stop, 1);
 602			wake_up(&d->done);
 603			break;
 604		}
 605		atomic_set(&d->ready, 0);
 606
 607		for (i = 0; i < d->run_threads; i++)
 608			*d->crc32 = crc32_le(*d->crc32,
 609			                     d->unc[i], *d->unc_len[i]);
 610		atomic_set(&d->stop, 1);
 611		wake_up(&d->done);
 612	}
 613	return 0;
 614}
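      /*
       * The handshake used by this thread (and by the LZO threads below) is
       * a simple two-flag protocol: the main thread fills the buffers, sets
       * ->ready and wakes ->go; the worker clears ->ready, does its work,
       * sets ->stop and wakes ->done; the main thread then waits on ->done
       * and clears ->stop before the next round.  kthread_stop() breaks the
       * worker out of its wait.
       */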
 615/**
 616 * Structure used for LZO data compression.
 617 */
 618struct cmp_data {
 619	struct task_struct *thr;                  /* thread */
 620	atomic_t ready;                           /* ready to start flag */
 621	atomic_t stop;                            /* ready to stop flag */
 622	int ret;                                  /* return code */
 623	wait_queue_head_t go;                     /* start compression */
 624	wait_queue_head_t done;                   /* compression done */
 625	size_t unc_len;                           /* uncompressed length */
 626	size_t cmp_len;                           /* compressed length */
 627	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
 628	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
 629	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
 630};
 631
 632/**
 633 * Compression function that runs in its own thread.
 634 */
 635static int lzo_compress_threadfn(void *data)
 636{
 637	struct cmp_data *d = data;
 638
 639	while (1) {
 640		wait_event(d->go, atomic_read(&d->ready) ||
 641		                  kthread_should_stop());
 642		if (kthread_should_stop()) {
 643			d->thr = NULL;
 644			d->ret = -1;
 645			atomic_set(&d->stop, 1);
 646			wake_up(&d->done);
 647			break;
 648		}
 649		atomic_set(&d->ready, 0);
 650
 651		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 652		                          d->cmp + LZO_HEADER, &d->cmp_len,
 653		                          d->wrk);
 654		atomic_set(&d->stop, 1);
 655		wake_up(&d->done);
 656	}
 657	return 0;
 658}
 659
 660/**
 661 * save_image_lzo - Save the suspend image data compressed with LZO.
 662 * @handle: Swap map handle to use for saving the image.
 663 * @snapshot: Image to read data from.
 664 * @nr_to_write: Number of pages to save.
 665 */
 666static int save_image_lzo(struct swap_map_handle *handle,
 667                          struct snapshot_handle *snapshot,
 668                          unsigned int nr_to_write)
 669{
 670	unsigned int m;
 671	int ret = 0;
 672	int nr_pages;
 673	int err2;
 674	struct hib_bio_batch hb;
 675	ktime_t start;
 676	ktime_t stop;
 677	size_t off;
 678	unsigned thr, run_threads, nr_threads;
 679	unsigned char *page = NULL;
 680	struct cmp_data *data = NULL;
 681	struct crc_data *crc = NULL;
 682
 683	hib_init_batch(&hb);
 684
 685	/*
  686	 * We'll limit the number of compression threads to bound the memory
  687	 * footprint.
 688	 */
 689	nr_threads = num_online_cpus() - 1;
 690	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 691
 692	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
 693	if (!page) {
 694		pr_err("Failed to allocate LZO page\n");
 695		ret = -ENOMEM;
 696		goto out_clean;
 697	}
 698
 699	data = vmalloc(array_size(nr_threads, sizeof(*data)));
 700	if (!data) {
 701		pr_err("Failed to allocate LZO data\n");
 702		ret = -ENOMEM;
 703		goto out_clean;
 704	}
 705	for (thr = 0; thr < nr_threads; thr++)
 706		memset(&data[thr], 0, offsetof(struct cmp_data, go));
 707
 708	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 709	if (!crc) {
 710		pr_err("Failed to allocate crc\n");
 711		ret = -ENOMEM;
 712		goto out_clean;
 713	}
 714	memset(crc, 0, offsetof(struct crc_data, go));
 715
 716	/*
 717	 * Start the compression threads.
 718	 */
 719	for (thr = 0; thr < nr_threads; thr++) {
 720		init_waitqueue_head(&data[thr].go);
 721		init_waitqueue_head(&data[thr].done);
 722
 723		data[thr].thr = kthread_run(lzo_compress_threadfn,
 724		                            &data[thr],
 725		                            "image_compress/%u", thr);
 726		if (IS_ERR(data[thr].thr)) {
 727			data[thr].thr = NULL;
 728			pr_err("Cannot start compression threads\n");
 729			ret = -ENOMEM;
 730			goto out_clean;
 731		}
 732	}
 733
 734	/*
 735	 * Start the CRC32 thread.
 736	 */
 737	init_waitqueue_head(&crc->go);
 738	init_waitqueue_head(&crc->done);
 739
 740	handle->crc32 = 0;
 741	crc->crc32 = &handle->crc32;
 742	for (thr = 0; thr < nr_threads; thr++) {
 743		crc->unc[thr] = data[thr].unc;
 744		crc->unc_len[thr] = &data[thr].unc_len;
 745	}
 746
 747	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 748	if (IS_ERR(crc->thr)) {
 749		crc->thr = NULL;
 750		pr_err("Cannot start CRC32 thread\n");
 751		ret = -ENOMEM;
 752		goto out_clean;
 753	}
 754
 755	/*
 756	 * Adjust the number of required free pages after all allocations have
 757	 * been done. We don't want to run out of pages when writing.
 758	 */
 759	handle->reqd_free_pages = reqd_free_pages();
 760
 761	pr_info("Using %u thread(s) for compression\n", nr_threads);
 762	pr_info("Compressing and saving image data (%u pages)...\n",
 763		nr_to_write);
 764	m = nr_to_write / 10;
 765	if (!m)
 766		m = 1;
 767	nr_pages = 0;
 768	start = ktime_get();
 769	for (;;) {
 770		for (thr = 0; thr < nr_threads; thr++) {
 771			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
 772				ret = snapshot_read_next(snapshot);
 773				if (ret < 0)
 774					goto out_finish;
 775
 776				if (!ret)
 777					break;
 778
 779				memcpy(data[thr].unc + off,
 780				       data_of(*snapshot), PAGE_SIZE);
 781
 782				if (!(nr_pages % m))
 783					pr_info("Image saving progress: %3d%%\n",
 784						nr_pages / m * 10);
 785				nr_pages++;
 786			}
 787			if (!off)
 788				break;
 789
 790			data[thr].unc_len = off;
 791
 792			atomic_set(&data[thr].ready, 1);
 793			wake_up(&data[thr].go);
 794		}
 795
 796		if (!thr)
 797			break;
 798
 799		crc->run_threads = thr;
 800		atomic_set(&crc->ready, 1);
 801		wake_up(&crc->go);
 802
 803		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 804			wait_event(data[thr].done,
 805			           atomic_read(&data[thr].stop));
 806			atomic_set(&data[thr].stop, 0);
 807
 808			ret = data[thr].ret;
 809
 810			if (ret < 0) {
 811				pr_err("LZO compression failed\n");
 812				goto out_finish;
 813			}
 814
 815			if (unlikely(!data[thr].cmp_len ||
 816			             data[thr].cmp_len >
 817			             lzo1x_worst_compress(data[thr].unc_len))) {
 818				pr_err("Invalid LZO compressed length\n");
 819				ret = -1;
 820				goto out_finish;
 821			}
 822
 823			*(size_t *)data[thr].cmp = data[thr].cmp_len;
 824
 825			/*
 826			 * Given we are writing one page at a time to disk, we
 827			 * copy that much from the buffer, although the last
  828			 * bit will likely be smaller than a full page. This is
 829			 * OK - we saved the length of the compressed data, so
 830			 * any garbage at the end will be discarded when we
 831			 * read it.
 832			 */
 833			for (off = 0;
 834			     off < LZO_HEADER + data[thr].cmp_len;
 835			     off += PAGE_SIZE) {
 836				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
 837
 838				ret = swap_write_page(handle, page, &hb);
 839				if (ret)
 840					goto out_finish;
 841			}
 842		}
 843
 844		wait_event(crc->done, atomic_read(&crc->stop));
 845		atomic_set(&crc->stop, 0);
 846	}
 847
 848out_finish:
 849	err2 = hib_wait_io(&hb);
 850	stop = ktime_get();
 851	if (!ret)
 852		ret = err2;
 853	if (!ret)
 854		pr_info("Image saving done\n");
 855	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 856out_clean:
 857	if (crc) {
 858		if (crc->thr)
 859			kthread_stop(crc->thr);
 860		kfree(crc);
 861	}
 862	if (data) {
 863		for (thr = 0; thr < nr_threads; thr++)
 864			if (data[thr].thr)
 865				kthread_stop(data[thr].thr);
 866		vfree(data);
 867	}
  868	if (page)
 		free_page((unsigned long)page);
 869
 870	return ret;
 871}
 872
 873/**
 874 *	enough_swap - Make sure we have enough swap to save the image.
 875 *
 876 *	Returns TRUE or FALSE after checking the total amount of swap
  877 *	space available from the resume partition.
 878 */
 879
 880static int enough_swap(unsigned int nr_pages)
 881{
 882	unsigned int free_swap = count_swap_pages(root_swap, 1);
 883	unsigned int required;
 884
 885	pr_debug("Free swap pages: %u\n", free_swap);
 886
 887	required = PAGES_FOR_IO + nr_pages;
 888	return free_swap > required;
 889}
 890
 891/**
 892 *	swsusp_write - Write entire image and metadata.
 893 *	@flags: flags to pass to the "boot" kernel in the image header
 894 *
 895 *	It is important _NOT_ to umount filesystems at this point. We want
  896 *	them synced (in case something goes wrong) but we do NOT want to mark
  897 *	the filesystems clean: they are not. (And it does not matter: if we
  898 *	resume correctly, we'll mark the system clean anyway.)
 899 */
 900
 901int swsusp_write(unsigned int flags)
 902{
 903	struct swap_map_handle handle;
 904	struct snapshot_handle snapshot;
 905	struct swsusp_info *header;
 906	unsigned long pages;
 907	int error;
 908
 909	pages = snapshot_get_image_size();
 910	error = get_swap_writer(&handle);
 911	if (error) {
 912		pr_err("Cannot get swap writer\n");
 913		return error;
 914	}
 915	if (flags & SF_NOCOMPRESS_MODE) {
 916		if (!enough_swap(pages)) {
 917			pr_err("Not enough free swap\n");
 918			error = -ENOSPC;
 919			goto out_finish;
 920		}
 921	}
 922	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 923	error = snapshot_read_next(&snapshot);
 924	if (error < (int)PAGE_SIZE) {
 925		if (error >= 0)
 926			error = -EFAULT;
 927
 928		goto out_finish;
 929	}
 930	header = (struct swsusp_info *)data_of(snapshot);
 931	error = swap_write_page(&handle, header, NULL);
 932	if (!error) {
 933		error = (flags & SF_NOCOMPRESS_MODE) ?
 934			save_image(&handle, &snapshot, pages - 1) :
 935			save_image_lzo(&handle, &snapshot, pages - 1);
 936	}
 937out_finish:
 938	error = swap_writer_finish(&handle, flags, error);
 939	return error;
 940}
 941
 942/**
 943 *	The following functions allow us to read data using a swap map
 944 *	in a file-alike way
 945 */
 946
 947static void release_swap_reader(struct swap_map_handle *handle)
 948{
 949	struct swap_map_page_list *tmp;
 950
 951	while (handle->maps) {
 952		if (handle->maps->map)
 953			free_page((unsigned long)handle->maps->map);
 954		tmp = handle->maps;
 955		handle->maps = handle->maps->next;
 956		kfree(tmp);
 957	}
 958	handle->cur = NULL;
 959}
 960
 961static int get_swap_reader(struct swap_map_handle *handle,
 962		unsigned int *flags_p)
 963{
 964	int error;
 965	struct swap_map_page_list *tmp, *last;
 966	sector_t offset;
 967
 968	*flags_p = swsusp_header->flags;
 969
 970	if (!swsusp_header->image) /* how can this happen? */
 971		return -EINVAL;
 972
 973	handle->cur = NULL;
 974	last = handle->maps = NULL;
 975	offset = swsusp_header->image;
 976	while (offset) {
 977		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
 978		if (!tmp) {
 979			release_swap_reader(handle);
 980			return -ENOMEM;
 981		}
 982		if (!handle->maps)
 983			handle->maps = tmp;
 984		if (last)
 985			last->next = tmp;
 986		last = tmp;
 987
 988		tmp->map = (struct swap_map_page *)
 989			   __get_free_page(GFP_NOIO | __GFP_HIGH);
 990		if (!tmp->map) {
 991			release_swap_reader(handle);
 992			return -ENOMEM;
 993		}
 994
 995		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
 996		if (error) {
 997			release_swap_reader(handle);
 998			return error;
 999		}
1000		offset = tmp->map->next_swap;
1001	}
1002	handle->k = 0;
1003	handle->cur = handle->maps->map;
1004	return 0;
1005}
1006
1007static int swap_read_page(struct swap_map_handle *handle, void *buf,
1008		struct hib_bio_batch *hb)
1009{
1010	sector_t offset;
1011	int error;
1012	struct swap_map_page_list *tmp;
1013
1014	if (!handle->cur)
1015		return -EINVAL;
1016	offset = handle->cur->entries[handle->k];
1017	if (!offset)
1018		return -EFAULT;
1019	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
1020	if (error)
1021		return error;
1022	if (++handle->k >= MAP_PAGE_ENTRIES) {
1023		handle->k = 0;
1024		free_page((unsigned long)handle->maps->map);
1025		tmp = handle->maps;
1026		handle->maps = handle->maps->next;
1027		kfree(tmp);
1028		if (!handle->maps)
1029			release_swap_reader(handle);
1030		else
1031			handle->cur = handle->maps->map;
1032	}
1033	return error;
1034}
1035
1036static int swap_reader_finish(struct swap_map_handle *handle)
1037{
1038	release_swap_reader(handle);
1039
1040	return 0;
1041}
1042
1043/**
1044 *	load_image - load the image using the swap map handle
1045 *	@handle and the snapshot handle @snapshot
 1046 *	(assume there are @nr_to_read pages to load)
1047 */
1048
1049static int load_image(struct swap_map_handle *handle,
1050                      struct snapshot_handle *snapshot,
1051                      unsigned int nr_to_read)
1052{
1053	unsigned int m;
1054	int ret = 0;
1055	ktime_t start;
1056	ktime_t stop;
1057	struct hib_bio_batch hb;
1058	int err2;
1059	unsigned nr_pages;
1060
1061	hib_init_batch(&hb);
1062
1063	clean_pages_on_read = true;
1064	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
1065	m = nr_to_read / 10;
1066	if (!m)
1067		m = 1;
1068	nr_pages = 0;
1069	start = ktime_get();
1070	for ( ; ; ) {
1071		ret = snapshot_write_next(snapshot);
1072		if (ret <= 0)
1073			break;
1074		ret = swap_read_page(handle, data_of(*snapshot), &hb);
1075		if (ret)
1076			break;
1077		if (snapshot->sync_read)
1078			ret = hib_wait_io(&hb);
1079		if (ret)
1080			break;
1081		if (!(nr_pages % m))
1082			pr_info("Image loading progress: %3d%%\n",
1083				nr_pages / m * 10);
1084		nr_pages++;
1085	}
1086	err2 = hib_wait_io(&hb);
1087	stop = ktime_get();
1088	if (!ret)
1089		ret = err2;
1090	if (!ret) {
1091		pr_info("Image loading done\n");
1092		snapshot_write_finalize(snapshot);
1093		if (!snapshot_image_loaded(snapshot))
1094			ret = -ENODATA;
1095	}
1096	swsusp_show_speed(start, stop, nr_to_read, "Read");
1097	return ret;
1098}
1099
1100/**
1101 * Structure used for LZO data decompression.
1102 */
1103struct dec_data {
1104	struct task_struct *thr;                  /* thread */
1105	atomic_t ready;                           /* ready to start flag */
1106	atomic_t stop;                            /* ready to stop flag */
1107	int ret;                                  /* return code */
1108	wait_queue_head_t go;                     /* start decompression */
1109	wait_queue_head_t done;                   /* decompression done */
1110	size_t unc_len;                           /* uncompressed length */
1111	size_t cmp_len;                           /* compressed length */
1112	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1113	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1114};
1115
1116/**
 1117 * Decompression function that runs in its own thread.
1118 */
1119static int lzo_decompress_threadfn(void *data)
1120{
1121	struct dec_data *d = data;
1122
1123	while (1) {
1124		wait_event(d->go, atomic_read(&d->ready) ||
1125		                  kthread_should_stop());
1126		if (kthread_should_stop()) {
1127			d->thr = NULL;
1128			d->ret = -1;
1129			atomic_set(&d->stop, 1);
1130			wake_up(&d->done);
1131			break;
1132		}
1133		atomic_set(&d->ready, 0);
1134
1135		d->unc_len = LZO_UNC_SIZE;
1136		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1137		                               d->unc, &d->unc_len);
1138		if (clean_pages_on_decompress)
1139			flush_icache_range((unsigned long)d->unc,
1140					   (unsigned long)d->unc + d->unc_len);
1141
1142		atomic_set(&d->stop, 1);
1143		wake_up(&d->done);
1144	}
1145	return 0;
1146}
1147
1148/**
 1149 * load_image_lzo - Load compressed image data and decompress it with LZO.
1150 * @handle: Swap map handle to use for loading data.
1151 * @snapshot: Image to copy uncompressed data into.
1152 * @nr_to_read: Number of pages to load.
1153 */
1154static int load_image_lzo(struct swap_map_handle *handle,
1155                          struct snapshot_handle *snapshot,
1156                          unsigned int nr_to_read)
1157{
1158	unsigned int m;
1159	int ret = 0;
1160	int eof = 0;
1161	struct hib_bio_batch hb;
1162	ktime_t start;
1163	ktime_t stop;
1164	unsigned nr_pages;
1165	size_t off;
1166	unsigned i, thr, run_threads, nr_threads;
1167	unsigned ring = 0, pg = 0, ring_size = 0,
1168	         have = 0, want, need, asked = 0;
1169	unsigned long read_pages = 0;
1170	unsigned char **page = NULL;
1171	struct dec_data *data = NULL;
1172	struct crc_data *crc = NULL;
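      	/*
      	 * A reading of the ring-buffer counters used below: ring_size pages
      	 * are allocated into page[]; "want" is how many ring slots are free
      	 * to submit reads into, "asked" counts reads submitted but not yet
      	 * waited for, and "have" counts pages whose reads have completed
      	 * and await consumption.  "ring" and "pg" are the producer and
      	 * consumer indices, respectively.
      	 */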
1173
1174	hib_init_batch(&hb);
1175
1176	/*
 1177	 * We'll limit the number of decompression threads to bound the memory
 1178	 * footprint.
1179	 */
1180	nr_threads = num_online_cpus() - 1;
1181	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1182
1183	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
1184	if (!page) {
1185		pr_err("Failed to allocate LZO page\n");
1186		ret = -ENOMEM;
1187		goto out_clean;
1188	}
1189
1190	data = vmalloc(array_size(nr_threads, sizeof(*data)));
1191	if (!data) {
1192		pr_err("Failed to allocate LZO data\n");
1193		ret = -ENOMEM;
1194		goto out_clean;
1195	}
1196	for (thr = 0; thr < nr_threads; thr++)
1197		memset(&data[thr], 0, offsetof(struct dec_data, go));
1198
1199	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1200	if (!crc) {
1201		pr_err("Failed to allocate crc\n");
1202		ret = -ENOMEM;
1203		goto out_clean;
1204	}
1205	memset(crc, 0, offsetof(struct crc_data, go));
1206
1207	clean_pages_on_decompress = true;
1208
1209	/*
1210	 * Start the decompression threads.
1211	 */
1212	for (thr = 0; thr < nr_threads; thr++) {
1213		init_waitqueue_head(&data[thr].go);
1214		init_waitqueue_head(&data[thr].done);
1215
1216		data[thr].thr = kthread_run(lzo_decompress_threadfn,
1217		                            &data[thr],
1218		                            "image_decompress/%u", thr);
1219		if (IS_ERR(data[thr].thr)) {
1220			data[thr].thr = NULL;
1221			pr_err("Cannot start decompression threads\n");
1222			ret = -ENOMEM;
1223			goto out_clean;
1224		}
1225	}
1226
1227	/*
1228	 * Start the CRC32 thread.
1229	 */
1230	init_waitqueue_head(&crc->go);
1231	init_waitqueue_head(&crc->done);
1232
1233	handle->crc32 = 0;
1234	crc->crc32 = &handle->crc32;
1235	for (thr = 0; thr < nr_threads; thr++) {
1236		crc->unc[thr] = data[thr].unc;
1237		crc->unc_len[thr] = &data[thr].unc_len;
1238	}
1239
1240	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1241	if (IS_ERR(crc->thr)) {
1242		crc->thr = NULL;
1243		pr_err("Cannot start CRC32 thread\n");
1244		ret = -ENOMEM;
1245		goto out_clean;
1246	}
1247
1248	/*
1249	 * Set the number of pages for read buffering.
1250	 * This is complete guesswork, because we'll only know the real
1251	 * picture once prepare_image() is called, which is much later on
1252	 * during the image load phase. We'll assume the worst case and
1253	 * say that none of the image pages are from high memory.
1254	 */
1255	if (low_free_pages() > snapshot_get_image_size())
1256		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1257	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
1258
1259	for (i = 0; i < read_pages; i++) {
1260		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1261						  GFP_NOIO | __GFP_HIGH :
1262						  GFP_NOIO | __GFP_NOWARN |
1263						  __GFP_NORETRY);
1264
1265		if (!page[i]) {
1266			if (i < LZO_CMP_PAGES) {
1267				ring_size = i;
1268				pr_err("Failed to allocate LZO pages\n");
1269				ret = -ENOMEM;
1270				goto out_clean;
1271			} else {
1272				break;
1273			}
1274		}
1275	}
1276	want = ring_size = i;
1277
1278	pr_info("Using %u thread(s) for decompression\n", nr_threads);
1279	pr_info("Loading and decompressing image data (%u pages)...\n",
1280		nr_to_read);
1281	m = nr_to_read / 10;
1282	if (!m)
1283		m = 1;
1284	nr_pages = 0;
1285	start = ktime_get();
1286
1287	ret = snapshot_write_next(snapshot);
1288	if (ret <= 0)
1289		goto out_finish;
1290
 1291	for (;;) {
1292		for (i = 0; !eof && i < want; i++) {
1293			ret = swap_read_page(handle, page[ring], &hb);
1294			if (ret) {
1295				/*
1296				 * On real read error, finish. On end of data,
1297				 * set EOF flag and just exit the read loop.
1298				 */
1299				if (handle->cur &&
1300				    handle->cur->entries[handle->k]) {
1301					goto out_finish;
1302				} else {
1303					eof = 1;
1304					break;
1305				}
1306			}
1307			if (++ring >= ring_size)
1308				ring = 0;
1309		}
1310		asked += i;
1311		want -= i;
1312
1313		/*
1314		 * We are out of data, wait for some more.
1315		 */
1316		if (!have) {
1317			if (!asked)
1318				break;
1319
1320			ret = hib_wait_io(&hb);
1321			if (ret)
1322				goto out_finish;
1323			have += asked;
1324			asked = 0;
1325			if (eof)
1326				eof = 2;
1327		}
1328
1329		if (crc->run_threads) {
1330			wait_event(crc->done, atomic_read(&crc->stop));
1331			atomic_set(&crc->stop, 0);
1332			crc->run_threads = 0;
1333		}
1334
1335		for (thr = 0; have && thr < nr_threads; thr++) {
1336			data[thr].cmp_len = *(size_t *)page[pg];
1337			if (unlikely(!data[thr].cmp_len ||
1338			             data[thr].cmp_len >
1339			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
1340				pr_err("Invalid LZO compressed length\n");
1341				ret = -1;
1342				goto out_finish;
1343			}
1344
1345			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1346			                    PAGE_SIZE);
1347			if (need > have) {
1348				if (eof > 1) {
1349					ret = -1;
1350					goto out_finish;
1351				}
1352				break;
1353			}
1354
1355			for (off = 0;
1356			     off < LZO_HEADER + data[thr].cmp_len;
1357			     off += PAGE_SIZE) {
1358				memcpy(data[thr].cmp + off,
1359				       page[pg], PAGE_SIZE);
1360				have--;
1361				want++;
1362				if (++pg >= ring_size)
1363					pg = 0;
1364			}
1365
1366			atomic_set(&data[thr].ready, 1);
1367			wake_up(&data[thr].go);
1368		}
1369
1370		/*
1371		 * Wait for more data while we are decompressing.
1372		 */
1373		if (have < LZO_CMP_PAGES && asked) {
1374			ret = hib_wait_io(&hb);
1375			if (ret)
1376				goto out_finish;
1377			have += asked;
1378			asked = 0;
1379			if (eof)
1380				eof = 2;
1381		}
1382
1383		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1384			wait_event(data[thr].done,
1385			           atomic_read(&data[thr].stop));
1386			atomic_set(&data[thr].stop, 0);
1387
1388			ret = data[thr].ret;
1389
1390			if (ret < 0) {
1391				pr_err("LZO decompression failed\n");
1392				goto out_finish;
1393			}
1394
1395			if (unlikely(!data[thr].unc_len ||
1396			             data[thr].unc_len > LZO_UNC_SIZE ||
1397			             data[thr].unc_len & (PAGE_SIZE - 1))) {
1398				pr_err("Invalid LZO uncompressed length\n");
1399				ret = -1;
1400				goto out_finish;
1401			}
1402
1403			for (off = 0;
1404			     off < data[thr].unc_len; off += PAGE_SIZE) {
1405				memcpy(data_of(*snapshot),
1406				       data[thr].unc + off, PAGE_SIZE);
1407
1408				if (!(nr_pages % m))
1409					pr_info("Image loading progress: %3d%%\n",
1410						nr_pages / m * 10);
1411				nr_pages++;
1412
1413				ret = snapshot_write_next(snapshot);
1414				if (ret <= 0) {
1415					crc->run_threads = thr + 1;
1416					atomic_set(&crc->ready, 1);
1417					wake_up(&crc->go);
1418					goto out_finish;
1419				}
1420			}
1421		}
1422
1423		crc->run_threads = thr;
1424		atomic_set(&crc->ready, 1);
1425		wake_up(&crc->go);
1426	}
1427
1428out_finish:
1429	if (crc->run_threads) {
1430		wait_event(crc->done, atomic_read(&crc->stop));
1431		atomic_set(&crc->stop, 0);
1432	}
1433	stop = ktime_get();
1434	if (!ret) {
1435		pr_info("Image loading done\n");
1436		snapshot_write_finalize(snapshot);
1437		if (!snapshot_image_loaded(snapshot))
1438			ret = -ENODATA;
1439		if (!ret) {
1440			if (swsusp_header->flags & SF_CRC32_MODE) {
 1441				if (handle->crc32 != swsusp_header->crc32) {
1442					pr_err("Invalid image CRC32!\n");
1443					ret = -ENODATA;
1444				}
1445			}
1446		}
1447	}
1448	swsusp_show_speed(start, stop, nr_to_read, "Read");
1449out_clean:
1450	for (i = 0; i < ring_size; i++)
1451		free_page((unsigned long)page[i]);
1452	if (crc) {
1453		if (crc->thr)
1454			kthread_stop(crc->thr);
1455		kfree(crc);
1456	}
1457	if (data) {
1458		for (thr = 0; thr < nr_threads; thr++)
1459			if (data[thr].thr)
1460				kthread_stop(data[thr].thr);
1461		vfree(data);
1462	}
1463	vfree(page);
1464
1465	return ret;
1466}
1467
1468/**
1469 *	swsusp_read - read the hibernation image.
 1470 *	@flags_p: the flags stored by the "frozen" kernel in the image header
 1471 *		  are written into this memory location
1472 */
1473
1474int swsusp_read(unsigned int *flags_p)
1475{
1476	int error;
1477	struct swap_map_handle handle;
1478	struct snapshot_handle snapshot;
1479	struct swsusp_info *header;
1480
1481	memset(&snapshot, 0, sizeof(struct snapshot_handle));
1482	error = snapshot_write_next(&snapshot);
1483	if (error < (int)PAGE_SIZE)
1484		return error < 0 ? error : -EFAULT;
1485	header = (struct swsusp_info *)data_of(snapshot);
1486	error = get_swap_reader(&handle, flags_p);
1487	if (error)
1488		goto end;
1489	if (!error)
1490		error = swap_read_page(&handle, header, NULL);
1491	if (!error) {
1492		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1493			load_image(&handle, &snapshot, header->pages - 1) :
1494			load_image_lzo(&handle, &snapshot, header->pages - 1);
1495	}
1496	swap_reader_finish(&handle);
1497end:
1498	if (!error)
1499		pr_debug("Image successfully loaded\n");
1500	else
1501		pr_debug("Error %d resuming\n", error);
1502	return error;
1503}
1504
1505/**
1506 *      swsusp_check - Check for swsusp signature in the resume device
1507 */
1508
1509int swsusp_check(void)
1510{
1511	int error;
1512
1513	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1514					    FMODE_READ, NULL);
1515	if (!IS_ERR(hib_resume_bdev)) {
1516		set_blocksize(hib_resume_bdev, PAGE_SIZE);
1517		clear_page(swsusp_header);
1518		error = hib_submit_io(REQ_OP_READ, 0,
1519					swsusp_resume_block,
1520					swsusp_header, NULL);
1521		if (error)
1522			goto put;
1523
1524		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1525			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1526			/* Reset swap signature now */
1527			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1528						swsusp_resume_block,
1529						swsusp_header, NULL);
1530		} else {
1531			error = -EINVAL;
1532		}
1533
1534put:
1535		if (error)
1536			blkdev_put(hib_resume_bdev, FMODE_READ);
1537		else
1538			pr_debug("Image signature found, resuming\n");
1539	} else {
1540		error = PTR_ERR(hib_resume_bdev);
1541	}
1542
1543	if (error)
1544		pr_debug("Image not found (code %d)\n", error);
1545
1546	return error;
1547}
1548
1549/**
1550 *	swsusp_close - close swap device.
1551 */
1552
1553void swsusp_close(fmode_t mode)
1554{
1555	if (IS_ERR(hib_resume_bdev)) {
1556		pr_debug("Image device not initialised\n");
1557		return;
1558	}
1559
1560	blkdev_put(hib_resume_bdev, mode);
1561}
1562
1563/**
1564 *      swsusp_unmark - Unmark swsusp signature in the resume device
1565 */
1566
1567#ifdef CONFIG_SUSPEND
1568int swsusp_unmark(void)
1569{
1570	int error;
1571
1572	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
1573		      swsusp_header, NULL);
 1574	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
 1575		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1576		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
1577					swsusp_resume_block,
1578					swsusp_header, NULL);
1579	} else {
1580		pr_err("Cannot find swsusp signature!\n");
1581		error = -ENODEV;
1582	}
1583
1584	/*
1585	 * We just returned from suspend, we don't need the image any more.
1586	 */
1587	free_all_swap_pages(root_swap);
1588
1589	return error;
1590}
1591#endif
1592
1593static int __init swsusp_header_init(void)
1594{
 1595	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
1596	if (!swsusp_header)
1597		panic("Could not allocate memory for swsusp_header\n");
1598	return 0;
1599}
1600
1601core_initcall(swsusp_header_init);
v4.6
 
   1/*
   2 * linux/kernel/power/swap.c
   3 *
   4 * This file provides functions for reading the suspend image from
   5 * and writing it to a swap partition.
   6 *
   7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
   8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
   9 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
  10 *
  11 * This file is released under the GPLv2.
  12 *
  13 */
  14
 
 
  15#include <linux/module.h>
  16#include <linux/file.h>
  17#include <linux/delay.h>
  18#include <linux/bitops.h>
  19#include <linux/genhd.h>
  20#include <linux/device.h>
  21#include <linux/bio.h>
  22#include <linux/blkdev.h>
  23#include <linux/swap.h>
  24#include <linux/swapops.h>
  25#include <linux/pm.h>
  26#include <linux/slab.h>
  27#include <linux/lzo.h>
  28#include <linux/vmalloc.h>
  29#include <linux/cpumask.h>
  30#include <linux/atomic.h>
  31#include <linux/kthread.h>
  32#include <linux/crc32.h>
  33#include <linux/ktime.h>
  34
  35#include "power.h"
  36
  37#define HIBERNATE_SIG	"S1SUSPEND"
  38
  39/*
 
 
 
 
 
 
 
 
  40 *	The swap map is a data structure used for keeping track of each page
  41 *	written to a swap partition.  It consists of many swap_map_page
  42 *	structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
  43 *	These structures are stored on the swap and linked together with the
  44 *	help of the .next_swap member.
  45 *
  46 *	The swap map is created during suspend.  The swap map pages are
  47 *	allocated and populated one at a time, so we only need one memory
  48 *	page to set up the entire structure.
  49 *
  50 *	During resume we pick up all swap_map_page structures into a list.
  51 */
  52
  53#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
  54
  55/*
  56 * Number of free pages that are not high.
  57 */
  58static inline unsigned long low_free_pages(void)
  59{
  60	return nr_free_pages() - nr_free_highpages();
  61}
  62
  63/*
  64 * Number of pages required to be kept free while writing the image. Always
  65 * half of all available low pages before the writing starts.
  66 */
  67static inline unsigned long reqd_free_pages(void)
  68{
  69	return low_free_pages() / 2;
  70}
  71
  72struct swap_map_page {
  73	sector_t entries[MAP_PAGE_ENTRIES];
  74	sector_t next_swap;
  75};
  76
  77struct swap_map_page_list {
  78	struct swap_map_page *map;
  79	struct swap_map_page_list *next;
  80};
  81
  82/**
  83 *	The swap_map_handle structure is used for handling swap in
  84 *	a file-alike way
  85 */
  86
  87struct swap_map_handle {
  88	struct swap_map_page *cur;
  89	struct swap_map_page_list *maps;
  90	sector_t cur_swap;
  91	sector_t first_sector;
  92	unsigned int k;
  93	unsigned long reqd_free_pages;
  94	u32 crc32;
  95};
  96
  97struct swsusp_header {
  98	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
  99	              sizeof(u32)];
 100	u32	crc32;
 101	sector_t image;
 102	unsigned int flags;	/* Flags to pass to the "boot" kernel */
 103	char	orig_sig[10];
 104	char	sig[10];
 105} __packed;
 106
 107static struct swsusp_header *swsusp_header;
 108
 109/**
 110 *	The following functions are used for tracing the allocated
 111 *	swap pages, so that they can be freed in case of an error.
 112 */
 113
 114struct swsusp_extent {
 115	struct rb_node node;
 116	unsigned long start;
 117	unsigned long end;
 118};
 119
 120static struct rb_root swsusp_extents = RB_ROOT;
 121
 122static int swsusp_extents_insert(unsigned long swap_offset)
 123{
 124	struct rb_node **new = &(swsusp_extents.rb_node);
 125	struct rb_node *parent = NULL;
 126	struct swsusp_extent *ext;
 127
 128	/* Figure out where to put the new node */
 129	while (*new) {
 130		ext = rb_entry(*new, struct swsusp_extent, node);
 131		parent = *new;
 132		if (swap_offset < ext->start) {
 133			/* Try to merge */
 134			if (swap_offset == ext->start - 1) {
 135				ext->start--;
 136				return 0;
 137			}
 138			new = &((*new)->rb_left);
 139		} else if (swap_offset > ext->end) {
 140			/* Try to merge */
 141			if (swap_offset == ext->end + 1) {
 142				ext->end++;
 143				return 0;
 144			}
 145			new = &((*new)->rb_right);
 146		} else {
 147			/* It already is in the tree */
 148			return -EINVAL;
 149		}
 150	}
 151	/* Add the new node and rebalance the tree. */
 152	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
 153	if (!ext)
 154		return -ENOMEM;
 155
 156	ext->start = swap_offset;
 157	ext->end = swap_offset;
 158	rb_link_node(&ext->node, parent, new);
 159	rb_insert_color(&ext->node, &swsusp_extents);
 160	return 0;
 161}
 162
 163/**
 164 *	alloc_swapdev_block - allocate a swap page and register that it has
 165 *	been allocated, so that it can be freed in case of an error.
 166 */
 167
 168sector_t alloc_swapdev_block(int swap)
 169{
 170	unsigned long offset;
 171
 172	offset = swp_offset(get_swap_page_of_type(swap));
 173	if (offset) {
 174		if (swsusp_extents_insert(offset))
 175			swap_free(swp_entry(swap, offset));
 176		else
 177			return swapdev_block(swap, offset);
 178	}
 179	return 0;
 180}
 181
 182/**
 183 *	free_all_swap_pages - free swap pages allocated for saving image data.
 184 *	It also frees the extents used to register which swap entries had been
 185 *	allocated.
 186 */
 187
 188void free_all_swap_pages(int swap)
 189{
 190	struct rb_node *node;
 191
 192	while ((node = swsusp_extents.rb_node)) {
 193		struct swsusp_extent *ext;
 194		unsigned long offset;
 195
 196		ext = container_of(node, struct swsusp_extent, node);
 197		rb_erase(node, &swsusp_extents);
 198		for (offset = ext->start; offset <= ext->end; offset++)
 199			swap_free(swp_entry(swap, offset));
 200
 201		kfree(ext);
 202	}
 203}
 204
 205int swsusp_swap_in_use(void)
 206{
 207	return (swsusp_extents.rb_node != NULL);
 208}
 209
 210/*
 211 * General things
 212 */
 213
 214static unsigned short root_swap = 0xffff;
 215static struct block_device *hib_resume_bdev;
 216
 217struct hib_bio_batch {
 218	atomic_t		count;
 219	wait_queue_head_t	wait;
 220	int			error;
 221};
 222
 223static void hib_init_batch(struct hib_bio_batch *hb)
 224{
 225	atomic_set(&hb->count, 0);
 226	init_waitqueue_head(&hb->wait);
 227	hb->error = 0;
 228}
 229
 230static void hib_end_io(struct bio *bio)
 231{
 232	struct hib_bio_batch *hb = bio->bi_private;
 233	struct page *page = bio->bi_io_vec[0].bv_page;
 234
 235	if (bio->bi_error) {
 236		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 237				imajor(bio->bi_bdev->bd_inode),
 238				iminor(bio->bi_bdev->bd_inode),
 239				(unsigned long long)bio->bi_iter.bi_sector);
 240	}
 241
 242	if (bio_data_dir(bio) == WRITE)
 243		put_page(page);
 
 
 
 244
 245	if (bio->bi_error && !hb->error)
 246		hb->error = bio->bi_error;
 247	if (atomic_dec_and_test(&hb->count))
 248		wake_up(&hb->wait);
 249
 250	bio_put(bio);
 251}
 252
 253static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
 254		struct hib_bio_batch *hb)
 255{
 256	struct page *page = virt_to_page(addr);
 257	struct bio *bio;
 258	int error = 0;
 259
 260	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
 261	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 262	bio->bi_bdev = hib_resume_bdev;
 
 263
 264	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
 265		printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
 266			(unsigned long long)bio->bi_iter.bi_sector);
 267		bio_put(bio);
 268		return -EFAULT;
 269	}
 270
 271	if (hb) {
 272		bio->bi_end_io = hib_end_io;
 273		bio->bi_private = hb;
 274		atomic_inc(&hb->count);
 275		submit_bio(rw, bio);
 276	} else {
 277		error = submit_bio_wait(rw, bio);
 278		bio_put(bio);
 279	}
 280
 281	return error;
 282}
 283
 284static int hib_wait_io(struct hib_bio_batch *hb)
 285{
 286	wait_event(hb->wait, atomic_read(&hb->count) == 0);
 287	return hb->error;
 288}
 289
 290/*
 291 * Saving part
 292 */
 293
 294static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 295{
 296	int error;
 297
 298	hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
 
 299	if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
 300	    !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
 301		memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
 302		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
 303		swsusp_header->image = handle->first_sector;
 304		swsusp_header->flags = flags;
 305		if (flags & SF_CRC32_MODE)
 306			swsusp_header->crc32 = handle->crc32;
 307		error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
 308					swsusp_header, NULL);
 309	} else {
 310		printk(KERN_ERR "PM: Swap header not found!\n");
 311		error = -ENODEV;
 312	}
 313	return error;
 314}
 315
 316/**
 317 *	swsusp_swap_check - check if the resume device is a swap device
 318 *	and get its index (if so)
 319 *
 320 *	This is called before saving image
 321 */
 322static int swsusp_swap_check(void)
 323{
 324	int res;
 325
 326	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
 327			&hib_resume_bdev);
 328	if (res < 0)
 329		return res;
 330
 331	root_swap = res;
 332	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
 333	if (res)
 334		return res;
 335
 336	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
 337	if (res < 0)
 338		blkdev_put(hib_resume_bdev, FMODE_WRITE);
 339
 
 
 
 
 
 
 340	return res;
 341}
 342
 343/**
 344 *	write_page - Write one page to given swap location.
 345 *	@buf:		Address we're writing.
 346 *	@offset:	Offset of the swap page we're writing to.
 347 *	@hb:		bio completion batch
 348 */
 349
 350static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 351{
 352	void *src;
 353	int ret;
 354
 355	if (!offset)
 356		return -ENOSPC;
 357
 358	if (hb) {
 359		src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
 360		                              __GFP_NORETRY);
 361		if (src) {
 362			copy_page(src, buf);
 363		} else {
 364			ret = hib_wait_io(hb); /* Free pages */
 365			if (ret)
 366				return ret;
 367			src = (void *)__get_free_page(__GFP_RECLAIM |
 368			                              __GFP_NOWARN |
 369			                              __GFP_NORETRY);
 370			if (src) {
 371				copy_page(src, buf);
 372			} else {
 373				WARN_ON_ONCE(1);
 374				hb = NULL;	/* Go synchronous */
 375				src = buf;
 376			}
 377		}
 378	} else {
 379		src = buf;
 380	}
 381	return hib_submit_io(WRITE_SYNC, offset, src, hb);
 382}
 383
 384static void release_swap_writer(struct swap_map_handle *handle)
 385{
 386	if (handle->cur)
 387		free_page((unsigned long)handle->cur);
 388	handle->cur = NULL;
 389}
 390
 391static int get_swap_writer(struct swap_map_handle *handle)
 392{
 393	int ret;
 394
 395	ret = swsusp_swap_check();
 396	if (ret) {
 397		if (ret != -ENOSPC)
 398			printk(KERN_ERR "PM: Cannot find swap device, try "
 399					"swapon -a.\n");
 400		return ret;
 401	}
 402	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
 403	if (!handle->cur) {
 404		ret = -ENOMEM;
 405		goto err_close;
 406	}
 407	handle->cur_swap = alloc_swapdev_block(root_swap);
 408	if (!handle->cur_swap) {
 409		ret = -ENOSPC;
 410		goto err_rel;
 411	}
 412	handle->k = 0;
 413	handle->reqd_free_pages = reqd_free_pages();
 414	handle->first_sector = handle->cur_swap;
 415	return 0;
 416err_rel:
 417	release_swap_writer(handle);
 418err_close:
 419	swsusp_close(FMODE_WRITE);
 420	return ret;
 421}
 422
 423static int swap_write_page(struct swap_map_handle *handle, void *buf,
 424		struct hib_bio_batch *hb)
 425{
 426	int error = 0;
 427	sector_t offset;
 428
 429	if (!handle->cur)
 430		return -EINVAL;
 431	offset = alloc_swapdev_block(root_swap);
 432	error = write_page(buf, offset, hb);
 433	if (error)
 434		return error;
 435	handle->cur->entries[handle->k++] = offset;
 436	if (handle->k >= MAP_PAGE_ENTRIES) {
 437		offset = alloc_swapdev_block(root_swap);
 438		if (!offset)
 439			return -ENOSPC;
 440		handle->cur->next_swap = offset;
 441		error = write_page(handle->cur, handle->cur_swap, hb);
 442		if (error)
 443			goto out;
 444		clear_page(handle->cur);
 445		handle->cur_swap = offset;
 446		handle->k = 0;
 447
 448		if (hb && low_free_pages() <= handle->reqd_free_pages) {
 449			error = hib_wait_io(hb);
 450			if (error)
 451				goto out;
 452			/*
 453			 * Recalculate the number of required free pages, to
 454			 * make sure we never take more than half.
 455			 */
 456			handle->reqd_free_pages = reqd_free_pages();
 457		}
 458	}
 459 out:
 460	return error;
 461}
 462
 463static int flush_swap_writer(struct swap_map_handle *handle)
 464{
 465	if (handle->cur && handle->cur_swap)
 466		return write_page(handle->cur, handle->cur_swap, NULL);
 467	else
 468		return -EINVAL;
 469}
 470
 471static int swap_writer_finish(struct swap_map_handle *handle,
 472		unsigned int flags, int error)
 473{
 474	if (!error) {
 475		flush_swap_writer(handle);
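     		/*
     		 * The "S" and "|" markers bracket the signature write, so
     		 * the console is expected to show "PM: S|" once the header
     		 * has hit the disk.
     		 */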
 476		printk(KERN_INFO "PM: S");
 477		error = mark_swapfiles(handle, flags);
 478		printk("|\n");
 479	}
 480
 481	if (error)
 482		free_all_swap_pages(root_swap);
 483	release_swap_writer(handle);
 484	swsusp_close(FMODE_WRITE);
 485
 486	return error;
 487}
 488
 489/* We need to remember how much compressed data we need to read. */
 490#define LZO_HEADER	sizeof(size_t)
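
     /*
      * On-disk layout of one compressed chunk, as produced by
      * save_image_lzo() and consumed by load_image_lzo() below:
      *
      *   [ size_t cmp_len | cmp_len bytes of LZO1X data | pad to page ]
      *
      * Chunks are written a full page at a time, so the tail of the last
      * page is garbage; the reader relies on cmp_len to know how much of
      * it to consume.
      */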
 491
 492/* Number of pages/bytes we'll compress at one time. */
 493#define LZO_UNC_PAGES	32
 494#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)
 495
 496/* Number of pages/bytes we need for compressed data (worst case). */
 497#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
 498			             LZO_HEADER, PAGE_SIZE)
 499#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
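
     /*
      * Worked example, assuming 4 KiB pages and a 64-bit size_t:
      * LZO_UNC_SIZE = 32 * 4096 = 131072 bytes, and
      * lzo1x_worst_compress(131072) = 131072 + 131072/16 + 64 + 3 = 139331
      * bytes.  Adding the 8-byte LZO_HEADER and rounding up to whole pages
      * gives LZO_CMP_PAGES = 35, i.e. LZO_CMP_SIZE = 143360 bytes.
      */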
 500
 501/* Maximum number of threads for compression/decompression. */
 502#define LZO_THREADS	3
 503
 504/* Minimum/maximum number of pages for read buffering. */
 505#define LZO_MIN_RD_PAGES	1024
 506#define LZO_MAX_RD_PAGES	8192
 507
 508
 509/**
 510 *	save_image - save the suspend image data
 511 */
 512
 513static int save_image(struct swap_map_handle *handle,
 514                      struct snapshot_handle *snapshot,
 515                      unsigned int nr_to_write)
 516{
 517	unsigned int m;
 518	int ret;
 519	int nr_pages;
 520	int err2;
 521	struct hib_bio_batch hb;
 522	ktime_t start;
 523	ktime_t stop;
 524
 525	hib_init_batch(&hb);
 526
 527	printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
 528		nr_to_write);
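     	/* Print a progress line roughly every 10% of the pages. */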
 529	m = nr_to_write / 10;
 530	if (!m)
 531		m = 1;
 532	nr_pages = 0;
 533	start = ktime_get();
 534	while (1) {
 535		ret = snapshot_read_next(snapshot);
 536		if (ret <= 0)
 537			break;
 538		ret = swap_write_page(handle, data_of(*snapshot), &hb);
 539		if (ret)
 540			break;
 541		if (!(nr_pages % m))
 542			printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
 543			       nr_pages / m * 10);
 544		nr_pages++;
 545	}
 546	err2 = hib_wait_io(&hb);
 547	stop = ktime_get();
 548	if (!ret)
 549		ret = err2;
 550	if (!ret)
 551		printk(KERN_INFO "PM: Image saving done.\n");
 552	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 553	return ret;
 554}
 555
 556/**
 557 * Structure used for CRC32.
 558 */
 559struct crc_data {
 560	struct task_struct *thr;                  /* thread */
 561	atomic_t ready;                           /* ready to start flag */
 562	atomic_t stop;                            /* ready to stop flag */
 563	unsigned run_threads;                     /* nr current threads */
 564	wait_queue_head_t go;                     /* start crc update */
 565	wait_queue_head_t done;                   /* crc update done */
 566	u32 *crc32;                               /* points to handle's crc32 */
 567	size_t *unc_len[LZO_THREADS];             /* uncompressed lengths */
 568	unsigned char *unc[LZO_THREADS];          /* uncompressed data */
 569};
 570
 571/**
 572 * CRC32 update function that runs in its own thread.
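      *
      * A sketch of the handshake: the producer fills d->unc[] and
      * d->unc_len[] for d->run_threads buffers, sets d->ready and wakes
      * d->go; this thread folds each buffer into *d->crc32, then sets
      * d->stop and wakes d->done.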
 573 */
 574static int crc32_threadfn(void *data)
 575{
 576	struct crc_data *d = data;
 577	unsigned i;
 578
 579	while (1) {
 580		wait_event(d->go, atomic_read(&d->ready) ||
 581		                  kthread_should_stop());
 582		if (kthread_should_stop()) {
 583			d->thr = NULL;
 584			atomic_set(&d->stop, 1);
 585			wake_up(&d->done);
 586			break;
 587		}
 588		atomic_set(&d->ready, 0);
 589
 590		for (i = 0; i < d->run_threads; i++)
 591			*d->crc32 = crc32_le(*d->crc32,
 592			                     d->unc[i], *d->unc_len[i]);
 593		atomic_set(&d->stop, 1);
 594		wake_up(&d->done);
 595	}
 596	return 0;
  597}

 598/**
 599 * Structure used for LZO data compression.
 600 */
 601struct cmp_data {
 602	struct task_struct *thr;                  /* thread */
 603	atomic_t ready;                           /* ready to start flag */
 604	atomic_t stop;                            /* ready to stop flag */
 605	int ret;                                  /* return code */
 606	wait_queue_head_t go;                     /* start compression */
 607	wait_queue_head_t done;                   /* compression done */
 608	size_t unc_len;                           /* uncompressed length */
 609	size_t cmp_len;                           /* compressed length */
 610	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
 611	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
 612	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
 613};
 614
 615/**
 616 * Compression function that runs in its own thread.
 617 */
 618static int lzo_compress_threadfn(void *data)
 619{
 620	struct cmp_data *d = data;
 621
 622	while (1) {
 623		wait_event(d->go, atomic_read(&d->ready) ||
 624		                  kthread_should_stop());
 625		if (kthread_should_stop()) {
 626			d->thr = NULL;
 627			d->ret = -1;
 628			atomic_set(&d->stop, 1);
 629			wake_up(&d->done);
 630			break;
 631		}
 632		atomic_set(&d->ready, 0);
 633
 634		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
 635		                          d->cmp + LZO_HEADER, &d->cmp_len,
 636		                          d->wrk);
 637		atomic_set(&d->stop, 1);
 638		wake_up(&d->done);
 639	}
 640	return 0;
 641}
 642
 643/**
 644 * save_image_lzo - Save the suspend image data compressed with LZO.
 645 * @handle: Swap map handle to use for saving the image.
 646 * @snapshot: Image to read data from.
 647 * @nr_to_write: Number of pages to save.
 648 */
 649static int save_image_lzo(struct swap_map_handle *handle,
 650                          struct snapshot_handle *snapshot,
 651                          unsigned int nr_to_write)
 652{
 653	unsigned int m;
 654	int ret = 0;
 655	int nr_pages;
 656	int err2;
 657	struct hib_bio_batch hb;
 658	ktime_t start;
 659	ktime_t stop;
 660	size_t off;
 661	unsigned thr, run_threads, nr_threads;
 662	unsigned char *page = NULL;
 663	struct cmp_data *data = NULL;
 664	struct crc_data *crc = NULL;
 665
 666	hib_init_batch(&hb);
 667
 668	/*
  669	 * Limit the number of compression threads to keep the memory
  670	 * footprint in check.
 671	 */
 672	nr_threads = num_online_cpus() - 1;
 673	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
 674
 675	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 676	if (!page) {
 677		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 678		ret = -ENOMEM;
 679		goto out_clean;
 680	}
 681
 682	data = vmalloc(sizeof(*data) * nr_threads);
 683	if (!data) {
 684		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
 685		ret = -ENOMEM;
 686		goto out_clean;
 687	}
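     	/*
     	 * Zero only the members up to .go: the wait queue heads are
     	 * initialized separately below and the large data buffers need
     	 * no clearing.
     	 */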
 688	for (thr = 0; thr < nr_threads; thr++)
 689		memset(&data[thr], 0, offsetof(struct cmp_data, go));
 690
 691	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
 692	if (!crc) {
 693		printk(KERN_ERR "PM: Failed to allocate crc\n");
 694		ret = -ENOMEM;
 695		goto out_clean;
 696	}
 697	memset(crc, 0, offsetof(struct crc_data, go));
 698
 699	/*
 700	 * Start the compression threads.
 701	 */
 702	for (thr = 0; thr < nr_threads; thr++) {
 703		init_waitqueue_head(&data[thr].go);
 704		init_waitqueue_head(&data[thr].done);
 705
 706		data[thr].thr = kthread_run(lzo_compress_threadfn,
 707		                            &data[thr],
 708		                            "image_compress/%u", thr);
 709		if (IS_ERR(data[thr].thr)) {
 710			data[thr].thr = NULL;
 711			printk(KERN_ERR
 712			       "PM: Cannot start compression threads\n");
 713			ret = -ENOMEM;
 714			goto out_clean;
 715		}
 716	}
 717
 718	/*
 719	 * Start the CRC32 thread.
 720	 */
 721	init_waitqueue_head(&crc->go);
 722	init_waitqueue_head(&crc->done);
 723
 724	handle->crc32 = 0;
 725	crc->crc32 = &handle->crc32;
 726	for (thr = 0; thr < nr_threads; thr++) {
 727		crc->unc[thr] = data[thr].unc;
 728		crc->unc_len[thr] = &data[thr].unc_len;
 729	}
 730
 731	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
 732	if (IS_ERR(crc->thr)) {
 733		crc->thr = NULL;
 734		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
 735		ret = -ENOMEM;
 736		goto out_clean;
 737	}
 738
 739	/*
 740	 * Adjust the number of required free pages after all allocations have
 741	 * been done. We don't want to run out of pages when writing.
 742	 */
 743	handle->reqd_free_pages = reqd_free_pages();
 744
 745	printk(KERN_INFO
 746		"PM: Using %u thread(s) for compression.\n"
 747		"PM: Compressing and saving image data (%u pages)...\n",
 748		nr_threads, nr_to_write);
 749	m = nr_to_write / 10;
 750	if (!m)
 751		m = 1;
 752	nr_pages = 0;
 753	start = ktime_get();
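     	/*
     	 * Pipeline: fill each worker's buffer with up to LZO_UNC_PAGES
     	 * snapshot pages, kick the compressors and the CRC32 thread in
     	 * parallel, then write each worker's output page by page.
     	 */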
 754	for (;;) {
 755		for (thr = 0; thr < nr_threads; thr++) {
 756			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
 757				ret = snapshot_read_next(snapshot);
 758				if (ret < 0)
 759					goto out_finish;
 760
 761				if (!ret)
 762					break;
 763
 764				memcpy(data[thr].unc + off,
 765				       data_of(*snapshot), PAGE_SIZE);
 766
 767				if (!(nr_pages % m))
 768					printk(KERN_INFO
 769					       "PM: Image saving progress: "
 770					       "%3d%%\n",
  771					       nr_pages / m * 10);
 772				nr_pages++;
 773			}
 774			if (!off)
 775				break;
 776
 777			data[thr].unc_len = off;
 778
 779			atomic_set(&data[thr].ready, 1);
 780			wake_up(&data[thr].go);
 781		}
 782
 783		if (!thr)
 784			break;
 785
 786		crc->run_threads = thr;
 787		atomic_set(&crc->ready, 1);
 788		wake_up(&crc->go);
 789
 790		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
 791			wait_event(data[thr].done,
 792			           atomic_read(&data[thr].stop));
 793			atomic_set(&data[thr].stop, 0);
 794
 795			ret = data[thr].ret;
 796
 797			if (ret < 0) {
 798				printk(KERN_ERR "PM: LZO compression failed\n");
 799				goto out_finish;
 800			}
 801
 802			if (unlikely(!data[thr].cmp_len ||
 803			             data[thr].cmp_len >
 804			             lzo1x_worst_compress(data[thr].unc_len))) {
 805				printk(KERN_ERR
 806				       "PM: Invalid LZO compressed length\n");
 807				ret = -1;
 808				goto out_finish;
 809			}
 810
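     			/* Stash the chunk length in the LZO_HEADER slot. */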
 811			*(size_t *)data[thr].cmp = data[thr].cmp_len;
 812
 813			/*
 814			 * Given we are writing one page at a time to disk, we
 815			 * copy that much from the buffer, although the last
  816			 * bit will likely be smaller than a full page. This is
 817			 * OK - we saved the length of the compressed data, so
 818			 * any garbage at the end will be discarded when we
 819			 * read it.
 820			 */
 821			for (off = 0;
 822			     off < LZO_HEADER + data[thr].cmp_len;
 823			     off += PAGE_SIZE) {
 824				memcpy(page, data[thr].cmp + off, PAGE_SIZE);
 825
 826				ret = swap_write_page(handle, page, &hb);
 827				if (ret)
 828					goto out_finish;
 829			}
 830		}
 831
 832		wait_event(crc->done, atomic_read(&crc->stop));
 833		atomic_set(&crc->stop, 0);
 834	}
 835
 836out_finish:
 837	err2 = hib_wait_io(&hb);
 838	stop = ktime_get();
 839	if (!ret)
 840		ret = err2;
 841	if (!ret)
 842		printk(KERN_INFO "PM: Image saving done.\n");
 843	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
 844out_clean:
 845	if (crc) {
 846		if (crc->thr)
 847			kthread_stop(crc->thr);
 848		kfree(crc);
 849	}
 850	if (data) {
 851		for (thr = 0; thr < nr_threads; thr++)
 852			if (data[thr].thr)
 853				kthread_stop(data[thr].thr);
 854		vfree(data);
 855	}
  856	if (page)
     		free_page((unsigned long)page);
 857
 858	return ret;
 859}
 860
 861/**
 862 *	enough_swap - Make sure we have enough swap to save the image.
 863 *
 864 *	Returns TRUE or FALSE after checking the total amount of swap
  865 *	space available from the resume partition.
 866 */
 867
 868static int enough_swap(unsigned int nr_pages, unsigned int flags)
 869{
 870	unsigned int free_swap = count_swap_pages(root_swap, 1);
 871	unsigned int required;
 872
 873	pr_debug("PM: Free swap pages: %u\n", free_swap);
 874
 875	required = PAGES_FOR_IO + nr_pages;
 876	return free_swap > required;
 877}
 878
 879/**
 880 *	swsusp_write - Write entire image and metadata.
 881 *	@flags: flags to pass to the "boot" kernel in the image header
 882 *
  883 *	It is important _NOT_ to umount filesystems at this point. We want
  884 *	them synced (in case something goes wrong), but we DO NOT want to
  885 *	mark the filesystems clean: they are not. (And it does not matter;
  886 *	if we resume correctly, we'll mark the system clean anyway.)
 887 */
 888
 889int swsusp_write(unsigned int flags)
 890{
 891	struct swap_map_handle handle;
 892	struct snapshot_handle snapshot;
 893	struct swsusp_info *header;
 894	unsigned long pages;
 895	int error;
 896
 897	pages = snapshot_get_image_size();
 898	error = get_swap_writer(&handle);
 899	if (error) {
 900		printk(KERN_ERR "PM: Cannot get swap writer\n");
 901		return error;
 902	}
 903	if (flags & SF_NOCOMPRESS_MODE) {
 904		if (!enough_swap(pages, flags)) {
 905			printk(KERN_ERR "PM: Not enough free swap\n");
 906			error = -ENOSPC;
 907			goto out_finish;
 908		}
 909	}
 910	memset(&snapshot, 0, sizeof(struct snapshot_handle));
 911	error = snapshot_read_next(&snapshot);
 912	if (error < PAGE_SIZE) {
 913		if (error >= 0)
 914			error = -EFAULT;
 915
 916		goto out_finish;
 917	}
 918	header = (struct swsusp_info *)data_of(snapshot);
 919	error = swap_write_page(&handle, header, NULL);
 920	if (!error) {
 921		error = (flags & SF_NOCOMPRESS_MODE) ?
 922			save_image(&handle, &snapshot, pages - 1) :
 923			save_image_lzo(&handle, &snapshot, pages - 1);
 924	}
 925out_finish:
 926	error = swap_writer_finish(&handle, flags, error);
 927	return error;
 928}
 929
 930/**
 931 *	The following functions allow us to read data using a swap map
  932 *	in a file-like way
 933 */
 934
 935static void release_swap_reader(struct swap_map_handle *handle)
 936{
 937	struct swap_map_page_list *tmp;
 938
 939	while (handle->maps) {
 940		if (handle->maps->map)
 941			free_page((unsigned long)handle->maps->map);
 942		tmp = handle->maps;
 943		handle->maps = handle->maps->next;
 944		kfree(tmp);
 945	}
 946	handle->cur = NULL;
 947}
 948
 949static int get_swap_reader(struct swap_map_handle *handle,
 950		unsigned int *flags_p)
 951{
 952	int error;
 953	struct swap_map_page_list *tmp, *last;
 954	sector_t offset;
 955
 956	*flags_p = swsusp_header->flags;
 957
 958	if (!swsusp_header->image) /* how can this happen? */
 959		return -EINVAL;
 960
 961	handle->cur = NULL;
 962	last = handle->maps = NULL;
 963	offset = swsusp_header->image;
 964	while (offset) {
 965		tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
 966		if (!tmp) {
 967			release_swap_reader(handle);
 968			return -ENOMEM;
 969		}
 970		memset(tmp, 0, sizeof(*tmp));
 971		if (!handle->maps)
 972			handle->maps = tmp;
 973		if (last)
 974			last->next = tmp;
 975		last = tmp;
 976
 977		tmp->map = (struct swap_map_page *)
 978			   __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 979		if (!tmp->map) {
 980			release_swap_reader(handle);
 981			return -ENOMEM;
 982		}
 983
 984		error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
 985		if (error) {
 986			release_swap_reader(handle);
 987			return error;
 988		}
 989		offset = tmp->map->next_swap;
 990	}
 991	handle->k = 0;
 992	handle->cur = handle->maps->map;
 993	return 0;
 994}
 995
 996static int swap_read_page(struct swap_map_handle *handle, void *buf,
 997		struct hib_bio_batch *hb)
 998{
 999	sector_t offset;
1000	int error;
1001	struct swap_map_page_list *tmp;
1002
1003	if (!handle->cur)
1004		return -EINVAL;
1005	offset = handle->cur->entries[handle->k];
1006	if (!offset)
1007		return -EFAULT;
1008	error = hib_submit_io(READ_SYNC, offset, buf, hb);
1009	if (error)
1010		return error;
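     	/*
     	 * Once every entry of the current map page has been consumed,
     	 * free it and move on to the next map page in the list.
     	 */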
1011	if (++handle->k >= MAP_PAGE_ENTRIES) {
1012		handle->k = 0;
1013		free_page((unsigned long)handle->maps->map);
1014		tmp = handle->maps;
1015		handle->maps = handle->maps->next;
1016		kfree(tmp);
1017		if (!handle->maps)
1018			release_swap_reader(handle);
1019		else
1020			handle->cur = handle->maps->map;
1021	}
1022	return error;
1023}
1024
1025static int swap_reader_finish(struct swap_map_handle *handle)
1026{
1027	release_swap_reader(handle);
1028
1029	return 0;
1030}
1031
1032/**
1033 *	load_image - load the image using the swap map handle
1034 *	@handle and the snapshot handle @snapshot
 1035 *	(assume there are @nr_to_read pages to load)
1036 */
1037
1038static int load_image(struct swap_map_handle *handle,
1039                      struct snapshot_handle *snapshot,
1040                      unsigned int nr_to_read)
1041{
1042	unsigned int m;
1043	int ret = 0;
1044	ktime_t start;
1045	ktime_t stop;
1046	struct hib_bio_batch hb;
1047	int err2;
1048	unsigned nr_pages;
1049
1050	hib_init_batch(&hb);
1051
1052	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
1053		nr_to_read);
1054	m = nr_to_read / 10;
1055	if (!m)
1056		m = 1;
1057	nr_pages = 0;
1058	start = ktime_get();
1059	for ( ; ; ) {
1060		ret = snapshot_write_next(snapshot);
1061		if (ret <= 0)
1062			break;
1063		ret = swap_read_page(handle, data_of(*snapshot), &hb);
1064		if (ret)
1065			break;
1066		if (snapshot->sync_read)
1067			ret = hib_wait_io(&hb);
1068		if (ret)
1069			break;
1070		if (!(nr_pages % m))
1071			printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
1072			       nr_pages / m * 10);
1073		nr_pages++;
1074	}
1075	err2 = hib_wait_io(&hb);
1076	stop = ktime_get();
1077	if (!ret)
1078		ret = err2;
1079	if (!ret) {
1080		printk(KERN_INFO "PM: Image loading done.\n");
1081		snapshot_write_finalize(snapshot);
1082		if (!snapshot_image_loaded(snapshot))
1083			ret = -ENODATA;
1084	}
1085	swsusp_show_speed(start, stop, nr_to_read, "Read");
1086	return ret;
1087}
1088
1089/**
1090 * Structure used for LZO data decompression.
1091 */
1092struct dec_data {
1093	struct task_struct *thr;                  /* thread */
1094	atomic_t ready;                           /* ready to start flag */
1095	atomic_t stop;                            /* ready to stop flag */
1096	int ret;                                  /* return code */
1097	wait_queue_head_t go;                     /* start decompression */
1098	wait_queue_head_t done;                   /* decompression done */
1099	size_t unc_len;                           /* uncompressed length */
1100	size_t cmp_len;                           /* compressed length */
1101	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
1102	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
1103};
1104
1105/**
 1106 * Decompression function that runs in its own thread.
1107 */
1108static int lzo_decompress_threadfn(void *data)
1109{
1110	struct dec_data *d = data;
1111
1112	while (1) {
1113		wait_event(d->go, atomic_read(&d->ready) ||
1114		                  kthread_should_stop());
1115		if (kthread_should_stop()) {
1116			d->thr = NULL;
1117			d->ret = -1;
1118			atomic_set(&d->stop, 1);
1119			wake_up(&d->done);
1120			break;
1121		}
1122		atomic_set(&d->ready, 0);
1123
1124		d->unc_len = LZO_UNC_SIZE;
1125		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1126		                               d->unc, &d->unc_len);
1127		atomic_set(&d->stop, 1);
1128		wake_up(&d->done);
1129	}
1130	return 0;
1131}
1132
1133/**
 1134 * load_image_lzo - Load compressed image data and decompress it with LZO.
1135 * @handle: Swap map handle to use for loading data.
1136 * @snapshot: Image to copy uncompressed data into.
1137 * @nr_to_read: Number of pages to load.
1138 */
1139static int load_image_lzo(struct swap_map_handle *handle,
1140                          struct snapshot_handle *snapshot,
1141                          unsigned int nr_to_read)
1142{
1143	unsigned int m;
1144	int ret = 0;
1145	int eof = 0;
1146	struct hib_bio_batch hb;
1147	ktime_t start;
1148	ktime_t stop;
1149	unsigned nr_pages;
1150	size_t off;
1151	unsigned i, thr, run_threads, nr_threads;
1152	unsigned ring = 0, pg = 0, ring_size = 0,
1153	         have = 0, want, need, asked = 0;
1154	unsigned long read_pages = 0;
1155	unsigned char **page = NULL;
1156	struct dec_data *data = NULL;
1157	struct crc_data *crc = NULL;
1158
1159	hib_init_batch(&hb);
1160
1161	/*
 1162	 * Limit the number of decompression threads to keep the memory
 1163	 * footprint in check.
1164	 */
1165	nr_threads = num_online_cpus() - 1;
1166	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1167
1168	page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
1169	if (!page) {
1170		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
1171		ret = -ENOMEM;
1172		goto out_clean;
1173	}
1174
1175	data = vmalloc(sizeof(*data) * nr_threads);
1176	if (!data) {
1177		printk(KERN_ERR "PM: Failed to allocate LZO data\n");
1178		ret = -ENOMEM;
1179		goto out_clean;
1180	}
1181	for (thr = 0; thr < nr_threads; thr++)
1182		memset(&data[thr], 0, offsetof(struct dec_data, go));
1183
1184	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1185	if (!crc) {
1186		printk(KERN_ERR "PM: Failed to allocate crc\n");
1187		ret = -ENOMEM;
1188		goto out_clean;
1189	}
1190	memset(crc, 0, offsetof(struct crc_data, go));
1191
1192	/*
1193	 * Start the decompression threads.
1194	 */
1195	for (thr = 0; thr < nr_threads; thr++) {
1196		init_waitqueue_head(&data[thr].go);
1197		init_waitqueue_head(&data[thr].done);
1198
1199		data[thr].thr = kthread_run(lzo_decompress_threadfn,
1200		                            &data[thr],
1201		                            "image_decompress/%u", thr);
1202		if (IS_ERR(data[thr].thr)) {
1203			data[thr].thr = NULL;
1204			printk(KERN_ERR
1205			       "PM: Cannot start decompression threads\n");
1206			ret = -ENOMEM;
1207			goto out_clean;
1208		}
1209	}
1210
1211	/*
1212	 * Start the CRC32 thread.
1213	 */
1214	init_waitqueue_head(&crc->go);
1215	init_waitqueue_head(&crc->done);
1216
1217	handle->crc32 = 0;
1218	crc->crc32 = &handle->crc32;
1219	for (thr = 0; thr < nr_threads; thr++) {
1220		crc->unc[thr] = data[thr].unc;
1221		crc->unc_len[thr] = &data[thr].unc_len;
1222	}
1223
1224	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1225	if (IS_ERR(crc->thr)) {
1226		crc->thr = NULL;
1227		printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
1228		ret = -ENOMEM;
1229		goto out_clean;
1230	}
1231
1232	/*
1233	 * Set the number of pages for read buffering.
1234	 * This is complete guesswork, because we'll only know the real
1235	 * picture once prepare_image() is called, which is much later on
1236	 * during the image load phase. We'll assume the worst case and
1237	 * say that none of the image pages are from high memory.
1238	 */
1239	if (low_free_pages() > snapshot_get_image_size())
1240		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1241	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
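     	/* With 4 KiB pages this buffers between 4 MiB and 32 MiB of reads. */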
1242
1243	for (i = 0; i < read_pages; i++) {
1244		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1245						  __GFP_RECLAIM | __GFP_HIGH :
1246						  __GFP_RECLAIM | __GFP_NOWARN |
1247						  __GFP_NORETRY);
1248
1249		if (!page[i]) {
1250			if (i < LZO_CMP_PAGES) {
1251				ring_size = i;
1252				printk(KERN_ERR
1253				       "PM: Failed to allocate LZO pages\n");
1254				ret = -ENOMEM;
1255				goto out_clean;
1256			} else {
1257				break;
1258			}
1259		}
1260	}
1261	want = ring_size = i;
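
     	/*
     	 * Ring-buffer accounting from here on: @ring is the producer
     	 * index and @pg the consumer index; @asked counts reads still in
     	 * flight, @have pages read and completed, and @want how many more
     	 * pages may be submitted.
     	 */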
1262
1263	printk(KERN_INFO
1264		"PM: Using %u thread(s) for decompression.\n"
1265		"PM: Loading and decompressing image data (%u pages)...\n",
1266		nr_threads, nr_to_read);
1267	m = nr_to_read / 10;
1268	if (!m)
1269		m = 1;
1270	nr_pages = 0;
1271	start = ktime_get();
1272
1273	ret = snapshot_write_next(snapshot);
1274	if (ret <= 0)
1275		goto out_finish;
1276
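     	/*
     	 * @eof becomes 1 when swap_read_page() runs past the end of the
     	 * image, and 2 once the outstanding reads have completed; running
     	 * out of data mid-chunk after that is a hard error.
     	 */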
 1277	for (;;) {
1278		for (i = 0; !eof && i < want; i++) {
1279			ret = swap_read_page(handle, page[ring], &hb);
1280			if (ret) {
1281				/*
1282				 * On real read error, finish. On end of data,
1283				 * set EOF flag and just exit the read loop.
1284				 */
1285				if (handle->cur &&
1286				    handle->cur->entries[handle->k]) {
1287					goto out_finish;
1288				} else {
1289					eof = 1;
1290					break;
1291				}
1292			}
1293			if (++ring >= ring_size)
1294				ring = 0;
1295		}
1296		asked += i;
1297		want -= i;
1298
1299		/*
1300		 * We are out of data, wait for some more.
1301		 */
1302		if (!have) {
1303			if (!asked)
1304				break;
1305
1306			ret = hib_wait_io(&hb);
1307			if (ret)
1308				goto out_finish;
1309			have += asked;
1310			asked = 0;
1311			if (eof)
1312				eof = 2;
1313		}
1314
1315		if (crc->run_threads) {
1316			wait_event(crc->done, atomic_read(&crc->stop));
1317			atomic_set(&crc->stop, 0);
1318			crc->run_threads = 0;
1319		}
1320
1321		for (thr = 0; have && thr < nr_threads; thr++) {
1322			data[thr].cmp_len = *(size_t *)page[pg];
1323			if (unlikely(!data[thr].cmp_len ||
1324			             data[thr].cmp_len >
1325			             lzo1x_worst_compress(LZO_UNC_SIZE))) {
1326				printk(KERN_ERR
1327				       "PM: Invalid LZO compressed length\n");
1328				ret = -1;
1329				goto out_finish;
1330			}
1331
1332			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1333			                    PAGE_SIZE);
1334			if (need > have) {
1335				if (eof > 1) {
1336					ret = -1;
1337					goto out_finish;
1338				}
1339				break;
1340			}
1341
1342			for (off = 0;
1343			     off < LZO_HEADER + data[thr].cmp_len;
1344			     off += PAGE_SIZE) {
1345				memcpy(data[thr].cmp + off,
1346				       page[pg], PAGE_SIZE);
1347				have--;
1348				want++;
1349				if (++pg >= ring_size)
1350					pg = 0;
1351			}
1352
1353			atomic_set(&data[thr].ready, 1);
1354			wake_up(&data[thr].go);
1355		}
1356
1357		/*
1358		 * Wait for more data while we are decompressing.
1359		 */
1360		if (have < LZO_CMP_PAGES && asked) {
1361			ret = hib_wait_io(&hb);
1362			if (ret)
1363				goto out_finish;
1364			have += asked;
1365			asked = 0;
1366			if (eof)
1367				eof = 2;
1368		}
1369
1370		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1371			wait_event(data[thr].done,
1372			           atomic_read(&data[thr].stop));
1373			atomic_set(&data[thr].stop, 0);
1374
1375			ret = data[thr].ret;
1376
1377			if (ret < 0) {
1378				printk(KERN_ERR
1379				       "PM: LZO decompression failed\n");
1380				goto out_finish;
1381			}
1382
1383			if (unlikely(!data[thr].unc_len ||
1384			             data[thr].unc_len > LZO_UNC_SIZE ||
1385			             data[thr].unc_len & (PAGE_SIZE - 1))) {
1386				printk(KERN_ERR
1387				       "PM: Invalid LZO uncompressed length\n");
1388				ret = -1;
1389				goto out_finish;
1390			}
1391
1392			for (off = 0;
1393			     off < data[thr].unc_len; off += PAGE_SIZE) {
1394				memcpy(data_of(*snapshot),
1395				       data[thr].unc + off, PAGE_SIZE);
1396
1397				if (!(nr_pages % m))
1398					printk(KERN_INFO
1399					       "PM: Image loading progress: "
1400					       "%3d%%\n",
1401					       nr_pages / m * 10);
1402				nr_pages++;
1403
1404				ret = snapshot_write_next(snapshot);
1405				if (ret <= 0) {
1406					crc->run_threads = thr + 1;
1407					atomic_set(&crc->ready, 1);
1408					wake_up(&crc->go);
1409					goto out_finish;
1410				}
1411			}
1412		}
1413
1414		crc->run_threads = thr;
1415		atomic_set(&crc->ready, 1);
1416		wake_up(&crc->go);
1417	}
1418
1419out_finish:
1420	if (crc->run_threads) {
1421		wait_event(crc->done, atomic_read(&crc->stop));
1422		atomic_set(&crc->stop, 0);
1423	}
1424	stop = ktime_get();
1425	if (!ret) {
1426		printk(KERN_INFO "PM: Image loading done.\n");
1427		snapshot_write_finalize(snapshot);
1428		if (!snapshot_image_loaded(snapshot))
1429			ret = -ENODATA;
1430		if (!ret) {
1431			if (swsusp_header->flags & SF_CRC32_MODE) {
 1432				if (handle->crc32 != swsusp_header->crc32) {
1433					printk(KERN_ERR
1434					       "PM: Invalid image CRC32!\n");
1435					ret = -ENODATA;
1436				}
1437			}
1438		}
1439	}
1440	swsusp_show_speed(start, stop, nr_to_read, "Read");
1441out_clean:
1442	for (i = 0; i < ring_size; i++)
1443		free_page((unsigned long)page[i]);
1444	if (crc) {
1445		if (crc->thr)
1446			kthread_stop(crc->thr);
1447		kfree(crc);
1448	}
1449	if (data) {
1450		for (thr = 0; thr < nr_threads; thr++)
1451			if (data[thr].thr)
1452				kthread_stop(data[thr].thr);
1453		vfree(data);
1454	}
1455	vfree(page);
1456
1457	return ret;
1458}
1459
1460/**
1461 *	swsusp_read - read the hibernation image.
 1462 *	@flags_p: location to store the flags passed by the "frozen" kernel
 1463 *		  in the image header
1464 */
1465
1466int swsusp_read(unsigned int *flags_p)
1467{
1468	int error;
1469	struct swap_map_handle handle;
1470	struct snapshot_handle snapshot;
1471	struct swsusp_info *header;
1472
1473	memset(&snapshot, 0, sizeof(struct snapshot_handle));
1474	error = snapshot_write_next(&snapshot);
1475	if (error < PAGE_SIZE)
1476		return error < 0 ? error : -EFAULT;
1477	header = (struct swsusp_info *)data_of(snapshot);
1478	error = get_swap_reader(&handle, flags_p);
1479	if (error)
1480		goto end;
 1481	error = swap_read_page(&handle, header, NULL);
1483	if (!error) {
1484		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1485			load_image(&handle, &snapshot, header->pages - 1) :
1486			load_image_lzo(&handle, &snapshot, header->pages - 1);
1487	}
1488	swap_reader_finish(&handle);
1489end:
1490	if (!error)
1491		pr_debug("PM: Image successfully loaded\n");
1492	else
1493		pr_debug("PM: Error %d resuming\n", error);
1494	return error;
1495}
1496
1497/**
1498 *      swsusp_check - Check for swsusp signature in the resume device
1499 */
1500
1501int swsusp_check(void)
1502{
1503	int error;
1504
1505	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1506					    FMODE_READ, NULL);
1507	if (!IS_ERR(hib_resume_bdev)) {
1508		set_blocksize(hib_resume_bdev, PAGE_SIZE);
1509		clear_page(swsusp_header);
1510		error = hib_submit_io(READ_SYNC, swsusp_resume_block,
1511					swsusp_header, NULL);
1512		if (error)
1513			goto put;
1514
1515		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1516			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
 1517			/* Reset the swap signature now so the image cannot be resumed twice */
1518			error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
1519						swsusp_header, NULL);
1520		} else {
1521			error = -EINVAL;
1522		}
1523
1524put:
1525		if (error)
1526			blkdev_put(hib_resume_bdev, FMODE_READ);
1527		else
1528			pr_debug("PM: Image signature found, resuming\n");
1529	} else {
1530		error = PTR_ERR(hib_resume_bdev);
1531	}
1532
1533	if (error)
1534		pr_debug("PM: Image not found (code %d)\n", error);
1535
1536	return error;
1537}
1538
1539/**
1540 *	swsusp_close - close swap device.
1541 */
1542
1543void swsusp_close(fmode_t mode)
1544{
1545	if (IS_ERR(hib_resume_bdev)) {
1546		pr_debug("PM: Image device not initialised\n");
1547		return;
1548	}
1549
1550	blkdev_put(hib_resume_bdev, mode);
1551}
1552
1553/**
1554 *      swsusp_unmark - Unmark swsusp signature in the resume device
1555 */
1556
1557#ifdef CONFIG_SUSPEND
1558int swsusp_unmark(void)
1559{
1560	int error;
1561
1562	hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
 1563	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
 1564		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
1565		error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
1566					swsusp_header, NULL);
1567	} else {
1568		printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
1569		error = -ENODEV;
1570	}
1571
1572	/*
 1573	 * We just returned from suspend; we don't need the image any more.
1574	 */
1575	free_all_swap_pages(root_swap);
1576
1577	return error;
1578}
1579#endif
1580
1581static int swsusp_header_init(void)
1582{
 1583	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
1584	if (!swsusp_header)
1585		panic("Could not allocate memory for swsusp_header\n");
1586	return 0;
1587}
1588
1589core_initcall(swsusp_header_init);