// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG	"S1SUSPEND"

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages cleaning before they
 * can be executed. We don't know which pages these may be, so clean the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures that each contain an array of MAP_PAGE_ENTRIES swap entries.
 * These structures are stored on the swap and linked together with the
 * help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we pick up all swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
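
/*
 * For a concrete feel of the layout (assuming 4 KiB pages and a 64-bit
 * sector_t): each swap_map_page holds PAGE_SIZE / sizeof(sector_t) - 1 =
 * 511 entries plus the trailing .next_swap link, so one map page indexes
 * 511 image pages, i.e. roughly 2 MiB of image data.
 */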

/*
 * Number of free pages that are not high.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}
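
/*
 * The "half of all low pages" margin matters because the asynchronous
 * writes below buffer each outgoing page in a freshly allocated copy;
 * swap_write_page() re-checks low_free_pages() against this threshold
 * and waits for in-flight I/O to complete before eating further into
 * free memory.
 */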

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32)];
	u32	crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;
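
/*
 * Note that struct swsusp_header is sized to cover exactly one page:
 * reserved[] pads everything except the final 20 signature bytes (the
 * literal 20 above), so orig_sig/sig land at the end of the page, which
 * is where the "SWAP-SPACE"/"SWAPSPACE2" swap signature normally lives.
 * mark_swapfiles() below replaces that signature with HIBERNATE_SIG and
 * keeps the original in orig_sig so it can be restored on resume.
 */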

static struct swsusp_header *swsusp_header;

/*
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
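
/*
 * Example: swap offsets are typically handed out in runs, so inserting
 * offsets 100, 101 and 102 in turn grows a single extent [100, 102]
 * through the merge cases above instead of creating three nodes.  Two
 * extents that become adjacent are not coalesced with each other, which
 * is harmless for the error-path cleanup this tree exists for.
 */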

/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(GFP_NOIO | __GFP_HIGH, 1);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
	bio_set_dev(bio, hib_resume_bdev);
	bio_set_op_attrs(bio, op, op_flags);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}
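
/*
 * A sanity check on the arithmetic above: bi_sector counts 512-byte
 * sectors, so page offsets are converted with PAGE_SIZE >> 9 sectors
 * per page; with 4 KiB pages, swap page 3 starts at sector 3 * 8 = 24.
 */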

static int hib_wait_io(struct hib_bio_batch *hb)
{
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}

/*
 * Saving part
 */

static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
			   &hib_resume_bdev);
	if (res < 0)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	/*
	 * Update the resume device to the one actually used,
	 * so the test_resume mode can use it in case it is
	 * invoked from hibernate() to test the snapshot.
	 */
	swsusp_resume_device = hib_resume_bdev->bd_dev;
	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf:	Address we're writing.
 * @offset:	Offset of the swap page we're writing to.
 * @hb:		bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}
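
/*
 * The buffering dance above exists because an asynchronous write must
 * not touch the caller's buffer after write_page() returns, since the
 * caller immediately reuses it for the next page.  Each async write
 * therefore gets its own throwaway copy, which hib_end_io() releases on
 * completion; only if no copy can be allocated at all do we fall back
 * to a synchronous write directly from the caller's buffer.
 */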

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
			   struct hib_bio_batch *hb)
{
	int error = 0;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
			      unsigned int flags, int error)
{
	if (!error) {
		flush_swap_writer(handle);
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192
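
/*
 * To make the sizing concrete (assuming 4 KiB pages): LZO_UNC_SIZE is
 * 32 * 4096 = 128 KiB per chunk, and lzo1x_worst_compress(x) bounds the
 * incompressible worst case at x + x/16 + 64 + 3 bytes, so together
 * with the size_t length header LZO_CMP_PAGES works out to
 * DIV_ROUND_UP(139331 + 8, 4096) = 35 pages per 32-page chunk.
 */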


/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	unsigned run_threads;			/* nr current threads */
	wait_queue_head_t go;			/* start crc update */
	wait_queue_head_t done;			/* crc update done */
	u32 *crc32;				/* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];		/* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];	/* uncompressed data */
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
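
/*
 * All worker threads in this file (CRC32, compression, decompression)
 * use the same hand-rolled handshake: the producer fills the thread's
 * buffers, sets ->ready and wakes ->go; the worker clears ->ready, does
 * its work, sets ->stop and wakes ->done; the consumer waits on ->done
 * and clears ->stop before reusing the slot.  kthread_should_stop() is
 * folded into the same wait so the threads can be torn down cleanly.
 */
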
/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	int ret;				/* return code */
	wait_queue_head_t go;			/* start compression */
	wait_queue_head_t done;			/* compression done */
	size_t unc_len;				/* uncompressed length */
	size_t cmp_len;				/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
					  d->cmp + LZO_HEADER, &d->cmp_len,
					  d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

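	/*
	 * Rough shape of what follows: chunks of LZO_UNC_PAGES snapshot
	 * pages are fanned out to up to nr_threads compressor threads,
	 * the CRC32 thread checksums the same uncompressed buffers in
	 * parallel, and each compressed chunk is written out one page at
	 * a time through the swap map, prefixed with its compressed
	 * length.
	 */
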
	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct cmp_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to umount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * filesystem clean: it is not. (And it does not matter, if we resume
 * correctly, we'll mark system clean, anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
			  struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	int ret;				/* return code */
	wait_queue_head_t go;			/* start decompression */
	wait_queue_head_t done;			/* decompression done */
	size_t unc_len;				/* uncompressed length */
	size_t cmp_len;				/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
};

/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
					       d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
	         have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vmalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	for (thr = 0; thr < nr_threads; thr++)
		memset(&data[thr], 0, offsetof(struct dec_data, go));

	crc = kmalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}
	memset(crc, 0, offsetof(struct crc_data, go));

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;
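
	/*
	 * From here on page[] is used as a ring buffer: "want" counts
	 * empty slots still to be filled by reads, "asked" counts reads
	 * submitted but not yet waited for, "have" counts pages known to
	 * have completed, "ring" is the producer index and "pg" the
	 * consumer index.  Compressed chunks are copied out of the ring
	 * into each thread's cmp[] buffer once enough pages are in.
	 */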

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header should
 *	     be written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	if (!error)
		error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, 0,
					swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/kernel/power/swap.c
4 *
5 * This file provides functions for reading the suspend image from
6 * and writing it to a swap partition.
7 *
8 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
9 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
10 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
11 */
12
13#define pr_fmt(fmt) "PM: " fmt
14
15#include <linux/module.h>
16#include <linux/file.h>
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/device.h>
20#include <linux/bio.h>
21#include <linux/blkdev.h>
22#include <linux/swap.h>
23#include <linux/swapops.h>
24#include <linux/pm.h>
25#include <linux/slab.h>
26#include <linux/lzo.h>
27#include <linux/vmalloc.h>
28#include <linux/cpumask.h>
29#include <linux/atomic.h>
30#include <linux/kthread.h>
31#include <linux/crc32.h>
32#include <linux/ktime.h>
33
34#include "power.h"
35
36#define HIBERNATE_SIG "S1SUSPEND"
37
38u32 swsusp_hardware_signature;
39
40/*
41 * When reading an {un,}compressed image, we may restore pages in place,
42 * in which case some architectures need these pages cleaning before they
43 * can be executed. We don't know which pages these may be, so clean the lot.
44 */
45static bool clean_pages_on_read;
46static bool clean_pages_on_decompress;
47
48/*
49 * The swap map is a data structure used for keeping track of each page
50 * written to a swap partition. It consists of many swap_map_page
51 * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
52 * These structures are stored on the swap and linked together with the
53 * help of the .next_swap member.
54 *
55 * The swap map is created during suspend. The swap map pages are
56 * allocated and populated one at a time, so we only need one memory
57 * page to set up the entire structure.
58 *
59 * During resume we pick up all swap_map_page structures into a list.
60 */
61
62#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
63
64/*
65 * Number of free pages that are not high.
66 */
67static inline unsigned long low_free_pages(void)
68{
69 return nr_free_pages() - nr_free_highpages();
70}
71
72/*
73 * Number of pages required to be kept free while writing the image. Always
74 * half of all available low pages before the writing starts.
75 */
76static inline unsigned long reqd_free_pages(void)
77{
78 return low_free_pages() / 2;
79}
80
81struct swap_map_page {
82 sector_t entries[MAP_PAGE_ENTRIES];
83 sector_t next_swap;
84};
85
86struct swap_map_page_list {
87 struct swap_map_page *map;
88 struct swap_map_page_list *next;
89};
90
91/*
92 * The swap_map_handle structure is used for handling swap in
93 * a file-alike way
94 */
95
96struct swap_map_handle {
97 struct swap_map_page *cur;
98 struct swap_map_page_list *maps;
99 sector_t cur_swap;
100 sector_t first_sector;
101 unsigned int k;
102 unsigned long reqd_free_pages;
103 u32 crc32;
104};
105
106struct swsusp_header {
107 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
108 sizeof(u32) - sizeof(u32)];
109 u32 hw_sig;
110 u32 crc32;
111 sector_t image;
112 unsigned int flags; /* Flags to pass to the "boot" kernel */
113 char orig_sig[10];
114 char sig[10];
115} __packed;
116
117static struct swsusp_header *swsusp_header;
118
119/*
120 * The following functions are used for tracing the allocated
121 * swap pages, so that they can be freed in case of an error.
122 */
123
124struct swsusp_extent {
125 struct rb_node node;
126 unsigned long start;
127 unsigned long end;
128};
129
130static struct rb_root swsusp_extents = RB_ROOT;
131
132static int swsusp_extents_insert(unsigned long swap_offset)
133{
134 struct rb_node **new = &(swsusp_extents.rb_node);
135 struct rb_node *parent = NULL;
136 struct swsusp_extent *ext;
137
138 /* Figure out where to put the new node */
139 while (*new) {
140 ext = rb_entry(*new, struct swsusp_extent, node);
141 parent = *new;
142 if (swap_offset < ext->start) {
143 /* Try to merge */
144 if (swap_offset == ext->start - 1) {
145 ext->start--;
146 return 0;
147 }
148 new = &((*new)->rb_left);
149 } else if (swap_offset > ext->end) {
150 /* Try to merge */
151 if (swap_offset == ext->end + 1) {
152 ext->end++;
153 return 0;
154 }
155 new = &((*new)->rb_right);
156 } else {
157 /* It already is in the tree */
158 return -EINVAL;
159 }
160 }
161 /* Add the new node and rebalance the tree. */
162 ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
163 if (!ext)
164 return -ENOMEM;
165
166 ext->start = swap_offset;
167 ext->end = swap_offset;
168 rb_link_node(&ext->node, parent, new);
169 rb_insert_color(&ext->node, &swsusp_extents);
170 return 0;
171}
172
173/*
174 * alloc_swapdev_block - allocate a swap page and register that it has
175 * been allocated, so that it can be freed in case of an error.
176 */
177
178sector_t alloc_swapdev_block(int swap)
179{
180 unsigned long offset;
181
182 offset = swp_offset(get_swap_page_of_type(swap));
183 if (offset) {
184 if (swsusp_extents_insert(offset))
185 swap_free(swp_entry(swap, offset));
186 else
187 return swapdev_block(swap, offset);
188 }
189 return 0;
190}
191
192/*
193 * free_all_swap_pages - free swap pages allocated for saving image data.
194 * It also frees the extents used to register which swap entries had been
195 * allocated.
196 */
197
198void free_all_swap_pages(int swap)
199{
200 struct rb_node *node;
201
202 while ((node = swsusp_extents.rb_node)) {
203 struct swsusp_extent *ext;
204 unsigned long offset;
205
206 ext = rb_entry(node, struct swsusp_extent, node);
207 rb_erase(node, &swsusp_extents);
208 for (offset = ext->start; offset <= ext->end; offset++)
209 swap_free(swp_entry(swap, offset));
210
211 kfree(ext);
212 }
213}
214
215int swsusp_swap_in_use(void)
216{
217 return (swsusp_extents.rb_node != NULL);
218}
219
220/*
221 * General things
222 */
223
224static unsigned short root_swap = 0xffff;
225static struct bdev_handle *hib_resume_bdev_handle;
226
227struct hib_bio_batch {
228 atomic_t count;
229 wait_queue_head_t wait;
230 blk_status_t error;
231 struct blk_plug plug;
232};
233
234static void hib_init_batch(struct hib_bio_batch *hb)
235{
236 atomic_set(&hb->count, 0);
237 init_waitqueue_head(&hb->wait);
238 hb->error = BLK_STS_OK;
239 blk_start_plug(&hb->plug);
240}
241
242static void hib_finish_batch(struct hib_bio_batch *hb)
243{
244 blk_finish_plug(&hb->plug);
245}
246
247static void hib_end_io(struct bio *bio)
248{
249 struct hib_bio_batch *hb = bio->bi_private;
250 struct page *page = bio_first_page_all(bio);
251
252 if (bio->bi_status) {
253 pr_alert("Read-error on swap-device (%u:%u:%Lu)\n",
254 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
255 (unsigned long long)bio->bi_iter.bi_sector);
256 }
257
258 if (bio_data_dir(bio) == WRITE)
259 put_page(page);
260 else if (clean_pages_on_read)
261 flush_icache_range((unsigned long)page_address(page),
262 (unsigned long)page_address(page) + PAGE_SIZE);
263
264 if (bio->bi_status && !hb->error)
265 hb->error = bio->bi_status;
266 if (atomic_dec_and_test(&hb->count))
267 wake_up(&hb->wait);
268
269 bio_put(bio);
270}
271
272static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
273 struct hib_bio_batch *hb)
274{
275 struct page *page = virt_to_page(addr);
276 struct bio *bio;
277 int error = 0;
278
279 bio = bio_alloc(hib_resume_bdev_handle->bdev, 1, opf,
280 GFP_NOIO | __GFP_HIGH);
281 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
282
283 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
284 pr_err("Adding page to bio failed at %llu\n",
285 (unsigned long long)bio->bi_iter.bi_sector);
286 bio_put(bio);
287 return -EFAULT;
288 }
289
290 if (hb) {
291 bio->bi_end_io = hib_end_io;
292 bio->bi_private = hb;
293 atomic_inc(&hb->count);
294 submit_bio(bio);
295 } else {
296 error = submit_bio_wait(bio);
297 bio_put(bio);
298 }
299
300 return error;
301}
302
303static int hib_wait_io(struct hib_bio_batch *hb)
304{
305 /*
306 * We are relying on the behavior of blk_plug that a thread with
307 * a plug will flush the plug list before sleeping.
308 */
309 wait_event(hb->wait, atomic_read(&hb->count) == 0);
310 return blk_status_to_errno(hb->error);
311}
312
313/*
314 * Saving part
315 */
316static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
317{
318 int error;
319
320 hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
321 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
322 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
323 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
324 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
325 swsusp_header->image = handle->first_sector;
326 if (swsusp_hardware_signature) {
327 swsusp_header->hw_sig = swsusp_hardware_signature;
328 flags |= SF_HW_SIG;
329 }
330 swsusp_header->flags = flags;
331 if (flags & SF_CRC32_MODE)
332 swsusp_header->crc32 = handle->crc32;
333 error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
334 swsusp_resume_block, swsusp_header, NULL);
335 } else {
336 pr_err("Swap header not found!\n");
337 error = -ENODEV;
338 }
339 return error;
340}
341
342/**
343 * swsusp_swap_check - check if the resume device is a swap device
344 * and get its index (if so)
345 *
346 * This is called before saving image
347 */
348static int swsusp_swap_check(void)
349{
350 int res;
351
352 if (swsusp_resume_device)
353 res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
354 else
355 res = find_first_swap(&swsusp_resume_device);
356 if (res < 0)
357 return res;
358 root_swap = res;
359
360 hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
361 BLK_OPEN_WRITE, NULL, NULL);
362 if (IS_ERR(hib_resume_bdev_handle))
363 return PTR_ERR(hib_resume_bdev_handle);
364
365 res = set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
366 if (res < 0)
367 bdev_release(hib_resume_bdev_handle);
368
369 return res;
370}
371
372/**
373 * write_page - Write one page to given swap location.
374 * @buf: Address we're writing.
375 * @offset: Offset of the swap page we're writing to.
376 * @hb: bio completion batch
377 */
378
379static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
380{
381 void *src;
382 int ret;
383
384 if (!offset)
385 return -ENOSPC;
386
387 if (hb) {
388 src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
389 __GFP_NORETRY);
390 if (src) {
391 copy_page(src, buf);
392 } else {
393 ret = hib_wait_io(hb); /* Free pages */
394 if (ret)
395 return ret;
396 src = (void *)__get_free_page(GFP_NOIO |
397 __GFP_NOWARN |
398 __GFP_NORETRY);
399 if (src) {
400 copy_page(src, buf);
401 } else {
402 WARN_ON_ONCE(1);
403 hb = NULL; /* Go synchronous */
404 src = buf;
405 }
406 }
407 } else {
408 src = buf;
409 }
410 return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
411}
412
413static void release_swap_writer(struct swap_map_handle *handle)
414{
415 if (handle->cur)
416 free_page((unsigned long)handle->cur);
417 handle->cur = NULL;
418}
419
420static int get_swap_writer(struct swap_map_handle *handle)
421{
422 int ret;
423
424 ret = swsusp_swap_check();
425 if (ret) {
426 if (ret != -ENOSPC)
427 pr_err("Cannot find swap device, try swapon -a\n");
428 return ret;
429 }
430 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
431 if (!handle->cur) {
432 ret = -ENOMEM;
433 goto err_close;
434 }
435 handle->cur_swap = alloc_swapdev_block(root_swap);
436 if (!handle->cur_swap) {
437 ret = -ENOSPC;
438 goto err_rel;
439 }
440 handle->k = 0;
441 handle->reqd_free_pages = reqd_free_pages();
442 handle->first_sector = handle->cur_swap;
443 return 0;
444err_rel:
445 release_swap_writer(handle);
446err_close:
447 swsusp_close();
448 return ret;
449}
450
451static int swap_write_page(struct swap_map_handle *handle, void *buf,
452 struct hib_bio_batch *hb)
453{
454 int error;
455 sector_t offset;
456
457 if (!handle->cur)
458 return -EINVAL;
459 offset = alloc_swapdev_block(root_swap);
460 error = write_page(buf, offset, hb);
461 if (error)
462 return error;
463 handle->cur->entries[handle->k++] = offset;
464 if (handle->k >= MAP_PAGE_ENTRIES) {
465 offset = alloc_swapdev_block(root_swap);
466 if (!offset)
467 return -ENOSPC;
468 handle->cur->next_swap = offset;
469 error = write_page(handle->cur, handle->cur_swap, hb);
470 if (error)
471 goto out;
472 clear_page(handle->cur);
473 handle->cur_swap = offset;
474 handle->k = 0;
475
476 if (hb && low_free_pages() <= handle->reqd_free_pages) {
477 error = hib_wait_io(hb);
478 if (error)
479 goto out;
480 /*
481 * Recalculate the number of required free pages, to
482 * make sure we never take more than half.
483 */
484 handle->reqd_free_pages = reqd_free_pages();
485 }
486 }
487 out:
488 return error;
489}
490
491static int flush_swap_writer(struct swap_map_handle *handle)
492{
493 if (handle->cur && handle->cur_swap)
494 return write_page(handle->cur, handle->cur_swap, NULL);
495 else
496 return -EINVAL;
497}
498
499static int swap_writer_finish(struct swap_map_handle *handle,
500 unsigned int flags, int error)
501{
502 if (!error) {
503 pr_info("S");
504 error = mark_swapfiles(handle, flags);
505 pr_cont("|\n");
506 flush_swap_writer(handle);
507 }
508
509 if (error)
510 free_all_swap_pages(root_swap);
511 release_swap_writer(handle);
512 swsusp_close();
513
514 return error;
515}
516
517/* We need to remember how much compressed data we need to read. */
518#define LZO_HEADER sizeof(size_t)
519
520/* Number of pages/bytes we'll compress at one time. */
521#define LZO_UNC_PAGES 32
522#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
523
524/* Number of pages/bytes we need for compressed data (worst case). */
525#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
526 LZO_HEADER, PAGE_SIZE)
527#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
528
529/* Maximum number of threads for compression/decompression. */
530#define LZO_THREADS 3
531
532/* Minimum/maximum number of pages for read buffering. */
533#define LZO_MIN_RD_PAGES 1024
534#define LZO_MAX_RD_PAGES 8192
535
536
537/**
538 * save_image - save the suspend image data
539 */
540
541static int save_image(struct swap_map_handle *handle,
542 struct snapshot_handle *snapshot,
543 unsigned int nr_to_write)
544{
545 unsigned int m;
546 int ret;
547 int nr_pages;
548 int err2;
549 struct hib_bio_batch hb;
550 ktime_t start;
551 ktime_t stop;
552
553 hib_init_batch(&hb);
554
555 pr_info("Saving image data pages (%u pages)...\n",
556 nr_to_write);
557 m = nr_to_write / 10;
558 if (!m)
559 m = 1;
560 nr_pages = 0;
561 start = ktime_get();
562 while (1) {
563 ret = snapshot_read_next(snapshot);
564 if (ret <= 0)
565 break;
566 ret = swap_write_page(handle, data_of(*snapshot), &hb);
567 if (ret)
568 break;
569 if (!(nr_pages % m))
570 pr_info("Image saving progress: %3d%%\n",
571 nr_pages / m * 10);
572 nr_pages++;
573 }
574 err2 = hib_wait_io(&hb);
575 hib_finish_batch(&hb);
576 stop = ktime_get();
577 if (!ret)
578 ret = err2;
579 if (!ret)
580 pr_info("Image saving done\n");
581 swsusp_show_speed(start, stop, nr_to_write, "Wrote");
582 return ret;
583}
584
585/*
586 * Structure used for CRC32.
587 */
588struct crc_data {
589 struct task_struct *thr; /* thread */
590 atomic_t ready; /* ready to start flag */
591 atomic_t stop; /* ready to stop flag */
592 unsigned run_threads; /* nr current threads */
593 wait_queue_head_t go; /* start crc update */
594 wait_queue_head_t done; /* crc update done */
595 u32 *crc32; /* points to handle's crc32 */
596 size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
597 unsigned char *unc[LZO_THREADS]; /* uncompressed data */
598};
599
/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start compression */
	wait_queue_head_t done;                   /* compression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];  /* compression workspace */
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
					  d->cmp + LZO_HEADER, &d->cmp_len,
					  d->wrk);
		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for compression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
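	/*
	 * num_online_cpus() - 1 may be zero on a uniprocessor system, so the
	 * clamp guarantees at least one worker and at most LZO_THREADS.
	 */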

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set_release(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set_release(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read_acquire(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

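			/*
			 * Prepend the compressed length, so the reader knows
			 * how many bytes make up this unit.
			 */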
			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than full page. This is
			 * OK - we saved the length of the compressed data, so
			 * any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read_acquire(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 * @nr_pages: Number of image data pages to write.
 *
 * Returns true if the amount of swap space available from the resume
 * partition is sufficient for saving the image, false otherwise.
 */
static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

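	/*
	 * PAGES_FOR_IO reserves headroom for pages that the block I/O path
	 * itself may need while the image is being written out.
	 */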
	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to unmount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * the filesystems clean: they are not. (And it does not matter; if we
 * resume correctly, we'll mark the system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
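	/*
	 * Walk the on-disk chain of swap_map_page structures via their
	 * .next_swap links, copying each one into a freshly allocated page
	 * on the handle->maps list.
	 */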
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Snapshot handle to copy the image data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for ( ; ; ) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;                  /* thread */
	atomic_t ready;                           /* ready to start flag */
	atomic_t stop;                            /* ready to stop flag */
	int ret;                                  /* return code */
	wait_queue_head_t go;                     /* start decompression */
	wait_queue_head_t done;                   /* decompression done */
	size_t unc_len;                           /* uncompressed length */
	size_t cmp_len;                           /* compressed length */
	unsigned char unc[LZO_UNC_SIZE];          /* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];          /* compressed buffer */
};

/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
					       d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress them with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
		 have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of threads for decompression to limit memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

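	/*
	 * The loop below maintains a ring of read-ahead pages: 'want' slots
	 * are free for submission, 'asked' reads are in flight, and 'have'
	 * pages hold completed data ready for the decompressors.  'eof' is
	 * 0 while data remains, 1 once the end of the image has been seen,
	 * and 2 once the in-flight reads have completed as well.
	 */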
	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read_acquire(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set_release(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read_acquire(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set_release(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set_release(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read_acquire(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: Memory location to store the flags passed by the "frozen"
 *	kernel in the image header.
 */
int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

static void *swsusp_holder;

/**
 * swsusp_check - Open the resume device and check for the swsusp signature.
 * @exclusive: Open the resume device exclusively.
 */
int swsusp_check(bool exclusive)
{
	void *holder = exclusive ? &swsusp_holder : NULL;
	int error;

	hib_resume_bdev_handle = bdev_open_by_dev(swsusp_resume_device,
						  BLK_OPEN_READ, holder, NULL);
	if (!IS_ERR(hib_resume_bdev_handle)) {
		set_blocksize(hib_resume_bdev_handle->bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
				      swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}
		if (!error && swsusp_header->flags & SF_HW_SIG &&
		    swsusp_header->hw_sig != swsusp_hardware_signature) {
			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
				swsusp_header->hw_sig, swsusp_hardware_signature);
			error = -EINVAL;
		}

put:
		if (error)
			bdev_release(hib_resume_bdev_handle);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev_handle);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close resume device.
 */
void swsusp_close(void)
{
	if (IS_ERR(hib_resume_bdev_handle)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	bdev_release(hib_resume_bdev_handle);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend, we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);