// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG "S1SUSPEND"

u32 swsusp_hardware_signature;

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages to be cleaned before
 * they can be executed. We don't know which pages these may be, so clean
 * the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries. These structures are stored in swap and linked together with
 * the help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we read all the swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
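
/*
 * For example, with 4 KiB pages and a 64-bit sector_t this works out to
 * 4096 / 8 - 1 = 511 entries, so each swap_map_page fills exactly one
 * page: 511 sectors of image data plus the link to the next map page.
 */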

/*
 * Number of free pages that are not in high memory.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32) - sizeof(u32)];
	u32 hw_sig;
	u32 crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;
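
/*
 * The reserved[] member pads the structure to exactly PAGE_SIZE bytes so
 * that it overlays the first page of the swap area; the constant 20
 * accounts for orig_sig[] and sig[] (10 bytes each). With 4 KiB pages
 * and a 64-bit sector_t, for instance, that leaves
 * 4096 - 20 - 8 - 4 - 4 - 4 = 4056 bytes of padding.
 */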

static struct swsusp_header *swsusp_header;

/*
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}
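
/*
 * Offsets are coalesced as they are inserted: after inserting 10, 11 and
 * 12, for example, the tree holds a single [10, 12] extent, and inserting
 * any of them again fails with -EINVAL.
 */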

/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
	struct blk_plug		plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("I/O error on swap device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(hib_resume_bdev, 1, opf, GFP_NOIO | __GFP_HIGH);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}
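
/*
 * Typical usage, as in save_image() below (an illustrative sketch;
 * "more_to_write" stands for the real loop condition):
 *
 *	struct hib_bio_batch hb;
 *
 *	hib_init_batch(&hb);
 *	while (more_to_write)
 *		hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, addr, &hb);
 *	error = hib_wait_io(&hb);
 *	hib_finish_batch(&hb);
 */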

/*
 * Saving part
 */
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		if (swsusp_hardware_signature) {
			swsusp_header->hw_sig = swsusp_hardware_signature;
			flags |= SF_HW_SIG;
		}
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_WRITE,
					    NULL);
	if (IS_ERR(hib_resume_bdev))
		return PTR_ERR(hib_resume_bdev);

	res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
	if (res < 0)
		blkdev_put(hib_resume_bdev, FMODE_WRITE);

	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close(FMODE_WRITE);
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
			   struct hib_bio_batch *hb)
{
	int error;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
			      unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close(FMODE_WRITE);

	return error;
}

/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES	32
#define LZO_UNC_SIZE	(LZO_UNC_PAGES * PAGE_SIZE)

/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES	DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
				     LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE	(LZO_CMP_PAGES * PAGE_SIZE)
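
/*
 * For example, with 4 KiB pages LZO_UNC_SIZE is 128 KiB, and
 * lzo1x_worst_compress(x) evaluates to x + x/16 + 64 + 3 = 139331 bytes;
 * together with LZO_HEADER this needs 35 pages, so LZO_CMP_SIZE is
 * 143360 bytes.
 */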

/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS	3

/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES	1024
#define LZO_MAX_RD_PAGES	8192

/**
 * save_image - save the suspend image data
 */

static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}

/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;	/* thread */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	unsigned run_threads;		/* nr current threads */
	wait_queue_head_t go;		/* start crc update */
	wait_queue_head_t done;		/* crc update done */
	u32 *crc32;			/* points to handle's crc32 */
	size_t *unc_len[LZO_THREADS];	/* uncompressed lengths */
	unsigned char *unc[LZO_THREADS];	/* uncompressed data */
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
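
/*
 * All the worker threads in this file use the same ready/go, stop/done
 * handshake with the main thread (a sketch of the pattern above):
 *
 *	main thread				worker thread
 *	-----------				-------------
 *	atomic_set(&d->ready, 1);
 *	wake_up(&d->go);	->	wait_event(d->go, ready || stop req.);
 *					atomic_set(&d->ready, 0);
 *					... do one batch of work ...
 *					atomic_set(&d->stop, 1);
 *	wait_event(d->done, stop);  <-	wake_up(&d->done);
 *	atomic_set(&d->stop, 0);
 */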

/*
 * Structure used for LZO data compression.
 */
struct cmp_data {
	struct task_struct *thr;	/* thread */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	int ret;			/* return code */
	wait_queue_head_t go;		/* start compression */
	wait_queue_head_t done;		/* compression done */
	size_t unc_len;			/* uncompressed length */
	size_t cmp_len;			/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
	unsigned char wrk[LZO1X_1_MEM_COMPRESS];	/* compression workspace */
};

/*
 * Compression function that runs in its own thread.
 */
static int lzo_compress_threadfn(void *data)
{
	struct cmp_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->ret = lzo1x_1_compress(d->unc, d->unc_len,
					  d->cmp + LZO_HEADER, &d->cmp_len,
					  d->wrk);
		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * save_image_lzo - Save the suspend image data compressed with LZO.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of compression threads to bound the
	 * memory footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for compression\n", nr_threads);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO compression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than a full page. This
			 * is OK - we saved the length of the compressed data,
			 * so any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}

/**
 * enough_swap - Make sure we have enough swap to save the image.
 *
 * Returns TRUE or FALSE after checking the total amount of swap
 * space available from the resume partition.
 */

static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to unmount filesystems at this point. We want
 * them synced (in case something goes wrong) but we DO NOT want to mark
 * the filesystems clean: they are not. (And it does not matter; if we
 * resume correctly, we'll mark the system clean anyway.)
 */

int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_image_lzo(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
			   unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* how can this happen? */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}

static int swap_read_page(struct swap_map_handle *handle, void *buf,
			  struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - load the image using the swap map handle
 * @handle and the snapshot handle @snapshot
 * (assume there are @nr_to_read pages to load)
 */

static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/*
 * Structure used for LZO data decompression.
 */
struct dec_data {
	struct task_struct *thr;	/* thread */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	int ret;			/* return code */
	wait_queue_head_t go;		/* start decompression */
	wait_queue_head_t done;		/* decompression done */
	size_t unc_len;			/* uncompressed length */
	size_t cmp_len;			/* compressed length */
	unsigned char unc[LZO_UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[LZO_CMP_SIZE];	/* compressed buffer */
};

/*
 * Decompression function that runs in its own thread.
 */
static int lzo_decompress_threadfn(void *data)
{
	struct dec_data *d = data;

	while (1) {
		wait_event(d->go, atomic_read(&d->ready) ||
				  kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		d->unc_len = LZO_UNC_SIZE;
		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
					       d->unc, &d->unc_len);
		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_image_lzo - Load compressed image data and decompress it with LZO.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image_lzo(struct swap_map_handle *handle,
			  struct snapshot_handle *snapshot,
			  unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
	unsigned ring = 0, pg = 0, ring_size = 0,
		 have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * We'll limit the number of decompression threads to bound the
	 * memory footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);

	page = vmalloc(array_size(LZO_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate LZO page\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate LZO data\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].thr = kthread_run(lzo_decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering.
	 * This is complete guesswork, because we'll only know the real
	 * picture once prepare_image() is called, which is much later on
	 * during the image load phase. We'll assume the worst case and
	 * say that none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < LZO_CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate LZO pages\n");
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;
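
	/*
	 * From here on the ring of read buffers is managed with three
	 * counters: "want" is how many more pages we want reads submitted
	 * for, "asked" is how many reads are in flight, and "have" is how
	 * many pages have completed and are ready for decompression.
	 */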

	pr_info("Using %u thread(s) for decompression\n", nr_threads);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data, wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     lzo1x_worst_compress(LZO_UNC_SIZE))) {
				pr_err("Invalid LZO compressed length\n");
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < LZO_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < LZO_CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("LZO decompression failed\n");
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > LZO_UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid LZO uncompressed length\n");
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		snapshot_write_finalize(snapshot);
		if (!snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++)
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - read the hibernation image.
 * @flags_p: flags passed by the "frozen" kernel in the image header are
 *	written into this memory location
 */

int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_image_lzo(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

/**
 * swsusp_check - Check for swsusp signature in the resume device
 */

int swsusp_check(void)
{
	int error;
	void *holder;

	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ | FMODE_EXCL, &holder);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
				      swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
					      swsusp_resume_block,
					      swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}
		if (!error && swsusp_header->flags & SF_HW_SIG &&
		    swsusp_header->hw_sig != swsusp_hardware_signature) {
			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
				swsusp_header->hw_sig, swsusp_hardware_signature);
			error = -EINVAL;
		}

put:
		if (error)
			blkdev_put(hib_resume_bdev, FMODE_READ | FMODE_EXCL);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - close swap device.
 */

void swsusp_close(fmode_t mode)
{
	if (IS_ERR(hib_resume_bdev)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	blkdev_put(hib_resume_bdev, mode);
}

/**
 * swsusp_unmark - Unmark swsusp signature in the resume device
 */

#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block,
		      swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block,
				      swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend; we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/power/swap.c
 *
 * This file provides functions for reading the suspend image from
 * and writing it to a swap partition.
 *
 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
 */

#define pr_fmt(fmt) "PM: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>

#include "power.h"

#define HIBERNATE_SIG "S1SUSPEND"

u32 swsusp_hardware_signature;

/*
 * When reading an {un,}compressed image, we may restore pages in place,
 * in which case some architectures need these pages to be cleaned before
 * they can be executed. We don't know which pages these may be, so clean
 * the lot.
 */
static bool clean_pages_on_read;
static bool clean_pages_on_decompress;

/*
 * The swap map is a data structure used for keeping track of each page
 * written to a swap partition. It consists of many swap_map_page
 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 * entries. These structures are stored in swap and linked together with
 * the help of the .next_swap member.
 *
 * The swap map is created during suspend. The swap map pages are
 * allocated and populated one at a time, so we only need one memory
 * page to set up the entire structure.
 *
 * During resume we read all the swap_map_page structures into a list.
 */

#define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)

/*
 * Number of free pages that are not in high memory.
 */
static inline unsigned long low_free_pages(void)
{
	return nr_free_pages() - nr_free_highpages();
}

/*
 * Number of pages required to be kept free while writing the image. Always
 * half of all available low pages before the writing starts.
 */
static inline unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

struct swap_map_page {
	sector_t entries[MAP_PAGE_ENTRIES];
	sector_t next_swap;
};

struct swap_map_page_list {
	struct swap_map_page *map;
	struct swap_map_page_list *next;
};

/*
 * The swap_map_handle structure is used for handling swap in
 * a file-like way.
 */

struct swap_map_handle {
	struct swap_map_page *cur;
	struct swap_map_page_list *maps;
	sector_t cur_swap;
	sector_t first_sector;
	unsigned int k;
	unsigned long reqd_free_pages;
	u32 crc32;
};

struct swsusp_header {
	char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
		      sizeof(u32) - sizeof(u32)];
	u32 hw_sig;
	u32 crc32;
	sector_t image;
	unsigned int flags;	/* Flags to pass to the "boot" kernel */
	char	orig_sig[10];
	char	sig[10];
} __packed;

static struct swsusp_header *swsusp_header;

/*
 * The following functions are used for tracking the allocated
 * swap pages, so that they can be freed in case of an error.
 */

struct swsusp_extent {
	struct rb_node node;
	unsigned long start;
	unsigned long end;
};

static struct rb_root swsusp_extents = RB_ROOT;

static int swsusp_extents_insert(unsigned long swap_offset)
{
	struct rb_node **new = &(swsusp_extents.rb_node);
	struct rb_node *parent = NULL;
	struct swsusp_extent *ext;

	/* Figure out where to put the new node */
	while (*new) {
		ext = rb_entry(*new, struct swsusp_extent, node);
		parent = *new;
		if (swap_offset < ext->start) {
			/* Try to merge */
			if (swap_offset == ext->start - 1) {
				ext->start--;
				return 0;
			}
			new = &((*new)->rb_left);
		} else if (swap_offset > ext->end) {
			/* Try to merge */
			if (swap_offset == ext->end + 1) {
				ext->end++;
				return 0;
			}
			new = &((*new)->rb_right);
		} else {
			/* It already is in the tree */
			return -EINVAL;
		}
	}
	/* Add the new node and rebalance the tree. */
	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
	if (!ext)
		return -ENOMEM;

	ext->start = swap_offset;
	ext->end = swap_offset;
	rb_link_node(&ext->node, parent, new);
	rb_insert_color(&ext->node, &swsusp_extents);
	return 0;
}

/*
 * alloc_swapdev_block - allocate a swap page and register that it has
 * been allocated, so that it can be freed in case of an error.
 */

sector_t alloc_swapdev_block(int swap)
{
	unsigned long offset;

	offset = swp_offset(get_swap_page_of_type(swap));
	if (offset) {
		if (swsusp_extents_insert(offset))
			swap_free(swp_entry(swap, offset));
		else
			return swapdev_block(swap, offset);
	}
	return 0;
}

/*
 * free_all_swap_pages - free swap pages allocated for saving image data.
 * It also frees the extents used to register which swap entries had been
 * allocated.
 */

void free_all_swap_pages(int swap)
{
	struct rb_node *node;

	while ((node = swsusp_extents.rb_node)) {
		struct swsusp_extent *ext;
		unsigned long offset;

		ext = rb_entry(node, struct swsusp_extent, node);
		rb_erase(node, &swsusp_extents);
		for (offset = ext->start; offset <= ext->end; offset++)
			swap_free(swp_entry(swap, offset));

		kfree(ext);
	}
}

int swsusp_swap_in_use(void)
{
	return (swsusp_extents.rb_node != NULL);
}

/*
 * General things
 */

static unsigned short root_swap = 0xffff;
static struct file *hib_resume_bdev_file;

struct hib_bio_batch {
	atomic_t		count;
	wait_queue_head_t	wait;
	blk_status_t		error;
	struct blk_plug		plug;
};

static void hib_init_batch(struct hib_bio_batch *hb)
{
	atomic_set(&hb->count, 0);
	init_waitqueue_head(&hb->wait);
	hb->error = BLK_STS_OK;
	blk_start_plug(&hb->plug);
}

static void hib_finish_batch(struct hib_bio_batch *hb)
{
	blk_finish_plug(&hb->plug);
}

static void hib_end_io(struct bio *bio)
{
	struct hib_bio_batch *hb = bio->bi_private;
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		pr_alert("I/O error on swap device (%u:%u:%Lu)\n",
			 MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
			 (unsigned long long)bio->bi_iter.bi_sector);
	}

	if (bio_data_dir(bio) == WRITE)
		put_page(page);
	else if (clean_pages_on_read)
		flush_icache_range((unsigned long)page_address(page),
				   (unsigned long)page_address(page) + PAGE_SIZE);

	if (bio->bi_status && !hb->error)
		hb->error = bio->bi_status;
	if (atomic_dec_and_test(&hb->count))
		wake_up(&hb->wait);

	bio_put(bio);
}

static int hib_submit_io(blk_opf_t opf, pgoff_t page_off, void *addr,
			 struct hib_bio_batch *hb)
{
	struct page *page = virt_to_page(addr);
	struct bio *bio;
	int error = 0;

	bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,
			GFP_NOIO | __GFP_HIGH);
	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);

	if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
		pr_err("Adding page to bio failed at %llu\n",
		       (unsigned long long)bio->bi_iter.bi_sector);
		bio_put(bio);
		return -EFAULT;
	}

	if (hb) {
		bio->bi_end_io = hib_end_io;
		bio->bi_private = hb;
		atomic_inc(&hb->count);
		submit_bio(bio);
	} else {
		error = submit_bio_wait(bio);
		bio_put(bio);
	}

	return error;
}

static int hib_wait_io(struct hib_bio_batch *hb)
{
	/*
	 * We are relying on the behavior of blk_plug that a thread with
	 * a plug will flush the plug list before sleeping.
	 */
	wait_event(hb->wait, atomic_read(&hb->count) == 0);
	return blk_status_to_errno(hb->error);
}

/*
 * Saving part
 */
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block, swsusp_header, NULL);
	if (!memcmp("SWAP-SPACE", swsusp_header->sig, 10) ||
	    !memcmp("SWAPSPACE2", swsusp_header->sig, 10)) {
		memcpy(swsusp_header->orig_sig, swsusp_header->sig, 10);
		memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
		swsusp_header->image = handle->first_sector;
		if (swsusp_hardware_signature) {
			swsusp_header->hw_sig = swsusp_hardware_signature;
			flags |= SF_HW_SIG;
		}
		swsusp_header->flags = flags;
		if (flags & SF_CRC32_MODE)
			swsusp_header->crc32 = handle->crc32;
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
				      swsusp_resume_block, swsusp_header, NULL);
	} else {
		pr_err("Swap header not found!\n");
		error = -ENODEV;
	}
	return error;
}

/*
 * Holds the swsusp_header flags. This is used in software_resume() in
 * kernel/power/hibernate.c to check whether the image is compressed and,
 * if so, to query support for the compression algorithm.
 */
unsigned int swsusp_header_flags;

/**
 * swsusp_swap_check - check if the resume device is a swap device
 * and get its index (if so)
 *
 * This is called before saving the image.
 */
static int swsusp_swap_check(void)
{
	int res;

	if (swsusp_resume_device)
		res = swap_type_of(swsusp_resume_device, swsusp_resume_block);
	else
		res = find_first_swap(&swsusp_resume_device);
	if (res < 0)
		return res;
	root_swap = res;

	hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
						     BLK_OPEN_WRITE, NULL, NULL);
	if (IS_ERR(hib_resume_bdev_file))
		return PTR_ERR(hib_resume_bdev_file);

	res = set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE);
	if (res < 0)
		fput(hib_resume_bdev_file);

	return res;
}

/**
 * write_page - Write one page to given swap location.
 * @buf: Address we're writing.
 * @offset: Offset of the swap page we're writing to.
 * @hb: bio completion batch
 */

static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
	void *src;
	int ret;

	if (!offset)
		return -ENOSPC;

	if (hb) {
		src = (void *)__get_free_page(GFP_NOIO | __GFP_NOWARN |
					      __GFP_NORETRY);
		if (src) {
			copy_page(src, buf);
		} else {
			ret = hib_wait_io(hb); /* Free pages */
			if (ret)
				return ret;
			src = (void *)__get_free_page(GFP_NOIO |
						      __GFP_NOWARN |
						      __GFP_NORETRY);
			if (src) {
				copy_page(src, buf);
			} else {
				WARN_ON_ONCE(1);
				hb = NULL;	/* Go synchronous */
				src = buf;
			}
		}
	} else {
		src = buf;
	}
	return hib_submit_io(REQ_OP_WRITE | REQ_SYNC, offset, src, hb);
}

static void release_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur)
		free_page((unsigned long)handle->cur);
	handle->cur = NULL;
}

static int get_swap_writer(struct swap_map_handle *handle)
{
	int ret;

	ret = swsusp_swap_check();
	if (ret) {
		if (ret != -ENOSPC)
			pr_err("Cannot find swap device, try swapon -a\n");
		return ret;
	}
	handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
	if (!handle->cur) {
		ret = -ENOMEM;
		goto err_close;
	}
	handle->cur_swap = alloc_swapdev_block(root_swap);
	if (!handle->cur_swap) {
		ret = -ENOSPC;
		goto err_rel;
	}
	handle->k = 0;
	handle->reqd_free_pages = reqd_free_pages();
	handle->first_sector = handle->cur_swap;
	return 0;
err_rel:
	release_swap_writer(handle);
err_close:
	swsusp_close();
	return ret;
}

static int swap_write_page(struct swap_map_handle *handle, void *buf,
			   struct hib_bio_batch *hb)
{
	int error;
	sector_t offset;

	if (!handle->cur)
		return -EINVAL;
	offset = alloc_swapdev_block(root_swap);
	error = write_page(buf, offset, hb);
	if (error)
		return error;
	handle->cur->entries[handle->k++] = offset;
	if (handle->k >= MAP_PAGE_ENTRIES) {
		offset = alloc_swapdev_block(root_swap);
		if (!offset)
			return -ENOSPC;
		handle->cur->next_swap = offset;
		error = write_page(handle->cur, handle->cur_swap, hb);
		if (error)
			goto out;
		clear_page(handle->cur);
		handle->cur_swap = offset;
		handle->k = 0;

		if (hb && low_free_pages() <= handle->reqd_free_pages) {
			error = hib_wait_io(hb);
			if (error)
				goto out;
			/*
			 * Recalculate the number of required free pages, to
			 * make sure we never take more than half.
			 */
			handle->reqd_free_pages = reqd_free_pages();
		}
	}
 out:
	return error;
}

static int flush_swap_writer(struct swap_map_handle *handle)
{
	if (handle->cur && handle->cur_swap)
		return write_page(handle->cur, handle->cur_swap, NULL);
	else
		return -EINVAL;
}

static int swap_writer_finish(struct swap_map_handle *handle,
			      unsigned int flags, int error)
{
	if (!error) {
		pr_info("S");
		error = mark_swapfiles(handle, flags);
		pr_cont("|\n");
		flush_swap_writer(handle);
	}

	if (error)
		free_all_swap_pages(root_swap);
	release_swap_writer(handle);
	swsusp_close();

	return error;
}

/*
 * Bytes we need for compressed data in the worst case. We assume (as a
 * limitation) that this is the worst case among all the compression
 * algorithms.
 */
#define bytes_worst_compress(x)	((x) + ((x) / 16) + 64 + 3 + 2)

/* We need to remember how much compressed data we need to read. */
#define CMP_HEADER	sizeof(size_t)

/* Number of pages/bytes we'll compress at one time. */
#define UNC_PAGES	32
#define UNC_SIZE	(UNC_PAGES * PAGE_SIZE)

/* Number of pages we need for compressed data (worst case). */
#define CMP_PAGES	DIV_ROUND_UP(bytes_worst_compress(UNC_SIZE) + \
				     CMP_HEADER, PAGE_SIZE)
#define CMP_SIZE	(CMP_PAGES * PAGE_SIZE)
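
/*
 * For example, with 4 KiB pages UNC_SIZE is 128 KiB, so
 * bytes_worst_compress(UNC_SIZE) is 131072 + 8192 + 64 + 3 + 2 = 139333
 * bytes; together with CMP_HEADER this needs 35 pages, making CMP_SIZE
 * 143360 bytes.
 */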
540
541/* Maximum number of threads for compression/decompression. */
542#define CMP_THREADS 3
543
544/* Minimum/maximum number of pages for read buffering. */
545#define CMP_MIN_RD_PAGES 1024
546#define CMP_MAX_RD_PAGES 8192

/**
 * save_image - Save the suspend image data.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_write)
{
	unsigned int m;
	int ret;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;

	hib_init_batch(&hb);

	pr_info("Saving image data pages (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	while (1) {
		ret = snapshot_read_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_write_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image saving progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	return ret;
}
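
/*
 * Note that swap_write_page() queues its writes on the hib_bio_batch
 * asynchronously; errors from bios still in flight only surface in the
 * final hib_wait_io(), which is why err2 is folded into ret above.
 */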

/*
 * Structure used for CRC32.
 */
struct crc_data {
	struct task_struct *thr;		/* thread */
	atomic_t ready;				/* ready to start flag */
	atomic_t stop;				/* ready to stop flag */
	unsigned run_threads;			/* nr current threads */
	wait_queue_head_t go;			/* start crc update */
	wait_queue_head_t done;			/* crc update done */
	u32 *crc32;				/* points to handle's crc32 */
	size_t *unc_len[CMP_THREADS];		/* uncompressed lengths */
	unsigned char *unc[CMP_THREADS];	/* uncompressed data */
};

/*
 * CRC32 update function that runs in its own thread.
 */
static int crc32_threadfn(void *data)
{
	struct crc_data *d = data;
	unsigned i;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		for (i = 0; i < d->run_threads; i++)
			*d->crc32 = crc32_le(*d->crc32,
					     d->unc[i], *d->unc_len[i]);
		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
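
/*
 * A minimal sketch, kept out of the build, of the submitting side of
 * the handshake above; example_kick_and_wait() is a hypothetical helper
 * mirroring what save_compressed_image() does below. The release/acquire
 * pairing makes the worker's stores visible to the submitter once it
 * observes the stop flag.
 */
#if 0
static void example_kick_and_wait(struct crc_data *d)
{
	atomic_set_release(&d->ready, 1);	/* publish the work */
	wake_up(&d->go);			/* wake the worker */

	wait_event(d->done, atomic_read_acquire(&d->stop));
	atomic_set(&d->stop, 0);		/* re-arm for the next round */
}
#endif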

/*
 * Structure used for data compression.
 */
struct cmp_data {
	struct task_struct *thr;	/* thread */
	struct crypto_comp *cc;		/* crypto compressor stream */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	int ret;			/* return code */
	wait_queue_head_t go;		/* start compression */
	wait_queue_head_t done;		/* compression done */
	size_t unc_len;			/* uncompressed length */
	size_t cmp_len;			/* compressed length */
	unsigned char unc[UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[CMP_SIZE];	/* compressed buffer */
};

/* Indicates the image size after compression */
static atomic_t compressed_size = ATOMIC_INIT(0);

/*
 * Compression function that runs in its own thread.
 */
static int compress_threadfn(void *data)
{
	struct cmp_data *d = data;
	unsigned int cmp_len = 0;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		cmp_len = CMP_SIZE - CMP_HEADER;
		d->ret = crypto_comp_compress(d->cc, d->unc, d->unc_len,
					      d->cmp + CMP_HEADER,
					      &cmp_len);
		d->cmp_len = cmp_len;

		/* multiple threads update this, so the add must be atomic */
		atomic_add(d->cmp_len, &compressed_size);
		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}
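
/*
 * A minimal sketch, kept out of the build, of the legacy crypto_comp
 * API used above: allocate a transform for an algorithm such as "lzo",
 * compress one buffer, and free the transform. example_compress() and
 * its parameters are hypothetical.
 */
#if 0
static int example_compress(const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *cc = crypto_alloc_comp("lzo", 0, 0);
	int ret;

	if (IS_ERR_OR_NULL(cc))
		return -EFAULT;

	/* on success, *dlen is updated to the compressed length */
	ret = crypto_comp_compress(cc, src, slen, dst, dlen);
	crypto_free_comp(cc);
	return ret;
}
#endif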

/**
 * save_compressed_image - Save the suspend image data after compression.
 * @handle: Swap map handle to use for saving the image.
 * @snapshot: Image to read data from.
 * @nr_to_write: Number of pages to save.
 */
static int save_compressed_image(struct swap_map_handle *handle,
				 struct snapshot_handle *snapshot,
				 unsigned int nr_to_write)
{
	unsigned int m;
	int ret = 0;
	int nr_pages;
	int err2;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	size_t off;
	unsigned thr, run_threads, nr_threads;
	unsigned char *page = NULL;
	struct cmp_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	atomic_set(&compressed_size, 0);

	/*
	 * Limit the number of compression threads to bound the memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
		pr_err("Failed to allocate %s page\n", hib_comp_algo);
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate %s data\n", hib_comp_algo);
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Start the compression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
		if (IS_ERR_OR_NULL(data[thr].cc)) {
			pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
			ret = -EFAULT;
			goto out_clean;
		}

		data[thr].thr = kthread_run(compress_threadfn,
					    &data[thr],
					    "image_compress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start compression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Adjust the number of required free pages after all allocations have
	 * been done. We don't want to run out of pages when writing.
	 */
	handle->reqd_free_pages = reqd_free_pages();

	pr_info("Using %u thread(s) for %s compression\n", nr_threads, hib_comp_algo);
	pr_info("Compressing and saving image data (%u pages)...\n",
		nr_to_write);
	m = nr_to_write / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		for (thr = 0; thr < nr_threads; thr++) {
			for (off = 0; off < UNC_SIZE; off += PAGE_SIZE) {
				ret = snapshot_read_next(snapshot);
				if (ret < 0)
					goto out_finish;

				if (!ret)
					break;

				memcpy(data[thr].unc + off,
				       data_of(*snapshot), PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image saving progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;
			}
			if (!off)
				break;

			data[thr].unc_len = off;

			atomic_set_release(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		if (!thr)
			break;

		crc->run_threads = thr;
		atomic_set_release(&crc->ready, 1);
		wake_up(&crc->go);

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read_acquire(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("%s compression failed\n", hib_comp_algo);
				goto out_finish;
			}

			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     bytes_worst_compress(data[thr].unc_len))) {
				pr_err("Invalid %s compressed length\n", hib_comp_algo);
				ret = -1;
				goto out_finish;
			}

			*(size_t *)data[thr].cmp = data[thr].cmp_len;

			/*
			 * Given we are writing one page at a time to disk, we
			 * copy that much from the buffer, although the last
			 * bit will likely be smaller than a full page. This
			 * is OK - we saved the length of the compressed data,
			 * so any garbage at the end will be discarded when we
			 * read it.
			 */
			for (off = 0;
			     off < CMP_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(page, data[thr].cmp + off, PAGE_SIZE);

				ret = swap_write_page(handle, page, &hb);
				if (ret)
					goto out_finish;
			}
		}

		wait_event(crc->done, atomic_read_acquire(&crc->stop));
		atomic_set(&crc->stop, 0);
	}

out_finish:
	err2 = hib_wait_io(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret)
		pr_info("Image saving done\n");
	swsusp_show_speed(start, stop, nr_to_write, "Wrote");
	pr_info("Image size after compression: %d kbytes\n",
		(atomic_read(&compressed_size) / 1024));

out_clean:
	hib_finish_batch(&hb);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++) {
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
			if (data[thr].cc)
				crypto_free_comp(data[thr].cc);
		}
		vfree(data);
	}
	if (page)
		free_page((unsigned long)page);

	return ret;
}
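
/*
 * Each compressed chunk is stored in the image as a size_t length header
 * (CMP_HEADER bytes) immediately followed by the compressed data, rounded
 * up to whole pages; the reader uses the recorded length to discard the
 * garbage tail of the last page.
 */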

/**
 * enough_swap - Make sure we have enough swap to save the image.
 * @nr_pages: Number of image pages to store.
 *
 * Returns true if the total amount of swap space available from the
 * resume partition exceeds the number of image pages plus the pages
 * reserved for I/O (PAGES_FOR_IO), and false otherwise.
 */
static int enough_swap(unsigned int nr_pages)
{
	unsigned int free_swap = count_swap_pages(root_swap, 1);
	unsigned int required;

	pr_debug("Free swap pages: %u\n", free_swap);

	required = PAGES_FOR_IO + nr_pages;
	return free_swap > required;
}

/**
 * swsusp_write - Write the entire image and metadata.
 * @flags: flags to pass to the "boot" kernel in the image header
 *
 * It is important _NOT_ to unmount filesystems at this point. We want
 * them synced (in case something goes wrong), but we DO NOT want to
 * mark the filesystems clean: they are not. (And it does not matter;
 * if we resume correctly, we'll mark the system clean anyway.)
 */
int swsusp_write(unsigned int flags)
{
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;
	unsigned long pages;
	int error;

	pages = snapshot_get_image_size();
	error = get_swap_writer(&handle);
	if (error) {
		pr_err("Cannot get swap writer\n");
		return error;
	}
	if (flags & SF_NOCOMPRESS_MODE) {
		if (!enough_swap(pages)) {
			pr_err("Not enough free swap\n");
			error = -ENOSPC;
			goto out_finish;
		}
	}
	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_read_next(&snapshot);
	if (error < (int)PAGE_SIZE) {
		if (error >= 0)
			error = -EFAULT;

		goto out_finish;
	}
	header = (struct swsusp_info *)data_of(snapshot);
	error = swap_write_page(&handle, header, NULL);
	if (!error) {
		error = (flags & SF_NOCOMPRESS_MODE) ?
			save_image(&handle, &snapshot, pages - 1) :
			save_compressed_image(&handle, &snapshot, pages - 1);
	}
out_finish:
	error = swap_writer_finish(&handle, flags, error);
	return error;
}

/*
 * The following functions allow us to read data using a swap map
 * in a file-like way.
 */

static void release_swap_reader(struct swap_map_handle *handle)
{
	struct swap_map_page_list *tmp;

	while (handle->maps) {
		if (handle->maps->map)
			free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
	}
	handle->cur = NULL;
}

static int get_swap_reader(struct swap_map_handle *handle,
		unsigned int *flags_p)
{
	int error;
	struct swap_map_page_list *tmp, *last;
	sector_t offset;

	*flags_p = swsusp_header->flags;

	if (!swsusp_header->image) /* sanity check: a zero sector means no image */
		return -EINVAL;

	handle->cur = NULL;
	last = handle->maps = NULL;
	offset = swsusp_header->image;
	while (offset) {
		tmp = kzalloc(sizeof(*handle->maps), GFP_KERNEL);
		if (!tmp) {
			release_swap_reader(handle);
			return -ENOMEM;
		}
		if (!handle->maps)
			handle->maps = tmp;
		if (last)
			last->next = tmp;
		last = tmp;

		tmp->map = (struct swap_map_page *)
			   __get_free_page(GFP_NOIO | __GFP_HIGH);
		if (!tmp->map) {
			release_swap_reader(handle);
			return -ENOMEM;
		}

		error = hib_submit_io(REQ_OP_READ, offset, tmp->map, NULL);
		if (error) {
			release_swap_reader(handle);
			return error;
		}
		offset = tmp->map->next_swap;
	}
	handle->k = 0;
	handle->cur = handle->maps->map;
	return 0;
}
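
/*
 * A minimal sketch, kept out of the build, of walking the in-memory map
 * list built by get_swap_reader(); example_count_entries() is a
 * hypothetical helper. Unused entries in the last map page are zero, so
 * counting the nonzero entries gives the number of data pages.
 */
#if 0
static unsigned long example_count_entries(struct swap_map_handle *handle)
{
	struct swap_map_page_list *p;
	unsigned long nr = 0;
	unsigned int i;

	for (p = handle->maps; p; p = p->next)
		for (i = 0; i < MAP_PAGE_ENTRIES; i++)
			if (p->map->entries[i])
				nr++;

	return nr;
}
#endif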

static int swap_read_page(struct swap_map_handle *handle, void *buf,
		struct hib_bio_batch *hb)
{
	sector_t offset;
	int error;
	struct swap_map_page_list *tmp;

	if (!handle->cur)
		return -EINVAL;
	offset = handle->cur->entries[handle->k];
	if (!offset)
		return -EFAULT;
	error = hib_submit_io(REQ_OP_READ, offset, buf, hb);
	if (error)
		return error;
	if (++handle->k >= MAP_PAGE_ENTRIES) {
		handle->k = 0;
		free_page((unsigned long)handle->maps->map);
		tmp = handle->maps;
		handle->maps = handle->maps->next;
		kfree(tmp);
		if (!handle->maps)
			release_swap_reader(handle);
		else
			handle->cur = handle->maps->map;
	}
	return error;
}

static int swap_reader_finish(struct swap_map_handle *handle)
{
	release_swap_reader(handle);

	return 0;
}

/**
 * load_image - Load the image using the swap map handle.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy the data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_image(struct swap_map_handle *handle,
		      struct snapshot_handle *snapshot,
		      unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	ktime_t start;
	ktime_t stop;
	struct hib_bio_batch hb;
	int err2;
	unsigned nr_pages;

	hib_init_batch(&hb);

	clean_pages_on_read = true;
	pr_info("Loading image data pages (%u pages)...\n", nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();
	for (;;) {
		ret = snapshot_write_next(snapshot);
		if (ret <= 0)
			break;
		ret = swap_read_page(handle, data_of(*snapshot), &hb);
		if (ret)
			break;
		if (snapshot->sync_read)
			ret = hib_wait_io(&hb);
		if (ret)
			break;
		if (!(nr_pages % m))
			pr_info("Image loading progress: %3d%%\n",
				nr_pages / m * 10);
		nr_pages++;
	}
	err2 = hib_wait_io(&hb);
	hib_finish_batch(&hb);
	stop = ktime_get();
	if (!ret)
		ret = err2;
	if (!ret) {
		pr_info("Image loading done\n");
		ret = snapshot_write_finalize(snapshot);
		if (!ret && !snapshot_image_loaded(snapshot))
			ret = -ENODATA;
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
	return ret;
}

/*
 * Structure used for data decompression.
 */
struct dec_data {
	struct task_struct *thr;	/* thread */
	struct crypto_comp *cc;		/* crypto compressor stream */
	atomic_t ready;			/* ready to start flag */
	atomic_t stop;			/* ready to stop flag */
	int ret;			/* return code */
	wait_queue_head_t go;		/* start decompression */
	wait_queue_head_t done;		/* decompression done */
	size_t unc_len;			/* uncompressed length */
	size_t cmp_len;			/* compressed length */
	unsigned char unc[UNC_SIZE];	/* uncompressed buffer */
	unsigned char cmp[CMP_SIZE];	/* compressed buffer */
};

/*
 * Decompression function that runs in its own thread.
 */
static int decompress_threadfn(void *data)
{
	struct dec_data *d = data;
	unsigned int unc_len = 0;

	while (1) {
		wait_event(d->go, atomic_read_acquire(&d->ready) ||
			   kthread_should_stop());
		if (kthread_should_stop()) {
			d->thr = NULL;
			d->ret = -1;
			atomic_set_release(&d->stop, 1);
			wake_up(&d->done);
			break;
		}
		atomic_set(&d->ready, 0);

		unc_len = UNC_SIZE;
		d->ret = crypto_comp_decompress(d->cc, d->cmp + CMP_HEADER, d->cmp_len,
						d->unc, &unc_len);
		d->unc_len = unc_len;

		if (clean_pages_on_decompress)
			flush_icache_range((unsigned long)d->unc,
					   (unsigned long)d->unc + d->unc_len);

		atomic_set_release(&d->stop, 1);
		wake_up(&d->done);
	}
	return 0;
}

/**
 * load_compressed_image - Load compressed image data and decompress it.
 * @handle: Swap map handle to use for loading data.
 * @snapshot: Image to copy uncompressed data into.
 * @nr_to_read: Number of pages to load.
 */
static int load_compressed_image(struct swap_map_handle *handle,
				 struct snapshot_handle *snapshot,
				 unsigned int nr_to_read)
{
	unsigned int m;
	int ret = 0;
	int eof = 0;
	struct hib_bio_batch hb;
	ktime_t start;
	ktime_t stop;
	unsigned nr_pages;
	size_t off;
	unsigned i, thr, run_threads, nr_threads;
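	/*
	 * Read-ahead ring bookkeeping: ring is the slot the next read
	 * lands in, pg is the next page handed to a decompressor, have
	 * counts pages whose I/O has completed, asked counts reads that
	 * have been submitted but not yet waited for, want is how many
	 * more reads may still be issued, and need is the number of
	 * pages the current compressed chunk spans.
	 */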
	unsigned ring = 0, pg = 0, ring_size = 0,
		 have = 0, want, need, asked = 0;
	unsigned long read_pages = 0;
	unsigned char **page = NULL;
	struct dec_data *data = NULL;
	struct crc_data *crc = NULL;

	hib_init_batch(&hb);

	/*
	 * Limit the number of decompression threads to bound the memory
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);

	page = vmalloc(array_size(CMP_MAX_RD_PAGES, sizeof(*page)));
	if (!page) {
		pr_err("Failed to allocate %s page\n", hib_comp_algo);
		ret = -ENOMEM;
		goto out_clean;
	}

	data = vzalloc(array_size(nr_threads, sizeof(*data)));
	if (!data) {
		pr_err("Failed to allocate %s data\n", hib_comp_algo);
		ret = -ENOMEM;
		goto out_clean;
	}

	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	clean_pages_on_decompress = true;

	/*
	 * Start the decompression threads.
	 */
	for (thr = 0; thr < nr_threads; thr++) {
		init_waitqueue_head(&data[thr].go);
		init_waitqueue_head(&data[thr].done);

		data[thr].cc = crypto_alloc_comp(hib_comp_algo, 0, 0);
		if (IS_ERR_OR_NULL(data[thr].cc)) {
			pr_err("Could not allocate comp stream %ld\n", PTR_ERR(data[thr].cc));
			ret = -EFAULT;
			goto out_clean;
		}

		data[thr].thr = kthread_run(decompress_threadfn,
					    &data[thr],
					    "image_decompress/%u", thr);
		if (IS_ERR(data[thr].thr)) {
			data[thr].thr = NULL;
			pr_err("Cannot start decompression threads\n");
			ret = -ENOMEM;
			goto out_clean;
		}
	}

	/*
	 * Start the CRC32 thread.
	 */
	init_waitqueue_head(&crc->go);
	init_waitqueue_head(&crc->done);

	handle->crc32 = 0;
	crc->crc32 = &handle->crc32;
	for (thr = 0; thr < nr_threads; thr++) {
		crc->unc[thr] = data[thr].unc;
		crc->unc_len[thr] = &data[thr].unc_len;
	}

	crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
	if (IS_ERR(crc->thr)) {
		crc->thr = NULL;
		pr_err("Cannot start CRC32 thread\n");
		ret = -ENOMEM;
		goto out_clean;
	}

	/*
	 * Set the number of pages for read buffering. This is complete
	 * guesswork, because we'll only know the real picture once
	 * prepare_image() is called, which is much later on during the
	 * image load phase. We'll assume the worst case and say that
	 * none of the image pages are from high memory.
	 */
	if (low_free_pages() > snapshot_get_image_size())
		read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
	read_pages = clamp_val(read_pages, CMP_MIN_RD_PAGES, CMP_MAX_RD_PAGES);
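
	/*
	 * Example: with 300000 free low pages and a 100000-page image,
	 * half the surplus is 100000 pages, which the clamp above cuts
	 * down to CMP_MAX_RD_PAGES (8192).
	 */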

	for (i = 0; i < read_pages; i++) {
		page[i] = (void *)__get_free_page(i < CMP_PAGES ?
						  GFP_NOIO | __GFP_HIGH :
						  GFP_NOIO | __GFP_NOWARN |
						  __GFP_NORETRY);

		if (!page[i]) {
			if (i < CMP_PAGES) {
				ring_size = i;
				pr_err("Failed to allocate %s pages\n", hib_comp_algo);
				ret = -ENOMEM;
				goto out_clean;
			} else {
				break;
			}
		}
	}
	want = ring_size = i;

	pr_info("Using %u thread(s) for %s decompression\n", nr_threads, hib_comp_algo);
	pr_info("Loading and decompressing image data (%u pages)...\n",
		nr_to_read);
	m = nr_to_read / 10;
	if (!m)
		m = 1;
	nr_pages = 0;
	start = ktime_get();

	ret = snapshot_write_next(snapshot);
	if (ret <= 0)
		goto out_finish;

	for (;;) {
		for (i = 0; !eof && i < want; i++) {
			ret = swap_read_page(handle, page[ring], &hb);
			if (ret) {
				/*
				 * On real read error, finish. On end of data,
				 * set EOF flag and just exit the read loop.
				 */
				if (handle->cur &&
				    handle->cur->entries[handle->k]) {
					goto out_finish;
				} else {
					eof = 1;
					break;
				}
			}
			if (++ring >= ring_size)
				ring = 0;
		}
		asked += i;
		want -= i;

		/*
		 * We are out of data; wait for some more.
		 */
		if (!have) {
			if (!asked)
				break;

			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		if (crc->run_threads) {
			wait_event(crc->done, atomic_read_acquire(&crc->stop));
			atomic_set(&crc->stop, 0);
			crc->run_threads = 0;
		}

		for (thr = 0; have && thr < nr_threads; thr++) {
			data[thr].cmp_len = *(size_t *)page[pg];
			if (unlikely(!data[thr].cmp_len ||
				     data[thr].cmp_len >
				     bytes_worst_compress(UNC_SIZE))) {
				pr_err("Invalid %s compressed length\n", hib_comp_algo);
				ret = -1;
				goto out_finish;
			}

			need = DIV_ROUND_UP(data[thr].cmp_len + CMP_HEADER,
					    PAGE_SIZE);
			if (need > have) {
				if (eof > 1) {
					ret = -1;
					goto out_finish;
				}
				break;
			}

			for (off = 0;
			     off < CMP_HEADER + data[thr].cmp_len;
			     off += PAGE_SIZE) {
				memcpy(data[thr].cmp + off,
				       page[pg], PAGE_SIZE);
				have--;
				want++;
				if (++pg >= ring_size)
					pg = 0;
			}

			atomic_set_release(&data[thr].ready, 1);
			wake_up(&data[thr].go);
		}

		/*
		 * Wait for more data while we are decompressing.
		 */
		if (have < CMP_PAGES && asked) {
			ret = hib_wait_io(&hb);
			if (ret)
				goto out_finish;
			have += asked;
			asked = 0;
			if (eof)
				eof = 2;
		}

		for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
			wait_event(data[thr].done,
				   atomic_read_acquire(&data[thr].stop));
			atomic_set(&data[thr].stop, 0);

			ret = data[thr].ret;

			if (ret < 0) {
				pr_err("%s decompression failed\n", hib_comp_algo);
				goto out_finish;
			}

			if (unlikely(!data[thr].unc_len ||
				     data[thr].unc_len > UNC_SIZE ||
				     data[thr].unc_len & (PAGE_SIZE - 1))) {
				pr_err("Invalid %s uncompressed length\n", hib_comp_algo);
				ret = -1;
				goto out_finish;
			}

			for (off = 0;
			     off < data[thr].unc_len; off += PAGE_SIZE) {
				memcpy(data_of(*snapshot),
				       data[thr].unc + off, PAGE_SIZE);

				if (!(nr_pages % m))
					pr_info("Image loading progress: %3d%%\n",
						nr_pages / m * 10);
				nr_pages++;

				ret = snapshot_write_next(snapshot);
				if (ret <= 0) {
					crc->run_threads = thr + 1;
					atomic_set_release(&crc->ready, 1);
					wake_up(&crc->go);
					goto out_finish;
				}
			}
		}

		crc->run_threads = thr;
		atomic_set_release(&crc->ready, 1);
		wake_up(&crc->go);
	}

out_finish:
	if (crc->run_threads) {
		wait_event(crc->done, atomic_read_acquire(&crc->stop));
		atomic_set(&crc->stop, 0);
	}
	stop = ktime_get();
	if (!ret) {
		pr_info("Image loading done\n");
		ret = snapshot_write_finalize(snapshot);
		if (!ret && !snapshot_image_loaded(snapshot))
			ret = -ENODATA;
		if (!ret) {
			if (swsusp_header->flags & SF_CRC32_MODE) {
				if (handle->crc32 != swsusp_header->crc32) {
					pr_err("Invalid image CRC32!\n");
					ret = -ENODATA;
				}
			}
		}
	}
	swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
	if (crc) {
		if (crc->thr)
			kthread_stop(crc->thr);
		kfree(crc);
	}
	if (data) {
		for (thr = 0; thr < nr_threads; thr++) {
			if (data[thr].thr)
				kthread_stop(data[thr].thr);
			if (data[thr].cc)
				crypto_free_comp(data[thr].cc);
		}
		vfree(data);
	}
	vfree(page);

	return ret;
}

/**
 * swsusp_read - Read the hibernation image.
 * @flags_p: Location to store the flags passed by the "frozen" kernel
 *	in the image header.
 */
int swsusp_read(unsigned int *flags_p)
{
	int error;
	struct swap_map_handle handle;
	struct snapshot_handle snapshot;
	struct swsusp_info *header;

	memset(&snapshot, 0, sizeof(struct snapshot_handle));
	error = snapshot_write_next(&snapshot);
	if (error < (int)PAGE_SIZE)
		return error < 0 ? error : -EFAULT;
	header = (struct swsusp_info *)data_of(snapshot);
	error = get_swap_reader(&handle, flags_p);
	if (error)
		goto end;
	error = swap_read_page(&handle, header, NULL);
	if (!error) {
		error = (*flags_p & SF_NOCOMPRESS_MODE) ?
			load_image(&handle, &snapshot, header->pages - 1) :
			load_compressed_image(&handle, &snapshot, header->pages - 1);
	}
	swap_reader_finish(&handle);
end:
	if (!error)
		pr_debug("Image successfully loaded\n");
	else
		pr_debug("Error %d resuming\n", error);
	return error;
}

static void *swsusp_holder;

/**
 * swsusp_check - Open the resume device and check for the swsusp signature.
 * @exclusive: Open the resume device exclusively.
 */
int swsusp_check(bool exclusive)
{
	void *holder = exclusive ? &swsusp_holder : NULL;
	int error;

	hib_resume_bdev_file = bdev_file_open_by_dev(swsusp_resume_device,
				BLK_OPEN_READ, holder, NULL);
	if (!IS_ERR(hib_resume_bdev_file)) {
		set_blocksize(file_bdev(hib_resume_bdev_file), PAGE_SIZE);
		clear_page(swsusp_header);
		error = hib_submit_io(REQ_OP_READ, swsusp_resume_block,
					swsusp_header, NULL);
		if (error)
			goto put;

		if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
			memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
			swsusp_header_flags = swsusp_header->flags;
			/* Reset swap signature now */
			error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
						swsusp_resume_block,
						swsusp_header, NULL);
		} else {
			error = -EINVAL;
		}
		if (!error && swsusp_header->flags & SF_HW_SIG &&
		    swsusp_header->hw_sig != swsusp_hardware_signature) {
			pr_info("Suspend image hardware signature mismatch (%08x now %08x); aborting resume.\n",
				swsusp_header->hw_sig, swsusp_hardware_signature);
			error = -EINVAL;
		}

put:
		if (error)
			fput(hib_resume_bdev_file);
		else
			pr_debug("Image signature found, resuming\n");
	} else {
		error = PTR_ERR(hib_resume_bdev_file);
	}

	if (error)
		pr_debug("Image not found (code %d)\n", error);

	return error;
}

/**
 * swsusp_close - Close the resume device.
 */
void swsusp_close(void)
{
	if (IS_ERR(hib_resume_bdev_file)) {
		pr_debug("Image device not initialised\n");
		return;
	}

	fput(hib_resume_bdev_file);
}

/**
 * swsusp_unmark - Unmark the swsusp signature in the resume device.
 */
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
	int error;

	hib_submit_io(REQ_OP_READ, swsusp_resume_block,
			swsusp_header, NULL);
	if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
		memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
		error = hib_submit_io(REQ_OP_WRITE | REQ_SYNC,
					swsusp_resume_block,
					swsusp_header, NULL);
	} else {
		pr_err("Cannot find swsusp signature!\n");
		error = -ENODEV;
	}

	/*
	 * We just returned from suspend; we don't need the image any more.
	 */
	free_all_swap_pages(root_swap);

	return error;
}
#endif

static int __init swsusp_header_init(void)
{
	swsusp_header = (struct swsusp_header *)__get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
	return 0;
}

core_initcall(swsusp_header_init);