// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for x86-64
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot) &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

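	/*
	 * Map the entry page as a 2M executable page at restore_jump_address
	 * and link the upper levels down to the new PMD.
	 */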
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));

		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}

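/*
 * Page-table allocation callback for kernel_ident_mapping_init(); safe pages
 * are guaranteed not to collide with the hibernation image contents.
 */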
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

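/*
 * Build throwaway page tables for the restore phase: a text mapping for the
 * final jump into the image kernel plus an identity mapping of all ranges in
 * pfn_mapped[] for the relocated restore code to run from.
 */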
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page = alloc_pgt_page,
		.page_flag = __PAGE_KERNEL_LARGE_EXEC,
		.offset = __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

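	/* Physical address of the new top level; restore_image() loads it into CR3 */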
	temp_pgt = __pa(pgd);
	return 0;
}

asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

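/* Physical address of the temporary page tables, loaded into CR3 by restore_image() */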
unsigned long temp_level4_pgt __visible;

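/* Address of the safe page holding the relocated copy of core_restore_code */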
unsigned long relocated_restore_code __visible;

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */
	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	set_pgd(pgd + pgd_index(restore_jump_address),
		__pgd(__pa(pud) | _KERNPG_TABLE));

	return 0;
}

static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page = alloc_pgt_page,
		.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
		.offset = __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);
	return 0;
}

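/*
 * Copy core_restore_code to a safe page so that it survives while the image
 * kernel's memory contents are restored, and clear _PAGE_NX so the copy is
 * executable through the identity mapping.
 */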
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	pud_t *pud;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
	pud = pud_offset(pgd, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
	} else {
		pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

		if (pmd_large(*pmd)) {
			set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		} else {
			pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

			set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
		}
	}
	__flush_tlb_all();

	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
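	/* nosave_begin_pfn rounds down, nosave_end_pfn rounds up to full pages */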
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

#define MD5_DIGEST_SIZE 16

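/*
 * Architecture-specific part of the hibernation image header: the image
 * kernel's entry point (virtual and physical), the pre-hibernation CR3,
 * a header format magic and an MD5 digest of the e820 memory map.
 */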
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};

#define RESTORE_MAGIC 0x23456789ABCDEF01UL

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate the MD5 digest of a given e820 map
 *
 * @map: the e820 map to be digested
 * @buf: buffer to store the resulting MD5 digest in
 */
static int get_e820_md5(struct e820map *map, void *buf)
{
	struct scatterlist sg;
	struct crypto_ahash *tfm;
	int size;
	int ret = 0;

	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return -ENOMEM;

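	/*
	 * AHASH_REQUEST_ON_STACK() declares the request on the stack, hence
	 * the extra scope block; the request is zeroed rather than freed.
	 */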
	{
		AHASH_REQUEST_ON_STACK(req, tfm);
		size = offsetof(struct e820map, map)
			+ sizeof(struct e820entry) * map->nr_map;
		ahash_request_set_tfm(req, tfm);
		sg_init_one(&sg, (u8 *)map, size);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, buf, size);

		if (crypto_ahash_digest(req))
			ret = -EINVAL;
		ahash_request_zero(req);
	}
	crypto_free_ahash(tfm);

	return ret;
}

static void hibernation_e820_save(void *buf)
{
	get_e820_md5(e820_saved, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If there is no digest in suspend kernel, let it go. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_saved, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If md5 is not builtin for restore kernel, let it go. */
	return false;
}
#endif

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the header data
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;

	rdr->jump_address = (unsigned long)&restore_registers;
	rdr->jump_address_phys = __pa_symbol(&restore_registers);
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;

	hibernation_e820_save(rdr->e820_digest);

	return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}