v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Hibernation support for x86
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/types.h>
#include <linux/crc32.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
unsigned long temp_pgt __visible;
unsigned long relocated_restore_code __visible;

/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn;
	unsigned long nosave_end_pfn;

	nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}

struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	unsigned long e820_checksum;
};

/**
 * compute_e820_crc32 - calculate crc32 of a given e820 table
 *
 * @table: the e820 table to be calculated
 *
 * Return: the resulting checksum
 */
static inline u32 compute_e820_crc32(struct e820_table *table)
{
	int size = offsetof(struct e820_table, entries) +
		sizeof(struct e820_entry) * table->nr_entries;

	return ~crc32_le(~0, (unsigned char const *)table, size);
}

#ifdef CONFIG_X86_64
#define RESTORE_MAGIC	0x23456789ABCDEF02UL
#else
#define RESTORE_MAGIC	0x12345679UL
#endif

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->magic = RESTORE_MAGIC;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);

	/*
	 * The restore code fixes up CR3 and CR4 in the following sequence:
	 *
	 * [in hibernation asm]
	 * 1. CR3 <= temporary page tables
	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
	 * 3. CR3 <= rdr->cr3
	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
	 * [in restore_processor_state()]
	 * 5. CR4 <= saved CR4
	 * 6. CR3 <= saved CR3
	 *
	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
	 * rdr->cr3 needs to point to valid page tables but must not
	 * have any of the PCID bits set.
	 */
	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

	rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}

int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	__memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3_pa()) +
		pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();
	return 0;
}

int arch_resume_nosmt(void)
{
	int ret = 0;
	/*
	 * We reached this while coming out of hibernation. This means
	 * that SMT siblings are sleeping in hlt, as mwait is not safe
	 * against control transition during resume (see comment in
	 * hibernate_resume_nonboot_cpu_disable()).
	 *
	 * If the resumed kernel has SMT disabled, we have to take all the
	 * SMT siblings out of hlt, and offline them again so that they
	 * end up in mwait proper.
	 *
	 * Called with hotplug disabled.
	 */
	cpu_hotplug_enable();
	if (cpu_smt_control == CPU_SMT_DISABLED ||
			cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
		enum cpuhp_smt_control old = cpu_smt_control;

		ret = cpuhp_smt_enable();
		if (ret)
			goto out;
		ret = cpuhp_smt_disable(old);
		if (ret)
			goto out;
	}
out:
	cpu_hotplug_disable();
	return ret;
}
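
In v6.2 the architecture-specific image header carries a CRC32 of the firmware-provided e820 table: arch_hibernation_header_save() stores compute_e820_crc32(e820_table_firmware), and arch_hibernation_header_restore() recomputes it and rejects the image with -ENODEV if the memory map changed across hibernation. Below is a minimal userspace sketch of that save/compare pattern, not kernel code; the mock_e820_* types and helper names are simplified stand-ins, and crc32_ieee() reimplements the ~crc32_le(~0, buf, len) form used above.

/* Standalone sketch (userspace, not kernel code). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mock_e820_entry {
	uint64_t addr;
	uint64_t size;
	uint32_t type;
};

struct mock_e820_table {
	uint32_t nr_entries;
	struct mock_e820_entry entries[8];
};

/* Bitwise CRC-32 (reflected, polynomial 0xEDB88320). */
static uint32_t crc32_ieee(const void *data, size_t len)
{
	const unsigned char *p = data;
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return ~crc;
}

/* Checksum the header plus only the entries that are in use. */
static uint32_t table_checksum(const struct mock_e820_table *t)
{
	size_t size = offsetof(struct mock_e820_table, entries) +
		      sizeof(struct mock_e820_entry) * t->nr_entries;

	return crc32_ieee(t, size);
}

int main(void)
{
	struct mock_e820_table map = {
		.nr_entries = 2,
		.entries = {
			{ 0x00000000,    0x9fc00, 1 },	/* usable RAM */
			{ 0x00100000, 0x3ff00000, 1 },	/* usable RAM */
		},
	};
	uint32_t at_save = table_checksum(&map);	/* stored in the header */

	/* Pretend firmware changed the map between hibernate and resume. */
	map.entries[1].size -= 0x1000;

	if (table_checksum(&map) != at_save)
		fprintf(stderr, "inconsistent memory map, refusing to restore\n");
	return 0;
}
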
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Hibernation support for x86
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
unsigned long temp_pgt __visible;
unsigned long relocated_restore_code __visible;

/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn;
	unsigned long nosave_end_pfn;

	nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}


#define MD5_DIGEST_SIZE 16

struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate md5 according to given e820 table
 *
 * @table: the e820 table to be calculated
 * @buf: the md5 result to be stored to
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int size;
	int ret = 0;

	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return -ENOMEM;

	desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
		       GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto free_tfm;
	}

	desc->tfm = tfm;

	size = offsetof(struct e820_table, entries) +
		sizeof(struct e820_entry) * table->nr_entries;

	if (crypto_shash_digest(desc, (u8 *)table, size, buf))
		ret = -EINVAL;

	kzfree(desc);

free_tfm:
	crypto_free_shash(tfm);
	return ret;
}

static int hibernation_e820_save(void *buf)
{
	return get_e820_md5(e820_table_firmware, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If there is no digest in suspend kernel, let it go. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_table_firmware, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
}
#else
static int hibernation_e820_save(void *buf)
{
	return 0;
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If md5 is not builtin for restore kernel, let it go. */
	return false;
}
#endif

#ifdef CONFIG_X86_64
#define RESTORE_MAGIC	0x23456789ABCDEF01UL
#else
#define RESTORE_MAGIC	0x12345678UL
#endif

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->magic = RESTORE_MAGIC;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);

	/*
	 * The restore code fixes up CR3 and CR4 in the following sequence:
	 *
	 * [in hibernation asm]
	 * 1. CR3 <= temporary page tables
	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
	 * 3. CR3 <= rdr->cr3
	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
	 * [in restore_processor_state()]
	 * 5. CR4 <= saved CR4
	 * 6. CR3 <= saved CR3
	 *
	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
	 * rdr->cr3 needs to point to valid page tables but must not
	 * have any of the PCID bits set.
	 */
	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

	return hibernation_e820_save(rdr->e820_digest);
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}

int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3_pa()) +
		pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();
	return 0;
}

int arch_resume_nosmt(void)
{
	int ret = 0;
	/*
	 * We reached this while coming out of hibernation. This means
	 * that SMT siblings are sleeping in hlt, as mwait is not safe
	 * against control transition during resume (see comment in
	 * hibernate_resume_nonboot_cpu_disable()).
	 *
	 * If the resumed kernel has SMT disabled, we have to take all the
	 * SMT siblings out of hlt, and offline them again so that they
	 * end up in mwait proper.
	 *
	 * Called with hotplug disabled.
	 */
	cpu_hotplug_enable();
	if (cpu_smt_control == CPU_SMT_DISABLED ||
			cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
		enum cpuhp_smt_control old = cpu_smt_control;

		ret = cpuhp_smt_enable();
		if (ret)
			goto out;
		ret = cpuhp_smt_disable(old);
		if (ret)
			goto out;
	}
out:
	cpu_hotplug_disable();
	return ret;
}
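
The v5.4 variant records an MD5 digest of the e820 table instead (get_e820_md5() via the crypto_shash API), and hibernation_e820_mismatch() accepts an all-zero digest so that images written by a kernel without CONFIG_CRYPTO_MD5 built in still resume. A minimal userspace sketch of that acceptance logic follows; it is not kernel code, and it borrows OpenSSL's MD5() purely for illustration (an assumption of this sketch, link with -lcrypto), whereas the kernel goes through crypto_shash.

/* Standalone sketch (userspace, not kernel code). */
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

#define DIGEST_SIZE 16

/* Digest of the current memory map; stand-in for get_e820_md5(). */
static void current_map_digest(unsigned char out[DIGEST_SIZE])
{
	static const char fake_map[] = "usable:0x0+0x9fc00,usable:0x100000+0x3ff00000";

	MD5((const unsigned char *)fake_map, sizeof(fake_map) - 1, out);
}

/* Mirrors hibernation_e820_mismatch(): an all-zero digest is let through. */
static int map_mismatch(const unsigned char saved[DIGEST_SIZE])
{
	unsigned char zero[DIGEST_SIZE] = { 0 };
	unsigned char now[DIGEST_SIZE];

	if (!memcmp(saved, zero, DIGEST_SIZE))
		return 0;	/* no digest recorded in the image: let it go */

	current_map_digest(now);
	return memcmp(saved, now, DIGEST_SIZE) != 0;
}

int main(void)
{
	unsigned char saved[DIGEST_SIZE];

	current_map_digest(saved);			/* header save time */
	printf("mismatch: %d\n", map_mismatch(saved));	/* 0: maps agree */

	saved[0] ^= 0xff;				/* simulate a changed map */
	printf("mismatch: %d\n", map_mismatch(saved));	/* 1: refuse to resume */
	return 0;
}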