arch/x86/power/hibernate_64.c (v6.2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernation support for x86-64
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/pgtable.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d = NULL;
	pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE);
	pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC);

	/* Filter out unsupported __PAGE_KERNEL* bits: */
	pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask;
	pgprot_val(pgtable_prot)  &= __default_kernel_pte_mask;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

	if (pgtable_l5_enabled()) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot)));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | pgprot_val(pgtable_prot)));
	if (p4d) {
		p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot));
		pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));

		set_p4d(p4d + p4d_index(restore_jump_address), new_p4d);
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
		set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
	}

	return 0;
}

static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_pgt = __pa(pgd);
	return 0;
}

asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}
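A note on the arithmetic above: pgd_index(), p4d_index(), pud_index() and pmd_index() each select one page-table entry by extracting a 9-bit slice of the virtual address, and jump_address_phys & PMD_MASK rounds the physical target down to a 2 MiB large-page boundary, which is why a single pmd entry suffices to cover the page containing restore_jump_address. Below is a minimal standalone sketch of that index arithmetic, assuming the standard 4-level x86-64 layout (4 KiB pages, 512 entries per level); the SKETCH_* macros are local stand-ins, not the kernel's own.

#include <stdio.h>

/* 4-level x86-64: 4 KiB pages, 9 bits of index per table level. */
#define SKETCH_PMD_SHIFT	21	/* PAGE_SHIFT (12) + 9 */
#define SKETCH_PUD_SHIFT	30	/* PMD_SHIFT + 9 */
#define SKETCH_PGDIR_SHIFT	39	/* PUD_SHIFT + 9 */
#define SKETCH_TABLE_MASK	0x1ffUL	/* 512 entries per table */

int main(void)
{
	unsigned long addr = 0xffffffff81000000UL;	/* a typical kernel-text address */

	printf("pgd index: %lu\n", (addr >> SKETCH_PGDIR_SHIFT) & SKETCH_TABLE_MASK); /* 511 */
	printf("pud index: %lu\n", (addr >> SKETCH_PUD_SHIFT) & SKETCH_TABLE_MASK);   /* 510 */
	printf("pmd index: %lu\n", (addr >> SKETCH_PMD_SHIFT) & SKETCH_TABLE_MASK);   /* 8 */
	return 0;
}

With 5-level paging (the pgtable_l5_enabled() branch), one more 9-bit slice at bit 39 is consumed by p4d_index() and the pgd slice moves up to bit 48, which is why the function conditionally allocates the extra p4d page.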
arch/x86/power/hibernate_64.c (v3.1)
 
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/suspend.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Defined in hibernate_asm_64.S */
extern int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;

pgd_t *temp_level4_pgt;

void *relocated_restore_code;

static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
{
	long i, j;

	i = pud_index(address);
	pud = pud + i;
	for (; i < PTRS_PER_PUD; pud++, i++) {
		unsigned long paddr;
		pmd_t *pmd;

		paddr = address + i*PUD_SIZE;
		if (paddr >= end)
			break;

		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!pmd)
			return -ENOMEM;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
			unsigned long pe;

			if (paddr >= end)
				break;
			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
			pe &= __supported_pte_mask;
			set_pmd(pmd, __pmd(pe));
		}
	}
	return 0;
}

static int set_up_temporary_mappings(void)
{
	unsigned long start, end, next;
	int error;

	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!temp_level4_pgt)
		return -ENOMEM;

	/* It is safe to reuse the original kernel mapping */
	set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
		init_level4_pgt[pgd_index(__START_KERNEL_map)]);

	/* Set up the direct mapping from scratch */
	start = (unsigned long)pfn_to_kaddr(0);
	end = (unsigned long)pfn_to_kaddr(max_pfn);

	for (; start < end; start = next) {
		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!pud)
			return -ENOMEM;
		next = start + PGDIR_SIZE;
		if (next > end)
			next = end;
		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
			return error;
		set_pgd(temp_level4_pgt + pgd_index(start),
			mk_kernel_pgd(__pa(pud)));
	}
	return 0;
}

int swsusp_arch_resume(void)
{
	int error;

	/* We have got enough memory and from now on we cannot recover */
	if ((error = set_up_temporary_mappings()))
		return error;

	relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;
	memcpy(relocated_restore_code, &core_restore_code,
	       &restore_registers - &core_restore_code);

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}
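Taken together, the two header callbacks implement a small fixed-layout protocol: arch_hibernation_header_save() serializes the jump address and cr3 into the image header, and arch_hibernation_header_restore() deserializes them, using the magic value to reject headers written by an incompatible kernel (later kernels extend this record and bump RESTORE_MAGIC for exactly that reason). Below is a minimal userspace round-trip sketch of the same idea; the header_save()/header_restore() names and the globals are local stand-ins, not kernel code.

#include <errno.h>
#include <stdio.h>

struct restore_data_record {
	unsigned long jump_address;
	unsigned long cr3;
	unsigned long magic;
};

#define RESTORE_MAGIC	0x0123456789ABCDEFUL

/* Local stand-ins for the kernel's globals. */
static unsigned long restore_jump_address = 0xffffffff81234000UL;
static unsigned long restore_cr3 = 0x1000UL;

static int header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(*rdr))
		return -EOVERFLOW;	/* header buffer too small */
	rdr->jump_address = restore_jump_address;
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;
	return 0;
}

static int header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	restore_cr3 = rdr->cr3;
	return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;	/* mismatched kernel */
}

int main(void)
{
	struct restore_data_record buf;

	if (header_save(&buf, sizeof(buf)) == 0 && header_restore(&buf) == 0)
		printf("round trip OK: jump=%#lx cr3=%#lx\n",
		       restore_jump_address, restore_cr3);
	return 0;
}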