v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

/* Static, page-aligned storage for the early fixmap PTE tables. */
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

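/*
 * Populate the kernel PMD entries covering the fixmap region with PTE
 * tables taken from the static early_fixmap_pagetable buffer, so that
 * fixmap and early_ioremap() mappings work before any memory allocator
 * is available.
 */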
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

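/* Boot-time page table allocation: size-aligned memblock memory, or panic. */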
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

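/*
 * Like pte_alloc_kernel(), but usable before slab is up: if the PMD entry
 * is still empty, populate it with a PTE fragment from memblock.
 */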
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

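/*
 * Establish a kernel mapping of one page at virtual address va to physical
 * address pa with protection prot, allocating intermediate page tables as
 * needed.
 */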
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/*
		 * The PTE should never be already set nor present in the
		 * hash table.
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset bytes
 * from the bottom of RAM.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	bool ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		/* Kernel text keeps executable protection, everything else is data. */
		ktext = core_kernel_text(v);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

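/*
 * Map all of lowmem into the kernel address space: block mappings where
 * the MMU supports them, page tables for the rest.
 */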
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		/* mmu_mapin_ram() block-maps what it can and returns the new base. */
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

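/*
 * Make the init text non-executable once init memory has been freed.
 * mmu_mark_initmem_nx() handles block mappings; init text mapped with
 * page tables is switched to NX and writable via set_memory_*().
 */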
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	mmu_mark_initmem_nx();

	if (!v_block_mapped((unsigned long)_sinittext)) {
		set_memory_nx((unsigned long)_sinittext, numpages);
		set_memory_rw((unsigned long)_sinittext, numpages);
	}
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
		pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	/*
	 * mark text and rodata as read only. __end_rodata is set by
	 * powerpc's linker script and includes tables and data
	 * requiring relocation which are not put in RO_DATA.
	 */
	numpages = PFN_UP((unsigned long)__end_rodata) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_ro((unsigned long)_stext, numpages);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

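/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the linear mapping so that
 * stray accesses to freed pages fault. Highmem pages are not in the
 * linear mapping and are left untouched here.
 */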
#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_p(addr, numpages);
	else
		set_memory_np(addr, numpages);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

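/* Section boundary symbols provided by the kernel linker script. */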
extern char etext[], _stext[], _sinittext[], _einittext[];

/* Static, page-aligned storage for the early fixmap PTE tables. */
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/*
		 * The PTE should never be already set nor present in the
		 * hash table.
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset bytes
 * from the bottom of RAM.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		/* Keep executable protection for kernel and init text. */
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

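/*
 * Make the freed init text non-executable: via mmu_mark_initmem_nx() when
 * it lives in a block mapping, otherwise by resetting the page protection
 * to PAGE_KERNEL.
 */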
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
}

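/*
 * Enforce STRICT_KERNEL_RWX: kernel text becomes read-only and executable,
 * rodata read-only, either through the block-mapping hook or per page
 * with set_memory_attr().
 */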
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

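/*
 * DEBUG_PAGEALLOC hook: validate or invalidate the linear mapping of the
 * given pages so stray accesses to freed pages fault. Highmem pages are
 * not in the linear mapping and are skipped.
 */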
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_attr(addr, numpages, PAGE_KERNEL);
	else
		set_memory_attr(addr, numpages, __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */