arch/x86/mm/init_32.c (Linux v4.6)
 
  1/*
  2 *
  3 *  Copyright (C) 1995  Linus Torvalds
  4 *
  5 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/signal.h>
 10#include <linux/sched.h>
 11#include <linux/kernel.h>
 12#include <linux/errno.h>
 13#include <linux/string.h>
 14#include <linux/types.h>
 15#include <linux/ptrace.h>
 16#include <linux/mman.h>
 17#include <linux/mm.h>
 18#include <linux/hugetlb.h>
 19#include <linux/swap.h>
 20#include <linux/smp.h>
 21#include <linux/init.h>
 22#include <linux/highmem.h>
 23#include <linux/pagemap.h>
 24#include <linux/pci.h>
 25#include <linux/pfn.h>
 26#include <linux/poison.h>
 27#include <linux/bootmem.h>
 28#include <linux/memblock.h>
 29#include <linux/proc_fs.h>
 30#include <linux/memory_hotplug.h>
 31#include <linux/initrd.h>
 32#include <linux/cpumask.h>
 33#include <linux/gfp.h>
 34
 35#include <asm/asm.h>
 36#include <asm/bios_ebda.h>
 37#include <asm/processor.h>
 38#include <asm/uaccess.h>
 39#include <asm/pgtable.h>
 40#include <asm/dma.h>
 41#include <asm/fixmap.h>
 42#include <asm/e820.h>
 43#include <asm/apic.h>
 44#include <asm/bugs.h>
 45#include <asm/tlb.h>
 46#include <asm/tlbflush.h>
 47#include <asm/olpc_ofw.h>
 48#include <asm/pgalloc.h>
 49#include <asm/sections.h>
 50#include <asm/paravirt.h>
 51#include <asm/setup.h>
 52#include <asm/cacheflush.h>
 53#include <asm/page_types.h>
 54#include <asm/init.h>
 55
 56#include "mm_internal.h"
 57
 58unsigned long highstart_pfn, highend_pfn;
 59
 60static noinline int do_test_wp_bit(void);
 61
 62bool __read_mostly __vmalloc_start_set = false;
 63
 64/*
 65 * Creates a middle page table and puts a pointer to it in the
 66 * given global directory entry. This only returns the pgd entry
 67 * in non-PAE compilation mode, since the middle layer is folded.
 68 */
 69static pmd_t * __init one_md_table_init(pgd_t *pgd)
 70{
 71	pud_t *pud;
 72	pmd_t *pmd_table;
 73
 74#ifdef CONFIG_X86_PAE
 75	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 76		pmd_table = (pmd_t *)alloc_low_page();
 77		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 78		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 79		pud = pud_offset(pgd, 0);
 80		BUG_ON(pmd_table != pmd_offset(pud, 0));
 81
 82		return pmd_table;
 83	}
 84#endif
 85	pud = pud_offset(pgd, 0);
 86	pmd_table = pmd_offset(pud, 0);
 87
 88	return pmd_table;
 89}
 90
 91/*
 92 * Create a page table and place a pointer to it in a middle page
 93 * directory entry:
 94 */
 95static pte_t * __init one_page_table_init(pmd_t *pmd)
 96{
 97	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
 98		pte_t *page_table = (pte_t *)alloc_low_page();
 99
100		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
101		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
102		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
103	}
104
105	return pte_offset_kernel(pmd, 0);
106}
107
108pmd_t * __init populate_extra_pmd(unsigned long vaddr)
109{
110	int pgd_idx = pgd_index(vaddr);
111	int pmd_idx = pmd_index(vaddr);
112
113	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
114}
115
116pte_t * __init populate_extra_pte(unsigned long vaddr)
117{
118	int pte_idx = pte_index(vaddr);
119	pmd_t *pmd;
120
121	pmd = populate_extra_pmd(vaddr);
122	return one_page_table_init(pmd) + pte_idx;
123}
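/*
 * A rough sketch of how the helpers above compose, assuming 4k pages,
 * PAE disabled, and a hypothetical vaddr of 0xffc00000: pgd_index()
 * selects swapper_pg_dir[1023], one_md_table_init() returns the
 * folded pmd for that slot, and one_page_table_init() allocates the
 * pte page only on first use, so e.g.:
 *
 *	pte_t *pte = populate_extra_pte(0xffc00000);
 *	set_pte(pte, pfn_pte(some_pfn, PAGE_KERNEL));
 *
 * (some_pfn is a placeholder for whatever frame is being mapped.)
 */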
124
125static unsigned long __init
126page_table_range_init_count(unsigned long start, unsigned long end)
127{
128	unsigned long count = 0;
129#ifdef CONFIG_HIGHMEM
130	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
131	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
132	int pgd_idx, pmd_idx;
133	unsigned long vaddr;
134
135	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
136		return 0;
137
138	vaddr = start;
139	pgd_idx = pgd_index(vaddr);
140	pmd_idx = pmd_index(vaddr);
141
142	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
143		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
144							pmd_idx++) {
145			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
146			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
147				count++;
148			vaddr += PMD_SIZE;
149		}
150		pmd_idx = 0;
151	}
152#endif
153	return count;
154}
155
156static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
157					   unsigned long vaddr, pte_t *lastpte,
158					   void **adr)
159{
160#ifdef CONFIG_HIGHMEM
161	/*
162	 * Something (early fixmap) may already have put a pte
163	 * page here, which causes the page table allocation
164	 * to become nonlinear. Attempt to fix it, and if it
165	 * is still nonlinear then we have to bug.
166	 */
167	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
168	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
169
170	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
171	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
172	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
173		pte_t *newpte;
174		int i;
175
176		BUG_ON(after_bootmem);
177		newpte = *adr;
178		for (i = 0; i < PTRS_PER_PTE; i++)
179			set_pte(newpte + i, pte[i]);
180		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);
181
182		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
183		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
184		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
185		__flush_tlb_all();
186
187		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
188		pte = newpte;
189	}
190	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
191	       && vaddr > fix_to_virt(FIX_KMAP_END)
192	       && lastpte && lastpte + PTRS_PER_PTE != pte);
193#endif
194	return pte;
195}
196
197/*
198 * This function initializes a certain range of kernel virtual memory
199 * with new bootmem page tables, wherever page tables are missing in
200 * the given range.
201 *
202 * NOTE: The pagetables are allocated contiguously in physical space
203 * so we can cache the place of the first one and move around without
204 * checking the pgd every time.
205 */
206static void __init
207page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
208{
209	int pgd_idx, pmd_idx;
210	unsigned long vaddr;
211	pgd_t *pgd;
212	pmd_t *pmd;
213	pte_t *pte = NULL;
214	unsigned long count = page_table_range_init_count(start, end);
215	void *adr = NULL;
216
217	if (count)
218		adr = alloc_low_pages(count);
219
220	vaddr = start;
221	pgd_idx = pgd_index(vaddr);
222	pmd_idx = pmd_index(vaddr);
223	pgd = pgd_base + pgd_idx;
224
225	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
226		pmd = one_md_table_init(pgd);
227		pmd = pmd + pmd_index(vaddr);
228		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
229							pmd++, pmd_idx++) {
230			pte = page_table_kmap_check(one_page_table_init(pmd),
231						    pmd, vaddr, pte, &adr);
232
233			vaddr += PMD_SIZE;
234		}
235		pmd_idx = 0;
236	}
237}
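/*
 * A worked example, assuming CONFIG_HIGHMEM: the FIX_KMAP fixmap
 * slots normally fall inside a single pmd.  If the requested range
 * covers that pmd, page_table_range_init_count() returns 1, one
 * replacement pte page is pre-allocated into 'adr', and
 * page_table_kmap_check() copies any early-fixmap ptes into it, so
 * all pte pages for the range stay physically contiguous as the NOTE
 * above requires.
 */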
238
239static inline int is_kernel_text(unsigned long addr)
240{
241	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
242		return 1;
243	return 0;
244}
245
246/*
247 * This maps the physical memory to kernel virtual address space, a total
248 * of max_low_pfn pages, by creating page tables starting from address
249 * PAGE_OFFSET:
250 */
251unsigned long __init
252kernel_physical_mapping_init(unsigned long start,
253			     unsigned long end,
254			     unsigned long page_size_mask)
255{
256	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
257	unsigned long last_map_addr = end;
258	unsigned long start_pfn, end_pfn;
259	pgd_t *pgd_base = swapper_pg_dir;
260	int pgd_idx, pmd_idx, pte_ofs;
261	unsigned long pfn;
262	pgd_t *pgd;
263	pmd_t *pmd;
264	pte_t *pte;
265	unsigned pages_2m, pages_4k;
266	int mapping_iter;
267
268	start_pfn = start >> PAGE_SHIFT;
269	end_pfn = end >> PAGE_SHIFT;
270
271	/*
272	 * First iteration will set up identity mapping using large/small pages
273	 * based on use_pse, with other attributes same as set by
274	 * the early code in head_32.S
275	 *
276	 * Second iteration will set up the appropriate attributes (NX, GLOBAL..)
277	 * as desired for the kernel identity mapping.
278	 *
279	 * This two pass mechanism conforms to the TLB app note which says:
280	 *
281	 *     "Software should not write to a paging-structure entry in a way
282	 *      that would change, for any linear address, both the page size
283	 *      and either the page frame or attributes."
284	 */
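	/*
	 * To illustrate the rule quoted above: rewriting a 4k mapping
	 * of frame X with attribute A directly into a 2M mapping of
	 * frame Y with attribute B would change both the page size and
	 * the frame/attributes in a single write.  Pass 1 therefore
	 * only re-creates the head_32.S attributes (plus _PAGE_PSE
	 * where large pages are used), and pass 2 changes the
	 * attributes alone.
	 */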
285	mapping_iter = 1;
286
287	if (!cpu_has_pse)
288		use_pse = 0;
289
290repeat:
291	pages_2m = pages_4k = 0;
292	pfn = start_pfn;
293	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
294	pgd = pgd_base + pgd_idx;
295	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
296		pmd = one_md_table_init(pgd);
297
298		if (pfn >= end_pfn)
299			continue;
300#ifdef CONFIG_X86_PAE
301		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
302		pmd += pmd_idx;
303#else
304		pmd_idx = 0;
305#endif
306		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
307		     pmd++, pmd_idx++) {
308			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
309
310			/*
311			 * Map with big pages if possible, otherwise
312			 * create normal page tables:
313			 */
314			if (use_pse) {
315				unsigned int addr2;
316				pgprot_t prot = PAGE_KERNEL_LARGE;
317				/*
318				 * first pass will use the same initial
319				 * identity mapping attribute + _PAGE_PSE.
320				 */
321				pgprot_t init_prot =
322					__pgprot(PTE_IDENT_ATTR |
323						 _PAGE_PSE);
324
325				pfn &= PMD_MASK >> PAGE_SHIFT;
326				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
327					PAGE_OFFSET + PAGE_SIZE-1;
328
329				if (is_kernel_text(addr) ||
330				    is_kernel_text(addr2))
331					prot = PAGE_KERNEL_LARGE_EXEC;
332
333				pages_2m++;
334				if (mapping_iter == 1)
335					set_pmd(pmd, pfn_pmd(pfn, init_prot));
336				else
337					set_pmd(pmd, pfn_pmd(pfn, prot));
338
339				pfn += PTRS_PER_PTE;
340				continue;
341			}
342			pte = one_page_table_init(pmd);
343
344			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
345			pte += pte_ofs;
346			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
347			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
348				pgprot_t prot = PAGE_KERNEL;
349				/*
350				 * first pass will use the same initial
351				 * identity mapping attribute.
352				 */
353				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
354
355				if (is_kernel_text(addr))
356					prot = PAGE_KERNEL_EXEC;
357
358				pages_4k++;
359				if (mapping_iter == 1) {
360					set_pte(pte, pfn_pte(pfn, init_prot));
361					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
362				} else
363					set_pte(pte, pfn_pte(pfn, prot));
364			}
365		}
366	}
367	if (mapping_iter == 1) {
368		/*
369		 * update direct mapping page count only in the first
370		 * iteration.
371		 */
372		update_page_count(PG_LEVEL_2M, pages_2m);
373		update_page_count(PG_LEVEL_4K, pages_4k);
374
375		/*
376	 * Local TLB flush (including global entries), which flushes the
377	 * previous mappings from both the small and large page TLBs.
378		 */
379		__flush_tlb_all();
380
381		/*
382		 * Second iteration will set the actual desired PTE attributes.
383		 */
384		mapping_iter = 2;
385		goto repeat;
386	}
387	return last_map_addr;
388}
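/*
 * A usage sketch: the generic code in arch/x86/mm/init.c splits
 * memory into ranges with a per-range page_size_mask and then calls,
 * roughly:
 *
 *	last = kernel_physical_mapping_init(mr->start, mr->end,
 *					    mr->page_size_mask);
 */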
389
390pte_t *kmap_pte;
391
392static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
393{
394	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
395			vaddr), vaddr), vaddr);
396}
397
398static void __init kmap_init(void)
399{
400	unsigned long kmap_vstart;
401
402	/*
403	 * Cache the first kmap pte:
404	 */
405	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
406	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
407}
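/*
 * Caching kmap_pte lets kmap_atomic() reach any per-cpu fixmap slot
 * by pointer arithmetic alone; roughly (see
 * arch/x86/mm/highmem_32.c):
 *
 *	idx = type + KM_TYPE_NR * smp_processor_id();
 *	set_pte(kmap_pte - idx, mk_pte(page, prot));
 */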
408
409#ifdef CONFIG_HIGHMEM
410static void __init permanent_kmaps_init(pgd_t *pgd_base)
411{
412	unsigned long vaddr;
413	pgd_t *pgd;
414	pud_t *pud;
415	pmd_t *pmd;
416	pte_t *pte;
417
418	vaddr = PKMAP_BASE;
419	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
420
421	pgd = swapper_pg_dir + pgd_index(vaddr);
422	pud = pud_offset(pgd, vaddr);
423	pmd = pmd_offset(pud, vaddr);
424	pte = pte_offset_kernel(pmd, vaddr);
425	pkmap_page_table = pte;
426}
427
428void __init add_highpages_with_active_regions(int nid,
429			 unsigned long start_pfn, unsigned long end_pfn)
430{
431	phys_addr_t start, end;
432	u64 i;
433
434	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
435		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
436					    start_pfn, end_pfn);
437		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
438					      start_pfn, end_pfn);
439		for ( ; pfn < e_pfn; pfn++)
440			if (pfn_valid(pfn))
441				free_highmem_page(pfn_to_page(pfn));
442	}
443}
444#else
445static inline void permanent_kmaps_init(pgd_t *pgd_base)
446{
447}
448#endif /* CONFIG_HIGHMEM */
449
450void __init native_pagetable_init(void)
451{
452	unsigned long pfn, va;
453	pgd_t *pgd, *base = swapper_pg_dir;
454	pud_t *pud;
455	pmd_t *pmd;
456	pte_t *pte;
457
458	/*
459	 * Remove any mappings which extend past the end of physical
460	 * memory from the boot time page table.
461	 * In virtual address space, we should have at least two pages
462	 * from VMALLOC_END to pkmap or fixmap according to VMALLOC_END
463	 * definition. And max_low_pfn is set to VMALLOC_END physical
464	 * address. If the initial memory mapping did its job, we should
465	 * find a pte in use near max_low_pfn, or a pmd that is not present.
466	 */
467	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
468		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
469		pgd = base + pgd_index(va);
470		if (!pgd_present(*pgd))
471			break;
472
473		pud = pud_offset(pgd, va);
474		pmd = pmd_offset(pud, va);
475		if (!pmd_present(*pmd))
476			break;
477
478		/* should not be large page here */
479		if (pmd_large(*pmd)) {
480			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
481				pfn, pmd, __pa(pmd));
482			BUG_ON(1);
483		}
484
485		pte = pte_offset_kernel(pmd, va);
486		if (!pte_present(*pte))
487			break;
488
489		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
490				pfn, pmd, __pa(pmd), pte, __pa(pte));
491		pte_clear(NULL, va, pte);
492	}
493	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
494	paging_init();
495}
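/*
 * A worked example: with 4k pages the loop bound above,
 * 1 << (32 - PAGE_SHIFT), is 1 << 20 = 0x100000, the pfn of the 4GB
 * boundary, so the scan covers every boot-time mapping between
 * max_low_pfn and the top of the 32-bit address space.
 */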
496
497/*
498 * Build a proper pagetable for the kernel mappings.  Up until this
499 * point, we've been running on some set of pagetables constructed by
500 * the boot process.
501 *
502 * If we're booting on native hardware, this will be a pagetable
503 * constructed in arch/x86/kernel/head_32.S.  The root of the
504 * pagetable will be swapper_pg_dir.
505 *
506 * If we're booting paravirtualized under a hypervisor, then there are
507 * more options: we may already be running PAE, and the pagetable may
508 * or may not be based in swapper_pg_dir.  In any case,
509 * paravirt_pagetable_init() will set up swapper_pg_dir
510 * appropriately for the rest of the initialization to work.
511 *
512 * In general, pagetable_init() assumes that the pagetable may already
513 * be partially populated, and so it avoids stomping on any existing
514 * mappings.
515 */
516void __init early_ioremap_page_table_range_init(void)
517{
518	pgd_t *pgd_base = swapper_pg_dir;
519	unsigned long vaddr, end;
520
521	/*
522	 * Fixed mappings, only the page table structure has to be
523	 * created - mappings will be set by set_fixmap():
524	 */
525	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
526	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
527	page_table_range_init(vaddr, end, pgd_base);
528	early_ioremap_reset();
529}
530
531static void __init pagetable_init(void)
532{
533	pgd_t *pgd_base = swapper_pg_dir;
534
535	permanent_kmaps_init(pgd_base);
536}
537
538pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
539EXPORT_SYMBOL_GPL(__supported_pte_mask);
540
541/* user-defined highmem size */
542static unsigned int highmem_pages = -1;
543
544/*
545 * highmem=size forces highmem to be exactly 'size' bytes.
546 * This works even on boxes that have no highmem otherwise.
547 * This also works to reduce highmem size on bigger boxes.
548 */
549static int __init parse_highmem(char *arg)
550{
551	if (!arg)
552		return -EINVAL;
553
554	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
555	return 0;
556}
557early_param("highmem", parse_highmem);
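/*
 * A worked example: booting with "highmem=512M" makes memparse()
 * return 512 << 20, so with 4k pages highmem_pages becomes
 * (512 << 20) >> 12 = 131072 pages.
 */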
558
559#define MSG_HIGHMEM_TOO_BIG \
560	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"
561
562#define MSG_LOWMEM_TOO_SMALL \
563	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
564/*
565 * All of RAM fits into lowmem - but if the user wants highmem
566 * artificially via the highmem=x boot parameter then create
567 * it:
568 */
569static void __init lowmem_pfn_init(void)
570{
571	/* max_low_pfn is 0, we already have early_res support */
572	max_low_pfn = max_pfn;
573
574	if (highmem_pages == -1)
575		highmem_pages = 0;
576#ifdef CONFIG_HIGHMEM
577	if (highmem_pages >= max_pfn) {
578		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
579			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
580		highmem_pages = 0;
581	}
582	if (highmem_pages) {
583		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
584			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
585				pages_to_mb(highmem_pages));
586			highmem_pages = 0;
587		}
588		max_low_pfn -= highmem_pages;
589	}
590#else
591	if (highmem_pages)
592		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
593#endif
594}
595
596#define MSG_HIGHMEM_TOO_SMALL \
597	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
598
599#define MSG_HIGHMEM_TRIMMED \
600	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
601/*
602 * We have more RAM than fits into lowmem - we try to put it into
603 * highmem, also taking the highmem=x boot parameter into account:
604 */
605static void __init highmem_pfn_init(void)
606{
607	max_low_pfn = MAXMEM_PFN;
608
609	if (highmem_pages == -1)
610		highmem_pages = max_pfn - MAXMEM_PFN;
611
612	if (highmem_pages + MAXMEM_PFN < max_pfn)
613		max_pfn = MAXMEM_PFN + highmem_pages;
614
615	if (highmem_pages + MAXMEM_PFN > max_pfn) {
616		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
617			pages_to_mb(max_pfn - MAXMEM_PFN),
618			pages_to_mb(highmem_pages));
619		highmem_pages = 0;
620	}
621#ifndef CONFIG_HIGHMEM
622	/* Maximum memory usable is what is directly addressable */
623	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
624	if (max_pfn > MAX_NONPAE_PFN)
625		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
626	else
627		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
628	max_pfn = MAXMEM_PFN;
629#else /* !CONFIG_HIGHMEM */
630#ifndef CONFIG_HIGHMEM64G
631	if (max_pfn > MAX_NONPAE_PFN) {
632		max_pfn = MAX_NONPAE_PFN;
633		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
634	}
635#endif /* !CONFIG_HIGHMEM64G */
636#endif /* !CONFIG_HIGHMEM */
637}
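/*
 * A worked example, assuming the common 3G/1G split where MAXMEM is
 * about 896MB: with 2GB of RAM, max_pfn is 0x80000 and MAXMEM_PFN is
 * 0x38000, so highmem_pages defaults to 0x48000 pages (1152MB) and
 * max_low_pfn is clamped to MAXMEM_PFN.
 */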
638
639/*
640 * Determine low and high memory ranges:
641 */
642void __init find_low_pfn_range(void)
643{
644	/* it could update max_pfn */
645
646	if (max_pfn <= MAXMEM_PFN)
647		lowmem_pfn_init();
648	else
649		highmem_pfn_init();
650}
651
652#ifndef CONFIG_NEED_MULTIPLE_NODES
653void __init initmem_init(void)
654{
655#ifdef CONFIG_HIGHMEM
656	highstart_pfn = highend_pfn = max_pfn;
657	if (max_pfn > max_low_pfn)
658		highstart_pfn = max_low_pfn;
659	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
660		pages_to_mb(highend_pfn - highstart_pfn));
661	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
662#else
663	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
664#endif
665
666	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
667	sparse_memory_present_with_active_regions(0);
668
669#ifdef CONFIG_FLATMEM
670	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
671#endif
672	__vmalloc_start_set = true;
673
674	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
675			pages_to_mb(max_low_pfn));
676
677	setup_bootmem_allocator();
678}
679#endif /* !CONFIG_NEED_MULTIPLE_NODES */
680
681void __init setup_bootmem_allocator(void)
682{
683	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
684		 max_pfn_mapped<<PAGE_SHIFT);
685	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
686}
687
688/*
689 * paging_init() sets up the page tables - note that the first 8MB are
690 * already mapped by head.S.
691 *
692 * This routine also unmaps the page at virtual kernel address 0, so
693 * that we can trap those pesky NULL-reference errors in the kernel.
694 */
695void __init paging_init(void)
696{
697	pagetable_init();
698
699	__flush_tlb_all();
700
701	kmap_init();
702
703	/*
704	 * NOTE: at this point the bootmem allocator is fully available.
705	 */
706	olpc_dt_build_devicetree();
707	sparse_memory_present_with_active_regions(MAX_NUMNODES);
708	sparse_init();
709	zone_sizes_init();
710}
711
712/*
713 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
714 * and also on some strange 486's. All 586+'s are OK. This used to involve
715 * black magic jumps to work around some nasty CPU bugs, but fortunately the
716 * switch to using exceptions got rid of all that.
717 */
718static void __init test_wp_bit(void)
719{
720	printk(KERN_INFO
721  "Checking if this processor honours the WP bit even in supervisor mode...");
722
723	/* Any page-aligned address will do, the test is non-destructive */
724	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
725	boot_cpu_data.wp_works_ok = do_test_wp_bit();
726	clear_fixmap(FIX_WP_TEST);
727
728	if (!boot_cpu_data.wp_works_ok) {
729		printk(KERN_CONT "No.\n");
730		panic("Linux doesn't support CPUs with broken WP.");
731	} else {
732		printk(KERN_CONT "Ok.\n");
733	}
734}
735
736void __init mem_init(void)
737{
738	pci_iommu_alloc();
739
740#ifdef CONFIG_FLATMEM
741	BUG_ON(!mem_map);
742#endif
743	/*
744	 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
745	 * be done before free_all_bootmem(). Memblock uses free low memory for
746	 * temporary data (see find_range_array()) and for this purpose can use
747	 * pages that were already passed to the buddy allocator, hence marked as
748	 * not accessible in the page tables when compiled with
749	 * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not
750	 * important here.
751	 */
752	set_highmem_pages_init();
753
754	/* this will put all low memory onto the freelists */
755	free_all_bootmem();
756
757	after_bootmem = 1;
758
759	mem_init_print_info(NULL);
760	printk(KERN_INFO "virtual kernel memory layout:\n"
761		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
762#ifdef CONFIG_HIGHMEM
763		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
764#endif
765		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
766		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
767		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
768		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
769		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
770		FIXADDR_START, FIXADDR_TOP,
771		(FIXADDR_TOP - FIXADDR_START) >> 10,
772
773#ifdef CONFIG_HIGHMEM
774		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
775		(LAST_PKMAP*PAGE_SIZE) >> 10,
776#endif
777
778		VMALLOC_START, VMALLOC_END,
779		(VMALLOC_END - VMALLOC_START) >> 20,
780
781		(unsigned long)__va(0), (unsigned long)high_memory,
782		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
783
784		(unsigned long)&__init_begin, (unsigned long)&__init_end,
785		((unsigned long)&__init_end -
786		 (unsigned long)&__init_begin) >> 10,
787
788		(unsigned long)&_etext, (unsigned long)&_edata,
789		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
790
791		(unsigned long)&_text, (unsigned long)&_etext,
792		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
793
794	/*
795	 * Check boundaries twice: Some fundamental inconsistencies can
796	 * be detected at build time already.
797	 */
798#define __FIXADDR_TOP (-PAGE_SIZE)
799#ifdef CONFIG_HIGHMEM
800	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
801	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
802#endif
803#define high_memory (-128UL << 20)
804	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
805#undef high_memory
806#undef __FIXADDR_TOP
807#ifdef CONFIG_RANDOMIZE_BASE
808	BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
809#endif
810
811#ifdef CONFIG_HIGHMEM
812	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
813	BUG_ON(VMALLOC_END				> PKMAP_BASE);
814#endif
815	BUG_ON(VMALLOC_START				>= VMALLOC_END);
816	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
817
818	if (boot_cpu_data.wp_works_ok < 0)
819		test_wp_bit();
820}
821
822#ifdef CONFIG_MEMORY_HOTPLUG
823int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
824{
825	struct pglist_data *pgdata = NODE_DATA(nid);
826	struct zone *zone = pgdata->node_zones +
827		zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
828	unsigned long start_pfn = start >> PAGE_SHIFT;
829	unsigned long nr_pages = size >> PAGE_SHIFT;
830
831	return __add_pages(nid, zone, start_pfn, nr_pages);
832}
833
834#ifdef CONFIG_MEMORY_HOTREMOVE
835int arch_remove_memory(u64 start, u64 size)
836{
837	unsigned long start_pfn = start >> PAGE_SHIFT;
838	unsigned long nr_pages = size >> PAGE_SHIFT;
839	struct zone *zone;
840
841	zone = page_zone(pfn_to_page(start_pfn));
842	return __remove_pages(zone, start_pfn, nr_pages);
843}
844#endif
845#endif
846
847/*
848 * This function cannot be __init, since exceptions don't work in that
849 * section.  Put this after the callers, so that it cannot be inlined.
850 */
851static noinline int do_test_wp_bit(void)
852{
853	char tmp_reg;
854	int flag;
855
856	__asm__ __volatile__(
857		"	movb %0, %1	\n"
858		"1:	movb %1, %0	\n"
859		"	xorl %2, %2	\n"
860		"2:			\n"
861		_ASM_EXTABLE(1b,2b)
862		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
863		 "=q" (tmp_reg),
864		 "=r" (flag)
865		:"2" (1)
866		:"memory");
867
868	return flag;
869}
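/*
 * How the asm above works: 'flag' starts at 1 (the "2" (1) input).
 * The movb at label 1 writes through the read-only FIX_WP_TEST
 * mapping; on a CPU that honours WP in ring 0 the write faults, the
 * _ASM_EXTABLE fixup resumes at label 2, the xorl is skipped and 1 is
 * returned.  On a broken CPU the write succeeds, the xorl zeroes
 * 'flag' and 0 is returned.
 */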
870
871const int rodata_test_data = 0xC3;
872EXPORT_SYMBOL_GPL(rodata_test_data);
873
874int kernel_set_to_readonly __read_mostly;
875
876void set_kernel_text_rw(void)
877{
878	unsigned long start = PFN_ALIGN(_text);
879	unsigned long size = PFN_ALIGN(_etext) - start;
880
881	if (!kernel_set_to_readonly)
882		return;
883
884	pr_debug("Set kernel text: %lx - %lx for read write\n",
885		 start, start+size);
886
887	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
888}
889
890void set_kernel_text_ro(void)
891{
892	unsigned long start = PFN_ALIGN(_text);
893	unsigned long size = PFN_ALIGN(_etext) - start;
894
895	if (!kernel_set_to_readonly)
896		return;
897
898	pr_debug("Set kernel text: %lx - %lx for read only\n",
899		 start, start+size);
900
901	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
902}
903
904static void mark_nxdata_nx(void)
905{
906	/*
907	 * When this is called, init has already been executed and released,
908	 * so everything past _etext should be NX.
909	 */
910	unsigned long start = PFN_ALIGN(_etext);
911	/*
912	 * This comes from the is_kernel_text() upper limit, rounded up by one HPAGE where large pages were used:
913	 */
914	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
915
916	if (__supported_pte_mask & _PAGE_NX)
917		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
918	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
919}
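/*
 * A worked example, assuming 2M large pages: an __init_end of
 * 0xc08f6000 gives (0xc08f6000 + 0x200000) & HPAGE_MASK = 0xc0a00000,
 * so the NX range runs from _etext up to the end of the last large
 * page that is_kernel_text() could have treated as executable.
 */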
920
921void mark_rodata_ro(void)
922{
923	unsigned long start = PFN_ALIGN(_text);
924	unsigned long size = PFN_ALIGN(_etext) - start;
925
926	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
927	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
928		size >> 10);
929
930	kernel_set_to_readonly = 1;
931
932#ifdef CONFIG_CPA_DEBUG
933	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
934		start, start+size);
935	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);
936
937	printk(KERN_INFO "Testing CPA: write protecting again\n");
938	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
939#endif
940
941	start += size;
942	size = (unsigned long)__end_rodata - start;
943	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
944	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
945		size >> 10);
946	rodata_test();
947
948#ifdef CONFIG_CPA_DEBUG
949	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
950	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
951
952	printk(KERN_INFO "Testing CPA: write protecting again\n");
953	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
954#endif
955	mark_nxdata_nx();
956	if (__supported_pte_mask & _PAGE_NX)
957		debug_checkwx();
958}
arch/x86/mm/init_32.c (Linux v5.9)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *
  4 *  Copyright (C) 1995  Linus Torvalds
  5 *
  6 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  7 */
  8
  9#include <linux/signal.h>
 10#include <linux/sched.h>
 11#include <linux/kernel.h>
 12#include <linux/errno.h>
 13#include <linux/string.h>
 14#include <linux/types.h>
 15#include <linux/ptrace.h>
 16#include <linux/mman.h>
 17#include <linux/mm.h>
 18#include <linux/hugetlb.h>
 19#include <linux/swap.h>
 20#include <linux/smp.h>
 21#include <linux/init.h>
 22#include <linux/highmem.h>
 23#include <linux/pagemap.h>
 24#include <linux/pci.h>
 25#include <linux/pfn.h>
 26#include <linux/poison.h>
 27#include <linux/memblock.h>
 28#include <linux/proc_fs.h>
 29#include <linux/memory_hotplug.h>
 30#include <linux/initrd.h>
 31#include <linux/cpumask.h>
 32#include <linux/gfp.h>
 33
 34#include <asm/asm.h>
 35#include <asm/bios_ebda.h>
 36#include <asm/processor.h>
 37#include <linux/uaccess.h>
 38#include <asm/dma.h>
 39#include <asm/fixmap.h>
 40#include <asm/e820/api.h>
 41#include <asm/apic.h>
 42#include <asm/bugs.h>
 43#include <asm/tlb.h>
 44#include <asm/tlbflush.h>
 45#include <asm/olpc_ofw.h>
 46#include <asm/pgalloc.h>
 47#include <asm/sections.h>
 48#include <asm/paravirt.h>
 49#include <asm/setup.h>
 50#include <asm/set_memory.h>
 51#include <asm/page_types.h>
 52#include <asm/cpu_entry_area.h>
 53#include <asm/init.h>
 54#include <asm/pgtable_areas.h>
 55#include <asm/numa.h>
 56
 57#include "mm_internal.h"
 58
 59unsigned long highstart_pfn, highend_pfn;
 60
 61bool __read_mostly __vmalloc_start_set = false;
 62
 63/*
 64 * Creates a middle page table and puts a pointer to it in the
 65 * given global directory entry. This only returns the pgd entry
 66 * in non-PAE compilation mode, since the middle layer is folded.
 67 */
 68static pmd_t * __init one_md_table_init(pgd_t *pgd)
 69{
 70	p4d_t *p4d;
 71	pud_t *pud;
 72	pmd_t *pmd_table;
 73
 74#ifdef CONFIG_X86_PAE
 75	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 76		pmd_table = (pmd_t *)alloc_low_page();
 77		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 78		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 79		p4d = p4d_offset(pgd, 0);
 80		pud = pud_offset(p4d, 0);
 81		BUG_ON(pmd_table != pmd_offset(pud, 0));
 82
 83		return pmd_table;
 84	}
 85#endif
 86	p4d = p4d_offset(pgd, 0);
 87	pud = pud_offset(p4d, 0);
 88	pmd_table = pmd_offset(pud, 0);
 89
 90	return pmd_table;
 91}
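/*
 * Note on the p4d_offset() step above: it comes from the 5-level
 * paging rework.  On 32-bit both the p4d and pud levels are folded,
 * so p4d_offset(pgd, 0) and pud_offset(p4d, 0) simply pass the pgd
 * through without touching memory.
 */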
 92
 93/*
 94 * Create a page table and place a pointer to it in a middle page
 95 * directory entry:
 96 */
 97static pte_t * __init one_page_table_init(pmd_t *pmd)
 98{
 99	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
100		pte_t *page_table = (pte_t *)alloc_low_page();
101
102		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
103		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
104		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
105	}
106
107	return pte_offset_kernel(pmd, 0);
108}
109
110pmd_t * __init populate_extra_pmd(unsigned long vaddr)
111{
112	int pgd_idx = pgd_index(vaddr);
113	int pmd_idx = pmd_index(vaddr);
114
115	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
116}
117
118pte_t * __init populate_extra_pte(unsigned long vaddr)
119{
120	int pte_idx = pte_index(vaddr);
121	pmd_t *pmd;
122
123	pmd = populate_extra_pmd(vaddr);
124	return one_page_table_init(pmd) + pte_idx;
125}
126
127static unsigned long __init
128page_table_range_init_count(unsigned long start, unsigned long end)
129{
130	unsigned long count = 0;
131#ifdef CONFIG_HIGHMEM
132	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
133	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
134	int pgd_idx, pmd_idx;
135	unsigned long vaddr;
136
137	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
138		return 0;
139
140	vaddr = start;
141	pgd_idx = pgd_index(vaddr);
142	pmd_idx = pmd_index(vaddr);
143
144	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
145		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
146							pmd_idx++) {
147			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
148			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
149				count++;
150			vaddr += PMD_SIZE;
151		}
152		pmd_idx = 0;
153	}
154#endif
155	return count;
156}
157
158static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
159					   unsigned long vaddr, pte_t *lastpte,
160					   void **adr)
161{
162#ifdef CONFIG_HIGHMEM
163	/*
164	 * Something (early fixmap) may already have put a pte
165	 * page here, which causes the page table allocation
166	 * to become nonlinear. Attempt to fix it, and if it
167	 * is still nonlinear then we have to bug.
168	 */
169	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
170	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
171
172	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
173	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
174	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
175		pte_t *newpte;
176		int i;
177
178		BUG_ON(after_bootmem);
179		newpte = *adr;
180		for (i = 0; i < PTRS_PER_PTE; i++)
181			set_pte(newpte + i, pte[i]);
182		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);
183
184		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
185		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
186		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
187		__flush_tlb_all();
188
189		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
190		pte = newpte;
191	}
192	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
193	       && vaddr > fix_to_virt(FIX_KMAP_END)
194	       && lastpte && lastpte + PTRS_PER_PTE != pte);
195#endif
196	return pte;
197}
198
199/*
200 * This function initializes a certain range of kernel virtual memory
201 * with new bootmem page tables, wherever page tables are missing in
202 * the given range.
203 *
204 * NOTE: The pagetables are allocated contiguously in physical space
205 * so we can cache the place of the first one and move around without
206 * checking the pgd every time.
207 */
208static void __init
209page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
210{
211	int pgd_idx, pmd_idx;
212	unsigned long vaddr;
213	pgd_t *pgd;
214	pmd_t *pmd;
215	pte_t *pte = NULL;
216	unsigned long count = page_table_range_init_count(start, end);
217	void *adr = NULL;
218
219	if (count)
220		adr = alloc_low_pages(count);
221
222	vaddr = start;
223	pgd_idx = pgd_index(vaddr);
224	pmd_idx = pmd_index(vaddr);
225	pgd = pgd_base + pgd_idx;
226
227	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
228		pmd = one_md_table_init(pgd);
229		pmd = pmd + pmd_index(vaddr);
230		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
231							pmd++, pmd_idx++) {
232			pte = page_table_kmap_check(one_page_table_init(pmd),
233						    pmd, vaddr, pte, &adr);
234
235			vaddr += PMD_SIZE;
236		}
237		pmd_idx = 0;
238	}
239}
240
241/*
242 * <linux/kallsyms.h> already defines is_kernel_text; use a '__'
243 * prefix here to avoid a conflict.
244 */
245static inline int __is_kernel_text(unsigned long addr)
246{
247	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
248		return 1;
249	return 0;
250}
251
252/*
253 * This maps the physical memory to kernel virtual address space, a total
254 * of max_low_pfn pages, by creating page tables starting from address
255 * PAGE_OFFSET:
256 */
257unsigned long __init
258kernel_physical_mapping_init(unsigned long start,
259			     unsigned long end,
260			     unsigned long page_size_mask,
261			     pgprot_t prot)
262{
263	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
264	unsigned long last_map_addr = end;
265	unsigned long start_pfn, end_pfn;
266	pgd_t *pgd_base = swapper_pg_dir;
267	int pgd_idx, pmd_idx, pte_ofs;
268	unsigned long pfn;
269	pgd_t *pgd;
270	pmd_t *pmd;
271	pte_t *pte;
272	unsigned pages_2m, pages_4k;
273	int mapping_iter;
274
275	start_pfn = start >> PAGE_SHIFT;
276	end_pfn = end >> PAGE_SHIFT;
277
278	/*
279	 * First iteration will set up identity mapping using large/small pages
280	 * based on use_pse, with other attributes same as set by
281	 * the early code in head_32.S
282	 *
283	 * Second iteration will set up the appropriate attributes (NX, GLOBAL..)
284	 * as desired for the kernel identity mapping.
285	 *
286	 * This two pass mechanism conforms to the TLB app note which says:
287	 *
288	 *     "Software should not write to a paging-structure entry in a way
289	 *      that would change, for any linear address, both the page size
290	 *      and either the page frame or attributes."
291	 */
292	mapping_iter = 1;
293
294	if (!boot_cpu_has(X86_FEATURE_PSE))
295		use_pse = 0;
296
297repeat:
298	pages_2m = pages_4k = 0;
299	pfn = start_pfn;
300	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
301	pgd = pgd_base + pgd_idx;
302	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
303		pmd = one_md_table_init(pgd);
304
305		if (pfn >= end_pfn)
306			continue;
307#ifdef CONFIG_X86_PAE
308		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
309		pmd += pmd_idx;
310#else
311		pmd_idx = 0;
312#endif
313		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
314		     pmd++, pmd_idx++) {
315			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
316
317			/*
318			 * Map with big pages if possible, otherwise
319			 * create normal page tables:
320			 */
321			if (use_pse) {
322				unsigned int addr2;
323				pgprot_t prot = PAGE_KERNEL_LARGE;
324				/*
325				 * first pass will use the same initial
326				 * identity mapping attribute + _PAGE_PSE.
327				 */
328				pgprot_t init_prot =
329					__pgprot(PTE_IDENT_ATTR |
330						 _PAGE_PSE);
331
332				pfn &= PMD_MASK >> PAGE_SHIFT;
333				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
334					PAGE_OFFSET + PAGE_SIZE-1;
335
336				if (__is_kernel_text(addr) ||
337				    __is_kernel_text(addr2))
338					prot = PAGE_KERNEL_LARGE_EXEC;
339
340				pages_2m++;
341				if (mapping_iter == 1)
342					set_pmd(pmd, pfn_pmd(pfn, init_prot));
343				else
344					set_pmd(pmd, pfn_pmd(pfn, prot));
345
346				pfn += PTRS_PER_PTE;
347				continue;
348			}
349			pte = one_page_table_init(pmd);
350
351			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
352			pte += pte_ofs;
353			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
354			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
355				pgprot_t prot = PAGE_KERNEL;
356				/*
357				 * first pass will use the same initial
358				 * identity mapping attribute.
359				 */
360				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
361
362				if (__is_kernel_text(addr))
363					prot = PAGE_KERNEL_EXEC;
364
365				pages_4k++;
366				if (mapping_iter == 1) {
367					set_pte(pte, pfn_pte(pfn, init_prot));
368					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
369				} else
370					set_pte(pte, pfn_pte(pfn, prot));
371			}
372		}
373	}
374	if (mapping_iter == 1) {
375		/*
376		 * update direct mapping page count only in the first
377		 * iteration.
378		 */
379		update_page_count(PG_LEVEL_2M, pages_2m);
380		update_page_count(PG_LEVEL_4K, pages_4k);
381
382		/*
383	 * Local TLB flush (including global entries), which flushes the
384	 * previous mappings from both the small and large page TLBs.
385		 */
386		__flush_tlb_all();
387
388		/*
389		 * Second iteration will set the actual desired PTE attributes.
390		 */
391		mapping_iter = 2;
392		goto repeat;
393	}
394	return last_map_addr;
395}
396
397pte_t *kmap_pte;
398
399static void __init kmap_init(void)
400{
401	unsigned long kmap_vstart;
402
403	/*
404	 * Cache the first kmap pte:
405	 */
406	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
407	kmap_pte = virt_to_kpte(kmap_vstart);
408}
409
410#ifdef CONFIG_HIGHMEM
411static void __init permanent_kmaps_init(pgd_t *pgd_base)
412{
413	unsigned long vaddr = PKMAP_BASE;
414
415	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
416
417	pkmap_page_table = virt_to_kpte(vaddr);
418}
419
420void __init add_highpages_with_active_regions(int nid,
421			 unsigned long start_pfn, unsigned long end_pfn)
422{
423	phys_addr_t start, end;
424	u64 i;
425
426	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
427		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
428					    start_pfn, end_pfn);
429		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
430					      start_pfn, end_pfn);
431		for ( ; pfn < e_pfn; pfn++)
432			if (pfn_valid(pfn))
433				free_highmem_page(pfn_to_page(pfn));
434	}
435}
436#else
437static inline void permanent_kmaps_init(pgd_t *pgd_base)
438{
439}
440#endif /* CONFIG_HIGHMEM */
441
442void __init sync_initial_page_table(void)
443{
444	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
445			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
446			KERNEL_PGD_PTRS);
447
448	/*
449	 * sync back low identity map too.  It is used for example
450	 * in the 32-bit EFI stub.
451	 */
452	clone_pgd_range(initial_page_table,
453			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
454			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
455}
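/*
 * A usage sketch: setup_arch() invokes this once swapper_pg_dir is
 * populated, so that initial_page_table - still used by e.g. the
 * 32-bit EFI stub - carries the same kernel and low identity
 * mappings:
 *
 *	sync_initial_page_table();
 */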
456
457void __init native_pagetable_init(void)
458{
459	unsigned long pfn, va;
460	pgd_t *pgd, *base = swapper_pg_dir;
461	p4d_t *p4d;
462	pud_t *pud;
463	pmd_t *pmd;
464	pte_t *pte;
465
466	/*
467	 * Remove any mappings which extend past the end of physical
468	 * memory from the boot time page table.
469	 * In virtual address space, we should have at least two pages
470	 * from VMALLOC_END to pkmap or fixmap according to VMALLOC_END
471	 * definition. And max_low_pfn is set to VMALLOC_END physical
472	 * address. If the initial memory mapping did its job, we should
473	 * find a pte in use near max_low_pfn, or a pmd that is not present.
474	 */
475	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
476		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
477		pgd = base + pgd_index(va);
478		if (!pgd_present(*pgd))
479			break;
480
481		p4d = p4d_offset(pgd, va);
482		pud = pud_offset(p4d, va);
483		pmd = pmd_offset(pud, va);
484		if (!pmd_present(*pmd))
485			break;
486
487		/* should not be large page here */
488		if (pmd_large(*pmd)) {
489			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
490				pfn, pmd, __pa(pmd));
491			BUG_ON(1);
492		}
493
494		pte = pte_offset_kernel(pmd, va);
495		if (!pte_present(*pte))
496			break;
497
498		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
499				pfn, pmd, __pa(pmd), pte, __pa(pte));
500		pte_clear(NULL, va, pte);
501	}
502	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
503	paging_init();
504}
505
506/*
507 * Build a proper pagetable for the kernel mappings.  Up until this
508 * point, we've been running on some set of pagetables constructed by
509 * the boot process.
510 *
511 * If we're booting on native hardware, this will be a pagetable
512 * constructed in arch/x86/kernel/head_32.S.  The root of the
513 * pagetable will be swapper_pg_dir.
514 *
515 * If we're booting paravirtualized under a hypervisor, then there are
516 * more options: we may already be running PAE, and the pagetable may
517 * or may not be based in swapper_pg_dir.  In any case,
518 * paravirt_pagetable_init() will set up swapper_pg_dir
519 * appropriately for the rest of the initialization to work.
520 *
521 * In general, pagetable_init() assumes that the pagetable may already
522 * be partially populated, and so it avoids stomping on any existing
523 * mappings.
524 */
525void __init early_ioremap_page_table_range_init(void)
526{
527	pgd_t *pgd_base = swapper_pg_dir;
528	unsigned long vaddr, end;
529
530	/*
531	 * Fixed mappings, only the page table structure has to be
532	 * created - mappings will be set by set_fixmap():
533	 */
534	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
535	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
536	page_table_range_init(vaddr, end, pgd_base);
537	early_ioremap_reset();
538}
539
540static void __init pagetable_init(void)
541{
542	pgd_t *pgd_base = swapper_pg_dir;
543
544	permanent_kmaps_init(pgd_base);
545}
546
547#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)
548/* Bits supported by the hardware: */
549pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
550/* Bits allowed in normal kernel mappings: */
551pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
552EXPORT_SYMBOL_GPL(__supported_pte_mask);
553/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
554EXPORT_SYMBOL(__default_kernel_pte_mask);
555
556/* user-defined highmem size */
557static unsigned int highmem_pages = -1;
558
559/*
560 * highmem=size forces highmem to be exactly 'size' bytes.
561 * This works even on boxes that have no highmem otherwise.
562 * This also works to reduce highmem size on bigger boxes.
563 */
564static int __init parse_highmem(char *arg)
565{
566	if (!arg)
567		return -EINVAL;
568
569	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
570	return 0;
571}
572early_param("highmem", parse_highmem);
573
574#define MSG_HIGHMEM_TOO_BIG \
575	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"
576
577#define MSG_LOWMEM_TOO_SMALL \
578	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
579/*
580 * All of RAM fits into lowmem - but if the user wants highmem
581 * artificially via the highmem=x boot parameter then create
582 * it:
583 */
584static void __init lowmem_pfn_init(void)
585{
586	/* max_low_pfn is 0, we already have early_res support */
587	max_low_pfn = max_pfn;
588
589	if (highmem_pages == -1)
590		highmem_pages = 0;
591#ifdef CONFIG_HIGHMEM
592	if (highmem_pages >= max_pfn) {
593		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
594			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
595		highmem_pages = 0;
596	}
597	if (highmem_pages) {
598		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
599			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
600				pages_to_mb(highmem_pages));
601			highmem_pages = 0;
602		}
603		max_low_pfn -= highmem_pages;
604	}
605#else
606	if (highmem_pages)
607		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
608#endif
609}
610
611#define MSG_HIGHMEM_TOO_SMALL \
612	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
613
614#define MSG_HIGHMEM_TRIMMED \
615	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
616/*
617 * We have more RAM than fits into lowmem - we try to put it into
618 * highmem, also taking the highmem=x boot parameter into account:
619 */
620static void __init highmem_pfn_init(void)
621{
622	max_low_pfn = MAXMEM_PFN;
623
624	if (highmem_pages == -1)
625		highmem_pages = max_pfn - MAXMEM_PFN;
626
627	if (highmem_pages + MAXMEM_PFN < max_pfn)
628		max_pfn = MAXMEM_PFN + highmem_pages;
629
630	if (highmem_pages + MAXMEM_PFN > max_pfn) {
631		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
632			pages_to_mb(max_pfn - MAXMEM_PFN),
633			pages_to_mb(highmem_pages));
634		highmem_pages = 0;
635	}
636#ifndef CONFIG_HIGHMEM
637	/* Maximum memory usable is what is directly addressable */
638	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
639	if (max_pfn > MAX_NONPAE_PFN)
640		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
641	else
642		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
643	max_pfn = MAXMEM_PFN;
644#else /* !CONFIG_HIGHMEM */
645#ifndef CONFIG_HIGHMEM64G
646	if (max_pfn > MAX_NONPAE_PFN) {
647		max_pfn = MAX_NONPAE_PFN;
648		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
649	}
650#endif /* !CONFIG_HIGHMEM64G */
651#endif /* !CONFIG_HIGHMEM */
652}
653
654/*
655 * Determine low and high memory ranges:
656 */
657void __init find_low_pfn_range(void)
658{
659	/* it could update max_pfn */
660
661	if (max_pfn <= MAXMEM_PFN)
662		lowmem_pfn_init();
663	else
664		highmem_pfn_init();
665}
666
667#ifndef CONFIG_NEED_MULTIPLE_NODES
668void __init initmem_init(void)
669{
670#ifdef CONFIG_HIGHMEM
671	highstart_pfn = highend_pfn = max_pfn;
672	if (max_pfn > max_low_pfn)
673		highstart_pfn = max_low_pfn;
674	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
675		pages_to_mb(highend_pfn - highstart_pfn));
676	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
677#else
678	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
679#endif
680
681	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
682
683#ifdef CONFIG_FLATMEM
684	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
685#endif
686	__vmalloc_start_set = true;
687
688	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
689			pages_to_mb(max_low_pfn));
690
691	setup_bootmem_allocator();
692}
693#endif /* !CONFIG_NEED_MULTIPLE_NODES */
694
695void __init setup_bootmem_allocator(void)
696{
697	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
698		 max_pfn_mapped<<PAGE_SHIFT);
699	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
700}
701
702/*
703 * paging_init() sets up the page tables - note that the first 8MB are
704 * already mapped by head.S.
705 *
706 * This routine also unmaps the page at virtual kernel address 0, so
707 * that we can trap those pesky NULL-reference errors in the kernel.
708 */
709void __init paging_init(void)
710{
711	pagetable_init();
712
713	__flush_tlb_all();
714
715	kmap_init();
716
717	/*
718	 * NOTE: at this point the bootmem allocator is fully available.
719	 */
720	olpc_dt_build_devicetree();
721	sparse_init();
722	zone_sizes_init();
723}
724
725/*
726 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
727 * and also on some strange 486's. All 586+'s are OK. This used to involve
728 * black magic jumps to work around some nasty CPU bugs, but fortunately the
729 * switch to using exceptions got rid of all that.
730 */
731static void __init test_wp_bit(void)
732{
733	char z = 0;
734
735	printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");
736
737	__set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);
738
739	if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
740		clear_fixmap(FIX_WP_TEST);
741		printk(KERN_CONT "Ok.\n");
742		return;
743	}
744
745	printk(KERN_CONT "No.\n");
746	panic("Linux doesn't support CPUs with broken WP.");
747}
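/*
 * This version replaces the old hand-rolled asm probe with
 * copy_to_kernel_nofault(), which returns -EFAULT when the write to
 * the read-only fixmap faults (WP honoured) and 0 when the write
 * silently succeeds (broken WP).
 */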
748
749void __init mem_init(void)
750{
751	pci_iommu_alloc();
752
753#ifdef CONFIG_FLATMEM
754	BUG_ON(!mem_map);
755#endif
756	/*
757	 * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
758	 * be done before memblock_free_all(). Memblock uses free low memory for
759	 * temporary data (see find_range_array()) and for this purpose can use
760	 * pages that were already passed to the buddy allocator, hence marked as
761	 * not accessible in the page tables when compiled with
762	 * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not
763	 * important here.
764	 */
765	set_highmem_pages_init();
766
767	/* this will put all low memory onto the freelists */
768	memblock_free_all();
769
770	after_bootmem = 1;
771	x86_init.hyper.init_after_bootmem();
772
773	mem_init_print_info(NULL);
774
775	/*
776	 * Check boundaries twice: Some fundamental inconsistencies can
777	 * be detected at build time already.
778	 */
779#define __FIXADDR_TOP (-PAGE_SIZE)
780#ifdef CONFIG_HIGHMEM
781	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
782	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
783#endif
784#define high_memory (-128UL << 20)
785	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
786#undef high_memory
787#undef __FIXADDR_TOP
788
789#ifdef CONFIG_HIGHMEM
790	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
791	BUG_ON(VMALLOC_END				> PKMAP_BASE);
792#endif
793	BUG_ON(VMALLOC_START				>= VMALLOC_END);
794	BUG_ON((unsigned long)high_memory		> VMALLOC_START);
795
796	test_wp_bit();
797}
798
799#ifdef CONFIG_MEMORY_HOTPLUG
800int arch_add_memory(int nid, u64 start, u64 size,
801		    struct mhp_params *params)
802{
803	unsigned long start_pfn = start >> PAGE_SHIFT;
804	unsigned long nr_pages = size >> PAGE_SHIFT;
805	int ret;
806
807	/*
808	 * The page tables were already mapped at boot so if the caller
809	 * requests a different mapping type then we must change all the
810	 * pages with __set_memory_prot().
811	 */
812	if (params->pgprot.pgprot != PAGE_KERNEL.pgprot) {
813		ret = __set_memory_prot(start, nr_pages, params->pgprot);
814		if (ret)
815			return ret;
816	}
817
818	return __add_pages(nid, start_pfn, nr_pages, params);
819}
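/*
 * A usage sketch: the hotplug core passes roughly
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *	arch_add_memory(nid, start, size, &params);
 *
 * so the __set_memory_prot() path above only runs for callers that
 * request a non-default pgprot, e.g. devm_memremap_pages().
 */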
820
821void arch_remove_memory(int nid, u64 start, u64 size,
822			struct vmem_altmap *altmap)
823{
824	unsigned long start_pfn = start >> PAGE_SHIFT;
825	unsigned long nr_pages = size >> PAGE_SHIFT;
826
827	__remove_pages(start_pfn, nr_pages, altmap);
828}
829#endif
830
831int kernel_set_to_readonly __read_mostly;
832
833static void mark_nxdata_nx(void)
834{
835	/*
836	 * When this is called, init has already been executed and released,
837	 * so everything past _etext should be NX.
838	 */
839	unsigned long start = PFN_ALIGN(_etext);
840	/*
841	 * This comes from the __is_kernel_text() upper limit, rounded up by one HPAGE where large pages were used:
842	 */
843	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
844
845	if (__supported_pte_mask & _PAGE_NX)
846		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
847	set_memory_nx(start, size >> PAGE_SHIFT);
848}
849
850void mark_rodata_ro(void)
851{
852	unsigned long start = PFN_ALIGN(_text);
853	unsigned long size = (unsigned long)__end_rodata - start;
854
855	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
856	pr_info("Write protecting kernel text and read-only data: %luk\n",
857		size >> 10);
858
859	kernel_set_to_readonly = 1;
860
861#ifdef CONFIG_CPA_DEBUG
862	pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size);
863	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
864
865	pr_info("Testing CPA: write protecting again\n");
866	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
867#endif
868	mark_nxdata_nx();
869	if (__supported_pte_mask & _PAGE_NX)
870		debug_checkwx();
871}