v4.10.11 (arch/x86/mm/init_32.c)
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

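/*
 * Background (a fact about x86-32 paging, not from the original
 * comments): with CONFIG_X86_PAE the 32-bit kernel uses three paging
 * levels (pgd -> pmd -> pte), so a missing pmd page really is
 * allocated above. Without PAE the pud and pmd levels are folded,
 * and pmd_offset(pud_offset(pgd, 0), 0) resolves straight back to
 * the pgd entry itself, which is why this "returns the gd entry".
 */
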
/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

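/*
 * Usage sketch (an assumption for illustration, not a call site in
 * this file): a caller that wants a kernel mapping at vaddr could do
 *
 *	pte_t *pte = populate_extra_pte(vaddr);
 *	set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
 *
 * followed by a TLB flush for vaddr.
 */
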
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
	unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
	int pgd_idx, pmd_idx;
	unsigned long vaddr;

	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
		return 0;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd_idx++) {
			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
				count++;
			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
#endif
	return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte,
					   void **adr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = *adr;
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);
		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;
	unsigned long count = page_table_range_init_count(start, end);
	void *adr = NULL;

	if (count)
		adr = alloc_low_pages(count);

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte, &adr);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

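/*
 * Note that the range tested above runs from _text all the way to
 * __init_end, so init text/data and everything laid out in between
 * also count as "kernel text" for mapping-permission purposes here.
 */
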
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!boot_cpu_has(X86_FEATURE_PSE))
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				pfn &= PMD_MASK >> PAGE_SHIFT;
				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a local, full TLB flush (global pages included) to
		 * flush out the previous mappings from both the small and
		 * large page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

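/*
 * Caller sketch (an assumption for illustration; the real call sites
 * live in the generic x86 init code, not in this file): mapping all
 * of lowmem with 2M pages where the CPU supports PSE would look like
 *
 *	kernel_physical_mapping_init(0, max_low_pfn << PAGE_SHIFT,
 *				     1 << PG_LEVEL_2M);
 */
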
pte_t *kmap_pte;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}

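/*
 * kmap_pte caches the pte slot for FIX_KMAP_BEGIN; the kmap_atomic()
 * implementation in arch/x86/mm/highmem_32.c indexes relative to it
 * when installing temporary highmem mappings.
 */
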
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
					    start_pfn, end_pfn);
		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
					      start_pfn, end_pfn);
		for ( ; pfn < e_pfn; pfn++)
			if (pfn_valid(pfn))
				free_highmem_page(pfn_to_page(pfn));
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_init(void)
{
	unsigned long pfn, va;
	pgd_t *pgd, *base = swapper_pg_dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table.
	 * In virtual address space, we should have at least two pages
	 * from VMALLOC_END to pkmap or fixmap according to the
	 * VMALLOC_END definition, and max_low_pfn is set to the
	 * VMALLOC_END physical address. If the initial memory mapping
	 * did its job right, we should find ptes in use near
	 * max_low_pfn, or a pmd that is not present.
	 */
	for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		/* should not be a large page here */
		if (pmd_large(*pmd)) {
			pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
				pfn, pmd, __pa(pmd));
			BUG_ON(1);
		}

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
				pfn, pmd, __pa(pmd), pte, __pa(pte));
		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
	paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

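/*
 * Example (a fact about memparse(), which accepts K/M/G suffixes):
 * booting with "highmem=512M" makes memparse() return 512 << 20,
 * and the shift by PAGE_SHIFT turns that into 131072 4K pages.
 */
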
#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

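/*
 * For scale (standard x86-32 numbers, stated here as background):
 * with the default 3G/1G user/kernel split and the 128MB vmalloc
 * reserve, MAXMEM works out to roughly 896MB, so anything above
 * that has to be reached through highmem.
 */
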
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
	sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
	max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
		panic("Linux doesn't support CPUs with broken WP.");
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

void __init mem_init(void)
{
	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/*
	 * With CONFIG_DEBUG_PAGEALLOC, initialization of highmem pages has
	 * to be done before free_all_bootmem(). Memblock uses free low
	 * memory for temporary data (see find_range_array()) and for this
	 * purpose can use pages that were already passed to the buddy
	 * allocator, and hence are marked not accessible in the page
	 * tables when compiled with CONFIG_DEBUG_PAGEALLOC. Otherwise the
	 * order of initialization is not important here.
	 */
	set_highmem_pages_init();

	/* this will put all low memory onto the freelists */
	free_all_bootmem();

	after_bootmem = 1;

	mem_init_print_info(NULL);
	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	return __remove_pages(zone, start_pfn, nr_pages);
}
#endif
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

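/*
 * How the asm above works: flag starts at 1 (the "2" (1) input ties
 * it to operand %2). The movb at label 1 writes back to the
 * read-only FIX_WP_TEST page; if the CPU honours WP in supervisor
 * mode that write faults, the _ASM_EXTABLE fixup resumes at label 2,
 * the xorl is skipped and flag stays 1. If the write silently
 * succeeds, xorl clears flag to 0, i.e. WP is broken.
 */
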
 
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from the is_kernel_text upper limit. Also use HPAGE
	 * alignment, since large pages may have been used for the mapping:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
	if (__supported_pte_mask & _PAGE_NX)
		debug_checkwx();
}
v3.1 (arch/x86/mm/init_32.c)
/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

static __init void *alloc_low_page(void)
{
	unsigned long pfn = pgt_buf_end++;
	void *adr;

	if (pfn >= pgt_buf_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	clear_page(adr);
	return adr;
}

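/*
 * Background (as far as the surrounding code shows; the buffer setup
 * itself lives elsewhere): pgt_buf_start, pgt_buf_end and pgt_buf_top
 * delimit a physically contiguous region reserved early for page
 * tables. This is a simple bump allocator over that region: it hands
 * out one zeroed page at a time and has no way to free.
 */
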
/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_bootmem) {
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	int pgd_idx = pgd_index(vaddr);
	int pmd_idx = pmd_index(vaddr);

	return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	int pte_idx = pte_index(vaddr);
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return one_page_table_init(pmd) + pte_idx;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
		|| (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical space,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
							pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
			                            pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
	unsigned long last_map_addr = end;
	unsigned long start_pfn, end_pfn;
	pgd_t *pgd_base = swapper_pg_dir;
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	/*
	 * The first iteration will set up the identity mapping using
	 * large/small pages based on use_pse, with the other attributes
	 * the same as set by the early code in head_32.S.
	 *
	 * The second iteration will set up the appropriate attributes
	 * (NX, GLOBAL, ...) as desired for the kernel identity mapping.
	 *
	 * This two-pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * The first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1) {
					set_pte(pte, pfn_pte(pfn, init_prot));
					last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
				} else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * Update the direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * Do a local, full TLB flush (global pages included) to
		 * flush out the previous mappings from both the small and
		 * large page TLBs.
		 */
		__flush_tlb_all();

		/*
		 * The second iteration will set the actual desired PTE
		 * attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
	return last_map_addr;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

void __init add_highpages_with_active_regions(int nid,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	struct range *range;
	int nr_range;
	int i;

	nr_range = __get_free_all_memory_range(&range, nid, start_pfn, end_pfn);

	for (i = 0; i < nr_range; i++) {
		struct page *page;
		int node_pfn;

		for (node_pfn = range[i].start; node_pfn < range[i].end;
		     node_pfn++) {
			if (!pfn_valid(node_pfn))
				continue;
			page = pfn_to_page(node_pfn);
			add_one_highpage_init(page);
		}
	}
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

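/*
 * add_one_highpage_init() is the classic "hand a page to the buddy
 * allocator" sequence of this era: clear PG_reserved, reset the
 * refcount to 1, then let __free_page() drop it into the free lists,
 * while totalhigh_pages keeps the highmem accounting straight.
 */
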
void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings.  Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S.  The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir.  In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memblock_x86_register_active_regions(0, 0, highend_pfn);
	sparse_memory_present_with_active_regions(0);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memblock_x86_register_active_regions(0, 0, max_low_pfn);
	sparse_memory_present_with_active_regions(0);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	__vmalloc_start_set = true;

	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

	after_bootmem = 1;
}

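/*
 * Zone layout on 32-bit x86 (standard background): ZONE_DMA covers
 * memory below MAX_DMA_ADDRESS (the 16MB ISA DMA limit), ZONE_NORMAL
 * runs up to max_low_pfn, and anything beyond that lands in
 * ZONE_HIGHMEM when CONFIG_HIGHMEM is enabled.
 */
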
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		totalhigh_pages << (PAGE_SHIFT-10));

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section.  Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly __read_mostly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
	/*
	 * When this is called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from the is_kernel_text upper limit. Also use HPAGE
	 * alignment, since large pages may have been used for the mapping:
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
}
#endif