/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>

#include "migrate.h"

#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))

#ifndef __tilegx__
unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);
#endif

/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

/*
 * L2 page tables per controller.  We allocate these all at once from
 * the bootmem allocator and store them here.  This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation.  Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];

static void init_prealloc_ptes(int node, int pages)
{
	BUG_ON(pages & (PTRS_PER_PTE - 1));
	if (pages) {
		num_l2_ptes[node] = pages;
		l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
						HV_PAGE_TABLE_ALIGN, 0);
	}
}

pte_t *get_prealloc_pte(unsigned long pfn)
{
	int node = pfn_to_nid(pfn);
	pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
	BUG_ON(node >= MAX_NUMNODES);
	BUG_ON(pfn >= num_l2_ptes[node]);
	return &l2_ptes[node][pfn];
}
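
/*
 * The mask above keeps only the pfn bits below NR_PA_HIGHBIT_SHIFT,
 * i.e. the page's offset within its controller's physical address
 * range, which is exactly its index into that controller's
 * contiguous l2_ptes[] array.  Illustratively (actual values are
 * platform-dependent), if NR_PA_HIGHBIT_SHIFT were 36 and PAGE_SHIFT
 * were 16, the low 20 bits of the pfn would survive, indexing up to
 * about a million pages per controller.
 */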

/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup?  (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
	if (hash_default)
		return PAGE_HOME_HASH;
	return smp_processor_id();
}

/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
	phys_addr_t pa = __pa(page_table);
	unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
	pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
	BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
	pteval = pte_set_home(pteval, initial_heap_home());
	*(pte_t *)pmd = pteval;
	if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
		BUG();
}
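
/*
 * Note: the hypervisor refers to page tables by "page table frame
 * number" (ptfn), in units of HV_PAGE_TABLE_ALIGN rather than
 * PAGE_SIZE; hence the shift by HV_LOG2_PAGE_TABLE_ALIGN above and
 * the BUG_ON verifying that alignment before the entry is installed.
 */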

#ifdef __tilegx__

static inline pmd_t *alloc_pmd(void)
{
	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
	assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */

/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
	pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
	assign_pte(pmd, pte);
}

#ifdef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
	if (pud_none(*pud))
		assign_pmd(pud, alloc_pmd());
	return pmd_offset(pud, va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
	return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#endif
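
/*
 * The two get_pmd() variants mirror the paging depth: tilegx has a
 * real mid-level table and may need to allocate a pmd page on demand,
 * while on 32-bit tile the pud and pmd levels are folded into the
 * pgd, so the offset calls are just pointer arithmetic.
 */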

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical
 * memory, so we can cache the location of the first one and move
 * around without checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
					 unsigned long end, pgd_t *pgd)
{
	unsigned long vaddr;
	start = round_down(start, PMD_SIZE);
	end = round_up(end, PMD_SIZE);
	for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
		pmd_t *pmd = get_pmd(pgd, vaddr);
		if (pmd_none(*pmd))
			assign_pte(pmd, alloc_pte());
	}
}
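
/*
 * Note that start and end are rounded out to PMD_SIZE, so covering
 * any part of a pmd's range allocates a full pte page for it.  The
 * HIGHMEM code below, for example, uses this to ensure pte pages
 * exist for the persistent kmap area before walking down to its pte:
 *
 *	page_table_range_init(PKMAP_BASE,
 *			      PKMAP_BASE + PAGE_SIZE * LAST_PKMAP, pgd_base);
 */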


static int __initdata ktext_hash = 1;  /* .text pages */
static int __initdata kdata_hash = 1;  /* .data and .bss pages */
int __write_once hash_default = 1;     /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;      /* if no homecaching, use h4h */

/*
 * CPUs to use for striping the pages of kernel data.  If hash-for-home
 * is available, this is only relevant if kcache_hash sets up the
 * .data and .bss to be page-homed, and we don't want the default mode
 * of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;       /* if no homecaching, small pages */


/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
	prot = pte_set_home(prot, home);
	if (home == PAGE_HOME_IMMUTABLE) {
		if (ktext_hash)
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
		else
			prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
	}
	return prot;
}
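
/*
 * For example, construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH) is a
 * normal kernel mapping whose cache lines are hashed across the
 * chip, while PAGE_HOME_IMMUTABLE (used below for read-only data and
 * text) additionally selects an L3 mode matching how the kernel text
 * itself is being cached.
 */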

/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
	int cpu;
	unsigned long page;
	enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };

	/* For kdata=huge, everything is just hash-for-home. */
	if (kdata_huge)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/* We map the aliased pages of permanent text inaccessible. */
	if (address < (ulong) _sinittext - CODE_DELTA)
		return PAGE_NONE;

	/* We map read-only data non-coherent for performance. */
	if ((address >= (ulong) __start_rodata &&
	     address < (ulong) __end_rodata) ||
	    address == (ulong) empty_zero_page) {
		return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
	}

#ifndef __tilegx__
	/* Force the atomic_locks[] array page to be hash-for-home. */
	if (address == (ulong) atomic_locks)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif

	/*
	 * Everything else that isn't data or bss is heap, so mark it
	 * with the initial heap home (hash-for-home, or this cpu).  This
	 * includes any addresses after the loaded image and any address before
	 * _einitdata, since we already captured the case of text before
	 * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
	 *
	 * All the LOWMEM pages that we mark this way will get their
	 * struct page homecache properly marked later, in set_page_homes().
	 * The HIGHMEM pages we leave with a default zero for their
	 * homes, but with a zero free_time we don't have to actually
	 * do a flush action the first time we use them, either.
	 */
	if (address >= (ulong) _end || address < (ulong) _einitdata)
		return construct_pgprot(PAGE_KERNEL, initial_heap_home());

	/* Use hash-for-home if requested for data/bss. */
	if (kdata_hash)
		return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);

	/*
	 * Otherwise we just hand out consecutive cpus.  To avoid
	 * requiring this function to hold state, we just walk forward from
	 * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
	 * the requested address, while walking cpu home around kdata_mask.
	 * This is typically no more than a dozen or so iterations.
	 */
	page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
	BUG_ON(address < page || address >= (ulong)_end);
	cpu = cpumask_first(&kdata_mask);
	for (; page < address; page += PAGE_SIZE) {
		if (page >= (ulong)&init_thread_union &&
		    page < (ulong)&init_thread_union + THREAD_SIZE)
			continue;
		if (page == (ulong)empty_zero_page)
			continue;
#ifndef __tilegx__
		if (page == (ulong)atomic_locks)
			continue;
#endif
		cpu = cpumask_next(cpu, &kdata_mask);
		if (cpu == NR_CPUS)
			cpu = cpumask_first(&kdata_mask);
	}
	return construct_pgprot(PAGE_KERNEL, cpu);
}
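
/*
 * The striping walk above is deterministic: it always restarts from
 * the first page after __end_rodata, so repeated calls for the same
 * address return the same cpu home, at the cost of the short linear
 * walk noted in the comment.
 */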

/*
 * This function sets up how we cache the kernel text.  If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information).  But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that.  In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;

static int __init setup_ktext(char *str)
{
	if (str == NULL)
		return -EINVAL;

	/* If you have a leading "nocache", turn off ktext caching */
	if (strncmp(str, "nocache", 7) == 0) {
		ktext_nocache = 1;
		pr_info("ktext: disabling local caching of kernel text\n");
		str += 7;
		if (*str == ',')
			++str;
		if (*str == '\0')
			return 0;
	}

	ktext_arg_seen = 1;

	/* Default setting: use a huge page */
	if (strcmp(str, "huge") == 0)
		pr_info("ktext: using one huge locally cached page\n");

	/* Pay TLB cost but get no cache benefit: cache small pages locally */
	else if (strcmp(str, "local") == 0) {
		ktext_small = 1;
		ktext_local = 1;
		pr_info("ktext: using small pages with local caching\n");
	}

	/* Neighborhood cache ktext pages on all cpus. */
	else if (strcmp(str, "all") == 0) {
		ktext_small = 1;
		ktext_all = 1;
		pr_info("ktext: using maximal caching neighborhood\n");
	}

	/* Neighborhood ktext pages on specified mask */
	else if (cpulist_parse(str, &ktext_mask) == 0) {
		char buf[NR_CPUS * 5];
		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
		if (cpumask_weight(&ktext_mask) > 1) {
			ktext_small = 1;
			pr_info("ktext: using caching neighborhood %s "
			       "with small pages\n", buf);
		} else {
			pr_info("ktext: caching on cpu %s with one huge page\n",
			       buf);
		}
	}

	else if (*str)
		return -EINVAL;

	return 0;
}

early_param("ktext", setup_ktext);
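
/*
 * Illustrative boot arguments accepted by the parser above:
 *
 *	ktext=huge           one huge locally cached page (the default)
 *	ktext=local          small pages, cached only on the local tile
 *	ktext=all            small pages, neighborhood-cached on all cpus
 *	ktext=1-7            neighborhood-cache text on cpus 1 through 7
 *	ktext=nocache,local  a leading "nocache" may prefix any of these
 */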


static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
	if (!ktext_nocache)
		prot = hv_pte_set_nc(prot);
	else
		prot = hv_pte_set_no_alloc_l2(prot);
	return prot;
}
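
/*
 * The apparent intent: when text caching is enabled, remote copies of
 * the read-only text are marked non-coherent (safe, since text never
 * changes), while "ktext=nocache" instead keeps the lines from being
 * allocated in the home L2 at all.
 */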

/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
 __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long long irqmask;
	unsigned long address, pfn;
	pmd_t *pmd;
	pte_t *pte;
	int pte_ofs;
	const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
	struct cpumask kstripe_mask;
	int rc, i;

	if (ktext_arg_seen && ktext_hash) {
		pr_warning("warning: \"ktext\" boot argument ignored"
			   " if \"kcache_hash\" sets up text hash-for-home\n");
		ktext_small = 0;
	}

	if (kdata_arg_seen && kdata_hash) {
		pr_warning("warning: \"kdata\" boot argument ignored"
			   " if \"kcache_hash\" sets up data hash-for-home\n");
	}

	if (kdata_huge && !hash_default) {
		pr_warning("warning: disabling \"kdata=huge\"; requires"
			  " kcache_hash=all or =allbutstack\n");
		kdata_huge = 0;
	}

	/*
	 * Set up a mask for cpus to use for kernel striping.
	 * This is normally all cpus, but minus dataplane cpus if any.
	 * If the dataplane covers the whole chip, we stripe over
	 * the whole chip too.
	 */
	cpumask_copy(&kstripe_mask, cpu_possible_mask);
	if (!kdata_arg_seen)
		kdata_mask = kstripe_mask;

	/* Allocate and fill in L2 page tables */
	for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
		unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
		unsigned long end_pfn = node_end_pfn[i];
#endif
		unsigned long end_huge_pfn = 0;

		/* Pre-shatter the last huge page to allow per-cpu pages. */
		if (kdata_huge)
			end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

		pfn = node_start_pfn[i];

		/* Allocate enough memory to hold L2 page tables for node. */
		init_prealloc_ptes(i, end_pfn - pfn);

		address = (unsigned long) pfn_to_kaddr(pfn);
		while (pfn < end_pfn) {
			BUG_ON(address & (HPAGE_SIZE-1));
			pmd = get_pmd(pgtables, address);
			pte = get_prealloc_pte(pfn);
			if (pfn < end_huge_pfn) {
				pgprot_t prot = init_pgprot(address);
				*(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE)
					pte[pte_ofs] = pfn_pte(pfn, prot);
			} else {
				if (kdata_huge)
					printk(KERN_DEBUG "pre-shattered huge"
					       " page at %#lx\n", address);
				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
				     pfn++, pte_ofs++, address += PAGE_SIZE) {
					pgprot_t prot = init_pgprot(address);
					pte[pte_ofs] = pfn_pte(pfn, prot);
				}
				assign_pte(pmd, pte);
			}
		}
	}

	/*
	 * Set or check ktext_map now that we have cpu_possible_mask
	 * and kstripe_mask to work with.
	 */
	if (ktext_all)
		cpumask_copy(&ktext_mask, cpu_possible_mask);
	else if (ktext_nondataplane)
		ktext_mask = kstripe_mask;
	else if (!cpumask_empty(&ktext_mask)) {
		/* Sanity-check any mask that was requested */
		struct cpumask bad;
		cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
		cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
		if (!cpumask_empty(&bad)) {
			char buf[NR_CPUS * 5];
			cpulist_scnprintf(buf, sizeof(buf), &bad);
			pr_info("ktext: not using unavailable cpus %s\n", buf);
		}
		if (cpumask_empty(&ktext_mask)) {
			pr_warning("ktext: no valid cpus; caching on %d.\n",
				   smp_processor_id());
			cpumask_copy(&ktext_mask,
				     cpumask_of(smp_processor_id()));
		}
	}

	address = MEM_SV_START;
	pmd = get_pmd(pgtables, address);
	pfn = 0;  /* code starts at PA 0 */
	if (ktext_small) {
		/* Allocate an L2 PTE for the kernel text */
		int cpu = 0;
		pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
						 PAGE_HOME_IMMUTABLE);

		if (ktext_local) {
			if (ktext_nocache)
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_UNCACHED);
			else
				prot = hv_pte_set_mode(prot,
						       HV_PTE_MODE_CACHE_NO_L3);
		} else {
			prot = hv_pte_set_mode(prot,
					       HV_PTE_MODE_CACHE_TILE_L3);
			cpu = cpumask_first(&ktext_mask);

			prot = ktext_set_nocache(prot);
		}

		BUG_ON(address != (unsigned long)_text);
		pte = NULL;
		for (; address < (unsigned long)_einittext;
		     pfn++, address += PAGE_SIZE) {
			pte_ofs = pte_index(address);
			if (pte_ofs == 0) {
				if (pte)
					assign_pte(pmd++, pte);
				pte = alloc_pte();
			}
			if (!ktext_local) {
				prot = set_remote_cache_cpu(prot, cpu);
				cpu = cpumask_next(cpu, &ktext_mask);
				if (cpu == NR_CPUS)
					cpu = cpumask_first(&ktext_mask);
			}
			pte[pte_ofs] = pfn_pte(pfn, prot);
		}
		if (pte)
			assign_pte(pmd, pte);
	} else {
		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
		pteval = pte_mkhuge(pteval);
		if (ktext_hash) {
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_HASH_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (cpumask_weight(&ktext_mask) == 1) {
			pteval = set_remote_cache_cpu(pteval,
					      cpumask_first(&ktext_mask));
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_TILE_L3);
			pteval = ktext_set_nocache(pteval);
		} else if (ktext_nocache)
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_UNCACHED);
		else
			pteval = hv_pte_set_mode(pteval,
						 HV_PTE_MODE_CACHE_NO_L3);
		for (; address < (unsigned long)_einittext;
		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
	}

	/* Set swapper_pgprot here so it is flushed to memory right away. */
	swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

	/*
	 * Since we may be changing the caching of the stack and page
	 * table itself, we invoke an assembly helper to do the
	 * following steps:
	 *
	 *  - flush the cache so we start with an empty slate
	 *  - install pgtables[] as the real page table
	 *  - flush the TLB so the new page table takes effect
	 */
	irqmask = interrupt_mask_save_mask();
	interrupt_mask_set_mask(-1ULL);
	rc = flush_and_install_context(__pa(pgtables),
				       init_pgprot((unsigned long)pgtables),
				       __get_cpu_var(current_asid),
				       cpumask_bits(my_cpu_mask));
	interrupt_mask_restore_mask(irqmask);
	BUG_ON(rc != 0);

	/* Copy the page table back to the normal swapper_pg_dir. */
	memcpy(pgd_base, pgtables, sizeof(pgtables));
	__install_page_table(pgd_base, __get_cpu_var(current_asid),
			     swapper_pgprot);

	/*
	 * We just read swapper_pgprot and thus brought it into the cache,
	 * with its new home & caching mode.  When we start the other CPUs,
	 * they're going to reference swapper_pgprot via their initial fake
	 * VA-is-PA mappings, which cache everything locally.  At that
	 * time, if it's in our cache with a conflicting home, the
	 * simulator's coherence checker will complain.  So, flush it out
	 * of our cache; we're not going to ever use it again anyway.
	 */
	__insn_finv(&swapper_pgprot);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data.  Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
	return pagenr < kaddr_to_pfn(_end) &&
		!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
		  pagenr < kaddr_to_pfn(_einitdata)) &&
		!(pagenr >= kaddr_to_pfn(_sinittext) ||
		  pagenr <= kaddr_to_pfn(_einittext-1));
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */


#ifndef CONFIG_64BIT
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page = pfn_to_page(start);

	for (pfn = start; pfn < end; ) {
		/* Optimize by freeing pages in large batches */
		int order = __ffs(pfn);
		int count, i;
		struct page *p;

		if (order >= MAX_ORDER)
			order = MAX_ORDER-1;
		count = 1 << order;
		while (pfn + count > end) {
			count >>= 1;
			--order;
		}
		for (p = page, i = 0; i < count; ++i, ++p) {
			__ClearPageReserved(p);
			/*
			 * Hacky direct set to avoid unnecessary
			 * lock take/release for EVERY page here.
			 */
			p->_count.counter = 0;
			p->_mapcount.counter = -1;
		}
		init_page_count(page);
		__free_pages(page, order);
		adjust_managed_page_count(page, count);

		page += count;
		pfn += count;
	}
}
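
/*
 * Worked example of the batching above (illustrative pfns): freeing
 * pfns 0x30..0x100, the first pass sees __ffs(0x30) == 4 and frees an
 * order-4 block (16 pages) at 0x30, then an order-6 block at 0x40 and
 * an order-7 block at 0x80, each batch limited by the pfn's alignment
 * and clipped to the end of the range.
 */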

static void __init set_non_bootmem_pages_init(void)
{
	struct zone *z;
	for_each_zone(z) {
		unsigned long start, end;
		int nid = z->zone_pgdat->node_id;
#ifdef CONFIG_HIGHMEM
		int idx = zone_idx(z);
#endif

		start = z->zone_start_pfn;
		end = start + z->spanned_pages;
		start = max(start, node_free_pfn[nid]);
		start = max(start, max_low_pfn);

#ifdef CONFIG_HIGHMEM
		if (idx == ZONE_HIGHMEM)
			totalhigh_pages += z->spanned_pages;
#endif
		if (kdata_huge) {
			unsigned long percpu_pfn = node_percpu_pfn[nid];
			if (start < percpu_pfn && end > percpu_pfn)
				end = percpu_pfn;
		}
#ifdef CONFIG_PCI
		if (start <= pci_reserve_start_pfn &&
		    end > pci_reserve_start_pfn) {
			if (end > pci_reserve_end_pfn)
				init_free_pfn_range(pci_reserve_end_pfn, end);
			end = pci_reserve_start_pfn;
		}
#endif
		init_free_pfn_range(start, end);
	}
}
#endif

/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef __tilegx__
	pud_t *pud;
#endif
	pgd_t *pgd_base = swapper_pg_dir;

	kernel_physical_mapping_init(pgd_base);

	/* Fixed mappings, only the page table structure has to be created. */
	page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
			      FIXADDR_TOP, pgd_base);

#ifdef CONFIG_HIGHMEM
	permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
	/*
	 * Since GX allocates just one pmd_t array worth of vmalloc space,
	 * we go ahead and allocate it statically here, then share it
	 * globally.  As a result we don't have to worry about any task
	 * changing init_mm once we get up and running, and there's no
	 * need for e.g. vmalloc_sync_all().
	 */
	BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
	pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
	assign_pmd(pud, alloc_pmd());
#endif
}


/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}

static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
	max_mapnr = max_low_pfn;
#endif
}

void __init mem_init(void)
{
	int i;
#ifndef __tilegx__
	void *last;
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
	/* check that fixmap and pkmap do not overlap */
	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
		pr_err("fixmap and kmap areas overlap"
		       " - this will crash\n");
		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
		       FIXADDR_START);
		BUG();
	}
#endif

	set_max_mapnr_init();

	/* this will put all bootmem onto the freelists */
	free_all_bootmem();

#ifndef CONFIG_64BIT
	/* count all remaining LOWMEM and give all HIGHMEM to page allocator */
	set_non_bootmem_pages_init();
#endif

	mem_init_print_info(NULL);

	/*
	 * In debug mode, dump some interesting memory mappings.
	 */
#ifdef CONFIG_HIGHMEM
	printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
	       FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
	printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
	       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
	printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
	       _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		struct pglist_data *node = &node_data[i];
		if (node->node_present_pages) {
			unsigned long start = (unsigned long)
				pfn_to_kaddr(node->node_start_pfn);
			unsigned long end = start +
				(node->node_present_pages << PAGE_SHIFT);
			printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
			       i, start, end - 1);
		}
	}
#else
	last = high_memory;
	for (i = MAX_NUMNODES-1; i >= 0; --i) {
		if ((unsigned long)vbase_map[i] != -1UL) {
			printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
			       i, (unsigned long) (vbase_map[i]),
			       (unsigned long) (last-1));
			last = vbase_map[i];
		}
	}
#endif

#ifndef __tilegx__
	/*
	 * Convert from using one lock for all atomic operations to
	 * one per cpu.
	 */
	__init_atomic_per_cpu();
#endif
}

/*
 * This is for the non-NUMA, single-node SMP system case.
 * As on x86, we simply add any new memory to the highmem
 * zone for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = &contig_page_data;
	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	/* TODO */
	return -EBUSY;
}
#endif
#endif

struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd", SIZEOF_PGD, SIZEOF_PGD, 0, NULL);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static long __write_once initfree;
#else
static long __write_once initfree = 1;
#endif

/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) == 0) {
		initfree = val;
		pr_info("initfree: %s free init pages\n",
			initfree ? "will" : "won't");
	}
	return 1;
}
__setup("initfree=", set_initfree);
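
/*
 * For example, booting with "initfree=0" keeps the __init pages but
 * unmaps them, so any buggy late reference to init memory faults
 * instead of silently reading freed pages; this is also the default
 * under CONFIG_DEBUG_PAGEALLOC, as set up above.
 */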

static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr = (unsigned long) begin;

	if (kdata_huge && !initfree) {
		pr_warning("Warning: ignoring initfree=0:"
			   " incompatible with kdata=huge\n");
		initfree = 1;
	}
	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
	local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		/*
		 * Note we just reset the home here directly in the
		 * page table.  We know this is safe because our caller
		 * just flushed the caches on all the other cpus,
		 * and they won't be touching any of these pages.
		 */
		int pfn = kaddr_to_pfn((void *)addr);
		struct page *page = pfn_to_page(pfn);
		pte_t *ptep = virt_to_kpte(addr);
		if (!initfree) {
			/*
			 * If debugging page accesses then do not free
			 * this memory but mark them not present - any
			 * buggy init-section access will create a
			 * kernel page fault:
			 */
			pte_clear(&init_mm, addr, ptep);
			continue;
		}
		if (pte_huge(*ptep))
			BUG_ON(!kdata_huge);
		else
			set_pte_at(&init_mm, addr, ptep,
				   pfn_pte(pfn, PAGE_KERNEL));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
	const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;

	/*
	 * Evict the cache on all cores to avoid incoherence.
	 * We are guaranteed that no one will touch the init pages any more.
	 */
	homecache_evict(&cpu_cacheable_map);

	/* Free the data pages that we won't use again after init. */
	free_init_pages("unused kernel data",
			(unsigned long)_sinitdata,
			(unsigned long)_einitdata);

	/*
	 * Free the pages mapped from 0xc0000000 that correspond to code
	 * pages from MEM_SV_START that we won't use again after init.
	 */
	free_init_pages("unused kernel text",
			(unsigned long)_sinittext - text_delta,
			(unsigned long)_einittext - text_delta);
	/* Do a global TLB flush so everyone sees the changes. */
	flush_tlb_all();
}