v6.2
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
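For context, consumers pick whichever of these pages matches the cache colour of a given user address by masking with zero_page_mask; the colour-aware lookup in the MIPS pgtable header is essentially the macro below (reproduced here as a sketch, not part of this file):

/* Sketch of the colour-aware zero-page lookup (cf. asm/pgtable.h):
 * offsetting empty_zero_page by (vaddr & zero_page_mask) selects the
 * page whose virtual colour matches vaddr, avoiding VCED/VECI traps. */
#define ZERO_PAGE(vaddr)						\
	(virt_to_page((void *)(empty_zero_page +			\
			       ((unsigned long)(vaddr) & zero_page_mask))))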

static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
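The usual caller of copy_to_user_page() is the core VM's remote-access path (ptrace and friends). A hedged sketch of that calling pattern, with illustrative names, modelled on mm/memory.c's __access_remote_vm():

/* Hedged sketch, names illustrative: the caller hands a kernel mapping
 * of the target page to copy_to_user_page() so the arch hook can keep
 * caches coherent after writing another process's memory. */
static void poke_remote_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, void *buf,
			     unsigned int offset, unsigned int bytes)
{
	void *maddr = kmap_local_page(page);

	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
	set_page_dirty_lock(page);
	kunmap_local(maddr);
}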

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}
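To make the shrink-inward arithmetic in maar_res_walk() concrete, here is a small standalone sketch (userspace C, not kernel code). It assumes a MIPS_MAAR_ADDR_SHIFT of 12, i.e. a 64 KiB MAAR granule; the exact constant comes from the CPU headers, and the RAM range is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1ULL << (n))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* Assumed granule: MIPS_MAAR_ADDR_SHIFT (12) plus the 4-bit shift. */
	uint64_t maar_align = BIT(12 + 4);		/* 64 KiB */
	uint64_t start = 0x80001000, end = 0x90000000;	/* hypothetical RAM */

	/* Round the bounds inward so the speculative window never
	 * covers bytes that are not actually RAM. */
	uint64_t lower = ALIGN_UP(start, maar_align);
	uint64_t upper = ALIGN_DOWN(end, maar_align) - 1;

	printf("lower=0x%llx upper=0x%llx\n",
	       (unsigned long long)lower, (unsigned long long)upper);
	/* Prints: lower=0x80010000 upper=0x8fffffff */
	return 0;
}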

void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}
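As a hedged illustration of the hook above (all names hypothetical, this is a sketch only): a platform whose EVA layout remaps the init section could install its own translation from platform setup code before free_initmem() runs, along these lines:

/* Hypothetical platform code -- not part of this file.  A board whose
 * EVA mapping puts the init section where the generic helpers can't
 * translate it supplies its own virtual-to-physical conversion. */
static void my_plat_free_init_pages_eva(void *begin, void *end)
{
	free_init_pages("unused kernel",
			my_plat_eva_to_phys(begin),	/* hypothetical helper */
			my_plat_eva_to_phys(end));
}

static void __init my_plat_setup(void)
{
	free_init_pages_eva = my_plat_free_init_pages_eva;
}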

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
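A note on the LUI trick in the swapper_pg_dir comment above: LUI loads a 16-bit immediate into the upper half of a register, so a 64 KiB-aligned address (low 16 bits all zero) can be materialized in a single instruction, with no following addiu/ori. A standalone sketch of the invariant (not kernel code):

#include <assert.h>
#include <stdint.h>

/* Sketch: a 64 KiB-aligned pointer survives the "upper 16 bits only"
 * round-trip that a single MIPS LUI instruction performs. */
static void check_lui_reachable(uintptr_t pgd_addr)
{
	uintptr_t via_lui = pgd_addr & ~(uintptr_t)0xffff; /* what LUI yields */

	assert((pgd_addr & 0xffff) == 0);	/* 64 KiB alignment */
	assert(via_lui == pgd_addr);		/* no addiu/ori needed */
}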
v3.15
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	pagefault_enable();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * is officially retired.
 *
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;