v6.2
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
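
/*
 * Illustrative sketch (not part of this file): the colour-aware zero pages
 * set up above are consumed through the MIPS ZERO_PAGE() macro, which --
 * roughly, per arch/mips/include/asm/pgtable.h -- picks the copy whose
 * cache colour matches the faulting virtual address:
 *
 *	#define ZERO_PAGE(vaddr) \
 *		(virt_to_page((void *)(empty_zero_page + \
 *			((unsigned long)(vaddr) & zero_page_mask))))
 */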

static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}
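
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * kmap_coherent() returns a kernel mapping whose cache colour matches the
 * user address the page is (or will be) mapped at, so stores land in the
 * same aliased d-cache lines the user mapping sees; kunmap_coherent()
 * unwires the temporary TLB entry again.
 */
static inline void example_zero_user_page(struct page *page, unsigned long uaddr)
{
	void *kaddr = kmap_coherent(page, uaddr);	/* colour-matched mapping */

	memset(kaddr, 0, PAGE_SIZE);			/* safe w.r.t. d-cache aliases */
	kunmap_coherent();				/* drop the wired TLB entry */
}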

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
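
/*
 * Illustrative sketch (assumed caller, not part of this file): the fixmap
 * page tables are pre-allocated from pagetable_init(), roughly as
 * arch/mips/mm/pgtable-32.c does:
 *
 *	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
 *	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, swapper_pg_dir);
 */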

struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}
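
/*
 * Illustrative arithmetic (assuming MIPS_MAAR_ADDR_SHIFT == 12, which makes
 * maar_align 64KiB): a RAM range 0x1000..0x8020000 would be recorded as
 *
 *	cfg->lower = 0x00010000		(0x1000 rounded up to 64K)
 *	cfg->upper = 0x0801ffff		(0x8020000 rounded down, minus 1)
 *
 * so only whole, 64K-aligned chunks are marked safe to speculate into.
 */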


unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}

void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}
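
/*
 * Illustrative sketch (layout inferred from the read-back loop above, not
 * part of this file): each MAAR pair bounds one physical range, the even
 * index holding the upper bound and the odd index the lower one, e.g. for
 * 256MB of RAM at physical address 0:
 *
 *	pair 0: upper MAAR ~ (0x0fffffff >> 4), lower MAAR ~ (0x0 >> 4),
 *	        both with the valid and speculate attribute bits set
 *
 * while every MAAR beyond `used` pairs is written as 0 (disabled).
 */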

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}
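
/*
 * Illustrative sketch (hypothetical platform hook, not part of this file):
 * an EVA platform whose init section sits behind a non-trivial virtual to
 * physical mapping installs its own routine before free_initmem() runs,
 * e.g. from its early setup code:
 *
 *	free_init_pages_eva = my_soc_free_init_pages;	(hypothetical helper)
 */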

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return cpu_to_node(cpu);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
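
/*
 * Illustrative sketch (conceptual, not the literal implementation): once
 * __per_cpu_offset[] is populated above, a per_cpu(var, cpu) access
 * resolves to roughly
 *
 *	*(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu])
 */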

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
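
/*
 * Illustrative sketch (assembly assumed, not part of this file): because
 * swapper_pg_dir is 64K-aligned, the low 16 bits of its address are zero,
 * so the TLB refill handler can materialise the address in one instruction:
 *
 *	lui	k1, %hi(swapper_pg_dir)		# no ori/addiu fixup needed
 */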
v4.10.11
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/maar.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg[BOOT_MEM_MAP_MAX];
	unsigned i, num_configured, num_cfg = 0;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			continue;
		}

		/* Round lower up */
		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

		/* Round upper down */
		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
					boot_mem_map.map[i].size;
		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

		cfg[num_cfg].attrs = MIPS_MAAR_S;
		num_cfg++;
	}

	num_configured = maar_config(cfg, num_cfg, num_pairs);
	if (num_configured < num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
			num_pairs, num_cfg);

	return num_configured;
}

void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_V)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;