v3.1
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
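
/*
 * Usage sketch (mirrors the callers later in this file): the pair must
 * stay brace-balanced within a single function.  On SMTC it also stops
 * all other virtual processing elements via dvpe() so a TLB update
 * cannot race with another thread context:
 *
 *	unsigned long flags;
 *
 *	ENTER_CRITICAL(flags);
 *	... program EntryHi/EntryLo and write the TLB ...
 *	EXIT_CRITICAL(flags);
 */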

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization, we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
unsigned long setup_zero_pages(void)
{
	unsigned int order;
	unsigned long size;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	while (page < virt_to_page((void *)(empty_zero_page + (PAGE_SIZE << order)))) {
		SetPageReserved(page);
		page++;
	}

	size = PAGE_SIZE << order;
	zero_page_mask = (size - 1) & PAGE_MASK;

	return 1UL << order;
}
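
/*
 * Worked example (assuming 4 kB pages): with cpu_has_vce, order is 3,
 * so eight zero pages are allocated and zero_page_mask becomes 0x7000.
 * ZERO_PAGE(vaddr) can then add "vaddr & zero_page_mask" to
 * empty_zero_page to pick the zero page whose cache colour matches
 * vaddr.
 */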

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	inc_preempt_count();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}
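
/*
 * Usage sketch (see copy_to_user_page() below for a real caller): map
 * the page at a kernel address with the same cache colour as the user
 * mapping, operate on it, then drop the mapping again:
 *
 *	void *kaddr = kmap_coherent(page, uvaddr);
 *	memcpy(kaddr, src, len);
 *	kunmap_coherent();
 */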

#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
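/*
 * UNIQUE_ENTRYHI(idx) yields a distinct EntryHi value in the unmapped
 * CKSEG0 range for every TLB index (each entry maps a pair of pages,
 * hence PAGE_SHIFT + 1), so the dummy entry written below can never
 * match a real virtual address.
 */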

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	dec_preempt_count();
	preempt_check_resched();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
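
/*
 * In the copy helpers below, kmap_coherent() is only used when the page
 * is mapped in userspace with a clean dcache; otherwise the copy goes
 * through an ordinary kernel mapping and the page is flagged
 * dcache-dirty so a later cache update can flush it.
 */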

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
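
/*
 * fixrange_init() only populates the page-table levels covering
 * [start, end); callers (e.g. pagetable_init()) are expected to pass
 * PMD-aligned boundaries derived from __fix_to_virt().
 */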

#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			/* not usable memory */
			continue;

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}
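
/*
 * PFN_UP() rounds the region start up and PFN_DOWN() rounds the region
 * end down, so only page frames that lie entirely inside a RAM region
 * are reported as RAM.
 */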

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long tmp, ram;

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	totalram_pages += free_all_bootmem();
	totalram_pages -= setup_zero_pages();	/* Setup zeroed pages.  */

	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		if (page_is_ram(tmp)) {
			ram++;
			if (PageReserved(pfn_to_page(tmp)))
				reservedpages++;
		}
	num_physpages = ram;

#ifdef CONFIG_HIGHMEM
	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp)) {
			SetPageReserved(page);
			continue;
		}
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		totalhigh_pages++;
	}
	totalram_pages += totalhigh_pages;
	num_physpages += totalhigh_pages;
#endif

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
	       "%ldk reserved, %ldk data, %ldk init, %ldk highmem)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		ClearPageReserved(page);
		init_page_count(page);
		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		__free_page(page);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory",
			virt_to_phys((void *)start),
			virt_to_phys((void *)end));
}
#endif

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	free_init_pages("unused kernel memory",
			__pa_symbol(&__init_begin),
			__pa_symbol(&__init_end));
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif
/*
 * On 64-bit we've got three-level pagetables with a slightly
 * different layout ...
 */
#define __page_aligned(order) __attribute__((__aligned__(PAGE_SIZE<<order)))
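/*
 * For example, with 4 kB pages __page_aligned(1) expands to
 * __attribute__((__aligned__(8192))).
 */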

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
v3.15
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization, we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
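
/*
 * mark_page_reserved() sets PG_reserved and subtracts the page from the
 * managed-page count, so mem_init() does not have to adjust
 * totalram_pages for the zero pages by hand.
 */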

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif
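
/*
 * kmap_coherent() maps @page at a fixmap slot whose cache colour
 * matches the user-space address @addr, avoiding dcache aliases.  Like
 * kmap_atomic() it disables page faults, and every call must be paired
 * with kunmap_coherent().  Pages already flagged dcache-dirty are
 * rejected with BUG_ON().
 */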

void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *)vaddr;
}

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	pagefault_enable();
}

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}
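
/*
 * free_highmem_page() hands the page to the buddy allocator and keeps
 * the totalhigh_pages/totalram_pages accounting up to date.
 */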

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;
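
/*
 * Hypothetical example of how a platform could hook the EVA path before
 * free_initmem() runs (my_soc_free_init_pages is not a real function):
 *
 *	static void my_soc_free_init_pages(void *begin, void *end)
 *	{
 *		... free the init section using the platform's own mapping ...
 *	}
 *
 *	free_init_pages_eva = my_soc_free_init_pages;
 */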

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;