/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
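
/*
 * Map @page at a fixmap virtual address whose cache colour matches @addr,
 * via a wired TLB entry written directly to the TLB. This lets the kernel
 * access the page without creating dcache aliases against its user-space
 * mapping. The entry is torn down again by kunmap_coherent().
 */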
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}
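
/*
 * Wrappers around __kmap_pgprot(): kmap_coherent() maps with the normal
 * kernel page protection, kmap_noncoherent() with the cachable-noncoherent
 * variant (PAGE_KERNEL_NC).
 */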
void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}
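
/*
 * Drop the wired TLB entry installed by the most recent kmap_coherent()
 * or kmap_noncoherent() call. Typical usage (sketch):
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */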
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}
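
/*
 * Copy a user page. On aliasing dcaches, read the source through a
 * colour-matched mapping when it is mapped in user space and its dcache
 * state is clean, so we observe the data user space sees.
 */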
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
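
/*
 * copy_{to,from}_user_page() keep the caches consistent when code such as
 * ptrace accesses a user page through a kernel mapping.
 */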
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
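
/*
 * Allocate the intermediate page tables covering the given fixed-mapping
 * virtual range (the fixmap/kmap area) so PTEs exist before first use.
 */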
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}
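
/*
 * MAARs (Memory Accessibility Attribute Registers) come in pairs, each
 * pair bounding a physical address range. We use them to mark RAM as safe
 * for speculative accesses.
 */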
struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}
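
/*
 * Program the MAARs on the current CPU. The boot CPU records the
 * configuration it read back so that secondary CPUs can simply replay it.
 */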
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info(" [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
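/*
 * Set up the zone PFN limits and initialise the free lists. Highmem is not
 * usable together with dcache aliasing, so it is clipped with a warning in
 * that case.
 */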
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
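
/*
 * Release highmem pages to the buddy allocator; anything memblock does not
 * consider usable RAM stays reserved. Skipped entirely on aliasing dcaches,
 * where highmem is unsupported.
 */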
static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
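
/*
 * Poison the pages between the physical addresses begin and end, then hand
 * them back to the buddy allocator.
 */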
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}
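
/*
 * First-chunk percpu setup: embed the percpu areas in the linear map and
 * group CPUs by NUMA node distance.
 */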
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif
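
/*
 * Without CONFIG_MIPS_PGD_C0_CONTEXT, the TLB refill handlers load the
 * current CPU's pgd pointer from this array.
 */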
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, allowing its address to be loaded with a
 * single LUI instruction in the TLB handlers. If we used __aligned(64K),
 * its size would get rounded up to the alignment size, wasting space. So
 * we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);