/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 *	Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because it is used by the IP27 special magic
 * initialization code.
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

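/*
 * Map @page at a fixmap virtual address whose cache colour matches @addr,
 * by hand-installing a wired TLB entry. This gives the kernel an
 * alias-safe window onto the page on CPUs with virtually-aliasing
 * D-caches. The entry is torn down again by kunmap_coherent().
 */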
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int old_mmid;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

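/*
 * Undo a kmap_coherent()/kmap_noncoherent() mapping: unwire the TLB entry
 * installed by __kmap_pgprot() and overwrite it with a unique, invalid
 * EntryHi so it can never match a real access.
 */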
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

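/*
 * Copy a user page. When the D-cache may alias and the source page is
 * mapped clean in userspace, read it through a coherent mapping at the
 * user's cache colour so we copy the data the user actually sees; flush
 * the destination afterwards if it could alias with the user mapping.
 */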
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

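/*
 * Pre-allocate the PMD-level page tables covering a fixed-mapping virtual
 * range (the fixmap/kmap area on highmem configurations), so fixmap users
 * can install PTEs later without allocating at fault time.
 */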
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

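/*
 * walk_system_ram_range() callback: record one MAAR config entry per
 * contiguous range of system RAM, with the bounds rounded inwards to MAAR
 * alignment so speculation is never enabled beyond the end of memory.
 */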
static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}

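/*
 * Program the Memory Accessibility Attribute Registers. The boot CPU
 * detects how many MAARs exist, enables speculation for each RAM region
 * and records the resulting configuration; secondary CPUs calling in
 * later simply replay the recorded values so all cores agree.
 */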
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();
#ifdef CONFIG_XPA
		upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();
#ifdef CONFIG_XPA
		lower |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT;
#endif

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NUMA
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

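/*
 * Hand highmem pages back to the buddy allocator. This is skipped when
 * the D-cache aliases, since highmem cannot be supported safely there
 * and paging_init() already clamped ZONE_HIGHMEM in that case.
 */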
static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Set up zeroed pages. */
	mem_init_free_highmem();

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
			   0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NUMA */

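/*
 * Poison and release a physical range of init memory back to the page
 * allocator. @begin and @end are physical addresses, rounded inwards so
 * only whole pages are freed.
 */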
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __weak __init prom_free_prom_memory(void)
{
	/* nothing to do */
}

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

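/*
 * Embed-style first-chunk percpu setup: units are grouped by NUMA
 * distance, backed by node-local memblock allocations where possible,
 * and each CPU's percpu offset is derived from the chunk base.
 */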
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

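/*
 * Without CONFIG_MIPS_PGD_C0_CONTEXT the TLB refill handler cannot pull
 * the current PGD out of a CP0 register, so each CPU's PGD pointer is
 * kept in this array instead.
 */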
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);