1// SPDX-License-Identifier: GPL-2.0
2/*
3 * arch/sparc64/mm/init.c
4 *
5 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
6 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
7 */
8
9#include <linux/extable.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/string.h>
13#include <linux/init.h>
14#include <linux/memblock.h>
15#include <linux/mm.h>
16#include <linux/hugetlb.h>
17#include <linux/initrd.h>
18#include <linux/swap.h>
19#include <linux/pagemap.h>
20#include <linux/poison.h>
21#include <linux/fs.h>
22#include <linux/seq_file.h>
23#include <linux/kprobes.h>
24#include <linux/cache.h>
25#include <linux/sort.h>
26#include <linux/ioport.h>
27#include <linux/percpu.h>
28#include <linux/mmzone.h>
29#include <linux/gfp.h>
30#include <linux/bootmem_info.h>
31
32#include <asm/head.h>
33#include <asm/page.h>
34#include <asm/pgalloc.h>
35#include <asm/oplib.h>
36#include <asm/iommu.h>
37#include <asm/io.h>
38#include <linux/uaccess.h>
39#include <asm/mmu_context.h>
40#include <asm/tlbflush.h>
41#include <asm/dma.h>
42#include <asm/starfire.h>
43#include <asm/tlb.h>
44#include <asm/spitfire.h>
45#include <asm/sections.h>
46#include <asm/tsb.h>
47#include <asm/hypervisor.h>
48#include <asm/prom.h>
49#include <asm/mdesc.h>
50#include <asm/cpudata.h>
51#include <asm/setup.h>
52#include <asm/irq.h>
53
54#include "init_64.h"
55
56unsigned long kern_linear_pte_xor[4] __read_mostly;
57static unsigned long page_cache4v_flag;
58
59/* A bitmap, two bits for every 256MB of physical memory. These two
60 * bits determine what page size we use for kernel linear
61 * translations. They form an index into kern_linear_pte_xor[]. The
62 * value in the indexed slot is XOR'd with the TLB miss virtual
63 * address to form the resulting TTE. The mapping is:
64 *
65 * 0 ==> 4MB
66 * 1 ==> 256MB
67 * 2 ==> 2GB
68 * 3 ==> 16GB
69 *
70 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
71 * support 2GB pages, and hopefully future cpus will support the 16GB
72 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
73 * if these larger page sizes are not supported by the cpu.
74 *
75 * It would be nice to determine this from the machine description
76 * 'cpu' properties, but we need to have this table setup before the
77 * MDESC is initialized.
78 */
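/* Illustrative example: a linear-mapping TLB miss at virtual address V
 * whose 256MB region carries index 1 in this bitmap forms its TTE as
 *
 *	tte = V ^ kern_linear_pte_xor[1];
 *
 * i.e. a 256MB translation.  On cpus without 2GB/16GB support, slots 2
 * and 3 simply hold the same encoding as slot 1, so those indexes degrade
 * gracefully to 256MB TTEs.
 */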
79
80#ifndef CONFIG_DEBUG_PAGEALLOC
81/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
82 * Space is allocated for this right after the trap table in
83 * arch/sparc64/kernel/head.S
84 */
85extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
86#endif
87extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
88
89static unsigned long cpu_pgsz_mask;
90
91#define MAX_BANKS 1024
92
93static struct linux_prom64_registers pavail[MAX_BANKS];
94static int pavail_ents;
95
96u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
97
98static int cmp_p64(const void *a, const void *b)
99{
100 const struct linux_prom64_registers *x = a, *y = b;
101
102 if (x->phys_addr > y->phys_addr)
103 return 1;
104 if (x->phys_addr < y->phys_addr)
105 return -1;
106 return 0;
107}
108
109static void __init read_obp_memory(const char *property,
110 struct linux_prom64_registers *regs,
111 int *num_ents)
112{
113 phandle node = prom_finddevice("/memory");
114 int prop_size = prom_getproplen(node, property);
115 int ents, ret, i;
116
117 ents = prop_size / sizeof(struct linux_prom64_registers);
118 if (ents > MAX_BANKS) {
119 prom_printf("The machine has more %s property entries than "
120 "this kernel can support (%d).\n",
121 property, MAX_BANKS);
122 prom_halt();
123 }
124
125 ret = prom_getproperty(node, property, (char *) regs, prop_size);
126 if (ret == -1) {
127 prom_printf("Couldn't get %s property from /memory.\n",
128 property);
129 prom_halt();
130 }
131
132 /* Sanitize what we got from the firmware, by page aligning
133 * everything.
134 */
135 for (i = 0; i < ents; i++) {
136 unsigned long base, size;
137
138 base = regs[i].phys_addr;
139 size = regs[i].reg_size;
140
141 size &= PAGE_MASK;
142 if (base & ~PAGE_MASK) {
143 unsigned long new_base = PAGE_ALIGN(base);
144
145 size -= new_base - base;
146 if ((long) size < 0L)
147 size = 0UL;
148 base = new_base;
149 }
150 if (size == 0UL) {
151 /* If it is empty, simply get rid of it.
152 * This simplifies the logic of the other
153 * functions that process these arrays.
154 */
155 memmove(&regs[i], &regs[i + 1],
156 (ents - i - 1) * sizeof(regs[0]));
157 i--;
158 ents--;
159 continue;
160 }
161 regs[i].phys_addr = base;
162 regs[i].reg_size = size;
163 }
164
165 *num_ents = ents;
166
167 sort(regs, ents, sizeof(struct linux_prom64_registers),
168 cmp_p64, NULL);
169}
170
171/* Kernel physical address base and size in bytes. */
172unsigned long kern_base __read_mostly;
173unsigned long kern_size __read_mostly;
174
175/* Initial ramdisk setup */
176extern unsigned long sparc_ramdisk_image64;
177extern unsigned int sparc_ramdisk_image;
178extern unsigned int sparc_ramdisk_size;
179
180struct page *mem_map_zero __read_mostly;
181EXPORT_SYMBOL(mem_map_zero);
182
183unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
184
185unsigned long sparc64_kern_pri_context __read_mostly;
186unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
187unsigned long sparc64_kern_sec_context __read_mostly;
188
189int num_kernel_image_mappings;
190
191#ifdef CONFIG_DEBUG_DCFLUSH
192atomic_t dcpage_flushes = ATOMIC_INIT(0);
193#ifdef CONFIG_SMP
194atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
195#endif
196#endif
197
198inline void flush_dcache_page_impl(struct page *page)
199{
200 BUG_ON(tlb_type == hypervisor);
201#ifdef CONFIG_DEBUG_DCFLUSH
202 atomic_inc(&dcpage_flushes);
203#endif
204
205#ifdef DCACHE_ALIASING_POSSIBLE
206 __flush_dcache_page(page_address(page),
207 ((tlb_type == spitfire) &&
208 page_mapping_file(page) != NULL));
209#else
210 if (page_mapping_file(page) != NULL &&
211 tlb_type == spitfire)
212 __flush_icache_page(__pa(page_address(page)));
213#endif
214}
215
216#define PG_dcache_dirty PG_arch_1
217#define PG_dcache_cpu_shift 32UL
218#define PG_dcache_cpu_mask \
219 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
220
221#define dcache_dirty_cpu(page) \
222 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
223
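/* Record in page->flags that this page is dirty in the D-cache and which
 * cpu dirtied it: the cpu number goes into the field at
 * PG_dcache_cpu_shift and the PG_dcache_dirty bit is set.  The ldx/casx
 * loop retries until the update lands without clobbering concurrent
 * changes to the other flag bits.
 */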
224static inline void set_dcache_dirty(struct page *page, int this_cpu)
225{
226 unsigned long mask = this_cpu;
227 unsigned long non_cpu_bits;
228
229 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
230 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
231
232 __asm__ __volatile__("1:\n\t"
233 "ldx [%2], %%g7\n\t"
234 "and %%g7, %1, %%g1\n\t"
235 "or %%g1, %0, %%g1\n\t"
236 "casx [%2], %%g7, %%g1\n\t"
237 "cmp %%g7, %%g1\n\t"
238 "bne,pn %%xcc, 1b\n\t"
239 " nop"
240 : /* no outputs */
241 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
242 : "g1", "g7");
243}
244
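/* Counterpart of set_dcache_dirty(): if 'cpu' is still the recorded owner
 * of the dirty state, atomically clear PG_dcache_dirty; if another cpu has
 * re-dirtied the page in the meantime, leave page->flags untouched.
 */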
245static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
246{
247 unsigned long mask = (1UL << PG_dcache_dirty);
248
249 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
250 "1:\n\t"
251 "ldx [%2], %%g7\n\t"
252 "srlx %%g7, %4, %%g1\n\t"
253 "and %%g1, %3, %%g1\n\t"
254 "cmp %%g1, %0\n\t"
255 "bne,pn %%icc, 2f\n\t"
256 " andn %%g7, %1, %%g1\n\t"
257 "casx [%2], %%g7, %%g1\n\t"
258 "cmp %%g7, %%g1\n\t"
259 "bne,pn %%xcc, 1b\n\t"
260 " nop\n"
261 "2:"
262 : /* no outputs */
263 : "r" (cpu), "r" (mask), "r" (&page->flags),
264 "i" (PG_dcache_cpu_mask),
265 "i" (PG_dcache_cpu_shift)
266 : "g1", "g7");
267}
268
269static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
270{
271 unsigned long tsb_addr = (unsigned long) ent;
272
273 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
274 tsb_addr = __pa(tsb_addr);
275
276 __tsb_insert(tsb_addr, tag, pte);
277}
278
279unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
280
281static void flush_dcache(unsigned long pfn)
282{
283 struct page *page;
284
285 page = pfn_to_page(pfn);
286 if (page) {
287 unsigned long pg_flags;
288
289 pg_flags = page->flags;
290 if (pg_flags & (1UL << PG_dcache_dirty)) {
291 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
292 PG_dcache_cpu_mask);
293 int this_cpu = get_cpu();
294
295 /* This is just to optimize away some function calls
296 * in the SMP case.
297 */
298 if (cpu == this_cpu)
299 flush_dcache_page_impl(page);
300 else
301 smp_flush_dcache_page_impl(page, cpu);
302
303 clear_dcache_dirty_cpu(page, cpu);
304
305 put_cpu();
306 }
307 }
308}
309
310/* mm->context.lock must be held */
311static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
312 unsigned long tsb_hash_shift, unsigned long address,
313 unsigned long tte)
314{
315 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
316 unsigned long tag;
317
318 if (unlikely(!tsb))
319 return;
320
321 tsb += ((address >> tsb_hash_shift) &
322 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
323 tag = (address >> 22UL);
324 tsb_insert(tsb, tag, tte);
325}
326
327#ifdef CONFIG_HUGETLB_PAGE
328static int __init hugetlbpage_init(void)
329{
330 hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
331 hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
332 hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
333 hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
334
335 return 0;
336}
337
338arch_initcall(hugetlbpage_init);
339
340static void __init pud_huge_patch(void)
341{
342 struct pud_huge_patch_entry *p;
343 unsigned long addr;
344
345 p = &__pud_huge_patch;
346 addr = p->addr;
347 *(unsigned int *)addr = p->insn;
348
349 __asm__ __volatile__("flush %0" : : "r" (addr));
350}
351
352bool __init arch_hugetlb_valid_size(unsigned long size)
353{
354 unsigned int hugepage_shift = ilog2(size);
355 unsigned short hv_pgsz_idx;
356 unsigned int hv_pgsz_mask;
357
358 switch (hugepage_shift) {
359 case HPAGE_16GB_SHIFT:
360 hv_pgsz_mask = HV_PGSZ_MASK_16GB;
361 hv_pgsz_idx = HV_PGSZ_IDX_16GB;
362 pud_huge_patch();
363 break;
364 case HPAGE_2GB_SHIFT:
365 hv_pgsz_mask = HV_PGSZ_MASK_2GB;
366 hv_pgsz_idx = HV_PGSZ_IDX_2GB;
367 break;
368 case HPAGE_256MB_SHIFT:
369 hv_pgsz_mask = HV_PGSZ_MASK_256MB;
370 hv_pgsz_idx = HV_PGSZ_IDX_256MB;
371 break;
372 case HPAGE_SHIFT:
373 hv_pgsz_mask = HV_PGSZ_MASK_4MB;
374 hv_pgsz_idx = HV_PGSZ_IDX_4MB;
375 break;
376 case HPAGE_64K_SHIFT:
377 hv_pgsz_mask = HV_PGSZ_MASK_64K;
378 hv_pgsz_idx = HV_PGSZ_IDX_64K;
379 break;
380 default:
381 hv_pgsz_mask = 0;
382 }
383
384 if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
385 return false;
386
387 return true;
388}
389#endif /* CONFIG_HUGETLB_PAGE */
390
391void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
392{
393 struct mm_struct *mm;
394 unsigned long flags;
395 bool is_huge_tsb;
396 pte_t pte = *ptep;
397
398 if (tlb_type != hypervisor) {
399 unsigned long pfn = pte_pfn(pte);
400
401 if (pfn_valid(pfn))
402 flush_dcache(pfn);
403 }
404
405 mm = vma->vm_mm;
406
407 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
408 if (!pte_accessible(mm, pte))
409 return;
410
411 spin_lock_irqsave(&mm->context.lock, flags);
412
413 is_huge_tsb = false;
414#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
415 if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
416 unsigned long hugepage_size = PAGE_SIZE;
417
418 if (is_vm_hugetlb_page(vma))
419 hugepage_size = huge_page_size(hstate_vma(vma));
420
421 if (hugepage_size >= PUD_SIZE) {
422 unsigned long mask = 0x1ffc00000UL;
423
424 /* Transfer bits [32:22] from address to resolve
425 * at 4M granularity.
426 */
427 pte_val(pte) &= ~mask;
428 pte_val(pte) |= (address & mask);
429 } else if (hugepage_size >= PMD_SIZE) {
430 /* We are fabricating 8MB pages using 4MB
431 * real hw pages.
432 */
433 pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
434 }
435
436 if (hugepage_size >= PMD_SIZE) {
437 __update_mmu_tsb_insert(mm, MM_TSB_HUGE,
438 REAL_HPAGE_SHIFT, address, pte_val(pte));
439 is_huge_tsb = true;
440 }
441 }
442#endif
443 if (!is_huge_tsb)
444 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
445 address, pte_val(pte));
446
447 spin_unlock_irqrestore(&mm->context.lock, flags);
448}
449
450void flush_dcache_page(struct page *page)
451{
452 struct address_space *mapping;
453 int this_cpu;
454
455 if (tlb_type == hypervisor)
456 return;
457
458 /* Do not bother with the expensive D-cache flush if it
459 * is merely the zero page. The 'bigcore' testcase in GDB
460 * causes this case to run millions of times.
461 */
462 if (page == ZERO_PAGE(0))
463 return;
464
465 this_cpu = get_cpu();
466
467 mapping = page_mapping_file(page);
468 if (mapping && !mapping_mapped(mapping)) {
469 int dirty = test_bit(PG_dcache_dirty, &page->flags);
470 if (dirty) {
471 int dirty_cpu = dcache_dirty_cpu(page);
472
473 if (dirty_cpu == this_cpu)
474 goto out;
475 smp_flush_dcache_page_impl(page, dirty_cpu);
476 }
477 set_dcache_dirty(page, this_cpu);
478 } else {
479 /* We could delay the flush for the !page_mapping
480 * case too. But that case is for exec env/arg
481 * pages and those are 99% certainly going to get
482 * faulted into the tlb (and thus flushed) anyways.
483 */
484 flush_dcache_page_impl(page);
485 }
486
487out:
488 put_cpu();
489}
490EXPORT_SYMBOL(flush_dcache_page);
491
492void __kprobes flush_icache_range(unsigned long start, unsigned long end)
493{
494 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
495 if (tlb_type == spitfire) {
496 unsigned long kaddr;
497
498 /* This code only runs on Spitfire cpus so this is
499 * why we can assume _PAGE_PADDR_4U.
500 */
501 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
502 unsigned long paddr, mask = _PAGE_PADDR_4U;
503
504 if (kaddr >= PAGE_OFFSET)
505 paddr = kaddr & mask;
506 else {
507 pte_t *ptep = virt_to_kpte(kaddr);
508
509 paddr = pte_val(*ptep) & mask;
510 }
511 __flush_icache_page(paddr);
512 }
513 }
514}
515EXPORT_SYMBOL(flush_icache_range);
516
517void mmu_info(struct seq_file *m)
518{
519 static const char *pgsz_strings[] = {
520 "8K", "64K", "512K", "4MB", "32MB",
521 "256MB", "2GB", "16GB",
522 };
523 int i, printed;
524
525 if (tlb_type == cheetah)
526 seq_printf(m, "MMU Type\t: Cheetah\n");
527 else if (tlb_type == cheetah_plus)
528 seq_printf(m, "MMU Type\t: Cheetah+\n");
529 else if (tlb_type == spitfire)
530 seq_printf(m, "MMU Type\t: Spitfire\n");
531 else if (tlb_type == hypervisor)
532 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
533 else
534 seq_printf(m, "MMU Type\t: ???\n");
535
536 seq_printf(m, "MMU PGSZs\t: ");
537 printed = 0;
538 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
539 if (cpu_pgsz_mask & (1UL << i)) {
540 seq_printf(m, "%s%s",
541 printed ? "," : "", pgsz_strings[i]);
542 printed++;
543 }
544 }
545 seq_putc(m, '\n');
546
547#ifdef CONFIG_DEBUG_DCFLUSH
548 seq_printf(m, "DCPageFlushes\t: %d\n",
549 atomic_read(&dcpage_flushes));
550#ifdef CONFIG_SMP
551 seq_printf(m, "DCPageFlushesXC\t: %d\n",
552 atomic_read(&dcpage_flushes_xcall));
553#endif /* CONFIG_SMP */
554#endif /* CONFIG_DEBUG_DCFLUSH */
555}
556
557struct linux_prom_translation prom_trans[512] __read_mostly;
558unsigned int prom_trans_ents __read_mostly;
559
560unsigned long kern_locked_tte_data;
561
562/* The obp translations are saved based on 8k pagesize, since obp can
563 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
564 * HI_OBP_ADDRESS range are handled in ktlb.S.
565 */
566static inline int in_obp_range(unsigned long vaddr)
567{
568 return (vaddr >= LOW_OBP_ADDRESS &&
569 vaddr < HI_OBP_ADDRESS);
570}
571
572static int cmp_ptrans(const void *a, const void *b)
573{
574 const struct linux_prom_translation *x = a, *y = b;
575
576 if (x->virt > y->virt)
577 return 1;
578 if (x->virt < y->virt)
579 return -1;
580 return 0;
581}
582
583/* Read OBP translations property into 'prom_trans[]'. */
584static void __init read_obp_translations(void)
585{
586 int n, node, ents, first, last, i;
587
588 node = prom_finddevice("/virtual-memory");
589 n = prom_getproplen(node, "translations");
590 if (unlikely(n == 0 || n == -1)) {
591 prom_printf("prom_mappings: Couldn't get size.\n");
592 prom_halt();
593 }
594 if (unlikely(n > sizeof(prom_trans))) {
595 prom_printf("prom_mappings: Size %d is too big.\n", n);
596 prom_halt();
597 }
598
599 if ((n = prom_getproperty(node, "translations",
600 (char *)&prom_trans[0],
601 sizeof(prom_trans))) == -1) {
602 prom_printf("prom_mappings: Couldn't get property.\n");
603 prom_halt();
604 }
605
606 n = n / sizeof(struct linux_prom_translation);
607
608 ents = n;
609
610 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
611 cmp_ptrans, NULL);
612
613 /* Now kick out all the non-OBP entries. */
614 for (i = 0; i < ents; i++) {
615 if (in_obp_range(prom_trans[i].virt))
616 break;
617 }
618 first = i;
619 for (; i < ents; i++) {
620 if (!in_obp_range(prom_trans[i].virt))
621 break;
622 }
623 last = i;
624
625 for (i = 0; i < (last - first); i++) {
626 struct linux_prom_translation *src = &prom_trans[i + first];
627 struct linux_prom_translation *dest = &prom_trans[i];
628
629 *dest = *src;
630 }
631 for (; i < ents; i++) {
632 struct linux_prom_translation *dest = &prom_trans[i];
633 dest->virt = dest->size = dest->data = 0x0UL;
634 }
635
636 prom_trans_ents = last - first;
637
638 if (tlb_type == spitfire) {
639 /* Clear diag TTE bits. */
640 for (i = 0; i < prom_trans_ents; i++)
641 prom_trans[i].data &= ~0x0003fe0000000000UL;
642 }
643
644 /* Force execute bit on. */
645 for (i = 0; i < prom_trans_ents; i++)
646 prom_trans[i].data |= (tlb_type == hypervisor ?
647 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
648}
649
650static void __init hypervisor_tlb_lock(unsigned long vaddr,
651 unsigned long pte,
652 unsigned long mmu)
653{
654 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
655
656 if (ret != 0) {
657 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
658 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
659 prom_halt();
660 }
661}
662
663static unsigned long kern_large_tte(unsigned long paddr);
664
665static void __init remap_kernel(void)
666{
667 unsigned long phys_page, tte_vaddr, tte_data;
668 int i, tlb_ent = sparc64_highest_locked_tlbent();
669
670 tte_vaddr = (unsigned long) KERNBASE;
671 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
672 tte_data = kern_large_tte(phys_page);
673
674 kern_locked_tte_data = tte_data;
675
676 /* Now lock us into the TLBs via Hypervisor or OBP. */
677 if (tlb_type == hypervisor) {
678 for (i = 0; i < num_kernel_image_mappings; i++) {
679 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
680 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
681 tte_vaddr += 0x400000;
682 tte_data += 0x400000;
683 }
684 } else {
685 for (i = 0; i < num_kernel_image_mappings; i++) {
686 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
687 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
688 tte_vaddr += 0x400000;
689 tte_data += 0x400000;
690 }
691 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
692 }
693 if (tlb_type == cheetah_plus) {
694 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
695 CTX_CHEETAH_PLUS_NUC);
696 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
697 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
698 }
699}
700
701
702static void __init inherit_prom_mappings(void)
703{
704 /* Now fixup OBP's idea about where we really are mapped. */
705 printk("Remapping the kernel... ");
706 remap_kernel();
707 printk("done.\n");
708}
709
710void prom_world(int enter)
711{
712 /*
713 * No need to change the address space any more, just flush
714 * the register windows
715 */
716 __asm__ __volatile__("flushw");
717}
718
719void __flush_dcache_range(unsigned long start, unsigned long end)
720{
721 unsigned long va;
722
723 if (tlb_type == spitfire) {
724 int n = 0;
725
726 for (va = start; va < end; va += 32) {
727 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
728 if (++n >= 512)
729 break;
730 }
731 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
732 start = __pa(start);
733 end = __pa(end);
734 for (va = start; va < end; va += 32)
735 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
736 "membar #Sync"
737 : /* no outputs */
738 : "r" (va),
739 "i" (ASI_DCACHE_INVALIDATE));
740 }
741}
742EXPORT_SYMBOL(__flush_dcache_range);
743
744/* get_new_mmu_context() uses "cache + 1". */
745DEFINE_SPINLOCK(ctx_alloc_lock);
746unsigned long tlb_context_cache = CTX_FIRST_VERSION;
747#define MAX_CTX_NR (1UL << CTX_NR_BITS)
748#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
749DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
750DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
751
752static void mmu_context_wrap(void)
753{
754 unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
755 unsigned long new_ver, new_ctx, old_ctx;
756 struct mm_struct *mm;
757 int cpu;
758
759 bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
760
761 /* Reserve kernel context */
762 set_bit(0, mmu_context_bmap);
763
764 new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
765 if (unlikely(new_ver == 0))
766 new_ver = CTX_FIRST_VERSION;
767 tlb_context_cache = new_ver;
768
769 /*
770 * Make sure that any new mm that is added into per_cpu_secondary_mm
771 * is going to go through the get_new_mmu_context() path.
772 */
773 mb();
774
775 /*
776 * Update the version to current on those CPUs that had a valid
777 * secondary context.
778 */
779 for_each_online_cpu(cpu) {
780 /*
781 * If a new mm is stored after we took this mm from the array,
782 * it will go into get_new_mmu_context() path, because we
783 * already bumped the version in tlb_context_cache.
784 */
785 mm = per_cpu(per_cpu_secondary_mm, cpu);
786
787 if (unlikely(!mm || mm == &init_mm))
788 continue;
789
790 old_ctx = mm->context.sparc64_ctx_val;
791 if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
792 new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
793 set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
794 mm->context.sparc64_ctx_val = new_ctx;
795 }
796 }
797}
798
799/* Caller does TLB context flushing on local CPU if necessary.
800 * The caller also ensures that CTX_VALID(mm->context) is false.
801 *
802 * We must be careful about boundary cases so that we never
803 * let the user have CTX 0 (nucleus) or we ever use a CTX
804 * version of zero (and thus NO_CONTEXT would not be caught
805 * by version mis-match tests in mmu_context.h).
806 *
807 * Always invoked with interrupts disabled.
808 */
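/* Roughly, a context value is (version | number): the low CTX_NR_BITS bits
 * select a slot in mmu_context_bmap, while the high version bits let
 * mmu_context_wrap() retire every previously handed out context at once by
 * advancing tlb_context_cache to the next version.
 */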
809void get_new_mmu_context(struct mm_struct *mm)
810{
811 unsigned long ctx, new_ctx;
812 unsigned long orig_pgsz_bits;
813
814 spin_lock(&ctx_alloc_lock);
815retry:
816 /* wrap might have happened, test again if our context became valid */
817 if (unlikely(CTX_VALID(mm->context)))
818 goto out;
819 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
820 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
821 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
822 if (new_ctx >= (1 << CTX_NR_BITS)) {
823 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
824 if (new_ctx >= ctx) {
825 mmu_context_wrap();
826 goto retry;
827 }
828 }
829 if (mm->context.sparc64_ctx_val)
830 cpumask_clear(mm_cpumask(mm));
831 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
832 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
833 tlb_context_cache = new_ctx;
834 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
835out:
836 spin_unlock(&ctx_alloc_lock);
837}
838
839static int numa_enabled = 1;
840static int numa_debug;
841
842static int __init early_numa(char *p)
843{
844 if (!p)
845 return 0;
846
847 if (strstr(p, "off"))
848 numa_enabled = 0;
849
850 if (strstr(p, "debug"))
851 numa_debug = 1;
852
853 return 0;
854}
855early_param("numa", early_numa);
856
857#define numadbg(f, a...) \
858do { if (numa_debug) \
859 printk(KERN_INFO f, ## a); \
860} while (0)
861
862static void __init find_ramdisk(unsigned long phys_base)
863{
864#ifdef CONFIG_BLK_DEV_INITRD
865 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
866 unsigned long ramdisk_image;
867
868 /* Older versions of the bootloader only supported a
869 * 32-bit physical address for the ramdisk image
870 * location, stored at sparc_ramdisk_image. Newer
871 * SILO versions set sparc_ramdisk_image to zero and
872 * provide a full 64-bit physical address at
873 * sparc_ramdisk_image64.
874 */
875 ramdisk_image = sparc_ramdisk_image;
876 if (!ramdisk_image)
877 ramdisk_image = sparc_ramdisk_image64;
878
879 /* Another bootloader quirk. The bootloader normalizes
880 * the physical address to KERNBASE, so we have to
881 * factor that back out and add in the lowest valid
882 * physical page address to get the true physical address.
883 */
884 ramdisk_image -= KERNBASE;
885 ramdisk_image += phys_base;
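	/* Worked example with made-up numbers: a bootloader-reported image
	 * at KERNBASE + 0x4000 on a machine whose first valid physical page
	 * sits at 0x20000000 really lives at physical 0x20004000.
	 */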
886
887 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
888 ramdisk_image, sparc_ramdisk_size);
889
890 initrd_start = ramdisk_image;
891 initrd_end = ramdisk_image + sparc_ramdisk_size;
892
893 memblock_reserve(initrd_start, sparc_ramdisk_size);
894
895 initrd_start += PAGE_OFFSET;
896 initrd_end += PAGE_OFFSET;
897 }
898#endif
899}
900
901struct node_mem_mask {
902 unsigned long mask;
903 unsigned long match;
904};
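/* A physical address is considered to belong to node i when
 * (paddr & node_masks[i].mask) == node_masks[i].match; this is the test
 * applied by memblock_nid_range() and friends below.
 */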
905static struct node_mem_mask node_masks[MAX_NUMNODES];
906static int num_node_masks;
907
908#ifdef CONFIG_NUMA
909
910struct mdesc_mlgroup {
911 u64 node;
912 u64 latency;
913 u64 match;
914 u64 mask;
915};
916
917static struct mdesc_mlgroup *mlgroups;
918static int num_mlgroups;
919
920int numa_cpu_lookup_table[NR_CPUS];
921cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
922
923struct mdesc_mblock {
924 u64 base;
925 u64 size;
926 u64 offset; /* RA-to-PA */
927};
928static struct mdesc_mblock *mblocks;
929static int num_mblocks;
930
931static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
932{
933 struct mdesc_mblock *m = NULL;
934 int i;
935
936 for (i = 0; i < num_mblocks; i++) {
937 m = &mblocks[i];
938
939 if (addr >= m->base &&
940 addr < (m->base + m->size)) {
941 break;
942 }
943 }
944
945 return m;
946}
947
948static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
949{
950 int prev_nid, new_nid;
951
952 prev_nid = NUMA_NO_NODE;
953 for ( ; start < end; start += PAGE_SIZE) {
954 for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
955 struct node_mem_mask *p = &node_masks[new_nid];
956
957 if ((start & p->mask) == p->match) {
958 if (prev_nid == NUMA_NO_NODE)
959 prev_nid = new_nid;
960 break;
961 }
962 }
963
964 if (new_nid == num_node_masks) {
965 prev_nid = 0;
966 WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
967 start);
968 break;
969 }
970
971 if (prev_nid != new_nid)
972 break;
973 }
974 *nid = prev_nid;
975
976 return start > end ? end : start;
977}
978
979static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
980{
981 u64 ret_end, pa_start, m_mask, m_match, m_end;
982 struct mdesc_mblock *mblock;
983 int _nid, i;
984
985 if (tlb_type != hypervisor)
986 return memblock_nid_range_sun4u(start, end, nid);
987
988 mblock = addr_to_mblock(start);
989 if (!mblock) {
990 WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
991 start);
992
993 _nid = 0;
994 ret_end = end;
995 goto done;
996 }
997
998 pa_start = start + mblock->offset;
999 m_match = 0;
1000 m_mask = 0;
1001
1002 for (_nid = 0; _nid < num_node_masks; _nid++) {
1003 struct node_mem_mask *const m = &node_masks[_nid];
1004
1005 if ((pa_start & m->mask) == m->match) {
1006 m_match = m->match;
1007 m_mask = m->mask;
1008 break;
1009 }
1010 }
1011
1012 if (num_node_masks == _nid) {
1013 /* We could not find a NUMA group, so default to 0, but still
1014 * search for a latency group so we can calculate the correct
1015 * end address to return.
1016 */
1017 _nid = 0;
1018
1019 for (i = 0; i < num_mlgroups; i++) {
1020 struct mdesc_mlgroup *const m = &mlgroups[i];
1021
1022 if ((pa_start & m->mask) == m->match) {
1023 m_match = m->match;
1024 m_mask = m->mask;
1025 break;
1026 }
1027 }
1028
1029 if (i == num_mlgroups) {
1030 WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
1031 start);
1032
1033 ret_end = end;
1034 goto done;
1035 }
1036 }
1037
1038 /*
1039 * Each latency group has match and mask, and each memory block has an
1040 * offset. An address belongs to a latency group if its address matches
1041 * the following formula: ((addr + offset) & mask) == match
1042 * It would, however, be slow to check every single page against its
1043 * latency group, so as an optimization we calculate the end value
1044 * using bit arithmetic.
1045 */
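	/* Rough illustration: (1UL << __ffs(m_mask)) is the size of one
	 * contiguous chunk over which (addr & m_mask) stays constant, and
	 * (1UL << fls64(m_mask)) is the period after which the whole mask
	 * pattern repeats.  m_end therefore ends up at the end of the chunk
	 * containing pa_start, converted back from PA to RA via the mblock
	 * offset.
	 */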
1046 m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
1047 m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
1048 ret_end = m_end > end ? end : m_end;
1049
1050done:
1051 *nid = _nid;
1052 return ret_end;
1053}
1054#endif
1055
1056/* This must be invoked after performing all of the necessary
1057 * memblock_set_node() calls for 'nid'. We need to be able to get
1058 * correct data from get_pfn_range_for_nid().
1059 */
1060static void __init allocate_node_data(int nid)
1061{
1062 struct pglist_data *p;
1063 unsigned long start_pfn, end_pfn;
1064#ifdef CONFIG_NUMA
1065
1066 NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
1067 SMP_CACHE_BYTES, nid);
1068 if (!NODE_DATA(nid)) {
1069 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
1070 prom_halt();
1071 }
1072
1073 NODE_DATA(nid)->node_id = nid;
1074#endif
1075
1076 p = NODE_DATA(nid);
1077
1078 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1079 p->node_start_pfn = start_pfn;
1080 p->node_spanned_pages = end_pfn - start_pfn;
1081}
1082
1083static void init_node_masks_nonnuma(void)
1084{
1085#ifdef CONFIG_NUMA
1086 int i;
1087#endif
1088
1089 numadbg("Initializing tables for non-numa.\n");
1090
1091 node_masks[0].mask = 0;
1092 node_masks[0].match = 0;
1093 num_node_masks = 1;
1094
1095#ifdef CONFIG_NUMA
1096 for (i = 0; i < NR_CPUS; i++)
1097 numa_cpu_lookup_table[i] = 0;
1098
1099 cpumask_setall(&numa_cpumask_lookup_table[0]);
1100#endif
1101}
1102
1103#ifdef CONFIG_NUMA
1104struct pglist_data *node_data[MAX_NUMNODES];
1105
1106EXPORT_SYMBOL(numa_cpu_lookup_table);
1107EXPORT_SYMBOL(numa_cpumask_lookup_table);
1108EXPORT_SYMBOL(node_data);
1109
1110static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
1111 u32 cfg_handle)
1112{
1113 u64 arc;
1114
1115 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
1116 u64 target = mdesc_arc_target(md, arc);
1117 const u64 *val;
1118
1119 val = mdesc_get_property(md, target,
1120 "cfg-handle", NULL);
1121 if (val && *val == cfg_handle)
1122 return 0;
1123 }
1124 return -ENODEV;
1125}
1126
1127static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
1128 u32 cfg_handle)
1129{
1130 u64 arc, candidate, best_latency = ~(u64)0;
1131
1132 candidate = MDESC_NODE_NULL;
1133 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1134 u64 target = mdesc_arc_target(md, arc);
1135 const char *name = mdesc_node_name(md, target);
1136 const u64 *val;
1137
1138 if (strcmp(name, "pio-latency-group"))
1139 continue;
1140
1141 val = mdesc_get_property(md, target, "latency", NULL);
1142 if (!val)
1143 continue;
1144
1145 if (*val < best_latency) {
1146 candidate = target;
1147 best_latency = *val;
1148 }
1149 }
1150
1151 if (candidate == MDESC_NODE_NULL)
1152 return -ENODEV;
1153
1154 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1155}
1156
1157int of_node_to_nid(struct device_node *dp)
1158{
1159 const struct linux_prom64_registers *regs;
1160 struct mdesc_handle *md;
1161 u32 cfg_handle;
1162 int count, nid;
1163 u64 grp;
1164
1165 /* This is the right thing to do on currently supported
1166 * SUN4U NUMA platforms as well, as the PCI controller does
1167 * not sit behind any particular memory controller.
1168 */
1169 if (!mlgroups)
1170 return -1;
1171
1172 regs = of_get_property(dp, "reg", NULL);
1173 if (!regs)
1174 return -1;
1175
1176 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1177
1178 md = mdesc_grab();
1179
1180 count = 0;
1181 nid = NUMA_NO_NODE;
1182 mdesc_for_each_node_by_name(md, grp, "group") {
1183 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1184 nid = count;
1185 break;
1186 }
1187 count++;
1188 }
1189
1190 mdesc_release(md);
1191
1192 return nid;
1193}
1194
1195static void __init add_node_ranges(void)
1196{
1197 phys_addr_t start, end;
1198 unsigned long prev_max;
1199 u64 i;
1200
1201memblock_resized:
1202 prev_max = memblock.memory.max;
1203
1204 for_each_mem_range(i, &start, &end) {
1205 while (start < end) {
1206 unsigned long this_end;
1207 int nid;
1208
1209 this_end = memblock_nid_range(start, end, &nid);
1210
1211 numadbg("Setting memblock NUMA node nid[%d] "
1212 "start[%llx] end[%lx]\n",
1213 nid, start, this_end);
1214
1215 memblock_set_node(start, this_end - start,
1216 &memblock.memory, nid);
1217 if (memblock.memory.max != prev_max)
1218 goto memblock_resized;
1219 start = this_end;
1220 }
1221 }
1222}
1223
1224static int __init grab_mlgroups(struct mdesc_handle *md)
1225{
1226 unsigned long paddr;
1227 int count = 0;
1228 u64 node;
1229
1230 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1231 count++;
1232 if (!count)
1233 return -ENOENT;
1234
1235 paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
1236 SMP_CACHE_BYTES);
1237 if (!paddr)
1238 return -ENOMEM;
1239
1240 mlgroups = __va(paddr);
1241 num_mlgroups = count;
1242
1243 count = 0;
1244 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1245 struct mdesc_mlgroup *m = &mlgroups[count++];
1246 const u64 *val;
1247
1248 m->node = node;
1249
1250 val = mdesc_get_property(md, node, "latency", NULL);
1251 m->latency = *val;
1252 val = mdesc_get_property(md, node, "address-match", NULL);
1253 m->match = *val;
1254 val = mdesc_get_property(md, node, "address-mask", NULL);
1255 m->mask = *val;
1256
1257 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1258 "match[%llx] mask[%llx]\n",
1259 count - 1, m->node, m->latency, m->match, m->mask);
1260 }
1261
1262 return 0;
1263}
1264
1265static int __init grab_mblocks(struct mdesc_handle *md)
1266{
1267 unsigned long paddr;
1268 int count = 0;
1269 u64 node;
1270
1271 mdesc_for_each_node_by_name(md, node, "mblock")
1272 count++;
1273 if (!count)
1274 return -ENOENT;
1275
1276 paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
1277 SMP_CACHE_BYTES);
1278 if (!paddr)
1279 return -ENOMEM;
1280
1281 mblocks = __va(paddr);
1282 num_mblocks = count;
1283
1284 count = 0;
1285 mdesc_for_each_node_by_name(md, node, "mblock") {
1286 struct mdesc_mblock *m = &mblocks[count++];
1287 const u64 *val;
1288
1289 val = mdesc_get_property(md, node, "base", NULL);
1290 m->base = *val;
1291 val = mdesc_get_property(md, node, "size", NULL);
1292 m->size = *val;
1293 val = mdesc_get_property(md, node,
1294 "address-congruence-offset", NULL);
1295
1296 /* The address-congruence-offset property is optional.
1297 * Explicitly zero it to identify this case.
1298 */
1299 if (val)
1300 m->offset = *val;
1301 else
1302 m->offset = 0UL;
1303
1304 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1305 count - 1, m->base, m->size, m->offset);
1306 }
1307
1308 return 0;
1309}
1310
1311static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1312 u64 grp, cpumask_t *mask)
1313{
1314 u64 arc;
1315
1316 cpumask_clear(mask);
1317
1318 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1319 u64 target = mdesc_arc_target(md, arc);
1320 const char *name = mdesc_node_name(md, target);
1321 const u64 *id;
1322
1323 if (strcmp(name, "cpu"))
1324 continue;
1325 id = mdesc_get_property(md, target, "id", NULL);
1326 if (*id < nr_cpu_ids)
1327 cpumask_set_cpu(*id, mask);
1328 }
1329}
1330
1331static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1332{
1333 int i;
1334
1335 for (i = 0; i < num_mlgroups; i++) {
1336 struct mdesc_mlgroup *m = &mlgroups[i];
1337 if (m->node == node)
1338 return m;
1339 }
1340 return NULL;
1341}
1342
1343int __node_distance(int from, int to)
1344{
1345 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1346 pr_warn("Returning default NUMA distance value for %d->%d\n",
1347 from, to);
1348 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1349 }
1350 return numa_latency[from][to];
1351}
1352EXPORT_SYMBOL(__node_distance);
1353
1354static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1355{
1356 int i;
1357
1358 for (i = 0; i < MAX_NUMNODES; i++) {
1359 struct node_mem_mask *n = &node_masks[i];
1360
1361 if ((grp->mask == n->mask) && (grp->match == n->match))
1362 break;
1363 }
1364 return i;
1365}
1366
1367static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1368 u64 grp, int index)
1369{
1370 u64 arc;
1371
1372 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1373 int tnode;
1374 u64 target = mdesc_arc_target(md, arc);
1375 struct mdesc_mlgroup *m = find_mlgroup(target);
1376
1377 if (!m)
1378 continue;
1379 tnode = find_best_numa_node_for_mlgroup(m);
1380 if (tnode == MAX_NUMNODES)
1381 continue;
1382 numa_latency[index][tnode] = m->latency;
1383 }
1384}
1385
1386static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1387 int index)
1388{
1389 struct mdesc_mlgroup *candidate = NULL;
1390 u64 arc, best_latency = ~(u64)0;
1391 struct node_mem_mask *n;
1392
1393 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1394 u64 target = mdesc_arc_target(md, arc);
1395 struct mdesc_mlgroup *m = find_mlgroup(target);
1396 if (!m)
1397 continue;
1398 if (m->latency < best_latency) {
1399 candidate = m;
1400 best_latency = m->latency;
1401 }
1402 }
1403 if (!candidate)
1404 return -ENOENT;
1405
1406 if (num_node_masks != index) {
1407 printk(KERN_ERR "Inconsistent NUMA state, "
1408 "index[%d] != num_node_masks[%d]\n",
1409 index, num_node_masks);
1410 return -EINVAL;
1411 }
1412
1413 n = &node_masks[num_node_masks++];
1414
1415 n->mask = candidate->mask;
1416 n->match = candidate->match;
1417
1418 numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1419 index, n->mask, n->match, candidate->latency);
1420
1421 return 0;
1422}
1423
1424static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1425 int index)
1426{
1427 cpumask_t mask;
1428 int cpu;
1429
1430 numa_parse_mdesc_group_cpus(md, grp, &mask);
1431
1432 for_each_cpu(cpu, &mask)
1433 numa_cpu_lookup_table[cpu] = index;
1434 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1435
1436 if (numa_debug) {
1437 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1438 for_each_cpu(cpu, &mask)
1439 printk("%d ", cpu);
1440 printk("]\n");
1441 }
1442
1443 return numa_attach_mlgroup(md, grp, index);
1444}
1445
1446static int __init numa_parse_mdesc(void)
1447{
1448 struct mdesc_handle *md = mdesc_grab();
1449 int i, j, err, count;
1450 u64 node;
1451
1452 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1453 if (node == MDESC_NODE_NULL) {
1454 mdesc_release(md);
1455 return -ENOENT;
1456 }
1457
1458 err = grab_mblocks(md);
1459 if (err < 0)
1460 goto out;
1461
1462 err = grab_mlgroups(md);
1463 if (err < 0)
1464 goto out;
1465
1466 count = 0;
1467 mdesc_for_each_node_by_name(md, node, "group") {
1468 err = numa_parse_mdesc_group(md, node, count);
1469 if (err < 0)
1470 break;
1471 count++;
1472 }
1473
1474 count = 0;
1475 mdesc_for_each_node_by_name(md, node, "group") {
1476 find_numa_latencies_for_group(md, node, count);
1477 count++;
1478 }
1479
1480 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1481 for (i = 0; i < MAX_NUMNODES; i++) {
1482 u64 self_latency = numa_latency[i][i];
1483
1484 for (j = 0; j < MAX_NUMNODES; j++) {
1485 numa_latency[i][j] =
1486 (numa_latency[i][j] * LOCAL_DISTANCE) /
1487 self_latency;
1488 }
1489 }
1490
1491 add_node_ranges();
1492
1493 for (i = 0; i < num_node_masks; i++) {
1494 allocate_node_data(i);
1495 node_set_online(i);
1496 }
1497
1498 err = 0;
1499out:
1500 mdesc_release(md);
1501 return err;
1502}
1503
1504static int __init numa_parse_jbus(void)
1505{
1506 unsigned long cpu, index;
1507
1508 /* NUMA node id is encoded in bits 36 and higher, and there is
1509 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1510 */
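	/* So, e.g., cpu N gets match == (N << 36UL) with a mask that keeps
	 * bits 36 and above, i.e. the 64GB of physical memory starting at
	 * (N << 36UL) is treated as local to node N.
	 */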
1511 index = 0;
1512 for_each_present_cpu(cpu) {
1513 numa_cpu_lookup_table[cpu] = index;
1514 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1515 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1516 node_masks[index].match = cpu << 36UL;
1517
1518 index++;
1519 }
1520 num_node_masks = index;
1521
1522 add_node_ranges();
1523
1524 for (index = 0; index < num_node_masks; index++) {
1525 allocate_node_data(index);
1526 node_set_online(index);
1527 }
1528
1529 return 0;
1530}
1531
1532static int __init numa_parse_sun4u(void)
1533{
1534 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1535 unsigned long ver;
1536
1537 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1538 if ((ver >> 32UL) == __JALAPENO_ID ||
1539 (ver >> 32UL) == __SERRANO_ID)
1540 return numa_parse_jbus();
1541 }
1542 return -1;
1543}
1544
1545static int __init bootmem_init_numa(void)
1546{
1547 int i, j;
1548 int err = -1;
1549
1550 numadbg("bootmem_init_numa()\n");
1551
1552 /* Some sane defaults for numa latency values */
1553 for (i = 0; i < MAX_NUMNODES; i++) {
1554 for (j = 0; j < MAX_NUMNODES; j++)
1555 numa_latency[i][j] = (i == j) ?
1556 LOCAL_DISTANCE : REMOTE_DISTANCE;
1557 }
1558
1559 if (numa_enabled) {
1560 if (tlb_type == hypervisor)
1561 err = numa_parse_mdesc();
1562 else
1563 err = numa_parse_sun4u();
1564 }
1565 return err;
1566}
1567
1568#else
1569
1570static int bootmem_init_numa(void)
1571{
1572 return -1;
1573}
1574
1575#endif
1576
1577static void __init bootmem_init_nonnuma(void)
1578{
1579 unsigned long top_of_ram = memblock_end_of_DRAM();
1580 unsigned long total_ram = memblock_phys_mem_size();
1581
1582 numadbg("bootmem_init_nonnuma()\n");
1583
1584 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1585 top_of_ram, total_ram);
1586 printk(KERN_INFO "Memory hole size: %ldMB\n",
1587 (top_of_ram - total_ram) >> 20);
1588
1589 init_node_masks_nonnuma();
1590 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
1591 allocate_node_data(0);
1592 node_set_online(0);
1593}
1594
1595static unsigned long __init bootmem_init(unsigned long phys_base)
1596{
1597 unsigned long end_pfn;
1598
1599 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1600 max_pfn = max_low_pfn = end_pfn;
1601 min_low_pfn = (phys_base >> PAGE_SHIFT);
1602
1603 if (bootmem_init_numa() < 0)
1604 bootmem_init_nonnuma();
1605
1606 /* Dump memblock with node info. */
1607 memblock_dump_all();
1608
1609 /* XXX cpu notifier XXX */
1610
1611 sparse_init();
1612
1613 return end_pfn;
1614}
1615
1616static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1617static int pall_ents __initdata;
1618
1619static unsigned long max_phys_bits = 40;
1620
1621bool kern_addr_valid(unsigned long addr)
1622{
1623 pgd_t *pgd;
1624 p4d_t *p4d;
1625 pud_t *pud;
1626 pmd_t *pmd;
1627 pte_t *pte;
1628
1629 if ((long)addr < 0L) {
1630 unsigned long pa = __pa(addr);
1631
1632 if ((pa >> max_phys_bits) != 0UL)
1633 return false;
1634
1635 return pfn_valid(pa >> PAGE_SHIFT);
1636 }
1637
1638 if (addr >= (unsigned long) KERNBASE &&
1639 addr < (unsigned long)&_end)
1640 return true;
1641
1642 pgd = pgd_offset_k(addr);
1643 if (pgd_none(*pgd))
1644 return false;
1645
1646 p4d = p4d_offset(pgd, addr);
1647 if (p4d_none(*p4d))
1648 return false;
1649
1650 pud = pud_offset(p4d, addr);
1651 if (pud_none(*pud))
1652 return false;
1653
1654 if (pud_large(*pud))
1655 return pfn_valid(pud_pfn(*pud));
1656
1657 pmd = pmd_offset(pud, addr);
1658 if (pmd_none(*pmd))
1659 return false;
1660
1661 if (pmd_large(*pmd))
1662 return pfn_valid(pmd_pfn(*pmd));
1663
1664 pte = pte_offset_kernel(pmd, addr);
1665 if (pte_none(*pte))
1666 return false;
1667
1668 return pfn_valid(pte_pfn(*pte));
1669}
1670
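/* Map [vstart, vend) at PUD granularity: fill one 8GB PUD entry using the
 * kern_linear_pte_xor[2] (2GB-page) encoding unless the range is at least
 * 16GB and 16GB-aligned, in which case the PUD entries covering 16GB are
 * filled with the kern_linear_pte_xor[3] (16GB-page) encoding.
 */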
1671static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1672 unsigned long vend,
1673 pud_t *pud)
1674{
1675 const unsigned long mask16gb = (1UL << 34) - 1UL;
1676 u64 pte_val = vstart;
1677
1678 /* Each PUD is 8GB */
1679 if ((vstart & mask16gb) ||
1680 (vend - vstart <= mask16gb)) {
1681 pte_val ^= kern_linear_pte_xor[2];
1682 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1683
1684 return vstart + PUD_SIZE;
1685 }
1686
1687 pte_val ^= kern_linear_pte_xor[3];
1688 pte_val |= _PAGE_PUD_HUGE;
1689
1690 vend = vstart + mask16gb + 1UL;
1691 while (vstart < vend) {
1692 pud_val(*pud) = pte_val;
1693
1694 pte_val += PUD_SIZE;
1695 vstart += PUD_SIZE;
1696 pud++;
1697 }
1698 return vstart;
1699}
1700
1701static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1702 bool guard)
1703{
1704 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1705 return true;
1706
1707 return false;
1708}
1709
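/* Map [vstart, vend) at PMD granularity, choosing the largest linear page
 * encoding that fits: kern_linear_pte_xor[0] (4MB) when the range is not
 * 256MB aligned and sized, kern_linear_pte_xor[1] (256MB) across one
 * naturally aligned 256MB block, or kern_linear_pte_xor[2] (2GB) across
 * one 2GB block, stepping the TTE by PMD_SIZE for each 8MB PMD entry.
 */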
1710static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1711 unsigned long vend,
1712 pmd_t *pmd)
1713{
1714 const unsigned long mask256mb = (1UL << 28) - 1UL;
1715 const unsigned long mask2gb = (1UL << 31) - 1UL;
1716 u64 pte_val = vstart;
1717
1718 /* Each PMD is 8MB */
1719 if ((vstart & mask256mb) ||
1720 (vend - vstart <= mask256mb)) {
1721 pte_val ^= kern_linear_pte_xor[0];
1722 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1723
1724 return vstart + PMD_SIZE;
1725 }
1726
1727 if ((vstart & mask2gb) ||
1728 (vend - vstart <= mask2gb)) {
1729 pte_val ^= kern_linear_pte_xor[1];
1730 pte_val |= _PAGE_PMD_HUGE;
1731 vend = vstart + mask256mb + 1UL;
1732 } else {
1733 pte_val ^= kern_linear_pte_xor[2];
1734 pte_val |= _PAGE_PMD_HUGE;
1735 vend = vstart + mask2gb + 1UL;
1736 }
1737
1738 while (vstart < vend) {
1739 pmd_val(*pmd) = pte_val;
1740
1741 pte_val += PMD_SIZE;
1742 vstart += PMD_SIZE;
1743 pmd++;
1744 }
1745
1746 return vstart;
1747}
1748
1749static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1750 bool guard)
1751{
1752 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1753 return true;
1754
1755 return false;
1756}
1757
1758static unsigned long __ref kernel_map_range(unsigned long pstart,
1759 unsigned long pend, pgprot_t prot,
1760 bool use_huge)
1761{
1762 unsigned long vstart = PAGE_OFFSET + pstart;
1763 unsigned long vend = PAGE_OFFSET + pend;
1764 unsigned long alloc_bytes = 0UL;
1765
1766 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1767 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1768 vstart, vend);
1769 prom_halt();
1770 }
1771
1772 while (vstart < vend) {
1773 unsigned long this_end, paddr = __pa(vstart);
1774 pgd_t *pgd = pgd_offset_k(vstart);
1775 p4d_t *p4d;
1776 pud_t *pud;
1777 pmd_t *pmd;
1778 pte_t *pte;
1779
1780 if (pgd_none(*pgd)) {
1781 pud_t *new;
1782
1783 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1784 PAGE_SIZE);
1785 if (!new)
1786 goto err_alloc;
1787 alloc_bytes += PAGE_SIZE;
1788 pgd_populate(&init_mm, pgd, new);
1789 }
1790
1791 p4d = p4d_offset(pgd, vstart);
1792 if (p4d_none(*p4d)) {
1793 pud_t *new;
1794
1795 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1796 PAGE_SIZE);
1797 if (!new)
1798 goto err_alloc;
1799 alloc_bytes += PAGE_SIZE;
1800 p4d_populate(&init_mm, p4d, new);
1801 }
1802
1803 pud = pud_offset(p4d, vstart);
1804 if (pud_none(*pud)) {
1805 pmd_t *new;
1806
1807 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1808 vstart = kernel_map_hugepud(vstart, vend, pud);
1809 continue;
1810 }
1811 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1812 PAGE_SIZE);
1813 if (!new)
1814 goto err_alloc;
1815 alloc_bytes += PAGE_SIZE;
1816 pud_populate(&init_mm, pud, new);
1817 }
1818
1819 pmd = pmd_offset(pud, vstart);
1820 if (pmd_none(*pmd)) {
1821 pte_t *new;
1822
1823 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1824 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1825 continue;
1826 }
1827 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1828 PAGE_SIZE);
1829 if (!new)
1830 goto err_alloc;
1831 alloc_bytes += PAGE_SIZE;
1832 pmd_populate_kernel(&init_mm, pmd, new);
1833 }
1834
1835 pte = pte_offset_kernel(pmd, vstart);
1836 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1837 if (this_end > vend)
1838 this_end = vend;
1839
1840 while (vstart < this_end) {
1841 pte_val(*pte) = (paddr | pgprot_val(prot));
1842
1843 vstart += PAGE_SIZE;
1844 paddr += PAGE_SIZE;
1845 pte++;
1846 }
1847 }
1848
1849 return alloc_bytes;
1850
1851err_alloc:
1852 panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
1853 __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1854 return -ENOMEM;
1855}
1856
1857static void __init flush_all_kernel_tsbs(void)
1858{
1859 int i;
1860
1861 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1862 struct tsb *ent = &swapper_tsb[i];
1863
1864 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1865 }
1866#ifndef CONFIG_DEBUG_PAGEALLOC
1867 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1868 struct tsb *ent = &swapper_4m_tsb[i];
1869
1870 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1871 }
1872#endif
1873}
1874
1875extern unsigned int kvmap_linear_patch[1];
1876
1877static void __init kernel_physical_mapping_init(void)
1878{
1879 unsigned long i, mem_alloced = 0UL;
1880 bool use_huge = true;
1881
1882#ifdef CONFIG_DEBUG_PAGEALLOC
1883 use_huge = false;
1884#endif
1885 for (i = 0; i < pall_ents; i++) {
1886 unsigned long phys_start, phys_end;
1887
1888 phys_start = pall[i].phys_addr;
1889 phys_end = phys_start + pall[i].reg_size;
1890
1891 mem_alloced += kernel_map_range(phys_start, phys_end,
1892 PAGE_KERNEL, use_huge);
1893 }
1894
1895 printk("Allocated %ld bytes for kernel page tables.\n",
1896 mem_alloced);
1897
1898 kvmap_linear_patch[0] = 0x01000000; /* nop */
1899 flushi(&kvmap_linear_patch[0]);
1900
1901 flush_all_kernel_tsbs();
1902
1903 __flush_tlb_all();
1904}
1905
1906#ifdef CONFIG_DEBUG_PAGEALLOC
1907void __kernel_map_pages(struct page *page, int numpages, int enable)
1908{
1909 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1910 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1911
1912 kernel_map_range(phys_start, phys_end,
1913 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1914
1915 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1916 PAGE_OFFSET + phys_end);
1917
1918 /* We should perform an IPI and flush all TLBs here,
1919 * but that can deadlock, so flush only the current cpu.
1920 */
1921 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1922 PAGE_OFFSET + phys_end);
1923}
1924#endif
1925
1926unsigned long __init find_ecache_flush_span(unsigned long size)
1927{
1928 int i;
1929
1930 for (i = 0; i < pavail_ents; i++) {
1931 if (pavail[i].reg_size >= size)
1932 return pavail[i].phys_addr;
1933 }
1934
1935 return ~0UL;
1936}
1937
1938unsigned long PAGE_OFFSET;
1939EXPORT_SYMBOL(PAGE_OFFSET);
1940
1941unsigned long VMALLOC_END = 0x0000010000000000UL;
1942EXPORT_SYMBOL(VMALLOC_END);
1943
1944unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1945unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1946
1947static void __init setup_page_offset(void)
1948{
1949 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1950 /* Cheetah/Panther support a full 64-bit virtual
1951 * address, so we can use all that our page tables
1952 * support.
1953 */
1954 sparc64_va_hole_top = 0xfff0000000000000UL;
1955 sparc64_va_hole_bottom = 0x0010000000000000UL;
1956
1957 max_phys_bits = 42;
1958 } else if (tlb_type == hypervisor) {
1959 switch (sun4v_chip_type) {
1960 case SUN4V_CHIP_NIAGARA1:
1961 case SUN4V_CHIP_NIAGARA2:
1962 /* T1 and T2 support 48-bit virtual addresses. */
1963 sparc64_va_hole_top = 0xffff800000000000UL;
1964 sparc64_va_hole_bottom = 0x0000800000000000UL;
1965
1966 max_phys_bits = 39;
1967 break;
1968 case SUN4V_CHIP_NIAGARA3:
1969 /* T3 supports 48-bit virtual addresses. */
1970 sparc64_va_hole_top = 0xffff800000000000UL;
1971 sparc64_va_hole_bottom = 0x0000800000000000UL;
1972
1973 max_phys_bits = 43;
1974 break;
1975 case SUN4V_CHIP_NIAGARA4:
1976 case SUN4V_CHIP_NIAGARA5:
1977 case SUN4V_CHIP_SPARC64X:
1978 case SUN4V_CHIP_SPARC_M6:
1979 /* T4 and later support 52-bit virtual addresses. */
1980 sparc64_va_hole_top = 0xfff8000000000000UL;
1981 sparc64_va_hole_bottom = 0x0008000000000000UL;
1982 max_phys_bits = 47;
1983 break;
1984 case SUN4V_CHIP_SPARC_M7:
1985 case SUN4V_CHIP_SPARC_SN:
1986 /* M7 and later support 52-bit virtual addresses. */
1987 sparc64_va_hole_top = 0xfff8000000000000UL;
1988 sparc64_va_hole_bottom = 0x0008000000000000UL;
1989 max_phys_bits = 49;
1990 break;
1991 case SUN4V_CHIP_SPARC_M8:
1992 default:
1993 /* M8 and later support 54-bit virtual addresses.
1994 * However, we restrict M8 and above to 53 VA bits,
1995 * since a 4-level page table cannot support more
1996 * than 53 VA bits.
1997 */
1998 sparc64_va_hole_top = 0xfff0000000000000UL;
1999 sparc64_va_hole_bottom = 0x0010000000000000UL;
2000 max_phys_bits = 51;
2001 break;
2002 }
2003 }
2004
2005 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
2006 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
2007 max_phys_bits);
2008 prom_halt();
2009 }
2010
2011 PAGE_OFFSET = sparc64_va_hole_top;
2012 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
2013 (sparc64_va_hole_bottom >> 2));
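	/* That is, VMALLOC_END is placed three quarters of the way up the
	 * address range below the VA hole: e.g. with a hole bottom of
	 * 0x0008000000000000 (the T4-and-later case above), VMALLOC_END
	 * works out to 0x0006000000000000.
	 */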
2014
2015 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
2016 PAGE_OFFSET, max_phys_bits);
2017 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
2018 VMALLOC_START, VMALLOC_END);
2019 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
2020 VMEMMAP_BASE, VMEMMAP_BASE << 1);
2021}
2022
2023static void __init tsb_phys_patch(void)
2024{
2025 struct tsb_ldquad_phys_patch_entry *pquad;
2026 struct tsb_phys_patch_entry *p;
2027
2028 pquad = &__tsb_ldquad_phys_patch;
2029 while (pquad < &__tsb_ldquad_phys_patch_end) {
2030 unsigned long addr = pquad->addr;
2031
2032 if (tlb_type == hypervisor)
2033 *(unsigned int *) addr = pquad->sun4v_insn;
2034 else
2035 *(unsigned int *) addr = pquad->sun4u_insn;
2036 wmb();
2037 __asm__ __volatile__("flush %0"
2038 : /* no outputs */
2039 : "r" (addr));
2040
2041 pquad++;
2042 }
2043
2044 p = &__tsb_phys_patch;
2045 while (p < &__tsb_phys_patch_end) {
2046 unsigned long addr = p->addr;
2047
2048 *(unsigned int *) addr = p->insn;
2049 wmb();
2050 __asm__ __volatile__("flush %0"
2051 : /* no outputs */
2052 : "r" (addr));
2053
2054 p++;
2055 }
2056}
2057
2058/* Don't mark as init, we give this to the Hypervisor. */
2059#ifndef CONFIG_DEBUG_PAGEALLOC
2060#define NUM_KTSB_DESCR 2
2061#else
2062#define NUM_KTSB_DESCR 1
2063#endif
2064static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
2065
2066/* The swapper TSBs are loaded with a base sequence of:
2067 *
2068 * sethi %uhi(SYMBOL), REG1
2069 * sethi %hi(SYMBOL), REG2
2070 * or REG1, %ulo(SYMBOL), REG1
2071 * or REG2, %lo(SYMBOL), REG2
2072 * sllx REG1, 32, REG1
2073 * or REG1, REG2, REG1
2074 *
2075 * When we use physical addressing for the TSB accesses, we patch the
2076 * first four instructions in the above sequence.
2077 */
2078
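/* Illustration: high_bits/low_bits below are the upper and lower 32-bit
 * halves of the TSB physical address; the top 22 bits of each half are
 * patched into the sethi immediates (ia[0], ia[1]) and the low 10 bits of
 * each half into the or immediates (ia[2], ia[3]), matching the
 * sethi/sethi/or/or prologue described above.
 */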
2079static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2080{
2081 unsigned long high_bits, low_bits;
2082
2083 high_bits = (pa >> 32) & 0xffffffff;
2084 low_bits = (pa >> 0) & 0xffffffff;
2085
2086 while (start < end) {
2087 unsigned int *ia = (unsigned int *)(unsigned long)*start;
2088
2089 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
2090 __asm__ __volatile__("flush %0" : : "r" (ia));
2091
2092 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
2093 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
2094
2095 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2096 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
2097
2098 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2099 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
2100
2101 start++;
2102 }
2103}
2104
2105static void ktsb_phys_patch(void)
2106{
2107 extern unsigned int __swapper_tsb_phys_patch;
2108 extern unsigned int __swapper_tsb_phys_patch_end;
2109 unsigned long ktsb_pa;
2110
2111 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2112 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2113 &__swapper_tsb_phys_patch_end, ktsb_pa);
2114#ifndef CONFIG_DEBUG_PAGEALLOC
2115 {
2116 extern unsigned int __swapper_4m_tsb_phys_patch;
2117 extern unsigned int __swapper_4m_tsb_phys_patch_end;
2118 ktsb_pa = (kern_base +
2119 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2120 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2121 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
2122 }
2123#endif
2124}
2125
2126static void __init sun4v_ktsb_init(void)
2127{
2128 unsigned long ktsb_pa;
2129
2130 /* First KTSB for PAGE_SIZE mappings. */
2131 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2132
2133 switch (PAGE_SIZE) {
2134 case 8 * 1024:
2135 default:
2136 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2137 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2138 break;
2139
2140 case 64 * 1024:
2141 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2142 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2143 break;
2144
2145 case 512 * 1024:
2146 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2147 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2148 break;
2149
2150 case 4 * 1024 * 1024:
2151 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2152 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2153 break;
2154 }
2155
2156 ktsb_descr[0].assoc = 1;
2157 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2158 ktsb_descr[0].ctx_idx = 0;
2159 ktsb_descr[0].tsb_base = ktsb_pa;
2160 ktsb_descr[0].resv = 0;
2161
2162#ifndef CONFIG_DEBUG_PAGEALLOC
2163 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
2164 ktsb_pa = (kern_base +
2165 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2166
2167 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2168 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2169 HV_PGSZ_MASK_256MB |
2170 HV_PGSZ_MASK_2GB |
2171 HV_PGSZ_MASK_16GB) &
2172 cpu_pgsz_mask);
2173 ktsb_descr[1].assoc = 1;
2174 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2175 ktsb_descr[1].ctx_idx = 0;
2176 ktsb_descr[1].tsb_base = ktsb_pa;
2177 ktsb_descr[1].resv = 0;
2178#endif
2179}
2180
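/* Editor's note: this hands the context-0 (kernel) TSB descriptors to
 * the sun4v hypervisor.  The descriptor array must be passed by its
 * real (physical) address, hence the kern_base/KERNBASE adjustment
 * below; on failure all we can do is report via the PROM and halt.
 */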
2181void sun4v_ktsb_register(void)
2182{
2183 unsigned long pa, ret;
2184
2185 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2186
2187 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2188 if (ret != 0) {
2189 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2190 "errors with %lx\n", pa, ret);
2191 prom_halt();
2192 }
2193}
2194
2195static void __init sun4u_linear_pte_xor_finalize(void)
2196{
2197#ifndef CONFIG_DEBUG_PAGEALLOC
2198 /* This is where we would add Panther support for
2199 * 32MB and 256MB pages.
2200 */
2201#endif
2202}
2203
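/* Editor's note on the xor encoding finalized below: a linear-map
 * virtual address is PAGE_OFFSET | paddr, so storing
 *
 *   kern_linear_pte_xor[n] = (TTE size/protection bits) ^ PAGE_OFFSET
 *
 * lets the TLB miss handler compute vaddr ^ kern_linear_pte_xor[n] and
 * obtain paddr | TTE-bits directly, since PAGE_OFFSET cancels out and
 * the attribute bits do not overlap the (aligned) physical address.
 */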
2204static void __init sun4v_linear_pte_xor_finalize(void)
2205{
2206 unsigned long pagecv_flag;
2207
2208 /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
2209 * enables MCD error. Do not set bit 9 on M7 processor.
2210 */
2211 switch (sun4v_chip_type) {
2212 case SUN4V_CHIP_SPARC_M7:
2213 case SUN4V_CHIP_SPARC_M8:
2214 case SUN4V_CHIP_SPARC_SN:
2215 pagecv_flag = 0x00;
2216 break;
2217 default:
2218 pagecv_flag = _PAGE_CV_4V;
2219 break;
2220 }
2221#ifndef CONFIG_DEBUG_PAGEALLOC
2222 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2223 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2224 PAGE_OFFSET;
2225 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2226 _PAGE_P_4V | _PAGE_W_4V);
2227 } else {
2228 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2229 }
2230
2231 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2232 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2233 PAGE_OFFSET;
2234 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2235 _PAGE_P_4V | _PAGE_W_4V);
2236 } else {
2237 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2238 }
2239
2240 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2241 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2242 PAGE_OFFSET;
2243 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2244 _PAGE_P_4V | _PAGE_W_4V);
2245 } else {
2246 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2247 }
2248#endif
2249}
2250
2251/* paging_init() sets up the page tables */
2252
2253static unsigned long last_valid_pfn;
2254
2255static void sun4u_pgprot_init(void);
2256static void sun4v_pgprot_init(void);
2257
2258#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2259#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2260#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2261#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2262#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2263#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2264
2265/* We need to exclude reserved regions. This exclusion will include
2266 * vmlinux and initrd. To be more precise the initrd size could be used to
2267 * compute a new lower limit because it is freed later during initialization.
2268 */
2269static void __init reduce_memory(phys_addr_t limit_ram)
2270{
2271 limit_ram += memblock_reserved_size();
2272 memblock_enforce_memory_limit(limit_ram);
2273}
2274
2275void __init paging_init(void)
2276{
2277 unsigned long end_pfn, shift, phys_base;
2278 unsigned long real_end, i;
2279
2280 setup_page_offset();
2281
2282 /* These build time checks make sure that the dcache_dirty_cpu()
2283 * page->flags usage will work.
2284 *
2285 * When a page gets marked as dcache-dirty, we store the
2286 * cpu number starting at bit 32 in the page->flags. Also,
2287 * functions like clear_dcache_dirty_cpu use the cpu mask
2288 * in 13-bit signed-immediate instruction fields.
2289 */
2290
2291 /*
2292 * Page flags must not reach into upper 32 bits that are used
2293 * for the cpu number
2294 */
2295 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2296
2297 /*
2298 * The bit fields placed in the high range must not reach below
2299 * the 32 bit boundary. Otherwise we cannot place the cpu field
2300 * at the 32 bit boundary.
2301 */
2302 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2303 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2304
2305 BUILD_BUG_ON(NR_CPUS > 4096);
2306
2307 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2308 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2309
2310 /* Invalidate both kernel TSBs. */
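	/* Editor's note: writing 0x40 into every byte sets bit 46 of
	 * each tag, assumed here to be the TSB tag-invalid bit, so both
	 * TSBs start out containing no matching entries.
	 */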
2311 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2312#ifndef CONFIG_DEBUG_PAGEALLOC
2313 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2314#endif
2315
2316 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2317 * bit on M7 processor. This is a conflicting usage of the same
2318 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2319 * Detection error on all pages and this will lead to problems
2320 * later. Kernel does not run with MCD enabled and hence rest
2321 * of the required steps to fully configure memory corruption
2322 * detection are not taken. We need to ensure TTE.mcde is not
2323 * set on M7 processor. Compute the value of cacheability
2324 * flag for use later taking this into consideration.
2325 */
2326 switch (sun4v_chip_type) {
2327 case SUN4V_CHIP_SPARC_M7:
2328 case SUN4V_CHIP_SPARC_M8:
2329 case SUN4V_CHIP_SPARC_SN:
2330 page_cache4v_flag = _PAGE_CP_4V;
2331 break;
2332 default:
2333 page_cache4v_flag = _PAGE_CACHE_4V;
2334 break;
2335 }
2336
2337 if (tlb_type == hypervisor)
2338 sun4v_pgprot_init();
2339 else
2340 sun4u_pgprot_init();
2341
2342 if (tlb_type == cheetah_plus ||
2343 tlb_type == hypervisor) {
2344 tsb_phys_patch();
2345 ktsb_phys_patch();
2346 }
2347
2348 if (tlb_type == hypervisor)
2349 sun4v_patch_tlb_handlers();
2350
2351 /* Find available physical memory...
2352 *
2353 * Read it twice in order to work around a bug in openfirmware.
2354 * The call to grab this table itself can cause openfirmware to
2355 * allocate memory, which in turn can take away some space from
2356 * the list of available memory. Reading it twice makes sure
2357 * we really do get the final value.
2358 */
2359 read_obp_translations();
2360 read_obp_memory("reg", &pall[0], &pall_ents);
2361 read_obp_memory("available", &pavail[0], &pavail_ents);
2362 read_obp_memory("available", &pavail[0], &pavail_ents);
2363
2364 phys_base = 0xffffffffffffffffUL;
2365 for (i = 0; i < pavail_ents; i++) {
2366 phys_base = min(phys_base, pavail[i].phys_addr);
2367 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2368 }
2369
2370 memblock_reserve(kern_base, kern_size);
2371
2372 find_ramdisk(phys_base);
2373
2374 if (cmdline_memory_size)
2375 reduce_memory(cmdline_memory_size);
2376
2377 memblock_allow_resize();
2378 memblock_dump_all();
2379
2380 set_bit(0, mmu_context_bmap);
2381
2382 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2383
2384 real_end = (unsigned long)_end;
2385 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2386 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2387 num_kernel_image_mappings);
2388
2389 /* Set kernel pgd to upper alias so physical page computations
2390 * work.
2391 */
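	/* Editor's note: 'shift' is the byte distance between the
	 * KERNBASE image alias and the PAGE_OFFSET linear alias of the
	 * kernel, so this pointer bump makes init_mm.pgd reference
	 * swapper_pg_dir through the linear map, where __pa() style
	 * physical address arithmetic is valid.
	 */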
2392 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2393
2394 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2395
2396 inherit_prom_mappings();
2397
2398 /* Ok, we can use our TLB miss and window trap handlers safely. */
2399 setup_tba();
2400
2401 __flush_tlb_all();
2402
2403 prom_build_devicetree();
2404 of_populate_present_mask();
2405#ifndef CONFIG_SMP
2406 of_fill_in_cpu_data();
2407#endif
2408
2409 if (tlb_type == hypervisor) {
2410 sun4v_mdesc_init();
2411 mdesc_populate_present_mask(cpu_all_mask);
2412#ifndef CONFIG_SMP
2413 mdesc_fill_in_cpu_data(cpu_all_mask);
2414#endif
2415 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2416
2417 sun4v_linear_pte_xor_finalize();
2418
2419 sun4v_ktsb_init();
2420 sun4v_ktsb_register();
2421 } else {
2422 unsigned long impl, ver;
2423
2424 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2425 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2426
2427 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2428 impl = ((ver >> 32) & 0xffff);
2429 if (impl == PANTHER_IMPL)
2430 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2431 HV_PGSZ_MASK_256MB);
2432
2433 sun4u_linear_pte_xor_finalize();
2434 }
2435
2436 /* Flush the TLBs and the 4M TSB so that the updated linear
2437 * pte XOR settings are realized for all mappings.
2438 */
2439 __flush_tlb_all();
2440#ifndef CONFIG_DEBUG_PAGEALLOC
2441 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2442#endif
2443 __flush_tlb_all();
2444
2445 /* Setup bootmem... */
2446 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2447
2448 kernel_physical_mapping_init();
2449
2450 {
2451 unsigned long max_zone_pfns[MAX_NR_ZONES];
2452
2453 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2454
2455 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2456
2457 free_area_init(max_zone_pfns);
2458 }
2459
2460 printk("Booting Linux...\n");
2461}
2462
2463int page_in_phys_avail(unsigned long paddr)
2464{
2465 int i;
2466
2467 paddr &= PAGE_MASK;
2468
2469 for (i = 0; i < pavail_ents; i++) {
2470 unsigned long start, end;
2471
2472 start = pavail[i].phys_addr;
2473 end = start + pavail[i].reg_size;
2474
2475 if (paddr >= start && paddr < end)
2476 return 1;
2477 }
2478 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2479 return 1;
2480#ifdef CONFIG_BLK_DEV_INITRD
2481 if (paddr >= __pa(initrd_start) &&
2482 paddr < __pa(PAGE_ALIGN(initrd_end)))
2483 return 1;
2484#endif
2485
2486 return 0;
2487}
2488
2489static void __init register_page_bootmem_info(void)
2490{
2491#ifdef CONFIG_NUMA
2492 int i;
2493
2494 for_each_online_node(i)
2495 if (NODE_DATA(i)->node_spanned_pages)
2496 register_page_bootmem_info_node(NODE_DATA(i));
2497#endif
2498}
2499void __init mem_init(void)
2500{
2501 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2502
2503 memblock_free_all();
2504
2505 /*
2506 * Must be done after boot memory is put on freelist, because here we
2507 * might set fields in deferred struct pages that have not yet been
2508 * initialized, and memblock_free_all() initializes all the reserved
2509 * deferred pages for us.
2510 */
2511 register_page_bootmem_info();
2512
2513 /*
2514 * Set up the zero page, mark it reserved, so that page count
2515 * is not manipulated when freeing the page from user ptes.
2516 */
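	/* Editor's note: mem_map_zero is what ZERO_PAGE() resolves to
	 * on sparc64, which is why it is allocated and reserved here.
	 */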
2517 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2518 if (mem_map_zero == NULL) {
2519 prom_printf("mem_init: Cannot alloc zero page.\n");
2520 prom_halt();
2521 }
2522 mark_page_reserved(mem_map_zero);
2523
2525 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2526 cheetah_ecache_flush_init();
2527}
2528
2529void free_initmem(void)
2530{
2531 unsigned long addr, initend;
2532 int do_free = 1;
2533
2534 /* If the physical memory maps were trimmed by kernel command
2535 * line options, don't even try freeing this initmem stuff up.
2536 * The kernel image could have been in the trimmed out region
2537 * and if so the freeing below will free invalid page structs.
2538 */
2539 if (cmdline_memory_size)
2540 do_free = 0;
2541
2542 /*
2543 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2544 */
2545 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2546 initend = (unsigned long)(__init_end) & PAGE_MASK;
2547 for (; addr < initend; addr += PAGE_SIZE) {
2548 unsigned long page;
2549
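		/* Editor's note: convert the KERNBASE image alias of
		 * this init page to its linear-map alias before it is
		 * handed to virt_to_page()/free_reserved_page().
		 */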
2550 page = (addr +
2551 ((unsigned long) __va(kern_base)) -
2552 ((unsigned long) KERNBASE));
2553 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2554
2555 if (do_free)
2556 free_reserved_page(virt_to_page(page));
2557 }
2558}
2559
2560pgprot_t PAGE_KERNEL __read_mostly;
2561EXPORT_SYMBOL(PAGE_KERNEL);
2562
2563pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2564pgprot_t PAGE_COPY __read_mostly;
2565
2566pgprot_t PAGE_SHARED __read_mostly;
2567EXPORT_SYMBOL(PAGE_SHARED);
2568
2569unsigned long pg_iobits __read_mostly;
2570
2571unsigned long _PAGE_IE __read_mostly;
2572EXPORT_SYMBOL(_PAGE_IE);
2573
2574unsigned long _PAGE_E __read_mostly;
2575EXPORT_SYMBOL(_PAGE_E);
2576
2577unsigned long _PAGE_CACHE __read_mostly;
2578EXPORT_SYMBOL(_PAGE_CACHE);
2579
2580#ifdef CONFIG_SPARSEMEM_VMEMMAP
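/* Editor's note: the virtual memmap is populated one PMD_SIZE block at
 * a time using PMD-huge TTEs (4MB hardware pages plus _PAGE_PMD_HUGE),
 * so struct page arrays are mapped with huge pages rather than with
 * individual 8K PTEs.
 */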
2581int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2582 int node, struct vmem_altmap *altmap)
2583{
2584 unsigned long pte_base;
2585
2586 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2587 _PAGE_CP_4U | _PAGE_CV_4U |
2588 _PAGE_P_4U | _PAGE_W_4U);
2589 if (tlb_type == hypervisor)
2590 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2591 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2592
2593 pte_base |= _PAGE_PMD_HUGE;
2594
2595 vstart = vstart & PMD_MASK;
2596 vend = ALIGN(vend, PMD_SIZE);
2597 for (; vstart < vend; vstart += PMD_SIZE) {
2598 pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
2599 unsigned long pte;
2600 p4d_t *p4d;
2601 pud_t *pud;
2602 pmd_t *pmd;
2603
2604 if (!pgd)
2605 return -ENOMEM;
2606
2607 p4d = vmemmap_p4d_populate(pgd, vstart, node);
2608 if (!p4d)
2609 return -ENOMEM;
2610
2611 pud = vmemmap_pud_populate(p4d, vstart, node);
2612 if (!pud)
2613 return -ENOMEM;
2614
2615 pmd = pmd_offset(pud, vstart);
2616 pte = pmd_val(*pmd);
2617 if (!(pte & _PAGE_VALID)) {
2618 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2619
2620 if (!block)
2621 return -ENOMEM;
2622
2623 pmd_val(*pmd) = pte_base | __pa(block);
2624 }
2625 }
2626
2627 return 0;
2628}
2629
2630void vmemmap_free(unsigned long start, unsigned long end,
2631 struct vmem_altmap *altmap)
2632{
2633}
2634#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2635
2636/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
2637static pgprot_t protection_map[16] __ro_after_init;
2638
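/* Editor's note: the table is indexed by the low four VM flag bits
 * (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8); see
 * vm_get_page_prot() at the end of this file.  Entries 0x0-0x7 are the
 * private (copy-on-write) variants, 0x8-0xf the shared ones, and
 * prot_init_common() strips the exec bit from the readable entries
 * whose index lacks VM_EXEC.
 */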
2639static void prot_init_common(unsigned long page_none,
2640 unsigned long page_shared,
2641 unsigned long page_copy,
2642 unsigned long page_readonly,
2643 unsigned long page_exec_bit)
2644{
2645 PAGE_COPY = __pgprot(page_copy);
2646 PAGE_SHARED = __pgprot(page_shared);
2647
2648 protection_map[0x0] = __pgprot(page_none);
2649 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2650 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2651 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2652 protection_map[0x4] = __pgprot(page_readonly);
2653 protection_map[0x5] = __pgprot(page_readonly);
2654 protection_map[0x6] = __pgprot(page_copy);
2655 protection_map[0x7] = __pgprot(page_copy);
2656 protection_map[0x8] = __pgprot(page_none);
2657 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2658 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2659 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2660 protection_map[0xc] = __pgprot(page_readonly);
2661 protection_map[0xd] = __pgprot(page_readonly);
2662 protection_map[0xe] = __pgprot(page_shared);
2663 protection_map[0xf] = __pgprot(page_shared);
2664}
2665
2666static void __init sun4u_pgprot_init(void)
2667{
2668 unsigned long page_none, page_shared, page_copy, page_readonly;
2669 unsigned long page_exec_bit;
2670 int i;
2671
2672 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2673 _PAGE_CACHE_4U | _PAGE_P_4U |
2674 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2675 _PAGE_EXEC_4U);
2676 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2677 _PAGE_CACHE_4U | _PAGE_P_4U |
2678 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2679 _PAGE_EXEC_4U | _PAGE_L_4U);
2680
2681 _PAGE_IE = _PAGE_IE_4U;
2682 _PAGE_E = _PAGE_E_4U;
2683 _PAGE_CACHE = _PAGE_CACHE_4U;
2684
2685 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2686 __ACCESS_BITS_4U | _PAGE_E_4U);
2687
2688#ifdef CONFIG_DEBUG_PAGEALLOC
2689 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2690#else
2691 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2692 PAGE_OFFSET;
2693#endif
2694 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2695 _PAGE_P_4U | _PAGE_W_4U);
2696
2697 for (i = 1; i < 4; i++)
2698 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2699
2700 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2701 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2702 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2703
2705 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2706 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2707 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2708 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2709 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2710 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2711 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2712
2713 page_exec_bit = _PAGE_EXEC_4U;
2714
2715 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2716 page_exec_bit);
2717}
2718
2719static void __init sun4v_pgprot_init(void)
2720{
2721 unsigned long page_none, page_shared, page_copy, page_readonly;
2722 unsigned long page_exec_bit;
2723 int i;
2724
2725 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2726 page_cache4v_flag | _PAGE_P_4V |
2727 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2728 _PAGE_EXEC_4V);
2729 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2730
2731 _PAGE_IE = _PAGE_IE_4V;
2732 _PAGE_E = _PAGE_E_4V;
2733 _PAGE_CACHE = page_cache4v_flag;
2734
2735#ifdef CONFIG_DEBUG_PAGEALLOC
2736 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2737#else
2738 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2739 PAGE_OFFSET;
2740#endif
2741 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2742 _PAGE_W_4V);
2743
2744 for (i = 1; i < 4; i++)
2745 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2746
2747 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2748 __ACCESS_BITS_4V | _PAGE_E_4V);
2749
2750 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2751 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2752 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2753 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2754
2755 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2756 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2757 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2758 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2759 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2760 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2761 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2762
2763 page_exec_bit = _PAGE_EXEC_4V;
2764
2765 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2766 page_exec_bit);
2767}
2768
2769unsigned long pte_sz_bits(unsigned long sz)
2770{
2771 if (tlb_type == hypervisor) {
2772 switch (sz) {
2773 case 8 * 1024:
2774 default:
2775 return _PAGE_SZ8K_4V;
2776 case 64 * 1024:
2777 return _PAGE_SZ64K_4V;
2778 case 512 * 1024:
2779 return _PAGE_SZ512K_4V;
2780 case 4 * 1024 * 1024:
2781 return _PAGE_SZ4MB_4V;
2782 }
2783 } else {
2784 switch (sz) {
2785 case 8 * 1024:
2786 default:
2787 return _PAGE_SZ8K_4U;
2788 case 64 * 1024:
2789 return _PAGE_SZ64K_4U;
2790 case 512 * 1024:
2791 return _PAGE_SZ512K_4U;
2792 case 4 * 1024 * 1024:
2793 return _PAGE_SZ4MB_4U;
2794 }
2795 }
2796}
2797
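/* Editor's note: builds a non-cached PTE for an I/O mapping.  The bus
 * 'space' bits are placed above bit 32 of the address, the caller's
 * pgprot is forced non-cacheable, and the TTE page-size field bits for
 * 'page_size' are ORed in via pte_sz_bits() above.
 */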
2798pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2799{
2800 pte_t pte;
2801
2802 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2803 pte_val(pte) |= (((unsigned long)space) << 32);
2804 pte_val(pte) |= pte_sz_bits(page_size);
2805
2806 return pte;
2807}
2808
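/* Editor's note: TTE data for the locked 4MB mappings of the kernel
 * image (see remap_kernel()).  The sun4u variant sets the L (lock) bit
 * explicitly; on sun4v the mappings are made permanent through the
 * hypervisor instead, so no lock bit appears in the TTE.
 */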
2809static unsigned long kern_large_tte(unsigned long paddr)
2810{
2811 unsigned long val;
2812
2813 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2814 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2815 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2816 if (tlb_type == hypervisor)
2817 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2818 page_cache4v_flag | _PAGE_P_4V |
2819 _PAGE_EXEC_4V | _PAGE_W_4V);
2820
2821 return val | paddr;
2822}
2823
2824/* If not locked, zap it. */
2825void __flush_tlb_all(void)
2826{
2827 unsigned long pstate;
2828 int i;
2829
2830 __asm__ __volatile__("flushw\n\t"
2831 "rdpr %%pstate, %0\n\t"
2832 "wrpr %0, %1, %%pstate"
2833 : "=r" (pstate)
2834 : "i" (PSTATE_IE));
2835 if (tlb_type == hypervisor) {
2836 sun4v_mmu_demap_all();
2837 } else if (tlb_type == spitfire) {
2838 for (i = 0; i < 64; i++) {
2839 /* Spitfire Errata #32 workaround */
2840 /* NOTE: Always runs on spitfire, so no
2841 * cheetah+ page size encodings.
2842 */
2843 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2844 "flush %%g6"
2845 : /* No outputs */
2846 : "r" (0),
2847 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2848
2849 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2850 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2851 "membar #Sync"
2852 : /* no outputs */
2853 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2854 spitfire_put_dtlb_data(i, 0x0UL);
2855 }
2856
2857 /* Spitfire Errata #32 workaround */
2858 /* NOTE: Always runs on spitfire, so no
2859 * cheetah+ page size encodings.
2860 */
2861 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2862 "flush %%g6"
2863 : /* No outputs */
2864 : "r" (0),
2865 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2866
2867 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2868 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2869 "membar #Sync"
2870 : /* no outputs */
2871 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2872 spitfire_put_itlb_data(i, 0x0UL);
2873 }
2874 }
2875 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2876 cheetah_flush_dtlb_all();
2877 cheetah_flush_itlb_all();
2878 }
2879 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2880 : : "r" (pstate));
2881}
2882
2883pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
2884{
2885 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2886 pte_t *pte = NULL;
2887
2888 if (page)
2889 pte = (pte_t *) page_address(page);
2890
2891 return pte;
2892}
2893
2894pgtable_t pte_alloc_one(struct mm_struct *mm)
2895{
2896 struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2897 if (!page)
2898 return NULL;
2899 if (!pgtable_pte_page_ctor(page)) {
2900 __free_page(page);
2901 return NULL;
2902 }
2903 return (pte_t *) page_address(page);
2904}
2905
2906void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2907{
2908 free_page((unsigned long)pte);
2909}
2910
2911static void __pte_free(pgtable_t pte)
2912{
2913 struct page *page = virt_to_page(pte);
2914
2915 pgtable_pte_page_dtor(page);
2916 __free_page(page);
2917}
2918
2919void pte_free(struct mm_struct *mm, pgtable_t pte)
2920{
2921 __pte_free(pte);
2922}
2923
2924void pgtable_free(void *table, bool is_page)
2925{
2926 if (is_page)
2927 __pte_free(table);
2928 else
2929 kmem_cache_free(pgtable_cache, table);
2930}
2931
2932#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2933void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2934 pmd_t *pmd)
2935{
2936 unsigned long pte, flags;
2937 struct mm_struct *mm;
2938 pmd_t entry = *pmd;
2939
2940 if (!pmd_large(entry) || !pmd_young(entry))
2941 return;
2942
2943 pte = pmd_val(entry);
2944
2945 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2946 if (!(pte & _PAGE_VALID))
2947 return;
2948
2949 /* We are fabricating 8MB pages using 4MB real hw pages. */
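	/* Editor's note: the bit at REAL_HPAGE_SHIFT selects which 4MB
	 * half of the 8MB software huge page faulted, so the TSB entry
	 * built below describes the correct underlying hardware mapping.
	 */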
2950 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2951
2952 mm = vma->vm_mm;
2953
2954 spin_lock_irqsave(&mm->context.lock, flags);
2955
2956 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2957 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2958 addr, pte);
2959
2960 spin_unlock_irqrestore(&mm->context.lock, flags);
2961}
2962#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2963
2964#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2965static void context_reload(void *__data)
2966{
2967 struct mm_struct *mm = __data;
2968
2969 if (mm == current->mm)
2970 load_secondary_context(mm);
2971}
2972
2973void hugetlb_setup(struct pt_regs *regs)
2974{
2975 struct mm_struct *mm = current->mm;
2976 struct tsb_config *tp;
2977
2978 if (faulthandler_disabled() || !mm) {
2979 const struct exception_table_entry *entry;
2980
2981 entry = search_exception_tables(regs->tpc);
2982 if (entry) {
2983 regs->tpc = entry->fixup;
2984 regs->tnpc = regs->tpc + 4;
2985 return;
2986 }
2987 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2988 die_if_kernel("HugeTSB in atomic", regs);
2989 }
2990
2991 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2992 if (likely(tp->tsb == NULL))
2993 tsb_grow(mm, MM_TSB_HUGE, 0);
2994
2995 tsb_context_switch(mm);
2996 smp_tsb_sync(mm);
2997
2998 /* On UltraSPARC-III+ and later, configure the second half of
2999 * the Data-TLB for huge pages.
3000 */
3001 if (tlb_type == cheetah_plus) {
3002 bool need_context_reload = false;
3003 unsigned long ctx;
3004
3005 spin_lock_irq(&ctx_alloc_lock);
3006 ctx = mm->context.sparc64_ctx_val;
3007 ctx &= ~CTX_PGSZ_MASK;
3008 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
3009 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
3010
3011 if (ctx != mm->context.sparc64_ctx_val) {
3012 /* When changing the page size fields, we
3013 * must perform a context flush so that no
3014 * stale entries match. This flush must
3015 * occur with the original context register
3016 * settings.
3017 */
3018 do_flush_tlb_mm(mm);
3019
3020 /* Reload the context register of all processors
3021 * also executing in this address space.
3022 */
3023 mm->context.sparc64_ctx_val = ctx;
3024 need_context_reload = true;
3025 }
3026 spin_unlock_irq(&ctx_alloc_lock);
3027
3028 if (need_context_reload)
3029 on_each_cpu(context_reload, mm, 0);
3030 }
3031}
3032#endif
3033
3034static struct resource code_resource = {
3035 .name = "Kernel code",
3036 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3037};
3038
3039static struct resource data_resource = {
3040 .name = "Kernel data",
3041 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3042};
3043
3044static struct resource bss_resource = {
3045 .name = "Kernel bss",
3046 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3047};
3048
3049static inline resource_size_t compute_kern_paddr(void *addr)
3050{
3051 return (resource_size_t) (addr - KERNBASE + kern_base);
3052}
3053
3054static void __init kernel_lds_init(void)
3055{
3056 code_resource.start = compute_kern_paddr(_text);
3057 code_resource.end = compute_kern_paddr(_etext - 1);
3058 data_resource.start = compute_kern_paddr(_etext);
3059 data_resource.end = compute_kern_paddr(_edata - 1);
3060 bss_resource.start = compute_kern_paddr(__bss_start);
3061 bss_resource.end = compute_kern_paddr(_end - 1);
3062}
3063
3064static int __init report_memory(void)
3065{
3066 int i;
3067 struct resource *res;
3068
3069 kernel_lds_init();
3070
3071 for (i = 0; i < pavail_ents; i++) {
3072 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
3073
3074 if (!res) {
3075 pr_warn("Failed to allocate resource.\n");
3076 break;
3077 }
3078
3079 res->name = "System RAM";
3080 res->start = pavail[i].phys_addr;
3081 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
3082 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
3083
3084 if (insert_resource(&iomem_resource, res) < 0) {
3085 pr_warn("Resource insertion failed.\n");
3086 break;
3087 }
3088
3089 insert_resource(res, &code_resource);
3090 insert_resource(res, &data_resource);
3091 insert_resource(res, &bss_resource);
3092 }
3093
3094 return 0;
3095}
3096arch_initcall(report_memory);
3097
3098#ifdef CONFIG_SMP
3099#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
3100#else
3101#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
3102#endif
3103
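/* Editor's note: kernel-range flushes deliberately skip the
 * LOW_OBP_ADDRESS..HI_OBP_ADDRESS window so that OpenBoot PROM
 * translations (whose misses are serviced separately in ktlb.S) are
 * never torn down by the kernel.
 */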
3104void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3105{
3106 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3107 if (start < LOW_OBP_ADDRESS) {
3108 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
3109 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
3110 }
3111 if (end > HI_OBP_ADDRESS) {
3112 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3113 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3114 }
3115 } else {
3116 flush_tsb_kernel_range(start, end);
3117 do_flush_tlb_kernel_range(start, end);
3118 }
3119}
3120
3121void copy_user_highpage(struct page *to, struct page *from,
3122 unsigned long vaddr, struct vm_area_struct *vma)
3123{
3124 char *vfrom, *vto;
3125
3126 vfrom = kmap_atomic(from);
3127 vto = kmap_atomic(to);
3128 copy_user_page(vto, vfrom, vaddr, to);
3129 kunmap_atomic(vto);
3130 kunmap_atomic(vfrom);
3131
3132 /* If this page has ADI enabled, copy over any ADI tags
3133 * as well
3134 */
3135 if (vma->vm_flags & VM_SPARC_ADI) {
3136 unsigned long pfrom, pto, i, adi_tag;
3137
3138 pfrom = page_to_phys(from);
3139 pto = page_to_phys(to);
3140
3141 for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3142 asm volatile("ldxa [%1] %2, %0\n\t"
3143 : "=r" (adi_tag)
3144 : "r" (i), "i" (ASI_MCD_REAL));
3145 asm volatile("stxa %0, [%1] %2\n\t"
3146 :
3147 : "r" (adi_tag), "r" (pto),
3148 "i" (ASI_MCD_REAL));
3149 pto += adi_blksize();
3150 }
3151 asm volatile("membar #Sync\n\t");
3152 }
3153}
3154EXPORT_SYMBOL(copy_user_highpage);
3155
3156void copy_highpage(struct page *to, struct page *from)
3157{
3158 char *vfrom, *vto;
3159
3160 vfrom = kmap_atomic(from);
3161 vto = kmap_atomic(to);
3162 copy_page(vto, vfrom);
3163 kunmap_atomic(vto);
3164 kunmap_atomic(vfrom);
3165
3166 /* If this platform is ADI enabled, copy any ADI tags
3167 * as well
3168 */
3169 if (adi_capable()) {
3170 unsigned long pfrom, pto, i, adi_tag;
3171
3172 pfrom = page_to_phys(from);
3173 pto = page_to_phys(to);
3174
3175 for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3176 asm volatile("ldxa [%1] %2, %0\n\t"
3177 : "=r" (adi_tag)
3178 : "r" (i), "i" (ASI_MCD_REAL));
3179 asm volatile("stxa %0, [%1] %2\n\t"
3180 :
3181 : "r" (adi_tag), "r" (pto),
3182 "i" (ASI_MCD_REAL));
3183 pto += adi_blksize();
3184 }
3185 asm volatile("membar #Sync\n\t");
3186 }
3187}
3188EXPORT_SYMBOL(copy_highpage);
3189
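/* Editor's note: besides the protection_map[] lookup described above
 * prot_init_common(), ADI mappings (VM_SPARC_ADI) also get the sun4v
 * MCD bit so hardware tag checking is enabled for them.
 */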
3190pgprot_t vm_get_page_prot(unsigned long vm_flags)
3191{
3192 unsigned long prot = pgprot_val(protection_map[vm_flags &
3193 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
3194
3195 if (vm_flags & VM_SPARC_ADI)
3196 prot |= _PAGE_MCD_4V;
3197
3198 return __pgprot(prot);
3199}
3200EXPORT_SYMBOL(vm_get_page_prot);
1/*
2 * arch/sparc64/mm/init.c
3 *
4 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
6 */
7
8#include <linux/extable.h>
9#include <linux/kernel.h>
10#include <linux/sched.h>
11#include <linux/string.h>
12#include <linux/init.h>
13#include <linux/bootmem.h>
14#include <linux/mm.h>
15#include <linux/hugetlb.h>
16#include <linux/initrd.h>
17#include <linux/swap.h>
18#include <linux/pagemap.h>
19#include <linux/poison.h>
20#include <linux/fs.h>
21#include <linux/seq_file.h>
22#include <linux/kprobes.h>
23#include <linux/cache.h>
24#include <linux/sort.h>
25#include <linux/ioport.h>
26#include <linux/percpu.h>
27#include <linux/memblock.h>
28#include <linux/mmzone.h>
29#include <linux/gfp.h>
30
31#include <asm/head.h>
32#include <asm/page.h>
33#include <asm/pgalloc.h>
34#include <asm/pgtable.h>
35#include <asm/oplib.h>
36#include <asm/iommu.h>
37#include <asm/io.h>
38#include <linux/uaccess.h>
39#include <asm/mmu_context.h>
40#include <asm/tlbflush.h>
41#include <asm/dma.h>
42#include <asm/starfire.h>
43#include <asm/tlb.h>
44#include <asm/spitfire.h>
45#include <asm/sections.h>
46#include <asm/tsb.h>
47#include <asm/hypervisor.h>
48#include <asm/prom.h>
49#include <asm/mdesc.h>
50#include <asm/cpudata.h>
51#include <asm/setup.h>
52#include <asm/irq.h>
53
54#include "init_64.h"
55
56unsigned long kern_linear_pte_xor[4] __read_mostly;
57static unsigned long page_cache4v_flag;
58
59/* A bitmap, two bits for every 256MB of physical memory. These two
60 * bits determine what page size we use for kernel linear
61 * translations. They form an index into kern_linear_pte_xor[]. The
62 * value in the indexed slot is XOR'd with the TLB miss virtual
63 * address to form the resulting TTE. The mapping is:
64 *
65 * 0 ==> 4MB
66 * 1 ==> 256MB
67 * 2 ==> 2GB
68 * 3 ==> 16GB
69 *
70 * All sun4v chips support 256MB pages. Only SPARC-T4 and later
71 * support 2GB pages, and hopefully future cpus will support the 16GB
72 * pages as well. For slots 2 and 3, we encode a 256MB TTE xor there
73 * if these larger page sizes are not supported by the cpu.
74 *
75 * It would be nice to determine this from the machine description
76 * 'cpu' properties, but we need to have this table setup before the
77 * MDESC is initialized.
78 */
79
80#ifndef CONFIG_DEBUG_PAGEALLOC
81/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
82 * Space is allocated for this right after the trap table in
83 * arch/sparc64/kernel/head.S
84 */
85extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
86#endif
87extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
88
89static unsigned long cpu_pgsz_mask;
90
91#define MAX_BANKS 1024
92
93static struct linux_prom64_registers pavail[MAX_BANKS];
94static int pavail_ents;
95
96u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
97
98static int cmp_p64(const void *a, const void *b)
99{
100 const struct linux_prom64_registers *x = a, *y = b;
101
102 if (x->phys_addr > y->phys_addr)
103 return 1;
104 if (x->phys_addr < y->phys_addr)
105 return -1;
106 return 0;
107}
108
109static void __init read_obp_memory(const char *property,
110 struct linux_prom64_registers *regs,
111 int *num_ents)
112{
113 phandle node = prom_finddevice("/memory");
114 int prop_size = prom_getproplen(node, property);
115 int ents, ret, i;
116
117 ents = prop_size / sizeof(struct linux_prom64_registers);
118 if (ents > MAX_BANKS) {
119 prom_printf("The machine has more %s property entries than "
120 "this kernel can support (%d).\n",
121 property, MAX_BANKS);
122 prom_halt();
123 }
124
125 ret = prom_getproperty(node, property, (char *) regs, prop_size);
126 if (ret == -1) {
127 prom_printf("Couldn't get %s property from /memory.\n",
128 property);
129 prom_halt();
130 }
131
132 /* Sanitize what we got from the firmware, by page aligning
133 * everything.
134 */
135 for (i = 0; i < ents; i++) {
136 unsigned long base, size;
137
138 base = regs[i].phys_addr;
139 size = regs[i].reg_size;
140
141 size &= PAGE_MASK;
142 if (base & ~PAGE_MASK) {
143 unsigned long new_base = PAGE_ALIGN(base);
144
145 size -= new_base - base;
146 if ((long) size < 0L)
147 size = 0UL;
148 base = new_base;
149 }
150 if (size == 0UL) {
151 /* If it is empty, simply get rid of it.
152 * This simplifies the logic of the other
153 * functions that process these arrays.
154 */
155 memmove(®s[i], ®s[i + 1],
156 (ents - i - 1) * sizeof(regs[0]));
157 i--;
158 ents--;
159 continue;
160 }
161 regs[i].phys_addr = base;
162 regs[i].reg_size = size;
163 }
164
165 *num_ents = ents;
166
167 sort(regs, ents, sizeof(struct linux_prom64_registers),
168 cmp_p64, NULL);
169}
170
171/* Kernel physical address base and size in bytes. */
172unsigned long kern_base __read_mostly;
173unsigned long kern_size __read_mostly;
174
175/* Initial ramdisk setup */
176extern unsigned long sparc_ramdisk_image64;
177extern unsigned int sparc_ramdisk_image;
178extern unsigned int sparc_ramdisk_size;
179
180struct page *mem_map_zero __read_mostly;
181EXPORT_SYMBOL(mem_map_zero);
182
183unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
184
185unsigned long sparc64_kern_pri_context __read_mostly;
186unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
187unsigned long sparc64_kern_sec_context __read_mostly;
188
189int num_kernel_image_mappings;
190
191#ifdef CONFIG_DEBUG_DCFLUSH
192atomic_t dcpage_flushes = ATOMIC_INIT(0);
193#ifdef CONFIG_SMP
194atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
195#endif
196#endif
197
198inline void flush_dcache_page_impl(struct page *page)
199{
200 BUG_ON(tlb_type == hypervisor);
201#ifdef CONFIG_DEBUG_DCFLUSH
202 atomic_inc(&dcpage_flushes);
203#endif
204
205#ifdef DCACHE_ALIASING_POSSIBLE
206 __flush_dcache_page(page_address(page),
207 ((tlb_type == spitfire) &&
208 page_mapping(page) != NULL));
209#else
210 if (page_mapping(page) != NULL &&
211 tlb_type == spitfire)
212 __flush_icache_page(__pa(page_address(page)));
213#endif
214}
215
216#define PG_dcache_dirty PG_arch_1
217#define PG_dcache_cpu_shift 32UL
218#define PG_dcache_cpu_mask \
219 ((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
220
221#define dcache_dirty_cpu(page) \
222 (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
223
224static inline void set_dcache_dirty(struct page *page, int this_cpu)
225{
226 unsigned long mask = this_cpu;
227 unsigned long non_cpu_bits;
228
229 non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
230 mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
231
232 __asm__ __volatile__("1:\n\t"
233 "ldx [%2], %%g7\n\t"
234 "and %%g7, %1, %%g1\n\t"
235 "or %%g1, %0, %%g1\n\t"
236 "casx [%2], %%g7, %%g1\n\t"
237 "cmp %%g7, %%g1\n\t"
238 "bne,pn %%xcc, 1b\n\t"
239 " nop"
240 : /* no outputs */
241 : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
242 : "g1", "g7");
243}
244
245static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
246{
247 unsigned long mask = (1UL << PG_dcache_dirty);
248
249 __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
250 "1:\n\t"
251 "ldx [%2], %%g7\n\t"
252 "srlx %%g7, %4, %%g1\n\t"
253 "and %%g1, %3, %%g1\n\t"
254 "cmp %%g1, %0\n\t"
255 "bne,pn %%icc, 2f\n\t"
256 " andn %%g7, %1, %%g1\n\t"
257 "casx [%2], %%g7, %%g1\n\t"
258 "cmp %%g7, %%g1\n\t"
259 "bne,pn %%xcc, 1b\n\t"
260 " nop\n"
261 "2:"
262 : /* no outputs */
263 : "r" (cpu), "r" (mask), "r" (&page->flags),
264 "i" (PG_dcache_cpu_mask),
265 "i" (PG_dcache_cpu_shift)
266 : "g1", "g7");
267}
268
269static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
270{
271 unsigned long tsb_addr = (unsigned long) ent;
272
273 if (tlb_type == cheetah_plus || tlb_type == hypervisor)
274 tsb_addr = __pa(tsb_addr);
275
276 __tsb_insert(tsb_addr, tag, pte);
277}
278
279unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
280
281static void flush_dcache(unsigned long pfn)
282{
283 struct page *page;
284
285 page = pfn_to_page(pfn);
286 if (page) {
287 unsigned long pg_flags;
288
289 pg_flags = page->flags;
290 if (pg_flags & (1UL << PG_dcache_dirty)) {
291 int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
292 PG_dcache_cpu_mask);
293 int this_cpu = get_cpu();
294
295 /* This is just to optimize away some function calls
296 * in the SMP case.
297 */
298 if (cpu == this_cpu)
299 flush_dcache_page_impl(page);
300 else
301 smp_flush_dcache_page_impl(page, cpu);
302
303 clear_dcache_dirty_cpu(page, cpu);
304
305 put_cpu();
306 }
307 }
308}
309
310/* mm->context.lock must be held */
311static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
312 unsigned long tsb_hash_shift, unsigned long address,
313 unsigned long tte)
314{
315 struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
316 unsigned long tag;
317
318 if (unlikely(!tsb))
319 return;
320
321 tsb += ((address >> tsb_hash_shift) &
322 (mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
323 tag = (address >> 22UL);
324 tsb_insert(tsb, tag, tte);
325}
326
327void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
328{
329 struct mm_struct *mm;
330 unsigned long flags;
331 pte_t pte = *ptep;
332
333 if (tlb_type != hypervisor) {
334 unsigned long pfn = pte_pfn(pte);
335
336 if (pfn_valid(pfn))
337 flush_dcache(pfn);
338 }
339
340 mm = vma->vm_mm;
341
342 /* Don't insert a non-valid PTE into the TSB, we'll deadlock. */
343 if (!pte_accessible(mm, pte))
344 return;
345
346 spin_lock_irqsave(&mm->context.lock, flags);
347
348#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
349 if ((mm->context.hugetlb_pte_count || mm->context.thp_pte_count) &&
350 is_hugetlb_pte(pte)) {
351 /* We are fabricating 8MB pages using 4MB real hw pages. */
352 pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
353 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
354 address, pte_val(pte));
355 } else
356#endif
357 __update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
358 address, pte_val(pte));
359
360 spin_unlock_irqrestore(&mm->context.lock, flags);
361}
362
363void flush_dcache_page(struct page *page)
364{
365 struct address_space *mapping;
366 int this_cpu;
367
368 if (tlb_type == hypervisor)
369 return;
370
371 /* Do not bother with the expensive D-cache flush if it
372 * is merely the zero page. The 'bigcore' testcase in GDB
373 * causes this case to run millions of times.
374 */
375 if (page == ZERO_PAGE(0))
376 return;
377
378 this_cpu = get_cpu();
379
380 mapping = page_mapping(page);
381 if (mapping && !mapping_mapped(mapping)) {
382 int dirty = test_bit(PG_dcache_dirty, &page->flags);
383 if (dirty) {
384 int dirty_cpu = dcache_dirty_cpu(page);
385
386 if (dirty_cpu == this_cpu)
387 goto out;
388 smp_flush_dcache_page_impl(page, dirty_cpu);
389 }
390 set_dcache_dirty(page, this_cpu);
391 } else {
392 /* We could delay the flush for the !page_mapping
393 * case too. But that case is for exec env/arg
394 * pages and those are %99 certainly going to get
395 * faulted into the tlb (and thus flushed) anyways.
396 */
397 flush_dcache_page_impl(page);
398 }
399
400out:
401 put_cpu();
402}
403EXPORT_SYMBOL(flush_dcache_page);
404
405void __kprobes flush_icache_range(unsigned long start, unsigned long end)
406{
407 /* Cheetah and Hypervisor platform cpus have coherent I-cache. */
408 if (tlb_type == spitfire) {
409 unsigned long kaddr;
410
411 /* This code only runs on Spitfire cpus so this is
412 * why we can assume _PAGE_PADDR_4U.
413 */
414 for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
415 unsigned long paddr, mask = _PAGE_PADDR_4U;
416
417 if (kaddr >= PAGE_OFFSET)
418 paddr = kaddr & mask;
419 else {
420 pgd_t *pgdp = pgd_offset_k(kaddr);
421 pud_t *pudp = pud_offset(pgdp, kaddr);
422 pmd_t *pmdp = pmd_offset(pudp, kaddr);
423 pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
424
425 paddr = pte_val(*ptep) & mask;
426 }
427 __flush_icache_page(paddr);
428 }
429 }
430}
431EXPORT_SYMBOL(flush_icache_range);
432
433void mmu_info(struct seq_file *m)
434{
435 static const char *pgsz_strings[] = {
436 "8K", "64K", "512K", "4MB", "32MB",
437 "256MB", "2GB", "16GB",
438 };
439 int i, printed;
440
441 if (tlb_type == cheetah)
442 seq_printf(m, "MMU Type\t: Cheetah\n");
443 else if (tlb_type == cheetah_plus)
444 seq_printf(m, "MMU Type\t: Cheetah+\n");
445 else if (tlb_type == spitfire)
446 seq_printf(m, "MMU Type\t: Spitfire\n");
447 else if (tlb_type == hypervisor)
448 seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
449 else
450 seq_printf(m, "MMU Type\t: ???\n");
451
452 seq_printf(m, "MMU PGSZs\t: ");
453 printed = 0;
454 for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
455 if (cpu_pgsz_mask & (1UL << i)) {
456 seq_printf(m, "%s%s",
457 printed ? "," : "", pgsz_strings[i]);
458 printed++;
459 }
460 }
461 seq_putc(m, '\n');
462
463#ifdef CONFIG_DEBUG_DCFLUSH
464 seq_printf(m, "DCPageFlushes\t: %d\n",
465 atomic_read(&dcpage_flushes));
466#ifdef CONFIG_SMP
467 seq_printf(m, "DCPageFlushesXC\t: %d\n",
468 atomic_read(&dcpage_flushes_xcall));
469#endif /* CONFIG_SMP */
470#endif /* CONFIG_DEBUG_DCFLUSH */
471}
472
473struct linux_prom_translation prom_trans[512] __read_mostly;
474unsigned int prom_trans_ents __read_mostly;
475
476unsigned long kern_locked_tte_data;
477
478/* The obp translations are saved based on 8k pagesize, since obp can
479 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
480 * HI_OBP_ADDRESS range are handled in ktlb.S.
481 */
482static inline int in_obp_range(unsigned long vaddr)
483{
484 return (vaddr >= LOW_OBP_ADDRESS &&
485 vaddr < HI_OBP_ADDRESS);
486}
487
488static int cmp_ptrans(const void *a, const void *b)
489{
490 const struct linux_prom_translation *x = a, *y = b;
491
492 if (x->virt > y->virt)
493 return 1;
494 if (x->virt < y->virt)
495 return -1;
496 return 0;
497}
498
499/* Read OBP translations property into 'prom_trans[]'. */
500static void __init read_obp_translations(void)
501{
502 int n, node, ents, first, last, i;
503
504 node = prom_finddevice("/virtual-memory");
505 n = prom_getproplen(node, "translations");
506 if (unlikely(n == 0 || n == -1)) {
507 prom_printf("prom_mappings: Couldn't get size.\n");
508 prom_halt();
509 }
510 if (unlikely(n > sizeof(prom_trans))) {
511 prom_printf("prom_mappings: Size %d is too big.\n", n);
512 prom_halt();
513 }
514
515 if ((n = prom_getproperty(node, "translations",
516 (char *)&prom_trans[0],
517 sizeof(prom_trans))) == -1) {
518 prom_printf("prom_mappings: Couldn't get property.\n");
519 prom_halt();
520 }
521
522 n = n / sizeof(struct linux_prom_translation);
523
524 ents = n;
525
526 sort(prom_trans, ents, sizeof(struct linux_prom_translation),
527 cmp_ptrans, NULL);
528
529 /* Now kick out all the non-OBP entries. */
530 for (i = 0; i < ents; i++) {
531 if (in_obp_range(prom_trans[i].virt))
532 break;
533 }
534 first = i;
535 for (; i < ents; i++) {
536 if (!in_obp_range(prom_trans[i].virt))
537 break;
538 }
539 last = i;
540
541 for (i = 0; i < (last - first); i++) {
542 struct linux_prom_translation *src = &prom_trans[i + first];
543 struct linux_prom_translation *dest = &prom_trans[i];
544
545 *dest = *src;
546 }
547 for (; i < ents; i++) {
548 struct linux_prom_translation *dest = &prom_trans[i];
549 dest->virt = dest->size = dest->data = 0x0UL;
550 }
551
552 prom_trans_ents = last - first;
553
554 if (tlb_type == spitfire) {
555 /* Clear diag TTE bits. */
556 for (i = 0; i < prom_trans_ents; i++)
557 prom_trans[i].data &= ~0x0003fe0000000000UL;
558 }
559
560 /* Force execute bit on. */
561 for (i = 0; i < prom_trans_ents; i++)
562 prom_trans[i].data |= (tlb_type == hypervisor ?
563 _PAGE_EXEC_4V : _PAGE_EXEC_4U);
564}
565
566static void __init hypervisor_tlb_lock(unsigned long vaddr,
567 unsigned long pte,
568 unsigned long mmu)
569{
570 unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
571
572 if (ret != 0) {
573 prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
574 "errors with %lx\n", vaddr, 0, pte, mmu, ret);
575 prom_halt();
576 }
577}
578
579static unsigned long kern_large_tte(unsigned long paddr);
580
581static void __init remap_kernel(void)
582{
583 unsigned long phys_page, tte_vaddr, tte_data;
584 int i, tlb_ent = sparc64_highest_locked_tlbent();
585
586 tte_vaddr = (unsigned long) KERNBASE;
587 phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
588 tte_data = kern_large_tte(phys_page);
589
590 kern_locked_tte_data = tte_data;
591
592 /* Now lock us into the TLBs via Hypervisor or OBP. */
593 if (tlb_type == hypervisor) {
594 for (i = 0; i < num_kernel_image_mappings; i++) {
595 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
596 hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
597 tte_vaddr += 0x400000;
598 tte_data += 0x400000;
599 }
600 } else {
601 for (i = 0; i < num_kernel_image_mappings; i++) {
602 prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
603 prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
604 tte_vaddr += 0x400000;
605 tte_data += 0x400000;
606 }
607 sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
608 }
609 if (tlb_type == cheetah_plus) {
610 sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
611 CTX_CHEETAH_PLUS_NUC);
612 sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
613 sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
614 }
615}
616
617
618static void __init inherit_prom_mappings(void)
619{
620 /* Now fixup OBP's idea about where we really are mapped. */
621 printk("Remapping the kernel... ");
622 remap_kernel();
623 printk("done.\n");
624}
625
626void prom_world(int enter)
627{
628 if (!enter)
629 set_fs(get_fs());
630
631 __asm__ __volatile__("flushw");
632}
633
634void __flush_dcache_range(unsigned long start, unsigned long end)
635{
636 unsigned long va;
637
638 if (tlb_type == spitfire) {
639 int n = 0;
640
641 for (va = start; va < end; va += 32) {
642 spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
643 if (++n >= 512)
644 break;
645 }
646 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
647 start = __pa(start);
648 end = __pa(end);
649 for (va = start; va < end; va += 32)
650 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
651 "membar #Sync"
652 : /* no outputs */
653 : "r" (va),
654 "i" (ASI_DCACHE_INVALIDATE));
655 }
656}
657EXPORT_SYMBOL(__flush_dcache_range);
658
659/* get_new_mmu_context() uses "cache + 1". */
660DEFINE_SPINLOCK(ctx_alloc_lock);
661unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
662#define MAX_CTX_NR (1UL << CTX_NR_BITS)
663#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
664DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
665
666/* Caller does TLB context flushing on local CPU if necessary.
667 * The caller also ensures that CTX_VALID(mm->context) is false.
668 *
669 * We must be careful about boundary cases so that we never
670 * let the user have CTX 0 (nucleus) or we ever use a CTX
671 * version of zero (and thus NO_CONTEXT would not be caught
672 * by version mis-match tests in mmu_context.h).
673 *
674 * Always invoked with interrupts disabled.
675 */
676void get_new_mmu_context(struct mm_struct *mm)
677{
678 unsigned long ctx, new_ctx;
679 unsigned long orig_pgsz_bits;
680 int new_version;
681
682 spin_lock(&ctx_alloc_lock);
683 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
684 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
685 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
686 new_version = 0;
687 if (new_ctx >= (1 << CTX_NR_BITS)) {
688 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
689 if (new_ctx >= ctx) {
690 int i;
691 new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
692 CTX_FIRST_VERSION;
693 if (new_ctx == 1)
694 new_ctx = CTX_FIRST_VERSION;
695
696 /* Don't call memset, for 16 entries that's just
697 * plain silly...
698 */
699 mmu_context_bmap[0] = 3;
700 mmu_context_bmap[1] = 0;
701 mmu_context_bmap[2] = 0;
702 mmu_context_bmap[3] = 0;
703 for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
704 mmu_context_bmap[i + 0] = 0;
705 mmu_context_bmap[i + 1] = 0;
706 mmu_context_bmap[i + 2] = 0;
707 mmu_context_bmap[i + 3] = 0;
708 }
709 new_version = 1;
710 goto out;
711 }
712 }
713 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
714 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
715out:
716 tlb_context_cache = new_ctx;
717 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
718 spin_unlock(&ctx_alloc_lock);
719
720 if (unlikely(new_version))
721 smp_new_mmu_context_version();
722}
723
724static int numa_enabled = 1;
725static int numa_debug;
726
727static int __init early_numa(char *p)
728{
729 if (!p)
730 return 0;
731
732 if (strstr(p, "off"))
733 numa_enabled = 0;
734
735 if (strstr(p, "debug"))
736 numa_debug = 1;
737
738 return 0;
739}
740early_param("numa", early_numa);
741
742#define numadbg(f, a...) \
743do { if (numa_debug) \
744 printk(KERN_INFO f, ## a); \
745} while (0)
746
747static void __init find_ramdisk(unsigned long phys_base)
748{
749#ifdef CONFIG_BLK_DEV_INITRD
750 if (sparc_ramdisk_image || sparc_ramdisk_image64) {
751 unsigned long ramdisk_image;
752
753 /* Older versions of the bootloader only supported a
754 * 32-bit physical address for the ramdisk image
755 * location, stored at sparc_ramdisk_image. Newer
756 * SILO versions set sparc_ramdisk_image to zero and
757 * provide a full 64-bit physical address at
758 * sparc_ramdisk_image64.
759 */
760 ramdisk_image = sparc_ramdisk_image;
761 if (!ramdisk_image)
762 ramdisk_image = sparc_ramdisk_image64;
763
764 /* Another bootloader quirk. The bootloader normalizes
765 * the physical address to KERNBASE, so we have to
766 * factor that back out and add in the lowest valid
767 * physical page address to get the true physical address.
768 */
769 ramdisk_image -= KERNBASE;
770 ramdisk_image += phys_base;
771
772 numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
773 ramdisk_image, sparc_ramdisk_size);
774
775 initrd_start = ramdisk_image;
776 initrd_end = ramdisk_image + sparc_ramdisk_size;
777
778 memblock_reserve(initrd_start, sparc_ramdisk_size);
779
780 initrd_start += PAGE_OFFSET;
781 initrd_end += PAGE_OFFSET;
782 }
783#endif
784}
785
786struct node_mem_mask {
787 unsigned long mask;
788 unsigned long val;
789};
790static struct node_mem_mask node_masks[MAX_NUMNODES];
791static int num_node_masks;
792
793#ifdef CONFIG_NEED_MULTIPLE_NODES
794
795int numa_cpu_lookup_table[NR_CPUS];
796cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
797
798struct mdesc_mblock {
799 u64 base;
800 u64 size;
801 u64 offset; /* RA-to-PA */
802};
803static struct mdesc_mblock *mblocks;
804static int num_mblocks;
805static int find_numa_node_for_addr(unsigned long pa,
806 struct node_mem_mask *pnode_mask);
807
808static unsigned long __init ra_to_pa(unsigned long addr)
809{
810 int i;
811
812 for (i = 0; i < num_mblocks; i++) {
813 struct mdesc_mblock *m = &mblocks[i];
814
815 if (addr >= m->base &&
816 addr < (m->base + m->size)) {
817 addr += m->offset;
818 break;
819 }
820 }
821 return addr;
822}
823
824static int __init find_node(unsigned long addr)
825{
826 static bool search_mdesc = true;
827 static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
828 static int last_index;
829 int i;
830
831 addr = ra_to_pa(addr);
832 for (i = 0; i < num_node_masks; i++) {
833 struct node_mem_mask *p = &node_masks[i];
834
835 if ((addr & p->mask) == p->val)
836 return i;
837 }
838 /* The following condition has been observed on LDOM guests because
839 * node_masks only contains the best latency mask and value.
840 * LDOM guest's mdesc can contain a single latency group to
841 * cover multiple address range. Print warning message only if the
842 * address cannot be found in node_masks nor mdesc.
843 */
844 if ((search_mdesc) &&
845 ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
846 /* find the available node in the mdesc */
847 last_index = find_numa_node_for_addr(addr, &last_mem_mask);
848 numadbg("find_node: latency group for address 0x%lx is %d\n",
849 addr, last_index);
850 if ((last_index < 0) || (last_index >= num_node_masks)) {
851 /* WARN_ONCE() and use default group 0 */
852 WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
853 search_mdesc = false;
854 last_index = 0;
855 }
856 }
857
858 return last_index;
859}
860
861static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
862{
863 *nid = find_node(start);
864 start += PAGE_SIZE;
865 while (start < end) {
866 int n = find_node(start);
867
868 if (n != *nid)
869 break;
870 start += PAGE_SIZE;
871 }
872
873 if (start > end)
874 start = end;
875
876 return start;
877}
878#endif
879
880/* This must be invoked after performing all of the necessary
881 * memblock_set_node() calls for 'nid'. We need to be able to get
882 * correct data from get_pfn_range_for_nid().
883 */
884static void __init allocate_node_data(int nid)
885{
886 struct pglist_data *p;
887 unsigned long start_pfn, end_pfn;
888#ifdef CONFIG_NEED_MULTIPLE_NODES
889 unsigned long paddr;
890
891 paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
892 if (!paddr) {
893 prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
894 prom_halt();
895 }
896 NODE_DATA(nid) = __va(paddr);
897 memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
898
899 NODE_DATA(nid)->node_id = nid;
900#endif
901
902 p = NODE_DATA(nid);
903
904 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
905 p->node_start_pfn = start_pfn;
906 p->node_spanned_pages = end_pfn - start_pfn;
907}
908
909static void init_node_masks_nonnuma(void)
910{
911#ifdef CONFIG_NEED_MULTIPLE_NODES
912 int i;
913#endif
914
915 numadbg("Initializing tables for non-numa.\n");
916
917 node_masks[0].mask = node_masks[0].val = 0;
918 num_node_masks = 1;
919
920#ifdef CONFIG_NEED_MULTIPLE_NODES
921 for (i = 0; i < NR_CPUS; i++)
922 numa_cpu_lookup_table[i] = 0;
923
924 cpumask_setall(&numa_cpumask_lookup_table[0]);
925#endif
926}
927
928#ifdef CONFIG_NEED_MULTIPLE_NODES
929struct pglist_data *node_data[MAX_NUMNODES];
930
931EXPORT_SYMBOL(numa_cpu_lookup_table);
932EXPORT_SYMBOL(numa_cpumask_lookup_table);
933EXPORT_SYMBOL(node_data);
934
935struct mdesc_mlgroup {
936 u64 node;
937 u64 latency;
938 u64 match;
939 u64 mask;
940};
941static struct mdesc_mlgroup *mlgroups;
942static int num_mlgroups;
943
944static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
945 u32 cfg_handle)
946{
947 u64 arc;
948
949 mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
950 u64 target = mdesc_arc_target(md, arc);
951 const u64 *val;
952
953 val = mdesc_get_property(md, target,
954 "cfg-handle", NULL);
955 if (val && *val == cfg_handle)
956 return 0;
957 }
958 return -ENODEV;
959}
960
961static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
962 u32 cfg_handle)
963{
964 u64 arc, candidate, best_latency = ~(u64)0;
965
966 candidate = MDESC_NODE_NULL;
967 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
968 u64 target = mdesc_arc_target(md, arc);
969 const char *name = mdesc_node_name(md, target);
970 const u64 *val;
971
972 if (strcmp(name, "pio-latency-group"))
973 continue;
974
975 val = mdesc_get_property(md, target, "latency", NULL);
976 if (!val)
977 continue;
978
979 if (*val < best_latency) {
980 candidate = target;
981 best_latency = *val;
982 }
983 }
984
985 if (candidate == MDESC_NODE_NULL)
986 return -ENODEV;
987
988 return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
989}
990
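/* Resolve the NUMA node of a PCI controller device node by matching
 * its cfg-handle against the PIO latency groups in the machine
 * description.  Returns -1 when no NUMA information is available.
 */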
991int of_node_to_nid(struct device_node *dp)
992{
993 const struct linux_prom64_registers *regs;
994 struct mdesc_handle *md;
995 u32 cfg_handle;
996 int count, nid;
997 u64 grp;
998
999 /* This is the right thing to do on currently supported
1000 * SUN4U NUMA platforms as well, as the PCI controller does
1001 * not sit behind any particular memory controller.
1002 */
1003 if (!mlgroups)
1004 return -1;
1005
1006 regs = of_get_property(dp, "reg", NULL);
1007 if (!regs)
1008 return -1;
1009
1010 cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1011
1012 md = mdesc_grab();
1013
1014 count = 0;
1015 nid = -1;
1016 mdesc_for_each_node_by_name(md, grp, "group") {
1017 if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1018 nid = count;
1019 break;
1020 }
1021 count++;
1022 }
1023
1024 mdesc_release(md);
1025
1026 return nid;
1027}
1028
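/* Assign every memblock memory region to a NUMA node, splitting
 * regions that span more than one node.
 */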
1029static void __init add_node_ranges(void)
1030{
1031 struct memblock_region *reg;
1032
1033 for_each_memblock(memory, reg) {
1034 unsigned long size = reg->size;
1035 unsigned long start, end;
1036
1037 start = reg->base;
1038 end = start + size;
1039 while (start < end) {
1040 unsigned long this_end;
1041 int nid;
1042
1043 this_end = memblock_nid_range(start, end, &nid);
1044
1045 numadbg("Setting memblock NUMA node nid[%d] "
1046 "start[%lx] end[%lx]\n",
1047 nid, start, this_end);
1048
1049 memblock_set_node(start, this_end - start,
1050 &memblock.memory, nid);
1051 start = this_end;
1052 }
1053 }
1054}
1055
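/* Count the "memory-latency-group" nodes in the machine description
 * and copy their latency, address-match and address-mask properties
 * into the mlgroups[] array.
 */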
1056static int __init grab_mlgroups(struct mdesc_handle *md)
1057{
1058 unsigned long paddr;
1059 int count = 0;
1060 u64 node;
1061
1062 mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1063 count++;
1064 if (!count)
1065 return -ENOENT;
1066
1067 paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
1068 SMP_CACHE_BYTES);
1069 if (!paddr)
1070 return -ENOMEM;
1071
1072 mlgroups = __va(paddr);
1073 num_mlgroups = count;
1074
1075 count = 0;
1076 mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1077 struct mdesc_mlgroup *m = &mlgroups[count++];
1078 const u64 *val;
1079
1080 m->node = node;
1081
1082 val = mdesc_get_property(md, node, "latency", NULL);
1083 m->latency = *val;
1084 val = mdesc_get_property(md, node, "address-match", NULL);
1085 m->match = *val;
1086 val = mdesc_get_property(md, node, "address-mask", NULL);
1087 m->mask = *val;
1088
1089 numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1090 "match[%llx] mask[%llx]\n",
1091 count - 1, m->node, m->latency, m->match, m->mask);
1092 }
1093
1094 return 0;
1095}
1096
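/* Count the "mblock" nodes in the machine description and copy their
 * base, size and RA-to-PA offset into the mblocks[] array.
 */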
1097static int __init grab_mblocks(struct mdesc_handle *md)
1098{
1099 unsigned long paddr;
1100 int count = 0;
1101 u64 node;
1102
1103 mdesc_for_each_node_by_name(md, node, "mblock")
1104 count++;
1105 if (!count)
1106 return -ENOENT;
1107
1108 paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
1109 SMP_CACHE_BYTES);
1110 if (!paddr)
1111 return -ENOMEM;
1112
1113 mblocks = __va(paddr);
1114 num_mblocks = count;
1115
1116 count = 0;
1117 mdesc_for_each_node_by_name(md, node, "mblock") {
1118 struct mdesc_mblock *m = &mblocks[count++];
1119 const u64 *val;
1120
1121 val = mdesc_get_property(md, node, "base", NULL);
1122 m->base = *val;
1123 val = mdesc_get_property(md, node, "size", NULL);
1124 m->size = *val;
1125 val = mdesc_get_property(md, node,
1126 "address-congruence-offset", NULL);
1127
1128 /* The address-congruence-offset property is optional.
1129 * Explicitly zero it to identify this.
1130 */
1131 if (val)
1132 m->offset = *val;
1133 else
1134 m->offset = 0UL;
1135
1136 numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1137 count - 1, m->base, m->size, m->offset);
1138 }
1139
1140 return 0;
1141}
1142
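/* Compute the mask of cpus belonging to NUMA group 'grp' by following
 * the group's back arcs to "cpu" nodes.
 */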
1143static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1144 u64 grp, cpumask_t *mask)
1145{
1146 u64 arc;
1147
1148 cpumask_clear(mask);
1149
1150 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1151 u64 target = mdesc_arc_target(md, arc);
1152 const char *name = mdesc_node_name(md, target);
1153 const u64 *id;
1154
1155 if (strcmp(name, "cpu"))
1156 continue;
1157 id = mdesc_get_property(md, target, "id", NULL);
1158 if (*id < nr_cpu_ids)
1159 cpumask_set_cpu(*id, mask);
1160 }
1161}
1162
1163static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1164{
1165 int i;
1166
1167 for (i = 0; i < num_mlgroups; i++) {
1168 struct mdesc_mlgroup *m = &mlgroups[i];
1169 if (m->node == node)
1170 return m;
1171 }
1172 return NULL;
1173}
1174
1175int __node_distance(int from, int to)
1176{
1177 if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1178 pr_warn("Returning default NUMA distance value for %d->%d\n",
1179 from, to);
1180 return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1181 }
1182 return numa_latency[from][to];
1183}
1184
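/* Search the machine description latency groups for one whose address
 * mask/match covers 'pa'.  On success the mask and match value are
 * stored in *pnode_mask and the group index is returned, otherwise -1.
 */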
1185static int find_numa_node_for_addr(unsigned long pa,
1186 struct node_mem_mask *pnode_mask)
1187{
1188 struct mdesc_handle *md = mdesc_grab();
1189 u64 node, arc;
1190 int i = 0;
1191
1192 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1193 if (node == MDESC_NODE_NULL)
1194 goto out;
1195
1196 mdesc_for_each_node_by_name(md, node, "group") {
1197 mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
1198 u64 target = mdesc_arc_target(md, arc);
1199 struct mdesc_mlgroup *m = find_mlgroup(target);
1200
1201 if (!m)
1202 continue;
1203 if ((pa & m->mask) == m->match) {
1204 if (pnode_mask) {
1205 pnode_mask->mask = m->mask;
1206 pnode_mask->val = m->match;
1207 }
1208 mdesc_release(md);
1209 return i;
1210 }
1211 }
1212 i++;
1213 }
1214
1215out:
1216 mdesc_release(md);
1217 return -1;
1218}
1219
1220static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1221{
1222 int i;
1223
1224 for (i = 0; i < MAX_NUMNODES; i++) {
1225 struct node_mem_mask *n = &node_masks[i];
1226
1227 if ((grp->mask == n->mask) && (grp->match == n->val))
1228 break;
1229 }
1230 return i;
1231}
1232
1233static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1234 u64 grp, int index)
1235{
1236 u64 arc;
1237
1238 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1239 int tnode;
1240 u64 target = mdesc_arc_target(md, arc);
1241 struct mdesc_mlgroup *m = find_mlgroup(target);
1242
1243 if (!m)
1244 continue;
1245 tnode = find_best_numa_node_for_mlgroup(m);
1246 if (tnode == MAX_NUMNODES)
1247 continue;
1248 numa_latency[index][tnode] = m->latency;
1249 }
1250}
1251
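/* Select the lowest latency memory-latency-group reachable from 'grp'
 * and record its address mask and match value in node_masks[index].
 */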
1252static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1253 int index)
1254{
1255 struct mdesc_mlgroup *candidate = NULL;
1256 u64 arc, best_latency = ~(u64)0;
1257 struct node_mem_mask *n;
1258
1259 mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1260 u64 target = mdesc_arc_target(md, arc);
1261 struct mdesc_mlgroup *m = find_mlgroup(target);
1262 if (!m)
1263 continue;
1264 if (m->latency < best_latency) {
1265 candidate = m;
1266 best_latency = m->latency;
1267 }
1268 }
1269 if (!candidate)
1270 return -ENOENT;
1271
1272 if (num_node_masks != index) {
1273 printk(KERN_ERR "Inconsistent NUMA state, "
1274 "index[%d] != num_node_masks[%d]\n",
1275 index, num_node_masks);
1276 return -EINVAL;
1277 }
1278
1279 n = &node_masks[num_node_masks++];
1280
1281 n->mask = candidate->mask;
1282 n->val = candidate->match;
1283
1284 numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%llx])\n",
1285 index, n->mask, n->val, candidate->latency);
1286
1287 return 0;
1288}
1289
1290static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1291 int index)
1292{
1293 cpumask_t mask;
1294 int cpu;
1295
1296 numa_parse_mdesc_group_cpus(md, grp, &mask);
1297
1298 for_each_cpu(cpu, &mask)
1299 numa_cpu_lookup_table[cpu] = index;
1300 cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1301
1302 if (numa_debug) {
1303 printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1304 for_each_cpu(cpu, &mask)
1305 printk("%d ", cpu);
1306 printk("]\n");
1307 }
1308
1309 return numa_attach_mlgroup(md, grp, index);
1310}
1311
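/* Build the NUMA topology from the machine description: grab the
 * mblocks and memory latency groups, create one node per "group"
 * node, fill in and normalize the inter-node latency matrix, and
 * assign memory ranges to the resulting nodes.
 */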
1312static int __init numa_parse_mdesc(void)
1313{
1314 struct mdesc_handle *md = mdesc_grab();
1315 int i, j, err, count;
1316 u64 node;
1317
1318 node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1319 if (node == MDESC_NODE_NULL) {
1320 mdesc_release(md);
1321 return -ENOENT;
1322 }
1323
1324 err = grab_mblocks(md);
1325 if (err < 0)
1326 goto out;
1327
1328 err = grab_mlgroups(md);
1329 if (err < 0)
1330 goto out;
1331
1332 count = 0;
1333 mdesc_for_each_node_by_name(md, node, "group") {
1334 err = numa_parse_mdesc_group(md, node, count);
1335 if (err < 0)
1336 break;
1337 count++;
1338 }
1339
1340 count = 0;
1341 mdesc_for_each_node_by_name(md, node, "group") {
1342 find_numa_latencies_for_group(md, node, count);
1343 count++;
1344 }
1345
1346 /* Normalize numa latency matrix according to ACPI SLIT spec. */
1347 for (i = 0; i < MAX_NUMNODES; i++) {
1348 u64 self_latency = numa_latency[i][i];
1349
1350 for (j = 0; j < MAX_NUMNODES; j++) {
1351 numa_latency[i][j] =
1352 (numa_latency[i][j] * LOCAL_DISTANCE) /
1353 self_latency;
1354 }
1355 }
1356
1357 add_node_ranges();
1358
1359 for (i = 0; i < num_node_masks; i++) {
1360 allocate_node_data(i);
1361 node_set_online(i);
1362 }
1363
1364 err = 0;
1365out:
1366 mdesc_release(md);
1367 return err;
1368}
1369
1370static int __init numa_parse_jbus(void)
1371{
1372 unsigned long cpu, index;
1373
1374 /* NUMA node id is encoded in bits 36 and higher, and there is
1375 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1376 */
1377 index = 0;
1378 for_each_present_cpu(cpu) {
1379 numa_cpu_lookup_table[cpu] = index;
1380 cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1381 node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1382 node_masks[index].val = cpu << 36UL;
1383
1384 index++;
1385 }
1386 num_node_masks = index;
1387
1388 add_node_ranges();
1389
1390 for (index = 0; index < num_node_masks; index++) {
1391 allocate_node_data(index);
1392 node_set_online(index);
1393 }
1394
1395 return 0;
1396}
1397
1398static int __init numa_parse_sun4u(void)
1399{
1400 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1401 unsigned long ver;
1402
1403 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
1404 if ((ver >> 32UL) == __JALAPENO_ID ||
1405 (ver >> 32UL) == __SERRANO_ID)
1406 return numa_parse_jbus();
1407 }
1408 return -1;
1409}
1410
1411static int __init bootmem_init_numa(void)
1412{
1413 int i, j;
1414 int err = -1;
1415
1416 numadbg("bootmem_init_numa()\n");
1417
1418 /* Some sane defaults for numa latency values */
1419 for (i = 0; i < MAX_NUMNODES; i++) {
1420 for (j = 0; j < MAX_NUMNODES; j++)
1421 numa_latency[i][j] = (i == j) ?
1422 LOCAL_DISTANCE : REMOTE_DISTANCE;
1423 }
1424
1425 if (numa_enabled) {
1426 if (tlb_type == hypervisor)
1427 err = numa_parse_mdesc();
1428 else
1429 err = numa_parse_sun4u();
1430 }
1431 return err;
1432}
1433
1434#else
1435
1436static int bootmem_init_numa(void)
1437{
1438 return -1;
1439}
1440
1441#endif
1442
1443static void __init bootmem_init_nonnuma(void)
1444{
1445 unsigned long top_of_ram = memblock_end_of_DRAM();
1446 unsigned long total_ram = memblock_phys_mem_size();
1447
1448 numadbg("bootmem_init_nonnuma()\n");
1449
1450 printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1451 top_of_ram, total_ram);
1452 printk(KERN_INFO "Memory hole size: %ldMB\n",
1453 (top_of_ram - total_ram) >> 20);
1454
1455 init_node_masks_nonnuma();
1456 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
1457 allocate_node_data(0);
1458 node_set_online(0);
1459}
1460
1461static unsigned long __init bootmem_init(unsigned long phys_base)
1462{
1463 unsigned long end_pfn;
1464
1465 end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1466 max_pfn = max_low_pfn = end_pfn;
1467 min_low_pfn = (phys_base >> PAGE_SHIFT);
1468
1469 if (bootmem_init_numa() < 0)
1470 bootmem_init_nonnuma();
1471
1472 /* Dump memblock with node info. */
1473 memblock_dump_all();
1474
1475 /* XXX cpu notifier XXX */
1476
1477 sparse_memory_present_with_active_regions(MAX_NUMNODES);
1478 sparse_init();
1479
1480 return end_pfn;
1481}
1482
1483static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1484static int pall_ents __initdata;
1485
1486static unsigned long max_phys_bits = 40;
1487
1488bool kern_addr_valid(unsigned long addr)
1489{
1490 pgd_t *pgd;
1491 pud_t *pud;
1492 pmd_t *pmd;
1493 pte_t *pte;
1494
1495 if ((long)addr < 0L) {
1496 unsigned long pa = __pa(addr);
1497
1498 if ((pa >> max_phys_bits) != 0UL)
1499 return false;
1500
1501 return pfn_valid(pa >> PAGE_SHIFT);
1502 }
1503
1504 if (addr >= (unsigned long) KERNBASE &&
1505 addr < (unsigned long)&_end)
1506 return true;
1507
1508 pgd = pgd_offset_k(addr);
1509 if (pgd_none(*pgd))
1510 return 0;
1511
1512 pud = pud_offset(pgd, addr);
1513 if (pud_none(*pud))
1514 return 0;
1515
1516 if (pud_large(*pud))
1517 return pfn_valid(pud_pfn(*pud));
1518
1519 pmd = pmd_offset(pud, addr);
1520 if (pmd_none(*pmd))
1521 return 0;
1522
1523 if (pmd_large(*pmd))
1524 return pfn_valid(pmd_pfn(*pmd));
1525
1526 pte = pte_offset_kernel(pmd, addr);
1527 if (pte_none(*pte))
1528 return 0;
1529
1530 return pfn_valid(pte_pfn(*pte));
1531}
1532EXPORT_SYMBOL(kern_addr_valid);
1533
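/* Map part of the kernel linear region with PUD level huge entries,
 * using 16GB TTEs when the range is 16GB aligned and large enough,
 * and a 2GB TTE otherwise.  Returns the virtual address at which to
 * continue mapping.
 */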
1534static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1535 unsigned long vend,
1536 pud_t *pud)
1537{
1538 const unsigned long mask16gb = (1UL << 34) - 1UL;
1539 u64 pte_val = vstart;
1540
1541 /* Each PUD is 8GB */
1542 if ((vstart & mask16gb) ||
1543 (vend - vstart <= mask16gb)) {
1544 pte_val ^= kern_linear_pte_xor[2];
1545 pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1546
1547 return vstart + PUD_SIZE;
1548 }
1549
1550 pte_val ^= kern_linear_pte_xor[3];
1551 pte_val |= _PAGE_PUD_HUGE;
1552
1553 vend = vstart + mask16gb + 1UL;
1554 while (vstart < vend) {
1555 pud_val(*pud) = pte_val;
1556
1557 pte_val += PUD_SIZE;
1558 vstart += PUD_SIZE;
1559 pud++;
1560 }
1561 return vstart;
1562}
1563
1564static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1565 bool guard)
1566{
1567 if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1568 return true;
1569
1570 return false;
1571}
1572
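/* Map part of the kernel linear region with PMD level huge entries,
 * choosing 4MB, 256MB or 2GB TTEs based on the alignment and size of
 * the remaining range.  Returns the virtual address at which to
 * continue mapping.
 */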
1573static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1574 unsigned long vend,
1575 pmd_t *pmd)
1576{
1577 const unsigned long mask256mb = (1UL << 28) - 1UL;
1578 const unsigned long mask2gb = (1UL << 31) - 1UL;
1579 u64 pte_val = vstart;
1580
1581 /* Each PMD is 8MB */
1582 if ((vstart & mask256mb) ||
1583 (vend - vstart <= mask256mb)) {
1584 pte_val ^= kern_linear_pte_xor[0];
1585 pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1586
1587 return vstart + PMD_SIZE;
1588 }
1589
1590 if ((vstart & mask2gb) ||
1591 (vend - vstart <= mask2gb)) {
1592 pte_val ^= kern_linear_pte_xor[1];
1593 pte_val |= _PAGE_PMD_HUGE;
1594 vend = vstart + mask256mb + 1UL;
1595 } else {
1596 pte_val ^= kern_linear_pte_xor[2];
1597 pte_val |= _PAGE_PMD_HUGE;
1598 vend = vstart + mask2gb + 1UL;
1599 }
1600
1601 while (vstart < vend) {
1602 pmd_val(*pmd) = pte_val;
1603
1604 pte_val += PMD_SIZE;
1605 vstart += PMD_SIZE;
1606 pmd++;
1607 }
1608
1609 return vstart;
1610}
1611
1612static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1613 bool guard)
1614{
1615 if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1616 return true;
1617
1618 return false;
1619}
1620
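/* Build kernel page tables for the linear mapping of physical range
 * [pstart, pend), allocating intermediate page table pages from
 * bootmem and using huge PUD/PMD mappings when 'use_huge' allows.
 * Returns the number of bytes allocated for page tables.
 */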
1621static unsigned long __ref kernel_map_range(unsigned long pstart,
1622 unsigned long pend, pgprot_t prot,
1623 bool use_huge)
1624{
1625 unsigned long vstart = PAGE_OFFSET + pstart;
1626 unsigned long vend = PAGE_OFFSET + pend;
1627 unsigned long alloc_bytes = 0UL;
1628
1629 if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1630 prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1631 vstart, vend);
1632 prom_halt();
1633 }
1634
1635 while (vstart < vend) {
1636 unsigned long this_end, paddr = __pa(vstart);
1637 pgd_t *pgd = pgd_offset_k(vstart);
1638 pud_t *pud;
1639 pmd_t *pmd;
1640 pte_t *pte;
1641
1642 if (pgd_none(*pgd)) {
1643 pud_t *new;
1644
1645 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1646 alloc_bytes += PAGE_SIZE;
1647 pgd_populate(&init_mm, pgd, new);
1648 }
1649 pud = pud_offset(pgd, vstart);
1650 if (pud_none(*pud)) {
1651 pmd_t *new;
1652
1653 if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1654 vstart = kernel_map_hugepud(vstart, vend, pud);
1655 continue;
1656 }
1657 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1658 alloc_bytes += PAGE_SIZE;
1659 pud_populate(&init_mm, pud, new);
1660 }
1661
1662 pmd = pmd_offset(pud, vstart);
1663 if (pmd_none(*pmd)) {
1664 pte_t *new;
1665
1666 if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1667 vstart = kernel_map_hugepmd(vstart, vend, pmd);
1668 continue;
1669 }
1670 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1671 alloc_bytes += PAGE_SIZE;
1672 pmd_populate_kernel(&init_mm, pmd, new);
1673 }
1674
1675 pte = pte_offset_kernel(pmd, vstart);
1676 this_end = (vstart + PMD_SIZE) & PMD_MASK;
1677 if (this_end > vend)
1678 this_end = vend;
1679
1680 while (vstart < this_end) {
1681 pte_val(*pte) = (paddr | pgprot_val(prot));
1682
1683 vstart += PAGE_SIZE;
1684 paddr += PAGE_SIZE;
1685 pte++;
1686 }
1687 }
1688
1689 return alloc_bytes;
1690}
1691
1692static void __init flush_all_kernel_tsbs(void)
1693{
1694 int i;
1695
1696 for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1697 struct tsb *ent = &swapper_tsb[i];
1698
1699 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1700 }
1701#ifndef CONFIG_DEBUG_PAGEALLOC
1702 for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1703 struct tsb *ent = &swapper_4m_tsb[i];
1704
1705 ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1706 }
1707#endif
1708}
1709
1710extern unsigned int kvmap_linear_patch[1];
1711
1712static void __init kernel_physical_mapping_init(void)
1713{
1714 unsigned long i, mem_alloced = 0UL;
1715 bool use_huge = true;
1716
1717#ifdef CONFIG_DEBUG_PAGEALLOC
1718 use_huge = false;
1719#endif
1720 for (i = 0; i < pall_ents; i++) {
1721 unsigned long phys_start, phys_end;
1722
1723 phys_start = pall[i].phys_addr;
1724 phys_end = phys_start + pall[i].reg_size;
1725
1726 mem_alloced += kernel_map_range(phys_start, phys_end,
1727 PAGE_KERNEL, use_huge);
1728 }
1729
1730 printk("Allocated %ld bytes for kernel page tables.\n",
1731 mem_alloced);
1732
1733 kvmap_linear_patch[0] = 0x01000000; /* nop */
1734 flushi(&kvmap_linear_patch[0]);
1735
1736 flush_all_kernel_tsbs();
1737
1738 __flush_tlb_all();
1739}
1740
1741#ifdef CONFIG_DEBUG_PAGEALLOC
1742void __kernel_map_pages(struct page *page, int numpages, int enable)
1743{
1744 unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1745 unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1746
1747 kernel_map_range(phys_start, phys_end,
1748 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1749
1750 flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1751 PAGE_OFFSET + phys_end);
1752
1753 /* We should perform an IPI and flush all TLBs,
1754 * but that can deadlock, so only flush the current cpu.
1755 */
1756 __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1757 PAGE_OFFSET + phys_end);
1758}
1759#endif
1760
1761unsigned long __init find_ecache_flush_span(unsigned long size)
1762{
1763 int i;
1764
1765 for (i = 0; i < pavail_ents; i++) {
1766 if (pavail[i].reg_size >= size)
1767 return pavail[i].phys_addr;
1768 }
1769
1770 return ~0UL;
1771}
1772
1773unsigned long PAGE_OFFSET;
1774EXPORT_SYMBOL(PAGE_OFFSET);
1775
1776unsigned long VMALLOC_END = 0x0000010000000000UL;
1777EXPORT_SYMBOL(VMALLOC_END);
1778
1779unsigned long sparc64_va_hole_top = 0xfffff80000000000UL;
1780unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1781
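/* Choose the virtual address hole boundaries, PAGE_OFFSET, VMALLOC_END
 * and max_phys_bits based on the virtual and physical address widths
 * supported by the cpu type.
 */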
1782static void __init setup_page_offset(void)
1783{
1784 if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1785 /* Cheetah/Panther support a full 64-bit virtual
1786 * address, so we can use all that our page tables
1787 * support.
1788 */
1789 sparc64_va_hole_top = 0xfff0000000000000UL;
1790 sparc64_va_hole_bottom = 0x0010000000000000UL;
1791
1792 max_phys_bits = 42;
1793 } else if (tlb_type == hypervisor) {
1794 switch (sun4v_chip_type) {
1795 case SUN4V_CHIP_NIAGARA1:
1796 case SUN4V_CHIP_NIAGARA2:
1797 /* T1 and T2 support 48-bit virtual addresses. */
1798 sparc64_va_hole_top = 0xffff800000000000UL;
1799 sparc64_va_hole_bottom = 0x0000800000000000UL;
1800
1801 max_phys_bits = 39;
1802 break;
1803 case SUN4V_CHIP_NIAGARA3:
1804 /* T3 supports 48-bit virtual addresses. */
1805 sparc64_va_hole_top = 0xffff800000000000UL;
1806 sparc64_va_hole_bottom = 0x0000800000000000UL;
1807
1808 max_phys_bits = 43;
1809 break;
1810 case SUN4V_CHIP_NIAGARA4:
1811 case SUN4V_CHIP_NIAGARA5:
1812 case SUN4V_CHIP_SPARC64X:
1813 case SUN4V_CHIP_SPARC_M6:
1814 /* T4 and later support 52-bit virtual addresses. */
1815 sparc64_va_hole_top = 0xfff8000000000000UL;
1816 sparc64_va_hole_bottom = 0x0008000000000000UL;
1817 max_phys_bits = 47;
1818 break;
1819 case SUN4V_CHIP_SPARC_M7:
1820 case SUN4V_CHIP_SPARC_SN:
1821 default:
1822 /* M7 and later support 52-bit virtual addresses. */
1823 sparc64_va_hole_top = 0xfff8000000000000UL;
1824 sparc64_va_hole_bottom = 0x0008000000000000UL;
1825 max_phys_bits = 49;
1826 break;
1827 }
1828 }
1829
1830 if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
1831 prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
1832 max_phys_bits);
1833 prom_halt();
1834 }
1835
1836 PAGE_OFFSET = sparc64_va_hole_top;
1837 VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
1838 (sparc64_va_hole_bottom >> 2));
1839
1840 pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
1841 PAGE_OFFSET, max_phys_bits);
1842 pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
1843 VMALLOC_START, VMALLOC_END);
1844 pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
1845 VMEMMAP_BASE, VMEMMAP_BASE << 1);
1846}
1847
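/* Patch the TSB access code so that TSB loads and stores use physical
 * addresses instead of virtual ones.
 */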
1848static void __init tsb_phys_patch(void)
1849{
1850 struct tsb_ldquad_phys_patch_entry *pquad;
1851 struct tsb_phys_patch_entry *p;
1852
1853 pquad = &__tsb_ldquad_phys_patch;
1854 while (pquad < &__tsb_ldquad_phys_patch_end) {
1855 unsigned long addr = pquad->addr;
1856
1857 if (tlb_type == hypervisor)
1858 *(unsigned int *) addr = pquad->sun4v_insn;
1859 else
1860 *(unsigned int *) addr = pquad->sun4u_insn;
1861 wmb();
1862 __asm__ __volatile__("flush %0"
1863 : /* no outputs */
1864 : "r" (addr));
1865
1866 pquad++;
1867 }
1868
1869 p = &__tsb_phys_patch;
1870 while (p < &__tsb_phys_patch_end) {
1871 unsigned long addr = p->addr;
1872
1873 *(unsigned int *) addr = p->insn;
1874 wmb();
1875 __asm__ __volatile__("flush %0"
1876 : /* no outputs */
1877 : "r" (addr));
1878
1879 p++;
1880 }
1881}
1882
1883/* Don't mark as init, we give this to the Hypervisor. */
1884#ifndef CONFIG_DEBUG_PAGEALLOC
1885#define NUM_KTSB_DESCR 2
1886#else
1887#define NUM_KTSB_DESCR 1
1888#endif
1889static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
1890
1891/* The swapper TSBs are loaded with a base sequence of:
1892 *
1893 * sethi %uhi(SYMBOL), REG1
1894 * sethi %hi(SYMBOL), REG2
1895 * or REG1, %ulo(SYMBOL), REG1
1896 * or REG2, %lo(SYMBOL), REG2
1897 * sllx REG1, 32, REG1
1898 * or REG1, REG2, REG1
1899 *
1900 * When we use physical addressing for the TSB accesses, we patch the
1901 * first four instructions in the above sequence.
1902 */
1903
1904static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
1905{
1906 unsigned long high_bits, low_bits;
1907
1908 high_bits = (pa >> 32) & 0xffffffff;
1909 low_bits = (pa >> 0) & 0xffffffff;
1910
1911 while (start < end) {
1912 unsigned int *ia = (unsigned int *)(unsigned long)*start;
1913
1914 ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
1915 __asm__ __volatile__("flush %0" : : "r" (ia));
1916
1917 ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
1918 __asm__ __volatile__("flush %0" : : "r" (ia + 1));
1919
1920 ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
1921 __asm__ __volatile__("flush %0" : : "r" (ia + 2));
1922
1923 ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
1924 __asm__ __volatile__("flush %0" : : "r" (ia + 3));
1925
1926 start++;
1927 }
1928}
1929
1930static void ktsb_phys_patch(void)
1931{
1932 extern unsigned int __swapper_tsb_phys_patch;
1933 extern unsigned int __swapper_tsb_phys_patch_end;
1934 unsigned long ktsb_pa;
1935
1936 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1937 patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
1938 &__swapper_tsb_phys_patch_end, ktsb_pa);
1939#ifndef CONFIG_DEBUG_PAGEALLOC
1940 {
1941 extern unsigned int __swapper_4m_tsb_phys_patch;
1942 extern unsigned int __swapper_4m_tsb_phys_patch_end;
1943 ktsb_pa = (kern_base +
1944 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1945 patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
1946 &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
1947 }
1948#endif
1949}
1950
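/* Fill in the hypervisor TSB descriptors for the kernel TSBs: one for
 * PAGE_SIZE mappings and, unless DEBUG_PAGEALLOC is enabled, a second
 * one for the large linear mappings.
 */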
1951static void __init sun4v_ktsb_init(void)
1952{
1953 unsigned long ktsb_pa;
1954
1955 /* First KTSB for PAGE_SIZE mappings. */
1956 ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
1957
1958 switch (PAGE_SIZE) {
1959 case 8 * 1024:
1960 default:
1961 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
1962 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
1963 break;
1964
1965 case 64 * 1024:
1966 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
1967 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
1968 break;
1969
1970 case 512 * 1024:
1971 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
1972 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
1973 break;
1974
1975 case 4 * 1024 * 1024:
1976 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1977 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1978 break;
1979 }
1980
1981 ktsb_descr[0].assoc = 1;
1982 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
1983 ktsb_descr[0].ctx_idx = 0;
1984 ktsb_descr[0].tsb_base = ktsb_pa;
1985 ktsb_descr[0].resv = 0;
1986
1987#ifndef CONFIG_DEBUG_PAGEALLOC
1988 /* Second KTSB for 4MB/256MB/2GB/16GB mappings. */
1989 ktsb_pa = (kern_base +
1990 ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
1991
1992 ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
1993 ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
1994 HV_PGSZ_MASK_256MB |
1995 HV_PGSZ_MASK_2GB |
1996 HV_PGSZ_MASK_16GB) &
1997 cpu_pgsz_mask);
1998 ktsb_descr[1].assoc = 1;
1999 ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2000 ktsb_descr[1].ctx_idx = 0;
2001 ktsb_descr[1].tsb_base = ktsb_pa;
2002 ktsb_descr[1].resv = 0;
2003#endif
2004}
2005
2006void sun4v_ktsb_register(void)
2007{
2008 unsigned long pa, ret;
2009
2010 pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2011
2012 ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2013 if (ret != 0) {
2014 prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2015 "errors with %lx\n", pa, ret);
2016 prom_halt();
2017 }
2018}
2019
2020static void __init sun4u_linear_pte_xor_finalize(void)
2021{
2022#ifndef CONFIG_DEBUG_PAGEALLOC
2023 /* This is where we would add Panther support for
2024 * 32MB and 256MB pages.
2025 */
2026#endif
2027}
2028
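/* Fill in kern_linear_pte_xor[] slots 1-3 (256MB, 2GB, 16GB) based on
 * the page sizes the cpu supports, falling back to the next smaller
 * size when a larger one is unavailable.
 */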
2029static void __init sun4v_linear_pte_xor_finalize(void)
2030{
2031 unsigned long pagecv_flag;
2032
2033 /* Bit 9 of the TTE is no longer the CV bit on the M7 processor; it
2034 * instead enables MCD errors. Do not set bit 9 on M7 processors.
2035 */
2036 switch (sun4v_chip_type) {
2037 case SUN4V_CHIP_SPARC_M7:
2038 case SUN4V_CHIP_SPARC_SN:
2039 pagecv_flag = 0x00;
2040 break;
2041 default:
2042 pagecv_flag = _PAGE_CV_4V;
2043 break;
2044 }
2045#ifndef CONFIG_DEBUG_PAGEALLOC
2046 if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2047 kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2048 PAGE_OFFSET;
2049 kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2050 _PAGE_P_4V | _PAGE_W_4V);
2051 } else {
2052 kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2053 }
2054
2055 if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2056 kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2057 PAGE_OFFSET;
2058 kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2059 _PAGE_P_4V | _PAGE_W_4V);
2060 } else {
2061 kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2062 }
2063
2064 if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2065 kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2066 PAGE_OFFSET;
2067 kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2068 _PAGE_P_4V | _PAGE_W_4V);
2069 } else {
2070 kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2071 }
2072#endif
2073}
2074
2075/* paging_init() sets up the page tables */
2076
2077static unsigned long last_valid_pfn;
2078
2079static void sun4u_pgprot_init(void);
2080static void sun4v_pgprot_init(void);
2081
2082static phys_addr_t __init available_memory(void)
2083{
2084 phys_addr_t available = 0ULL;
2085 phys_addr_t pa_start, pa_end;
2086 u64 i;
2087
2088 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2089 &pa_end, NULL)
2090 available = available + (pa_end - pa_start);
2091
2092 return available;
2093}
2094
2095#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
2096#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
2097#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2098#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2099#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2100#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2101
2102/* We need to exclude reserved regions. This exclusion will include
2103 * vmlinux and initrd. To be more precise, the initrd size could be used to
2104 * compute a new lower limit because it is freed later during initialization.
2105 */
2106static void __init reduce_memory(phys_addr_t limit_ram)
2107{
2108 phys_addr_t avail_ram = available_memory();
2109 phys_addr_t pa_start, pa_end;
2110 u64 i;
2111
2112 if (limit_ram >= avail_ram)
2113 return;
2114
2115 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
2116 &pa_end, NULL) {
2117 phys_addr_t region_size = pa_end - pa_start;
2118 phys_addr_t clip_start = pa_start;
2119
2120 avail_ram = avail_ram - region_size;
2121 /* Are we consuming too much? */
2122 if (avail_ram < limit_ram) {
2123 phys_addr_t give_back = limit_ram - avail_ram;
2124
2125 region_size = region_size - give_back;
2126 clip_start = clip_start + give_back;
2127 }
2128
2129 memblock_remove(clip_start, region_size);
2130
2131 if (avail_ram <= limit_ram)
2132 break;
2133 i = 0UL;
2134 }
2135}
2136
2137void __init paging_init(void)
2138{
2139 unsigned long end_pfn, shift, phys_base;
2140 unsigned long real_end, i;
2141
2142 setup_page_offset();
2143
2144 /* These build time checks make sure that the dcache_dirty_cpu()
2145 * page->flags usage will work.
2146 *
2147 * When a page gets marked as dcache-dirty, we store the
2148 * cpu number starting at bit 32 in the page->flags. Also,
2149 * functions like clear_dcache_dirty_cpu use the cpu mask
2150 * in 13-bit signed-immediate instruction fields.
2151 */
2152
2153 /*
2154 * Page flags must not reach into upper 32 bits that are used
2155 * for the cpu number
2156 */
2157 BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2158
2159 /*
2160 * The bit fields placed in the high range must not reach below
2161 * the 32 bit boundary. Otherwise we cannot place the cpu field
2162 * at the 32 bit boundary.
2163 */
2164 BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2165 ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2166
2167 BUILD_BUG_ON(NR_CPUS > 4096);
2168
2169 kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2170 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2171
2172 /* Invalidate both kernel TSBs. */
2173 memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2174#ifndef CONFIG_DEBUG_PAGEALLOC
2175 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2176#endif
2177
2178 /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2179 * bit on M7 processor. This is a conflicting usage of the same
2180 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2181 * Detection errors on all pages and this will lead to problems
2182 * later. The kernel does not run with MCD enabled and hence the
2183 * rest of the required steps to fully configure memory corruption
2184 * detection are not taken. We need to ensure TTE.mcde is not
2185 * set on M7 processor. Compute the value of cacheability
2186 * flag for use later taking this into consideration.
2187 */
2188 switch (sun4v_chip_type) {
2189 case SUN4V_CHIP_SPARC_M7:
2190 case SUN4V_CHIP_SPARC_SN:
2191 page_cache4v_flag = _PAGE_CP_4V;
2192 break;
2193 default:
2194 page_cache4v_flag = _PAGE_CACHE_4V;
2195 break;
2196 }
2197
2198 if (tlb_type == hypervisor)
2199 sun4v_pgprot_init();
2200 else
2201 sun4u_pgprot_init();
2202
2203 if (tlb_type == cheetah_plus ||
2204 tlb_type == hypervisor) {
2205 tsb_phys_patch();
2206 ktsb_phys_patch();
2207 }
2208
2209 if (tlb_type == hypervisor)
2210 sun4v_patch_tlb_handlers();
2211
2212 /* Find available physical memory...
2213 *
2214 * Read it twice in order to work around a bug in openfirmware.
2215 * The call to grab this table itself can cause openfirmware to
2216 * allocate memory, which in turn can take away some space from
2217 * the list of available memory. Reading it twice makes sure
2218 * we really do get the final value.
2219 */
2220 read_obp_translations();
2221 read_obp_memory("reg", &pall[0], &pall_ents);
2222 read_obp_memory("available", &pavail[0], &pavail_ents);
2223 read_obp_memory("available", &pavail[0], &pavail_ents);
2224
2225 phys_base = 0xffffffffffffffffUL;
2226 for (i = 0; i < pavail_ents; i++) {
2227 phys_base = min(phys_base, pavail[i].phys_addr);
2228 memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2229 }
2230
2231 memblock_reserve(kern_base, kern_size);
2232
2233 find_ramdisk(phys_base);
2234
2235 if (cmdline_memory_size)
2236 reduce_memory(cmdline_memory_size);
2237
2238 memblock_allow_resize();
2239 memblock_dump_all();
2240
2241 set_bit(0, mmu_context_bmap);
2242
2243 shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2244
2245 real_end = (unsigned long)_end;
2246 num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2247 printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2248 num_kernel_image_mappings);
2249
2250 /* Set kernel pgd to upper alias so physical page computations
2251 * work.
2252 */
2253 init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2254
2255 memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2256
2257 inherit_prom_mappings();
2258
2259 /* Ok, we can use our TLB miss and window trap handlers safely. */
2260 setup_tba();
2261
2262 __flush_tlb_all();
2263
2264 prom_build_devicetree();
2265 of_populate_present_mask();
2266#ifndef CONFIG_SMP
2267 of_fill_in_cpu_data();
2268#endif
2269
2270 if (tlb_type == hypervisor) {
2271 sun4v_mdesc_init();
2272 mdesc_populate_present_mask(cpu_all_mask);
2273#ifndef CONFIG_SMP
2274 mdesc_fill_in_cpu_data(cpu_all_mask);
2275#endif
2276 mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2277
2278 sun4v_linear_pte_xor_finalize();
2279
2280 sun4v_ktsb_init();
2281 sun4v_ktsb_register();
2282 } else {
2283 unsigned long impl, ver;
2284
2285 cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2286 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2287
2288 __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2289 impl = ((ver >> 32) & 0xffff);
2290 if (impl == PANTHER_IMPL)
2291 cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2292 HV_PGSZ_MASK_256MB);
2293
2294 sun4u_linear_pte_xor_finalize();
2295 }
2296
2297 /* Flush the TLBs and the 4M TSB so that the updated linear
2298 * pte XOR settings are realized for all mappings.
2299 */
2300 __flush_tlb_all();
2301#ifndef CONFIG_DEBUG_PAGEALLOC
2302 memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2303#endif
2304 __flush_tlb_all();
2305
2306 /* Setup bootmem... */
2307 last_valid_pfn = end_pfn = bootmem_init(phys_base);
2308
2309 kernel_physical_mapping_init();
2310
2311 {
2312 unsigned long max_zone_pfns[MAX_NR_ZONES];
2313
2314 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2315
2316 max_zone_pfns[ZONE_NORMAL] = end_pfn;
2317
2318 free_area_init_nodes(max_zone_pfns);
2319 }
2320
2321 printk("Booting Linux...\n");
2322}
2323
2324int page_in_phys_avail(unsigned long paddr)
2325{
2326 int i;
2327
2328 paddr &= PAGE_MASK;
2329
2330 for (i = 0; i < pavail_ents; i++) {
2331 unsigned long start, end;
2332
2333 start = pavail[i].phys_addr;
2334 end = start + pavail[i].reg_size;
2335
2336 if (paddr >= start && paddr < end)
2337 return 1;
2338 }
2339 if (paddr >= kern_base && paddr < (kern_base + kern_size))
2340 return 1;
2341#ifdef CONFIG_BLK_DEV_INITRD
2342 if (paddr >= __pa(initrd_start) &&
2343 paddr < __pa(PAGE_ALIGN(initrd_end)))
2344 return 1;
2345#endif
2346
2347 return 0;
2348}
2349
2350static void __init register_page_bootmem_info(void)
2351{
2352#ifdef CONFIG_NEED_MULTIPLE_NODES
2353 int i;
2354
2355 for_each_online_node(i)
2356 if (NODE_DATA(i)->node_spanned_pages)
2357 register_page_bootmem_info_node(NODE_DATA(i));
2358#endif
2359}
2360void __init mem_init(void)
2361{
2362 high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2363
2364 register_page_bootmem_info();
2365 free_all_bootmem();
2366
2367 /*
2368 * Set up the zero page, mark it reserved, so that page count
2369 * is not manipulated when freeing the page from user ptes.
2370 */
2371 mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2372 if (mem_map_zero == NULL) {
2373 prom_printf("paging_init: Cannot alloc zero page.\n");
2374 prom_halt();
2375 }
2376 mark_page_reserved(mem_map_zero);
2377
2378 mem_init_print_info(NULL);
2379
2380 if (tlb_type == cheetah || tlb_type == cheetah_plus)
2381 cheetah_ecache_flush_init();
2382}
2383
2384void free_initmem(void)
2385{
2386 unsigned long addr, initend;
2387 int do_free = 1;
2388
2389 /* If the physical memory maps were trimmed by kernel command
2390 * line options, don't even try freeing this initmem stuff up.
2391 * The kernel image could have been in the trimmed out region
2392 * and if so the freeing below will free invalid page structs.
2393 */
2394 if (cmdline_memory_size)
2395 do_free = 0;
2396
2397 /*
2398 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2399 */
2400 addr = PAGE_ALIGN((unsigned long)(__init_begin));
2401 initend = (unsigned long)(__init_end) & PAGE_MASK;
2402 for (; addr < initend; addr += PAGE_SIZE) {
2403 unsigned long page;
2404
2405 page = (addr +
2406 ((unsigned long) __va(kern_base)) -
2407 ((unsigned long) KERNBASE));
2408 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2409
2410 if (do_free)
2411 free_reserved_page(virt_to_page(page));
2412 }
2413}
2414
2415#ifdef CONFIG_BLK_DEV_INITRD
2416void free_initrd_mem(unsigned long start, unsigned long end)
2417{
2418 free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
2419 "initrd");
2420}
2421#endif
2422
2423pgprot_t PAGE_KERNEL __read_mostly;
2424EXPORT_SYMBOL(PAGE_KERNEL);
2425
2426pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2427pgprot_t PAGE_COPY __read_mostly;
2428
2429pgprot_t PAGE_SHARED __read_mostly;
2430EXPORT_SYMBOL(PAGE_SHARED);
2431
2432unsigned long pg_iobits __read_mostly;
2433
2434unsigned long _PAGE_IE __read_mostly;
2435EXPORT_SYMBOL(_PAGE_IE);
2436
2437unsigned long _PAGE_E __read_mostly;
2438EXPORT_SYMBOL(_PAGE_E);
2439
2440unsigned long _PAGE_CACHE __read_mostly;
2441EXPORT_SYMBOL(_PAGE_CACHE);
2442
2443#ifdef CONFIG_SPARSEMEM_VMEMMAP
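/* Populate the vmemmap for [vstart, vend) using 4MB PMD mappings
 * backed by blocks allocated on 'node'.
 */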
2444int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2445 int node)
2446{
2447 unsigned long pte_base;
2448
2449 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2450 _PAGE_CP_4U | _PAGE_CV_4U |
2451 _PAGE_P_4U | _PAGE_W_4U);
2452 if (tlb_type == hypervisor)
2453 pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2454 page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2455
2456 pte_base |= _PAGE_PMD_HUGE;
2457
2458 vstart = vstart & PMD_MASK;
2459 vend = ALIGN(vend, PMD_SIZE);
2460 for (; vstart < vend; vstart += PMD_SIZE) {
2461 pgd_t *pgd = pgd_offset_k(vstart);
2462 unsigned long pte;
2463 pud_t *pud;
2464 pmd_t *pmd;
2465
2466 if (pgd_none(*pgd)) {
2467 pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2468
2469 if (!new)
2470 return -ENOMEM;
2471 pgd_populate(&init_mm, pgd, new);
2472 }
2473
2474 pud = pud_offset(pgd, vstart);
2475 if (pud_none(*pud)) {
2476 pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
2477
2478 if (!new)
2479 return -ENOMEM;
2480 pud_populate(&init_mm, pud, new);
2481 }
2482
2483 pmd = pmd_offset(pud, vstart);
2484
2485 pte = pmd_val(*pmd);
2486 if (!(pte & _PAGE_VALID)) {
2487 void *block = vmemmap_alloc_block(PMD_SIZE, node);
2488
2489 if (!block)
2490 return -ENOMEM;
2491
2492 pmd_val(*pmd) = pte_base | __pa(block);
2493 }
2494 }
2495
2496 return 0;
2497}
2498
2499void vmemmap_free(unsigned long start, unsigned long end)
2500{
2501}
2502#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2503
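/* Initialize PAGE_COPY, PAGE_SHARED and the protection_map[] table
 * from the cpu specific page protection bit values.
 */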
2504static void prot_init_common(unsigned long page_none,
2505 unsigned long page_shared,
2506 unsigned long page_copy,
2507 unsigned long page_readonly,
2508 unsigned long page_exec_bit)
2509{
2510 PAGE_COPY = __pgprot(page_copy);
2511 PAGE_SHARED = __pgprot(page_shared);
2512
2513 protection_map[0x0] = __pgprot(page_none);
2514 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2515 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2516 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2517 protection_map[0x4] = __pgprot(page_readonly);
2518 protection_map[0x5] = __pgprot(page_readonly);
2519 protection_map[0x6] = __pgprot(page_copy);
2520 protection_map[0x7] = __pgprot(page_copy);
2521 protection_map[0x8] = __pgprot(page_none);
2522 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2523 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2524 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2525 protection_map[0xc] = __pgprot(page_readonly);
2526 protection_map[0xd] = __pgprot(page_readonly);
2527 protection_map[0xe] = __pgprot(page_shared);
2528 protection_map[0xf] = __pgprot(page_shared);
2529}
2530
2531static void __init sun4u_pgprot_init(void)
2532{
2533 unsigned long page_none, page_shared, page_copy, page_readonly;
2534 unsigned long page_exec_bit;
2535 int i;
2536
2537 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2538 _PAGE_CACHE_4U | _PAGE_P_4U |
2539 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2540 _PAGE_EXEC_4U);
2541 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2542 _PAGE_CACHE_4U | _PAGE_P_4U |
2543 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2544 _PAGE_EXEC_4U | _PAGE_L_4U);
2545
2546 _PAGE_IE = _PAGE_IE_4U;
2547 _PAGE_E = _PAGE_E_4U;
2548 _PAGE_CACHE = _PAGE_CACHE_4U;
2549
2550 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2551 __ACCESS_BITS_4U | _PAGE_E_4U);
2552
2553#ifdef CONFIG_DEBUG_PAGEALLOC
2554 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2555#else
2556 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2557 PAGE_OFFSET;
2558#endif
2559 kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2560 _PAGE_P_4U | _PAGE_W_4U);
2561
2562 for (i = 1; i < 4; i++)
2563 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2564
2565 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2566 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2567 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2568
2569
2570 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2571 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2572 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2573 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2574 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2575 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2576 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2577
2578 page_exec_bit = _PAGE_EXEC_4U;
2579
2580 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2581 page_exec_bit);
2582}
2583
2584static void __init sun4v_pgprot_init(void)
2585{
2586 unsigned long page_none, page_shared, page_copy, page_readonly;
2587 unsigned long page_exec_bit;
2588 int i;
2589
2590 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2591 page_cache4v_flag | _PAGE_P_4V |
2592 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
2593 _PAGE_EXEC_4V);
2594 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2595
2596 _PAGE_IE = _PAGE_IE_4V;
2597 _PAGE_E = _PAGE_E_4V;
2598 _PAGE_CACHE = page_cache4v_flag;
2599
2600#ifdef CONFIG_DEBUG_PAGEALLOC
2601 kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2602#else
2603 kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2604 PAGE_OFFSET;
2605#endif
2606 kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2607 _PAGE_W_4V);
2608
2609 for (i = 1; i < 4; i++)
2610 kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2611
2612 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2613 __ACCESS_BITS_4V | _PAGE_E_4V);
2614
2615 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2616 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2617 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2618 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2619
2620 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2621 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2622 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2623 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2624 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2625 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2626 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2627
2628 page_exec_bit = _PAGE_EXEC_4V;
2629
2630 prot_init_common(page_none, page_shared, page_copy, page_readonly,
2631 page_exec_bit);
2632}
2633
2634unsigned long pte_sz_bits(unsigned long sz)
2635{
2636 if (tlb_type == hypervisor) {
2637 switch (sz) {
2638 case 8 * 1024:
2639 default:
2640 return _PAGE_SZ8K_4V;
2641 case 64 * 1024:
2642 return _PAGE_SZ64K_4V;
2643 case 512 * 1024:
2644 return _PAGE_SZ512K_4V;
2645 case 4 * 1024 * 1024:
2646 return _PAGE_SZ4MB_4V;
2647 }
2648 } else {
2649 switch (sz) {
2650 case 8 * 1024:
2651 default:
2652 return _PAGE_SZ8K_4U;
2653 case 64 * 1024:
2654 return _PAGE_SZ64K_4U;
2655 case 512 * 1024:
2656 return _PAGE_SZ512K_4U;
2657 case 4 * 1024 * 1024:
2658 return _PAGE_SZ4MB_4U;
2659 }
2660 }
2661}
2662
2663pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2664{
2665 pte_t pte;
2666
2667 pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
2668 pte_val(pte) |= (((unsigned long)space) << 32);
2669 pte_val(pte) |= pte_sz_bits(page_size);
2670
2671 return pte;
2672}
2673
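/* Build the TTE value used for the locked 4MB mappings of the kernel
 * image.
 */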
2674static unsigned long kern_large_tte(unsigned long paddr)
2675{
2676 unsigned long val;
2677
2678 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2679 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2680 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2681 if (tlb_type == hypervisor)
2682 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2683 page_cache4v_flag | _PAGE_P_4V |
2684 _PAGE_EXEC_4V | _PAGE_W_4V);
2685
2686 return val | paddr;
2687}
2688
2689/* If not locked, zap it. */
2690void __flush_tlb_all(void)
2691{
2692 unsigned long pstate;
2693 int i;
2694
2695 __asm__ __volatile__("flushw\n\t"
2696 "rdpr %%pstate, %0\n\t"
2697 "wrpr %0, %1, %%pstate"
2698 : "=r" (pstate)
2699 : "i" (PSTATE_IE));
2700 if (tlb_type == hypervisor) {
2701 sun4v_mmu_demap_all();
2702 } else if (tlb_type == spitfire) {
2703 for (i = 0; i < 64; i++) {
2704 /* Spitfire Errata #32 workaround */
2705 /* NOTE: Always runs on spitfire, so no
2706 * cheetah+ page size encodings.
2707 */
2708 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2709 "flush %%g6"
2710 : /* No outputs */
2711 : "r" (0),
2712 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2713
2714 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2715 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2716 "membar #Sync"
2717 : /* no outputs */
2718 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2719 spitfire_put_dtlb_data(i, 0x0UL);
2720 }
2721
2722 /* Spitfire Errata #32 workaround */
2723 /* NOTE: Always runs on spitfire, so no
2724 * cheetah+ page size encodings.
2725 */
2726 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
2727 "flush %%g6"
2728 : /* No outputs */
2729 : "r" (0),
2730 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2731
2732 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2733 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2734 "membar #Sync"
2735 : /* no outputs */
2736 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2737 spitfire_put_itlb_data(i, 0x0UL);
2738 }
2739 }
2740 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2741 cheetah_flush_dtlb_all();
2742 cheetah_flush_itlb_all();
2743 }
2744 __asm__ __volatile__("wrpr %0, 0, %%pstate"
2745 : : "r" (pstate));
2746}
2747
2748pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
2749 unsigned long address)
2750{
2751 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2752 pte_t *pte = NULL;
2753
2754 if (page)
2755 pte = (pte_t *) page_address(page);
2756
2757 return pte;
2758}
2759
2760pgtable_t pte_alloc_one(struct mm_struct *mm,
2761 unsigned long address)
2762{
2763 struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
2764 if (!page)
2765 return NULL;
2766 if (!pgtable_page_ctor(page)) {
2767 free_hot_cold_page(page, 0);
2768 return NULL;
2769 }
2770 return (pte_t *) page_address(page);
2771}
2772
2773void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2774{
2775 free_page((unsigned long)pte);
2776}
2777
2778static void __pte_free(pgtable_t pte)
2779{
2780 struct page *page = virt_to_page(pte);
2781
2782 pgtable_page_dtor(page);
2783 __free_page(page);
2784}
2785
2786void pte_free(struct mm_struct *mm, pgtable_t pte)
2787{
2788 __pte_free(pte);
2789}
2790
2791void pgtable_free(void *table, bool is_page)
2792{
2793 if (is_page)
2794 __pte_free(table);
2795 else
2796 kmem_cache_free(pgtable_cache, table);
2797}
2798
2799#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2800void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2801 pmd_t *pmd)
2802{
2803 unsigned long pte, flags;
2804 struct mm_struct *mm;
2805 pmd_t entry = *pmd;
2806
2807 if (!pmd_large(entry) || !pmd_young(entry))
2808 return;
2809
2810 pte = pmd_val(entry);
2811
2812 /* Don't insert a non-valid PMD into the TSB, we'll deadlock. */
2813 if (!(pte & _PAGE_VALID))
2814 return;
2815
2816 /* We are fabricating 8MB pages using 4MB real hw pages. */
2817 pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2818
2819 mm = vma->vm_mm;
2820
2821 spin_lock_irqsave(&mm->context.lock, flags);
2822
2823 if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2824 __update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2825 addr, pte);
2826
2827 spin_unlock_irqrestore(&mm->context.lock, flags);
2828}
2829#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2830
2831#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2832static void context_reload(void *__data)
2833{
2834 struct mm_struct *mm = __data;
2835
2836 if (mm == current->mm)
2837 load_secondary_context(mm);
2838}
2839
2840void hugetlb_setup(struct pt_regs *regs)
2841{
2842 struct mm_struct *mm = current->mm;
2843 struct tsb_config *tp;
2844
2845 if (faulthandler_disabled() || !mm) {
2846 const struct exception_table_entry *entry;
2847
2848 entry = search_exception_tables(regs->tpc);
2849 if (entry) {
2850 regs->tpc = entry->fixup;
2851 regs->tnpc = regs->tpc + 4;
2852 return;
2853 }
2854 pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2855 die_if_kernel("HugeTSB in atomic", regs);
2856 }
2857
2858 tp = &mm->context.tsb_block[MM_TSB_HUGE];
2859 if (likely(tp->tsb == NULL))
2860 tsb_grow(mm, MM_TSB_HUGE, 0);
2861
2862 tsb_context_switch(mm);
2863 smp_tsb_sync(mm);
2864
2865 /* On UltraSPARC-III+ and later, configure the second half of
2866 * the Data-TLB for huge pages.
2867 */
2868 if (tlb_type == cheetah_plus) {
2869 bool need_context_reload = false;
2870 unsigned long ctx;
2871
2872 spin_lock_irq(&ctx_alloc_lock);
2873 ctx = mm->context.sparc64_ctx_val;
2874 ctx &= ~CTX_PGSZ_MASK;
2875 ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
2876 ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
2877
2878 if (ctx != mm->context.sparc64_ctx_val) {
2879 /* When changing the page size fields, we
2880 * must perform a context flush so that no
2881 * stale entries match. This flush must
2882 * occur with the original context register
2883 * settings.
2884 */
2885 do_flush_tlb_mm(mm);
2886
2887 /* Reload the context register of all processors
2888 * also executing in this address space.
2889 */
2890 mm->context.sparc64_ctx_val = ctx;
2891 need_context_reload = true;
2892 }
2893 spin_unlock_irq(&ctx_alloc_lock);
2894
2895 if (need_context_reload)
2896 on_each_cpu(context_reload, mm, 0);
2897 }
2898}
2899#endif
2900
2901static struct resource code_resource = {
2902 .name = "Kernel code",
2903 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
2904};
2905
2906static struct resource data_resource = {
2907 .name = "Kernel data",
2908 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
2909};
2910
2911static struct resource bss_resource = {
2912 .name = "Kernel bss",
2913 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
2914};
2915
2916static inline resource_size_t compute_kern_paddr(void *addr)
2917{
2918 return (resource_size_t) (addr - KERNBASE + kern_base);
2919}
2920
2921static void __init kernel_lds_init(void)
2922{
2923 code_resource.start = compute_kern_paddr(_text);
2924 code_resource.end = compute_kern_paddr(_etext - 1);
2925 data_resource.start = compute_kern_paddr(_etext);
2926 data_resource.end = compute_kern_paddr(_edata - 1);
2927 bss_resource.start = compute_kern_paddr(__bss_start);
2928 bss_resource.end = compute_kern_paddr(_end - 1);
2929}
2930
2931static int __init report_memory(void)
2932{
2933 int i;
2934 struct resource *res;
2935
2936 kernel_lds_init();
2937
2938 for (i = 0; i < pavail_ents; i++) {
2939 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
2940
2941 if (!res) {
2942 pr_warn("Failed to allocate resource.\n");
2943 break;
2944 }
2945
2946 res->name = "System RAM";
2947 res->start = pavail[i].phys_addr;
2948 res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
2949 res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
2950
2951 if (insert_resource(&iomem_resource, res) < 0) {
2952 pr_warn("Resource insertion failed.\n");
2953 break;
2954 }
2955
2956 insert_resource(res, &code_resource);
2957 insert_resource(res, &data_resource);
2958 insert_resource(res, &bss_resource);
2959 }
2960
2961 return 0;
2962}
2963arch_initcall(report_memory);
2964
2965#ifdef CONFIG_SMP
2966#define do_flush_tlb_kernel_range smp_flush_tlb_kernel_range
2967#else
2968#define do_flush_tlb_kernel_range __flush_tlb_kernel_range
2969#endif
2970
2971void flush_tlb_kernel_range(unsigned long start, unsigned long end)
2972{
2973 if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
2974 if (start < LOW_OBP_ADDRESS) {
2975 flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
2976 do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
2977 }
2978 if (end > HI_OBP_ADDRESS) {
2979 flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
2980 do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
2981 }
2982 } else {
2983 flush_tsb_kernel_range(start, end);
2984 do_flush_tlb_kernel_range(start, end);
2985 }
2986}