   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  arch/sparc64/mm/init.c
   4 *
   5 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
   6 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
   7 */
   8 
   9#include <linux/extable.h>
  10#include <linux/kernel.h>
  11#include <linux/sched.h>
  12#include <linux/string.h>
  13#include <linux/init.h>
  14#include <linux/memblock.h>
  15#include <linux/mm.h>
  16#include <linux/hugetlb.h>
  17#include <linux/initrd.h>
  18#include <linux/swap.h>
  19#include <linux/pagemap.h>
  20#include <linux/poison.h>
  21#include <linux/fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/kprobes.h>
  24#include <linux/cache.h>
  25#include <linux/sort.h>
  26#include <linux/ioport.h>
  27#include <linux/percpu.h>
  28#include <linux/mmzone.h>
  29#include <linux/gfp.h>
  30#include <linux/bootmem_info.h>
  31
  32#include <asm/head.h>
  33#include <asm/page.h>
  34#include <asm/pgalloc.h>
  35#include <asm/oplib.h>
  36#include <asm/iommu.h>
  37#include <asm/io.h>
  38#include <linux/uaccess.h>
  39#include <asm/mmu_context.h>
  40#include <asm/tlbflush.h>
  41#include <asm/dma.h>
  42#include <asm/starfire.h>
  43#include <asm/tlb.h>
  44#include <asm/spitfire.h>
  45#include <asm/sections.h>
  46#include <asm/tsb.h>
  47#include <asm/hypervisor.h>
  48#include <asm/prom.h>
  49#include <asm/mdesc.h>
  50#include <asm/cpudata.h>
  51#include <asm/setup.h>
  52#include <asm/irq.h>
  53
  54#include "init_64.h"
  55
  56unsigned long kern_linear_pte_xor[4] __read_mostly;
  57static unsigned long page_cache4v_flag;
  58
  59/* A bitmap, two bits for every 256MB of physical memory.  These two
  60 * bits determine what page size we use for kernel linear
  61 * translations.  They form an index into kern_linear_pte_xor[].  The
  62 * value in the indexed slot is XOR'd with the TLB miss virtual
  63 * address to form the resulting TTE.  The mapping is:
  64 *
  65 *	0	==>	4MB
  66 *	1	==>	256MB
  67 *	2	==>	2GB
  68 *	3	==>	16GB
  69 *
  70 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
  71 * support 2GB pages, and hopefully future cpus will support the 16GB
  72 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
  73 * if these larger page sizes are not supported by the cpu.
  74 *
  75 * It would be nice to determine this from the machine description
  76 * 'cpu' properties, but we need to have this table setup before the
  77 * MDESC is initialized.
  78 */
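/* For illustration: if the two-bit slot covering a 256MB region holds
 * the value 2, a TLB miss at linear address V inside that region forms
 * its TTE as V ^ kern_linear_pte_xor[2], i.e. a 2GB mapping (or a
 * 256MB one on cpus that lack 2GB support, as noted above).
 */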
  79
  80#ifndef CONFIG_DEBUG_PAGEALLOC
  81/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
  82 * Space is allocated for this right after the trap table in
  83 * arch/sparc64/kernel/head.S
  84 */
  85extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
  86#endif
  87extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
  88
  89static unsigned long cpu_pgsz_mask;
  90
  91#define MAX_BANKS	1024
  92
  93static struct linux_prom64_registers pavail[MAX_BANKS];
  94static int pavail_ents;
  95
  96u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
  97
  98static int cmp_p64(const void *a, const void *b)
  99{
 100	const struct linux_prom64_registers *x = a, *y = b;
 101
 102	if (x->phys_addr > y->phys_addr)
 103		return 1;
 104	if (x->phys_addr < y->phys_addr)
 105		return -1;
 106	return 0;
 107}
 108
 109static void __init read_obp_memory(const char *property,
 110				   struct linux_prom64_registers *regs,
 111				   int *num_ents)
 112{
 113	phandle node = prom_finddevice("/memory");
 114	int prop_size = prom_getproplen(node, property);
 115	int ents, ret, i;
 116
 117	ents = prop_size / sizeof(struct linux_prom64_registers);
 118	if (ents > MAX_BANKS) {
 119		prom_printf("The machine has more %s property entries than "
 120			    "this kernel can support (%d).\n",
 121			    property, MAX_BANKS);
 122		prom_halt();
 123	}
 124
 125	ret = prom_getproperty(node, property, (char *) regs, prop_size);
 126	if (ret == -1) {
 127		prom_printf("Couldn't get %s property from /memory.\n",
 128				property);
 129		prom_halt();
 130	}
 131
 132	/* Sanitize what we got from the firmware, by page aligning
 133	 * everything.
 134	 */
 135	for (i = 0; i < ents; i++) {
 136		unsigned long base, size;
 137
 138		base = regs[i].phys_addr;
 139		size = regs[i].reg_size;
 140
 141		size &= PAGE_MASK;
 142		if (base & ~PAGE_MASK) {
 143			unsigned long new_base = PAGE_ALIGN(base);
 144
 145			size -= new_base - base;
 146			if ((long) size < 0L)
 147				size = 0UL;
 148			base = new_base;
 149		}
 150		if (size == 0UL) {
 151			/* If it is empty, simply get rid of it.
 152			 * This simplifies the logic of the other
 153			 * functions that process these arrays.
 154			 */
 155			memmove(&regs[i], &regs[i + 1],
 156				(ents - i - 1) * sizeof(regs[0]));
 157			i--;
 158			ents--;
 159			continue;
 160		}
 161		regs[i].phys_addr = base;
 162		regs[i].reg_size = size;
 163	}
 164
 165	*num_ents = ents;
 166
 167	sort(regs, ents, sizeof(struct linux_prom64_registers),
 168	     cmp_p64, NULL);
 169}
 170
 171/* Kernel physical address base and size in bytes.  */
 172unsigned long kern_base __read_mostly;
 173unsigned long kern_size __read_mostly;
 174
 175/* Initial ramdisk setup */
 176extern unsigned long sparc_ramdisk_image64;
 177extern unsigned int sparc_ramdisk_image;
 178extern unsigned int sparc_ramdisk_size;
 179
 180struct page *mem_map_zero __read_mostly;
 181EXPORT_SYMBOL(mem_map_zero);
 182
 183unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
 184
 185unsigned long sparc64_kern_pri_context __read_mostly;
 186unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 187unsigned long sparc64_kern_sec_context __read_mostly;
 188
 189int num_kernel_image_mappings;
 190
 191#ifdef CONFIG_DEBUG_DCFLUSH
 192atomic_t dcpage_flushes = ATOMIC_INIT(0);
 193#ifdef CONFIG_SMP
 194atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
 195#endif
 196#endif
 197
 198inline void flush_dcache_page_impl(struct page *page)
 199{
 200	BUG_ON(tlb_type == hypervisor);
 201#ifdef CONFIG_DEBUG_DCFLUSH
 202	atomic_inc(&dcpage_flushes);
 203#endif
 204
 205#ifdef DCACHE_ALIASING_POSSIBLE
 206	__flush_dcache_page(page_address(page),
 207			    ((tlb_type == spitfire) &&
 208			     page_mapping_file(page) != NULL));
 209#else
 210	if (page_mapping_file(page) != NULL &&
 211	    tlb_type == spitfire)
 212		__flush_icache_page(__pa(page_address(page)));
 213#endif
 214}
 215
 216#define PG_dcache_dirty		PG_arch_1
 217#define PG_dcache_cpu_shift	32UL
 218#define PG_dcache_cpu_mask	\
 219	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
 220
 221#define dcache_dirty_cpu(page) \
 222	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 223
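/* Sketch of the page->flags encoding used below: bit PG_arch_1 acts as
 * the D-cache dirty flag, and the id of the cpu that dirtied the page
 * lives in the ilog2(roundup_pow_of_two(NR_CPUS)) bits starting at bit
 * PG_dcache_cpu_shift (32).  set_dcache_dirty() installs both with an
 * atomic casx loop; clear_dcache_dirty_cpu() clears the dirty bit only
 * if the given cpu still owns the page.
 */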
 224static inline void set_dcache_dirty(struct page *page, int this_cpu)
 225{
 226	unsigned long mask = this_cpu;
 227	unsigned long non_cpu_bits;
 228
 229	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
 230	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
 231
 232	__asm__ __volatile__("1:\n\t"
 233			     "ldx	[%2], %%g7\n\t"
 234			     "and	%%g7, %1, %%g1\n\t"
 235			     "or	%%g1, %0, %%g1\n\t"
 236			     "casx	[%2], %%g7, %%g1\n\t"
 237			     "cmp	%%g7, %%g1\n\t"
 238			     "bne,pn	%%xcc, 1b\n\t"
 239			     " nop"
 240			     : /* no outputs */
 241			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
 242			     : "g1", "g7");
 243}
 244
 245static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
 246{
 247	unsigned long mask = (1UL << PG_dcache_dirty);
 248
 249	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
 250			     "1:\n\t"
 251			     "ldx	[%2], %%g7\n\t"
 252			     "srlx	%%g7, %4, %%g1\n\t"
 253			     "and	%%g1, %3, %%g1\n\t"
 254			     "cmp	%%g1, %0\n\t"
 255			     "bne,pn	%%icc, 2f\n\t"
 256			     " andn	%%g7, %1, %%g1\n\t"
 257			     "casx	[%2], %%g7, %%g1\n\t"
 258			     "cmp	%%g7, %%g1\n\t"
 259			     "bne,pn	%%xcc, 1b\n\t"
 260			     " nop\n"
 261			     "2:"
 262			     : /* no outputs */
 263			     : "r" (cpu), "r" (mask), "r" (&page->flags),
 264			       "i" (PG_dcache_cpu_mask),
 265			       "i" (PG_dcache_cpu_shift)
 266			     : "g1", "g7");
 267}
 268
 269static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
 270{
 271	unsigned long tsb_addr = (unsigned long) ent;
 272
 273	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 274		tsb_addr = __pa(tsb_addr);
 275
 276	__tsb_insert(tsb_addr, tag, pte);
 277}
 278
 279unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
 280
 281static void flush_dcache(unsigned long pfn)
 282{
 283	struct page *page;
 284
 285	page = pfn_to_page(pfn);
 286	if (page) {
 287		unsigned long pg_flags;
 288
 289		pg_flags = page->flags;
 290		if (pg_flags & (1UL << PG_dcache_dirty)) {
 291			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
 292				   PG_dcache_cpu_mask);
 293			int this_cpu = get_cpu();
 294
 295			/* This is just to optimize away some function calls
 296			 * in the SMP case.
 297			 */
 298			if (cpu == this_cpu)
 299				flush_dcache_page_impl(page);
 300			else
 301				smp_flush_dcache_page_impl(page, cpu);
 302
 303			clear_dcache_dirty_cpu(page, cpu);
 304
 305			put_cpu();
 306		}
 307	}
 308}
 309
 310/* mm->context.lock must be held */
 311static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
 312				    unsigned long tsb_hash_shift, unsigned long address,
 313				    unsigned long tte)
 314{
 315	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 316	unsigned long tag;
 317
 318	if (unlikely(!tsb))
 319		return;
 320
 321	tsb += ((address >> tsb_hash_shift) &
 322		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 323	tag = (address >> 22UL);
 324	tsb_insert(tsb, tag, tte);
 325}
 326
 327#ifdef CONFIG_HUGETLB_PAGE
 328static int __init hugetlbpage_init(void)
 329{
 330	hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
 331	hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
 332	hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
 333	hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
 334
 335	return 0;
 336}
 337
 338arch_initcall(hugetlbpage_init);
 339
 340static void __init pud_huge_patch(void)
 341{
 342	struct pud_huge_patch_entry *p;
 343	unsigned long addr;
 344
 345	p = &__pud_huge_patch;
 346	addr = p->addr;
 347	*(unsigned int *)addr = p->insn;
 348
 349	__asm__ __volatile__("flush %0" : : "r" (addr));
 350}
 351
 352bool __init arch_hugetlb_valid_size(unsigned long size)
 353{
 354	unsigned int hugepage_shift = ilog2(size);
 355	unsigned short hv_pgsz_idx;
 356	unsigned int hv_pgsz_mask;
 357
 358	switch (hugepage_shift) {
 359	case HPAGE_16GB_SHIFT:
 360		hv_pgsz_mask = HV_PGSZ_MASK_16GB;
 361		hv_pgsz_idx = HV_PGSZ_IDX_16GB;
 362		pud_huge_patch();
 363		break;
 364	case HPAGE_2GB_SHIFT:
 365		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
 366		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
 367		break;
 368	case HPAGE_256MB_SHIFT:
 369		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 370		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
 371		break;
 372	case HPAGE_SHIFT:
 373		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
 374		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
 375		break;
 376	case HPAGE_64K_SHIFT:
 377		hv_pgsz_mask = HV_PGSZ_MASK_64K;
 378		hv_pgsz_idx = HV_PGSZ_IDX_64K;
 379		break;
 380	default:
 381		hv_pgsz_mask = 0;
 382	}
 383
 384	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
 385		return false;
 386
 387	return true;
 388}
 389#endif	/* CONFIG_HUGETLB_PAGE */
 390
 391void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 392{
 393	struct mm_struct *mm;
 394	unsigned long flags;
 395	bool is_huge_tsb;
 396	pte_t pte = *ptep;
 397
 398	if (tlb_type != hypervisor) {
 399		unsigned long pfn = pte_pfn(pte);
 400
 401		if (pfn_valid(pfn))
 402			flush_dcache(pfn);
 403	}
 404
 405	mm = vma->vm_mm;
 406
 407	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
 408	if (!pte_accessible(mm, pte))
 409		return;
 410
 411	spin_lock_irqsave(&mm->context.lock, flags);
 412
 413	is_huge_tsb = false;
 414#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 415	if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
 416		unsigned long hugepage_size = PAGE_SIZE;
 417
 418		if (is_vm_hugetlb_page(vma))
 419			hugepage_size = huge_page_size(hstate_vma(vma));
 420
 421		if (hugepage_size >= PUD_SIZE) {
 422			unsigned long mask = 0x1ffc00000UL;
 423
 424			/* Transfer bits [32:22] from address to resolve
 425			 * at 4M granularity.
 426			 */
 427			pte_val(pte) &= ~mask;
 428			pte_val(pte) |= (address & mask);
 429		} else if (hugepage_size >= PMD_SIZE) {
 430			/* We are fabricating 8MB pages using 4MB
 431			 * real hw pages.
 432			 */
 433			pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 434		}
 435
 436		if (hugepage_size >= PMD_SIZE) {
 437			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
 438				REAL_HPAGE_SHIFT, address, pte_val(pte));
 439			is_huge_tsb = true;
 440		}
 441	}
 442#endif
 443	if (!is_huge_tsb)
 444		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 445					address, pte_val(pte));
 446
 447	spin_unlock_irqrestore(&mm->context.lock, flags);
 448}
 449
 450void flush_dcache_page(struct page *page)
 451{
 452	struct address_space *mapping;
 453	int this_cpu;
 454
 455	if (tlb_type == hypervisor)
 456		return;
 457
 458	/* Do not bother with the expensive D-cache flush if it
 459	 * is merely the zero page.  The 'bigcore' testcase in GDB
 460	 * causes this case to run millions of times.
 461	 */
 462	if (page == ZERO_PAGE(0))
 463		return;
 464
 465	this_cpu = get_cpu();
 466
 467	mapping = page_mapping_file(page);
 468	if (mapping && !mapping_mapped(mapping)) {
 469		int dirty = test_bit(PG_dcache_dirty, &page->flags);
 470		if (dirty) {
 471			int dirty_cpu = dcache_dirty_cpu(page);
 472
 473			if (dirty_cpu == this_cpu)
 474				goto out;
 475			smp_flush_dcache_page_impl(page, dirty_cpu);
 476		}
 477		set_dcache_dirty(page, this_cpu);
 478	} else {
 479		/* We could delay the flush for the !page_mapping
 480		 * case too.  But that case is for exec env/arg
  481		 * pages and those are 99% certain to get
  482		 * faulted into the tlb (and thus flushed) anyway.
 483		 */
 484		flush_dcache_page_impl(page);
 485	}
 486
 487out:
 488	put_cpu();
 489}
 490EXPORT_SYMBOL(flush_dcache_page);
 491
 492void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 493{
 494	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
 495	if (tlb_type == spitfire) {
 496		unsigned long kaddr;
 497
 498		/* This code only runs on Spitfire cpus so this is
 499		 * why we can assume _PAGE_PADDR_4U.
 500		 */
 501		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
 502			unsigned long paddr, mask = _PAGE_PADDR_4U;
 503
 504			if (kaddr >= PAGE_OFFSET)
 505				paddr = kaddr & mask;
 506			else {
 507				pte_t *ptep = virt_to_kpte(kaddr);
 508
 509				paddr = pte_val(*ptep) & mask;
 510			}
 511			__flush_icache_page(paddr);
 512		}
 513	}
 514}
 515EXPORT_SYMBOL(flush_icache_range);
 516
 517void mmu_info(struct seq_file *m)
 518{
 519	static const char *pgsz_strings[] = {
 520		"8K", "64K", "512K", "4MB", "32MB",
 521		"256MB", "2GB", "16GB",
 522	};
 523	int i, printed;
 524
 525	if (tlb_type == cheetah)
 526		seq_printf(m, "MMU Type\t: Cheetah\n");
 527	else if (tlb_type == cheetah_plus)
 528		seq_printf(m, "MMU Type\t: Cheetah+\n");
 529	else if (tlb_type == spitfire)
 530		seq_printf(m, "MMU Type\t: Spitfire\n");
 531	else if (tlb_type == hypervisor)
 532		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
 533	else
 534		seq_printf(m, "MMU Type\t: ???\n");
 535
 536	seq_printf(m, "MMU PGSZs\t: ");
 537	printed = 0;
 538	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
 539		if (cpu_pgsz_mask & (1UL << i)) {
 540			seq_printf(m, "%s%s",
 541				   printed ? "," : "", pgsz_strings[i]);
 542			printed++;
 543		}
 544	}
 545	seq_putc(m, '\n');
 546
 547#ifdef CONFIG_DEBUG_DCFLUSH
 548	seq_printf(m, "DCPageFlushes\t: %d\n",
 549		   atomic_read(&dcpage_flushes));
 550#ifdef CONFIG_SMP
 551	seq_printf(m, "DCPageFlushesXC\t: %d\n",
 552		   atomic_read(&dcpage_flushes_xcall));
 553#endif /* CONFIG_SMP */
 554#endif /* CONFIG_DEBUG_DCFLUSH */
 555}
 556
 557struct linux_prom_translation prom_trans[512] __read_mostly;
 558unsigned int prom_trans_ents __read_mostly;
 559
 560unsigned long kern_locked_tte_data;
 561
 562/* The obp translations are saved based on 8k pagesize, since obp can
 563 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 564 * HI_OBP_ADDRESS range are handled in ktlb.S.
 565 */
 566static inline int in_obp_range(unsigned long vaddr)
 567{
 568	return (vaddr >= LOW_OBP_ADDRESS &&
 569		vaddr < HI_OBP_ADDRESS);
 570}
 571
 572static int cmp_ptrans(const void *a, const void *b)
 573{
 574	const struct linux_prom_translation *x = a, *y = b;
 575
 576	if (x->virt > y->virt)
 577		return 1;
 578	if (x->virt < y->virt)
 579		return -1;
 580	return 0;
 581}
 582
 583/* Read OBP translations property into 'prom_trans[]'.  */
 584static void __init read_obp_translations(void)
 585{
 586	int n, node, ents, first, last, i;
 587
 588	node = prom_finddevice("/virtual-memory");
 589	n = prom_getproplen(node, "translations");
 590	if (unlikely(n == 0 || n == -1)) {
 591		prom_printf("prom_mappings: Couldn't get size.\n");
 592		prom_halt();
 593	}
 594	if (unlikely(n > sizeof(prom_trans))) {
 595		prom_printf("prom_mappings: Size %d is too big.\n", n);
 596		prom_halt();
 597	}
 598
 599	if ((n = prom_getproperty(node, "translations",
 600				  (char *)&prom_trans[0],
 601				  sizeof(prom_trans))) == -1) {
 602		prom_printf("prom_mappings: Couldn't get property.\n");
 603		prom_halt();
 604	}
 605
 606	n = n / sizeof(struct linux_prom_translation);
 607
 608	ents = n;
 609
 610	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
 611	     cmp_ptrans, NULL);
 612
 613	/* Now kick out all the non-OBP entries.  */
 614	for (i = 0; i < ents; i++) {
 615		if (in_obp_range(prom_trans[i].virt))
 616			break;
 617	}
 618	first = i;
 619	for (; i < ents; i++) {
 620		if (!in_obp_range(prom_trans[i].virt))
 621			break;
 622	}
 623	last = i;
 624
 625	for (i = 0; i < (last - first); i++) {
 626		struct linux_prom_translation *src = &prom_trans[i + first];
 627		struct linux_prom_translation *dest = &prom_trans[i];
 628
 629		*dest = *src;
 630	}
 631	for (; i < ents; i++) {
 632		struct linux_prom_translation *dest = &prom_trans[i];
 633		dest->virt = dest->size = dest->data = 0x0UL;
 634	}
 635
 636	prom_trans_ents = last - first;
 637
 638	if (tlb_type == spitfire) {
 639		/* Clear diag TTE bits. */
 640		for (i = 0; i < prom_trans_ents; i++)
 641			prom_trans[i].data &= ~0x0003fe0000000000UL;
 642	}
 643
 644	/* Force execute bit on.  */
 645	for (i = 0; i < prom_trans_ents; i++)
 646		prom_trans[i].data |= (tlb_type == hypervisor ?
 647				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
 648}
 649
 650static void __init hypervisor_tlb_lock(unsigned long vaddr,
 651				       unsigned long pte,
 652				       unsigned long mmu)
 653{
 654	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
 655
 656	if (ret != 0) {
 657		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
 658			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
 659		prom_halt();
 660	}
 661}
 662
 663static unsigned long kern_large_tte(unsigned long paddr);
 664
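/* remap_kernel() below pins the kernel image into the TLB: each of the
 * num_kernel_image_mappings iterations locks one 4MB (0x400000)
 * translation into both the I-TLB and D-TLB, via the sun4v hypervisor
 * on hypervisor platforms or via OBP dtlb/itlb loads on sun4u.
 */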
 665static void __init remap_kernel(void)
 666{
 667	unsigned long phys_page, tte_vaddr, tte_data;
 668	int i, tlb_ent = sparc64_highest_locked_tlbent();
 669
 670	tte_vaddr = (unsigned long) KERNBASE;
 671	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
 672	tte_data = kern_large_tte(phys_page);
 673
 674	kern_locked_tte_data = tte_data;
 675
 676	/* Now lock us into the TLBs via Hypervisor or OBP. */
 677	if (tlb_type == hypervisor) {
 678		for (i = 0; i < num_kernel_image_mappings; i++) {
 679			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
 680			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
 681			tte_vaddr += 0x400000;
 682			tte_data += 0x400000;
 683		}
 684	} else {
 685		for (i = 0; i < num_kernel_image_mappings; i++) {
 686			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
 687			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
 688			tte_vaddr += 0x400000;
 689			tte_data += 0x400000;
 690		}
 691		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
 692	}
 693	if (tlb_type == cheetah_plus) {
 694		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
 695					    CTX_CHEETAH_PLUS_NUC);
 696		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
 697		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
 698	}
 699}
 700
 701
 702static void __init inherit_prom_mappings(void)
 703{
 704	/* Now fixup OBP's idea about where we really are mapped. */
 705	printk("Remapping the kernel... ");
 706	remap_kernel();
 707	printk("done.\n");
 708}
 709
 710void prom_world(int enter)
 711{
 712	/*
 713	 * No need to change the address space any more, just flush
 714	 * the register windows
 715	 */
 716	__asm__ __volatile__("flushw");
 717}
 718
 719void __flush_dcache_range(unsigned long start, unsigned long end)
 720{
 721	unsigned long va;
 722
 723	if (tlb_type == spitfire) {
 724		int n = 0;
 725
 726		for (va = start; va < end; va += 32) {
 727			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
 728			if (++n >= 512)
 729				break;
 730		}
 731	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 732		start = __pa(start);
 733		end = __pa(end);
 734		for (va = start; va < end; va += 32)
 735			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
 736					     "membar #Sync"
 737					     : /* no outputs */
 738					     : "r" (va),
 739					       "i" (ASI_DCACHE_INVALIDATE));
 740	}
 741}
 742EXPORT_SYMBOL(__flush_dcache_range);
 743
 744/* get_new_mmu_context() uses "cache + 1".  */
 745DEFINE_SPINLOCK(ctx_alloc_lock);
 746unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 747#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 748#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 749DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
 750DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
 751
 752static void mmu_context_wrap(void)
 753{
 754	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
 755	unsigned long new_ver, new_ctx, old_ctx;
 756	struct mm_struct *mm;
 757	int cpu;
 758
 759	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
 760
 761	/* Reserve kernel context */
 762	set_bit(0, mmu_context_bmap);
 763
 764	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
 765	if (unlikely(new_ver == 0))
 766		new_ver = CTX_FIRST_VERSION;
 767	tlb_context_cache = new_ver;
 768
 769	/*
  770	 * Make sure that any new mm that is added into per_cpu_secondary_mm
  771	 * goes through the get_new_mmu_context() path.
 772	 */
 773	mb();
 774
 775	/*
  776	 * Update versions to current on those CPUs that had valid secondary
  777	 * contexts.
 778	 */
 779	for_each_online_cpu(cpu) {
 780		/*
 781		 * If a new mm is stored after we took this mm from the array,
 782		 * it will go into get_new_mmu_context() path, because we
 783		 * already bumped the version in tlb_context_cache.
 784		 */
 785		mm = per_cpu(per_cpu_secondary_mm, cpu);
 786
 787		if (unlikely(!mm || mm == &init_mm))
 788			continue;
 789
 790		old_ctx = mm->context.sparc64_ctx_val;
 791		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
 792			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
 793			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
 794			mm->context.sparc64_ctx_val = new_ctx;
 795		}
 796	}
 797}
 798
 799/* Caller does TLB context flushing on local CPU if necessary.
 800 * The caller also ensures that CTX_VALID(mm->context) is false.
 801 *
 802 * We must be careful about boundary cases so that we never
  803 * let the user have CTX 0 (nucleus) nor ever use a CTX
 804 * version of zero (and thus NO_CONTEXT would not be caught
 805 * by version mis-match tests in mmu_context.h).
 806 *
 807 * Always invoked with interrupts disabled.
 808 */
 809void get_new_mmu_context(struct mm_struct *mm)
 810{
 811	unsigned long ctx, new_ctx;
 812	unsigned long orig_pgsz_bits;
 813
 814	spin_lock(&ctx_alloc_lock);
 815retry:
 816	/* wrap might have happened, test again if our context became valid */
 817	if (unlikely(CTX_VALID(mm->context)))
 818		goto out;
 819	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 820	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 821	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
 822	if (new_ctx >= (1 << CTX_NR_BITS)) {
 823		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 824		if (new_ctx >= ctx) {
 825			mmu_context_wrap();
 826			goto retry;
 827		}
 828	}
 829	if (mm->context.sparc64_ctx_val)
 830		cpumask_clear(mm_cpumask(mm));
 831	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 832	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
 833	tlb_context_cache = new_ctx;
 834	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
 835out:
 836	spin_unlock(&ctx_alloc_lock);
 837}
 838
 839static int numa_enabled = 1;
 840static int numa_debug;
 841
 842static int __init early_numa(char *p)
 843{
 844	if (!p)
 845		return 0;
 846
 847	if (strstr(p, "off"))
 848		numa_enabled = 0;
 849
 850	if (strstr(p, "debug"))
 851		numa_debug = 1;
 852
 853	return 0;
 854}
 855early_param("numa", early_numa);
 856
 857#define numadbg(f, a...) \
 858do {	if (numa_debug) \
 859		printk(KERN_INFO f, ## a); \
 860} while (0)
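/* For example, the numadbg("bootmem_init_numa()\n") call further below
 * only produces output when "numa=debug" was given on the kernel
 * command line (see early_numa() above).
 */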
 861
 862static void __init find_ramdisk(unsigned long phys_base)
 863{
 864#ifdef CONFIG_BLK_DEV_INITRD
 865	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
 866		unsigned long ramdisk_image;
 867
 868		/* Older versions of the bootloader only supported a
 869		 * 32-bit physical address for the ramdisk image
 870		 * location, stored at sparc_ramdisk_image.  Newer
 871		 * SILO versions set sparc_ramdisk_image to zero and
 872		 * provide a full 64-bit physical address at
 873		 * sparc_ramdisk_image64.
 874		 */
 875		ramdisk_image = sparc_ramdisk_image;
 876		if (!ramdisk_image)
 877			ramdisk_image = sparc_ramdisk_image64;
 878
 879		/* Another bootloader quirk.  The bootloader normalizes
 880		 * the physical address to KERNBASE, so we have to
 881		 * factor that back out and add in the lowest valid
 882		 * physical page address to get the true physical address.
 883		 */
 884		ramdisk_image -= KERNBASE;
 885		ramdisk_image += phys_base;
 886
 887		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
 888			ramdisk_image, sparc_ramdisk_size);
 889
 890		initrd_start = ramdisk_image;
 891		initrd_end = ramdisk_image + sparc_ramdisk_size;
 892
 893		memblock_reserve(initrd_start, sparc_ramdisk_size);
 894
 895		initrd_start += PAGE_OFFSET;
 896		initrd_end += PAGE_OFFSET;
 897	}
 898#endif
 899}
 900
 901struct node_mem_mask {
 902	unsigned long mask;
 903	unsigned long match;
 904};
 905static struct node_mem_mask node_masks[MAX_NUMNODES];
 906static int num_node_masks;
 907
 908#ifdef CONFIG_NUMA
 909
 910struct mdesc_mlgroup {
 911	u64	node;
 912	u64	latency;
 913	u64	match;
 914	u64	mask;
 915};
 916
 917static struct mdesc_mlgroup *mlgroups;
 918static int num_mlgroups;
 919
 920int numa_cpu_lookup_table[NR_CPUS];
 921cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 922
 923struct mdesc_mblock {
 924	u64	base;
 925	u64	size;
 926	u64	offset; /* RA-to-PA */
 927};
 928static struct mdesc_mblock *mblocks;
 929static int num_mblocks;
 930
 931static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
 932{
 933	struct mdesc_mblock *m = NULL;
 934	int i;
 935
 936	for (i = 0; i < num_mblocks; i++) {
 937		m = &mblocks[i];
 938
 939		if (addr >= m->base &&
 940		    addr < (m->base + m->size)) {
 941			break;
 942		}
 943	}
 944
 945	return m;
 946}
 947
 948static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
 949{
 950	int prev_nid, new_nid;
 951
 952	prev_nid = NUMA_NO_NODE;
 953	for ( ; start < end; start += PAGE_SIZE) {
 954		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
 955			struct node_mem_mask *p = &node_masks[new_nid];
 956
 957			if ((start & p->mask) == p->match) {
 958				if (prev_nid == NUMA_NO_NODE)
 959					prev_nid = new_nid;
 960				break;
 961			}
 962		}
 963
 964		if (new_nid == num_node_masks) {
 965			prev_nid = 0;
 966			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
 967				  start);
 968			break;
 969		}
 970
 971		if (prev_nid != new_nid)
 972			break;
 973	}
 974	*nid = prev_nid;
 975
 976	return start > end ? end : start;
 977}
 978
 979static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 980{
 981	u64 ret_end, pa_start, m_mask, m_match, m_end;
 982	struct mdesc_mblock *mblock;
 983	int _nid, i;
 984
 985	if (tlb_type != hypervisor)
 986		return memblock_nid_range_sun4u(start, end, nid);
 987
 988	mblock = addr_to_mblock(start);
 989	if (!mblock) {
 990		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
 991			  start);
 992
 993		_nid = 0;
 994		ret_end = end;
 995		goto done;
 996	}
 997
 998	pa_start = start + mblock->offset;
 999	m_match = 0;
1000	m_mask = 0;
1001
1002	for (_nid = 0; _nid < num_node_masks; _nid++) {
1003		struct node_mem_mask *const m = &node_masks[_nid];
1004
1005		if ((pa_start & m->mask) == m->match) {
1006			m_match = m->match;
1007			m_mask = m->mask;
1008			break;
1009		}
1010	}
1011
1012	if (num_node_masks == _nid) {
 1013		/* We could not find a NUMA group, so default to 0, but let's
 1014		 * search for a latency group so we can calculate the correct
 1015		 * end address to return.
1016		 */
1017		_nid = 0;
1018
1019		for (i = 0; i < num_mlgroups; i++) {
1020			struct mdesc_mlgroup *const m = &mlgroups[i];
1021
1022			if ((pa_start & m->mask) == m->match) {
1023				m_match = m->match;
1024				m_mask = m->mask;
1025				break;
1026			}
1027		}
1028
1029		if (i == num_mlgroups) {
1030			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
1031				  start);
1032
1033			ret_end = end;
1034			goto done;
1035		}
1036	}
1037
1038	/*
1039	 * Each latency group has match and mask, and each memory block has an
1040	 * offset.  An address belongs to a latency group if its address matches
1041	 * the following formula: ((addr + offset) & mask) == match
 1042	 * It is, however, slow to check every single page against a
 1043	 * particular latency group. As an optimization we calculate the end
 1044	 * value using bit arithmetic.
1045	 */
1046	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
1047	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
1048	ret_end = m_end > end ? end : m_end;
1049
1050done:
1051	*nid = _nid;
1052	return ret_end;
1053}
1054#endif
1055
1056/* This must be invoked after performing all of the necessary
1057 * memblock_set_node() calls for 'nid'.  We need to be able to get
1058 * correct data from get_pfn_range_for_nid().
1059 */
1060static void __init allocate_node_data(int nid)
1061{
1062	struct pglist_data *p;
1063	unsigned long start_pfn, end_pfn;
1064#ifdef CONFIG_NUMA
1065
1066	NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
1067					     SMP_CACHE_BYTES, nid);
1068	if (!NODE_DATA(nid)) {
1069		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
1070		prom_halt();
1071	}
1072
1073	NODE_DATA(nid)->node_id = nid;
1074#endif
1075
1076	p = NODE_DATA(nid);
1077
1078	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1079	p->node_start_pfn = start_pfn;
1080	p->node_spanned_pages = end_pfn - start_pfn;
1081}
1082
1083static void init_node_masks_nonnuma(void)
1084{
1085#ifdef CONFIG_NUMA
1086	int i;
1087#endif
1088
1089	numadbg("Initializing tables for non-numa.\n");
1090
1091	node_masks[0].mask = 0;
1092	node_masks[0].match = 0;
1093	num_node_masks = 1;
1094
1095#ifdef CONFIG_NUMA
1096	for (i = 0; i < NR_CPUS; i++)
1097		numa_cpu_lookup_table[i] = 0;
1098
1099	cpumask_setall(&numa_cpumask_lookup_table[0]);
1100#endif
1101}
1102
1103#ifdef CONFIG_NUMA
1104struct pglist_data *node_data[MAX_NUMNODES];
1105
1106EXPORT_SYMBOL(numa_cpu_lookup_table);
1107EXPORT_SYMBOL(numa_cpumask_lookup_table);
1108EXPORT_SYMBOL(node_data);
1109
1110static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
1111				   u32 cfg_handle)
1112{
1113	u64 arc;
1114
1115	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
1116		u64 target = mdesc_arc_target(md, arc);
1117		const u64 *val;
1118
1119		val = mdesc_get_property(md, target,
1120					 "cfg-handle", NULL);
1121		if (val && *val == cfg_handle)
1122			return 0;
1123	}
1124	return -ENODEV;
1125}
1126
1127static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
1128				    u32 cfg_handle)
1129{
1130	u64 arc, candidate, best_latency = ~(u64)0;
1131
1132	candidate = MDESC_NODE_NULL;
1133	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1134		u64 target = mdesc_arc_target(md, arc);
1135		const char *name = mdesc_node_name(md, target);
1136		const u64 *val;
1137
1138		if (strcmp(name, "pio-latency-group"))
1139			continue;
1140
1141		val = mdesc_get_property(md, target, "latency", NULL);
1142		if (!val)
1143			continue;
1144
1145		if (*val < best_latency) {
1146			candidate = target;
1147			best_latency = *val;
1148		}
1149	}
1150
1151	if (candidate == MDESC_NODE_NULL)
1152		return -ENODEV;
1153
1154	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1155}
1156
1157int of_node_to_nid(struct device_node *dp)
1158{
1159	const struct linux_prom64_registers *regs;
1160	struct mdesc_handle *md;
1161	u32 cfg_handle;
1162	int count, nid;
1163	u64 grp;
1164
1165	/* This is the right thing to do on currently supported
1166	 * SUN4U NUMA platforms as well, as the PCI controller does
1167	 * not sit behind any particular memory controller.
1168	 */
1169	if (!mlgroups)
1170		return -1;
1171
1172	regs = of_get_property(dp, "reg", NULL);
1173	if (!regs)
1174		return -1;
1175
1176	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1177
1178	md = mdesc_grab();
1179
1180	count = 0;
1181	nid = NUMA_NO_NODE;
1182	mdesc_for_each_node_by_name(md, grp, "group") {
1183		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1184			nid = count;
1185			break;
1186		}
1187		count++;
1188	}
1189
1190	mdesc_release(md);
1191
1192	return nid;
1193}
1194
1195static void __init add_node_ranges(void)
1196{
1197	phys_addr_t start, end;
1198	unsigned long prev_max;
1199	u64 i;
1200
1201memblock_resized:
1202	prev_max = memblock.memory.max;
1203
1204	for_each_mem_range(i, &start, &end) {
1205		while (start < end) {
1206			unsigned long this_end;
1207			int nid;
1208
1209			this_end = memblock_nid_range(start, end, &nid);
1210
1211			numadbg("Setting memblock NUMA node nid[%d] "
1212				"start[%llx] end[%lx]\n",
1213				nid, start, this_end);
1214
1215			memblock_set_node(start, this_end - start,
1216					  &memblock.memory, nid);
1217			if (memblock.memory.max != prev_max)
1218				goto memblock_resized;
1219			start = this_end;
1220		}
1221	}
1222}
1223
1224static int __init grab_mlgroups(struct mdesc_handle *md)
1225{
1226	unsigned long paddr;
1227	int count = 0;
1228	u64 node;
1229
1230	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1231		count++;
1232	if (!count)
1233		return -ENOENT;
1234
1235	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
1236				    SMP_CACHE_BYTES);
1237	if (!paddr)
1238		return -ENOMEM;
1239
1240	mlgroups = __va(paddr);
1241	num_mlgroups = count;
1242
1243	count = 0;
1244	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1245		struct mdesc_mlgroup *m = &mlgroups[count++];
1246		const u64 *val;
1247
1248		m->node = node;
1249
1250		val = mdesc_get_property(md, node, "latency", NULL);
1251		m->latency = *val;
1252		val = mdesc_get_property(md, node, "address-match", NULL);
1253		m->match = *val;
1254		val = mdesc_get_property(md, node, "address-mask", NULL);
1255		m->mask = *val;
1256
1257		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1258			"match[%llx] mask[%llx]\n",
1259			count - 1, m->node, m->latency, m->match, m->mask);
1260	}
1261
1262	return 0;
1263}
1264
1265static int __init grab_mblocks(struct mdesc_handle *md)
1266{
1267	unsigned long paddr;
1268	int count = 0;
1269	u64 node;
1270
1271	mdesc_for_each_node_by_name(md, node, "mblock")
1272		count++;
1273	if (!count)
1274		return -ENOENT;
1275
1276	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
1277				    SMP_CACHE_BYTES);
1278	if (!paddr)
1279		return -ENOMEM;
1280
1281	mblocks = __va(paddr);
1282	num_mblocks = count;
1283
1284	count = 0;
1285	mdesc_for_each_node_by_name(md, node, "mblock") {
1286		struct mdesc_mblock *m = &mblocks[count++];
1287		const u64 *val;
1288
1289		val = mdesc_get_property(md, node, "base", NULL);
1290		m->base = *val;
1291		val = mdesc_get_property(md, node, "size", NULL);
1292		m->size = *val;
1293		val = mdesc_get_property(md, node,
1294					 "address-congruence-offset", NULL);
1295
1296		/* The address-congruence-offset property is optional.
 1297		 * Explicitly zero it to identify this case.
1298		 */
1299		if (val)
1300			m->offset = *val;
1301		else
1302			m->offset = 0UL;
1303
1304		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1305			count - 1, m->base, m->size, m->offset);
1306	}
1307
1308	return 0;
1309}
1310
1311static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1312					       u64 grp, cpumask_t *mask)
1313{
1314	u64 arc;
1315
1316	cpumask_clear(mask);
1317
1318	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1319		u64 target = mdesc_arc_target(md, arc);
1320		const char *name = mdesc_node_name(md, target);
1321		const u64 *id;
1322
1323		if (strcmp(name, "cpu"))
1324			continue;
1325		id = mdesc_get_property(md, target, "id", NULL);
1326		if (*id < nr_cpu_ids)
1327			cpumask_set_cpu(*id, mask);
1328	}
1329}
1330
1331static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1332{
1333	int i;
1334
1335	for (i = 0; i < num_mlgroups; i++) {
1336		struct mdesc_mlgroup *m = &mlgroups[i];
1337		if (m->node == node)
1338			return m;
1339	}
1340	return NULL;
1341}
1342
1343int __node_distance(int from, int to)
1344{
1345	if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1346		pr_warn("Returning default NUMA distance value for %d->%d\n",
1347			from, to);
1348		return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1349	}
1350	return numa_latency[from][to];
1351}
1352EXPORT_SYMBOL(__node_distance);
1353
1354static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1355{
1356	int i;
1357
1358	for (i = 0; i < MAX_NUMNODES; i++) {
1359		struct node_mem_mask *n = &node_masks[i];
1360
1361		if ((grp->mask == n->mask) && (grp->match == n->match))
1362			break;
1363	}
1364	return i;
1365}
1366
1367static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1368						 u64 grp, int index)
1369{
1370	u64 arc;
1371
1372	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1373		int tnode;
1374		u64 target = mdesc_arc_target(md, arc);
1375		struct mdesc_mlgroup *m = find_mlgroup(target);
1376
1377		if (!m)
1378			continue;
1379		tnode = find_best_numa_node_for_mlgroup(m);
1380		if (tnode == MAX_NUMNODES)
1381			continue;
1382		numa_latency[index][tnode] = m->latency;
1383	}
1384}
1385
1386static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1387				      int index)
1388{
1389	struct mdesc_mlgroup *candidate = NULL;
1390	u64 arc, best_latency = ~(u64)0;
1391	struct node_mem_mask *n;
1392
1393	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1394		u64 target = mdesc_arc_target(md, arc);
1395		struct mdesc_mlgroup *m = find_mlgroup(target);
1396		if (!m)
1397			continue;
1398		if (m->latency < best_latency) {
1399			candidate = m;
1400			best_latency = m->latency;
1401		}
1402	}
1403	if (!candidate)
1404		return -ENOENT;
1405
1406	if (num_node_masks != index) {
1407		printk(KERN_ERR "Inconsistent NUMA state, "
1408		       "index[%d] != num_node_masks[%d]\n",
1409		       index, num_node_masks);
1410		return -EINVAL;
1411	}
1412
1413	n = &node_masks[num_node_masks++];
1414
1415	n->mask = candidate->mask;
1416	n->match = candidate->match;
1417
1418	numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1419		index, n->mask, n->match, candidate->latency);
1420
1421	return 0;
1422}
1423
1424static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1425					 int index)
1426{
1427	cpumask_t mask;
1428	int cpu;
1429
1430	numa_parse_mdesc_group_cpus(md, grp, &mask);
1431
1432	for_each_cpu(cpu, &mask)
1433		numa_cpu_lookup_table[cpu] = index;
1434	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1435
1436	if (numa_debug) {
1437		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1438		for_each_cpu(cpu, &mask)
1439			printk("%d ", cpu);
1440		printk("]\n");
1441	}
1442
1443	return numa_attach_mlgroup(md, grp, index);
1444}
1445
1446static int __init numa_parse_mdesc(void)
1447{
1448	struct mdesc_handle *md = mdesc_grab();
1449	int i, j, err, count;
1450	u64 node;
1451
1452	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1453	if (node == MDESC_NODE_NULL) {
1454		mdesc_release(md);
1455		return -ENOENT;
1456	}
1457
1458	err = grab_mblocks(md);
1459	if (err < 0)
1460		goto out;
1461
1462	err = grab_mlgroups(md);
1463	if (err < 0)
1464		goto out;
1465
1466	count = 0;
1467	mdesc_for_each_node_by_name(md, node, "group") {
1468		err = numa_parse_mdesc_group(md, node, count);
1469		if (err < 0)
1470			break;
1471		count++;
1472	}
1473
1474	count = 0;
1475	mdesc_for_each_node_by_name(md, node, "group") {
1476		find_numa_latencies_for_group(md, node, count);
1477		count++;
1478	}
1479
1480	/* Normalize numa latency matrix according to ACPI SLIT spec. */
1481	for (i = 0; i < MAX_NUMNODES; i++) {
1482		u64 self_latency = numa_latency[i][i];
1483
1484		for (j = 0; j < MAX_NUMNODES; j++) {
1485			numa_latency[i][j] =
1486				(numa_latency[i][j] * LOCAL_DISTANCE) /
1487				self_latency;
1488		}
1489	}
1490
1491	add_node_ranges();
1492
1493	for (i = 0; i < num_node_masks; i++) {
1494		allocate_node_data(i);
1495		node_set_online(i);
1496	}
1497
1498	err = 0;
1499out:
1500	mdesc_release(md);
1501	return err;
1502}
1503
1504static int __init numa_parse_jbus(void)
1505{
1506	unsigned long cpu, index;
1507
1508	/* NUMA node id is encoded in bits 36 and higher, and there is
1509	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1510	 */
1511	index = 0;
1512	for_each_present_cpu(cpu) {
1513		numa_cpu_lookup_table[cpu] = index;
1514		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1515		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1516		node_masks[index].match = cpu << 36UL;
1517
1518		index++;
1519	}
1520	num_node_masks = index;
1521
1522	add_node_ranges();
1523
1524	for (index = 0; index < num_node_masks; index++) {
1525		allocate_node_data(index);
1526		node_set_online(index);
1527	}
1528
1529	return 0;
1530}
1531
1532static int __init numa_parse_sun4u(void)
1533{
1534	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1535		unsigned long ver;
1536
1537		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
1538		if ((ver >> 32UL) == __JALAPENO_ID ||
1539		    (ver >> 32UL) == __SERRANO_ID)
1540			return numa_parse_jbus();
1541	}
1542	return -1;
1543}
1544
1545static int __init bootmem_init_numa(void)
1546{
1547	int i, j;
1548	int err = -1;
1549
1550	numadbg("bootmem_init_numa()\n");
1551
1552	/* Some sane defaults for numa latency values */
1553	for (i = 0; i < MAX_NUMNODES; i++) {
1554		for (j = 0; j < MAX_NUMNODES; j++)
1555			numa_latency[i][j] = (i == j) ?
1556				LOCAL_DISTANCE : REMOTE_DISTANCE;
1557	}
1558
1559	if (numa_enabled) {
1560		if (tlb_type == hypervisor)
1561			err = numa_parse_mdesc();
1562		else
1563			err = numa_parse_sun4u();
1564	}
1565	return err;
1566}
1567
1568#else
1569
1570static int bootmem_init_numa(void)
1571{
1572	return -1;
1573}
1574
1575#endif
1576
1577static void __init bootmem_init_nonnuma(void)
1578{
1579	unsigned long top_of_ram = memblock_end_of_DRAM();
1580	unsigned long total_ram = memblock_phys_mem_size();
1581
1582	numadbg("bootmem_init_nonnuma()\n");
1583
1584	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1585	       top_of_ram, total_ram);
1586	printk(KERN_INFO "Memory hole size: %ldMB\n",
1587	       (top_of_ram - total_ram) >> 20);
1588
1589	init_node_masks_nonnuma();
1590	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
1591	allocate_node_data(0);
1592	node_set_online(0);
1593}
1594
1595static unsigned long __init bootmem_init(unsigned long phys_base)
1596{
1597	unsigned long end_pfn;
1598
1599	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1600	max_pfn = max_low_pfn = end_pfn;
1601	min_low_pfn = (phys_base >> PAGE_SHIFT);
1602
1603	if (bootmem_init_numa() < 0)
1604		bootmem_init_nonnuma();
1605
1606	/* Dump memblock with node info. */
1607	memblock_dump_all();
1608
1609	/* XXX cpu notifier XXX */
1610
1611	sparse_init();
1612
1613	return end_pfn;
1614}
1615
1616static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1617static int pall_ents __initdata;
1618
1619static unsigned long max_phys_bits = 40;
1620
1621bool kern_addr_valid(unsigned long addr)
1622{
1623	pgd_t *pgd;
1624	p4d_t *p4d;
1625	pud_t *pud;
1626	pmd_t *pmd;
1627	pte_t *pte;
1628
1629	if ((long)addr < 0L) {
1630		unsigned long pa = __pa(addr);
1631
1632		if ((pa >> max_phys_bits) != 0UL)
1633			return false;
1634
1635		return pfn_valid(pa >> PAGE_SHIFT);
1636	}
1637
1638	if (addr >= (unsigned long) KERNBASE &&
1639	    addr < (unsigned long)&_end)
1640		return true;
1641
1642	pgd = pgd_offset_k(addr);
1643	if (pgd_none(*pgd))
1644		return false;
1645
1646	p4d = p4d_offset(pgd, addr);
1647	if (p4d_none(*p4d))
1648		return false;
1649
1650	pud = pud_offset(p4d, addr);
1651	if (pud_none(*pud))
1652		return false;
1653
1654	if (pud_large(*pud))
1655		return pfn_valid(pud_pfn(*pud));
1656
1657	pmd = pmd_offset(pud, addr);
1658	if (pmd_none(*pmd))
1659		return false;
1660
1661	if (pmd_large(*pmd))
1662		return pfn_valid(pmd_pfn(*pmd));
1663
1664	pte = pte_offset_kernel(pmd, addr);
1665	if (pte_none(*pte))
1666		return false;
1667
1668	return pfn_valid(pte_pfn(*pte));
1669}
1670
1671static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1672					      unsigned long vend,
1673					      pud_t *pud)
1674{
1675	const unsigned long mask16gb = (1UL << 34) - 1UL;
1676	u64 pte_val = vstart;
1677
1678	/* Each PUD is 8GB */
1679	if ((vstart & mask16gb) ||
1680	    (vend - vstart <= mask16gb)) {
1681		pte_val ^= kern_linear_pte_xor[2];
1682		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1683
1684		return vstart + PUD_SIZE;
1685	}
1686
1687	pte_val ^= kern_linear_pte_xor[3];
1688	pte_val |= _PAGE_PUD_HUGE;
1689
1690	vend = vstart + mask16gb + 1UL;
1691	while (vstart < vend) {
1692		pud_val(*pud) = pte_val;
1693
1694		pte_val += PUD_SIZE;
1695		vstart += PUD_SIZE;
1696		pud++;
1697	}
1698	return vstart;
1699}
1700
1701static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1702				   bool guard)
1703{
1704	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1705		return true;
1706
1707	return false;
1708}
1709
1710static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1711					      unsigned long vend,
1712					      pmd_t *pmd)
1713{
1714	const unsigned long mask256mb = (1UL << 28) - 1UL;
1715	const unsigned long mask2gb = (1UL << 31) - 1UL;
1716	u64 pte_val = vstart;
1717
1718	/* Each PMD is 8MB */
1719	if ((vstart & mask256mb) ||
1720	    (vend - vstart <= mask256mb)) {
1721		pte_val ^= kern_linear_pte_xor[0];
1722		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1723
1724		return vstart + PMD_SIZE;
1725	}
1726
1727	if ((vstart & mask2gb) ||
1728	    (vend - vstart <= mask2gb)) {
1729		pte_val ^= kern_linear_pte_xor[1];
1730		pte_val |= _PAGE_PMD_HUGE;
1731		vend = vstart + mask256mb + 1UL;
1732	} else {
1733		pte_val ^= kern_linear_pte_xor[2];
1734		pte_val |= _PAGE_PMD_HUGE;
1735		vend = vstart + mask2gb + 1UL;
1736	}
1737
1738	while (vstart < vend) {
1739		pmd_val(*pmd) = pte_val;
1740
1741		pte_val += PMD_SIZE;
1742		vstart += PMD_SIZE;
1743		pmd++;
1744	}
1745
1746	return vstart;
1747}
1748
1749static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1750				   bool guard)
1751{
1752	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1753		return true;
1754
1755	return false;
1756}
1757
1758static unsigned long __ref kernel_map_range(unsigned long pstart,
1759					    unsigned long pend, pgprot_t prot,
1760					    bool use_huge)
1761{
1762	unsigned long vstart = PAGE_OFFSET + pstart;
1763	unsigned long vend = PAGE_OFFSET + pend;
1764	unsigned long alloc_bytes = 0UL;
1765
1766	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1767		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1768			    vstart, vend);
1769		prom_halt();
1770	}
1771
1772	while (vstart < vend) {
1773		unsigned long this_end, paddr = __pa(vstart);
1774		pgd_t *pgd = pgd_offset_k(vstart);
1775		p4d_t *p4d;
1776		pud_t *pud;
1777		pmd_t *pmd;
1778		pte_t *pte;
1779
1780		if (pgd_none(*pgd)) {
1781			pud_t *new;
1782
1783			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1784						  PAGE_SIZE);
1785			if (!new)
1786				goto err_alloc;
1787			alloc_bytes += PAGE_SIZE;
1788			pgd_populate(&init_mm, pgd, new);
1789		}
1790
1791		p4d = p4d_offset(pgd, vstart);
1792		if (p4d_none(*p4d)) {
1793			pud_t *new;
1794
1795			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1796						  PAGE_SIZE);
1797			if (!new)
1798				goto err_alloc;
1799			alloc_bytes += PAGE_SIZE;
1800			p4d_populate(&init_mm, p4d, new);
1801		}
1802
1803		pud = pud_offset(p4d, vstart);
1804		if (pud_none(*pud)) {
1805			pmd_t *new;
1806
1807			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1808				vstart = kernel_map_hugepud(vstart, vend, pud);
1809				continue;
1810			}
1811			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1812						  PAGE_SIZE);
1813			if (!new)
1814				goto err_alloc;
1815			alloc_bytes += PAGE_SIZE;
1816			pud_populate(&init_mm, pud, new);
1817		}
1818
1819		pmd = pmd_offset(pud, vstart);
1820		if (pmd_none(*pmd)) {
1821			pte_t *new;
1822
1823			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1824				vstart = kernel_map_hugepmd(vstart, vend, pmd);
1825				continue;
1826			}
1827			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1828						  PAGE_SIZE);
1829			if (!new)
1830				goto err_alloc;
1831			alloc_bytes += PAGE_SIZE;
1832			pmd_populate_kernel(&init_mm, pmd, new);
1833		}
1834
1835		pte = pte_offset_kernel(pmd, vstart);
1836		this_end = (vstart + PMD_SIZE) & PMD_MASK;
1837		if (this_end > vend)
1838			this_end = vend;
1839
1840		while (vstart < this_end) {
1841			pte_val(*pte) = (paddr | pgprot_val(prot));
1842
1843			vstart += PAGE_SIZE;
1844			paddr += PAGE_SIZE;
1845			pte++;
1846		}
1847	}
1848
1849	return alloc_bytes;
1850
1851err_alloc:
1852	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
1853	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1854	return -ENOMEM;
1855}
1856
1857static void __init flush_all_kernel_tsbs(void)
1858{
1859	int i;
1860
1861	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1862		struct tsb *ent = &swapper_tsb[i];
1863
1864		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1865	}
1866#ifndef CONFIG_DEBUG_PAGEALLOC
1867	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1868		struct tsb *ent = &swapper_4m_tsb[i];
1869
1870		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1871	}
1872#endif
1873}
1874
1875extern unsigned int kvmap_linear_patch[1];
1876
1877static void __init kernel_physical_mapping_init(void)
1878{
1879	unsigned long i, mem_alloced = 0UL;
1880	bool use_huge = true;
1881
1882#ifdef CONFIG_DEBUG_PAGEALLOC
1883	use_huge = false;
1884#endif
1885	for (i = 0; i < pall_ents; i++) {
1886		unsigned long phys_start, phys_end;
1887
1888		phys_start = pall[i].phys_addr;
1889		phys_end = phys_start + pall[i].reg_size;
1890
1891		mem_alloced += kernel_map_range(phys_start, phys_end,
1892						PAGE_KERNEL, use_huge);
1893	}
1894
1895	printk("Allocated %ld bytes for kernel page tables.\n",
1896	       mem_alloced);
1897
1898	kvmap_linear_patch[0] = 0x01000000; /* nop */
1899	flushi(&kvmap_linear_patch[0]);
1900
1901	flush_all_kernel_tsbs();
1902
1903	__flush_tlb_all();
1904}
1905
1906#ifdef CONFIG_DEBUG_PAGEALLOC
1907void __kernel_map_pages(struct page *page, int numpages, int enable)
1908{
1909	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1910	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1911
1912	kernel_map_range(phys_start, phys_end,
1913			 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1914
1915	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1916			       PAGE_OFFSET + phys_end);
1917
 1918	/* We should perform an IPI and flush all tlbs,
 1919	 * but that can deadlock, so flush only the current cpu.
1920	 */
1921	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1922				 PAGE_OFFSET + phys_end);
1923}
1924#endif
1925
1926unsigned long __init find_ecache_flush_span(unsigned long size)
1927{
1928	int i;
1929
1930	for (i = 0; i < pavail_ents; i++) {
1931		if (pavail[i].reg_size >= size)
1932			return pavail[i].phys_addr;
1933	}
1934
1935	return ~0UL;
1936}
1937
1938unsigned long PAGE_OFFSET;
1939EXPORT_SYMBOL(PAGE_OFFSET);
1940
1941unsigned long VMALLOC_END   = 0x0000010000000000UL;
1942EXPORT_SYMBOL(VMALLOC_END);
1943
1944unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
1945unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1946
1947static void __init setup_page_offset(void)
1948{
1949	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1950		/* Cheetah/Panther support a full 64-bit virtual
1951		 * address, so we can use all that our page tables
1952		 * support.
1953		 */
1954		sparc64_va_hole_top =    0xfff0000000000000UL;
1955		sparc64_va_hole_bottom = 0x0010000000000000UL;
1956
1957		max_phys_bits = 42;
1958	} else if (tlb_type == hypervisor) {
1959		switch (sun4v_chip_type) {
1960		case SUN4V_CHIP_NIAGARA1:
1961		case SUN4V_CHIP_NIAGARA2:
1962			/* T1 and T2 support 48-bit virtual addresses.  */
1963			sparc64_va_hole_top =    0xffff800000000000UL;
1964			sparc64_va_hole_bottom = 0x0000800000000000UL;
1965
1966			max_phys_bits = 39;
1967			break;
1968		case SUN4V_CHIP_NIAGARA3:
1969			/* T3 supports 48-bit virtual addresses.  */
1970			sparc64_va_hole_top =    0xffff800000000000UL;
1971			sparc64_va_hole_bottom = 0x0000800000000000UL;
1972
1973			max_phys_bits = 43;
1974			break;
1975		case SUN4V_CHIP_NIAGARA4:
1976		case SUN4V_CHIP_NIAGARA5:
1977		case SUN4V_CHIP_SPARC64X:
1978		case SUN4V_CHIP_SPARC_M6:
1979			/* T4 and later support 52-bit virtual addresses.  */
1980			sparc64_va_hole_top =    0xfff8000000000000UL;
1981			sparc64_va_hole_bottom = 0x0008000000000000UL;
1982			max_phys_bits = 47;
1983			break;
1984		case SUN4V_CHIP_SPARC_M7:
1985		case SUN4V_CHIP_SPARC_SN:
1986			/* M7 and later support 52-bit virtual addresses.  */
1987			sparc64_va_hole_top =    0xfff8000000000000UL;
1988			sparc64_va_hole_bottom = 0x0008000000000000UL;
1989			max_phys_bits = 49;
1990			break;
1991		case SUN4V_CHIP_SPARC_M8:
1992		default:
1993			/* M8 and later support 54-bit virtual addresses.
1994			 * However, restricting M8 and above VA bits to 53
1995			 * as 4-level page table cannot support more than
1996			 * 53 VA bits.
1997			 */
1998			sparc64_va_hole_top =    0xfff0000000000000UL;
1999			sparc64_va_hole_bottom = 0x0010000000000000UL;
2000			max_phys_bits = 51;
2001			break;
2002		}
2003	}
2004
2005	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
2006		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
2007			    max_phys_bits);
2008		prom_halt();
2009	}
2010
2011	PAGE_OFFSET = sparc64_va_hole_top;
2012	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
2013		       (sparc64_va_hole_bottom >> 2));
2014
2015	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
2016		PAGE_OFFSET, max_phys_bits);
2017	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
2018		VMALLOC_START, VMALLOC_END);
2019	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
2020		VMEMMAP_BASE, VMEMMAP_BASE << 1);
2021}
2022
2023static void __init tsb_phys_patch(void)
2024{
2025	struct tsb_ldquad_phys_patch_entry *pquad;
2026	struct tsb_phys_patch_entry *p;
2027
2028	pquad = &__tsb_ldquad_phys_patch;
2029	while (pquad < &__tsb_ldquad_phys_patch_end) {
2030		unsigned long addr = pquad->addr;
2031
2032		if (tlb_type == hypervisor)
2033			*(unsigned int *) addr = pquad->sun4v_insn;
2034		else
2035			*(unsigned int *) addr = pquad->sun4u_insn;
2036		wmb();
2037		__asm__ __volatile__("flush	%0"
2038				     : /* no outputs */
2039				     : "r" (addr));
2040
2041		pquad++;
2042	}
2043
2044	p = &__tsb_phys_patch;
2045	while (p < &__tsb_phys_patch_end) {
2046		unsigned long addr = p->addr;
2047
2048		*(unsigned int *) addr = p->insn;
2049		wmb();
2050		__asm__ __volatile__("flush	%0"
2051				     : /* no outputs */
2052				     : "r" (addr));
2053
2054		p++;
2055	}
2056}
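
/* Note: each patch entry consumed above records the address of a TSB
 * access instruction along with its sun4u and sun4v encodings.  We store
 * the variant matching the running cpu over the original instruction,
 * order the store with wmb(), and issue a "flush" on that address so the
 * I-cache fetches the new encoding before the code next runs.
 */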
2057
2058/* Don't mark as init, we give this to the Hypervisor.  */
2059#ifndef CONFIG_DEBUG_PAGEALLOC
2060#define NUM_KTSB_DESCR	2
2061#else
2062#define NUM_KTSB_DESCR	1
2063#endif
2064static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
2065
2066/* The swapper TSBs are loaded with a base sequence of:
2067 *
2068 *	sethi	%uhi(SYMBOL), REG1
2069 *	sethi	%hi(SYMBOL), REG2
2070 *	or	REG1, %ulo(SYMBOL), REG1
2071 *	or	REG2, %lo(SYMBOL), REG2
2072 *	sllx	REG1, 32, REG1
2073 *	or	REG1, REG2, REG1
2074 *
2075 * When we use physical addressing for the TSB accesses, we patch the
2076 * first four instructions in the above sequence.
2077 */
2078
2079static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2080{
2081	unsigned long high_bits, low_bits;
2082
2083	high_bits = (pa >> 32) & 0xffffffff;
2084	low_bits = (pa >> 0) & 0xffffffff;
2085
2086	while (start < end) {
2087		unsigned int *ia = (unsigned int *)(unsigned long)*start;
2088
2089		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
2090		__asm__ __volatile__("flush	%0" : : "r" (ia));
2091
2092		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
2093		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));
2094
2095		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2096		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));
2097
2098		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2099		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));
2100
2101		start++;
2102	}
2103}
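
/* Note on the patching above: each 32-bit half of the TSB physical
 * address is built with a sethi/or pair.  sethi's 22-bit immediate
 * carries bits [31:10] of the value (hence the ">> 10" into the
 * 0x3fffff field), and the or instruction's 13-bit immediate carries
 * the remaining low 10 bits (hence the "& 0x3ff" into the 0x1fff field).
 */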
2104
2105static void ktsb_phys_patch(void)
2106{
2107	extern unsigned int __swapper_tsb_phys_patch;
2108	extern unsigned int __swapper_tsb_phys_patch_end;
2109	unsigned long ktsb_pa;
2110
2111	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2112	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2113			    &__swapper_tsb_phys_patch_end, ktsb_pa);
2114#ifndef CONFIG_DEBUG_PAGEALLOC
2115	{
2116	extern unsigned int __swapper_4m_tsb_phys_patch;
2117	extern unsigned int __swapper_4m_tsb_phys_patch_end;
2118	ktsb_pa = (kern_base +
2119		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2120	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2121			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
2122	}
2123#endif
2124}
2125
2126static void __init sun4v_ktsb_init(void)
2127{
2128	unsigned long ktsb_pa;
2129
2130	/* First KTSB for PAGE_SIZE mappings.  */
2131	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2132
2133	switch (PAGE_SIZE) {
2134	case 8 * 1024:
2135	default:
2136		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2137		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2138		break;
2139
2140	case 64 * 1024:
2141		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2142		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2143		break;
2144
2145	case 512 * 1024:
2146		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2147		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2148		break;
2149
2150	case 4 * 1024 * 1024:
2151		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2152		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2153		break;
2154	}
2155
2156	ktsb_descr[0].assoc = 1;
2157	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2158	ktsb_descr[0].ctx_idx = 0;
2159	ktsb_descr[0].tsb_base = ktsb_pa;
2160	ktsb_descr[0].resv = 0;
2161
2162#ifndef CONFIG_DEBUG_PAGEALLOC
2163	/* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
2164	ktsb_pa = (kern_base +
2165		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2166
2167	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2168	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2169				    HV_PGSZ_MASK_256MB |
2170				    HV_PGSZ_MASK_2GB |
2171				    HV_PGSZ_MASK_16GB) &
2172				   cpu_pgsz_mask);
2173	ktsb_descr[1].assoc = 1;
2174	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2175	ktsb_descr[1].ctx_idx = 0;
2176	ktsb_descr[1].tsb_base = ktsb_pa;
2177	ktsb_descr[1].resv = 0;
2178#endif
2179}
2180
2181void sun4v_ktsb_register(void)
2182{
2183	unsigned long pa, ret;
2184
2185	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2186
2187	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2188	if (ret != 0) {
2189		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2190			    "errors with %lx\n", pa, ret);
2191		prom_halt();
2192	}
2193}
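
/* Note: the call above passes the hypervisor the physical address of
 * the ktsb_descr[] array so that kernel (context 0) TSB misses are
 * serviced from the swapper TSBs described by those descriptors.  A
 * non-zero return is fatal, so we report it via the PROM and halt.
 */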
2194
2195static void __init sun4u_linear_pte_xor_finalize(void)
2196{
2197#ifndef CONFIG_DEBUG_PAGEALLOC
2198	/* This is where we would add Panther support for
2199	 * 32MB and 256MB pages.
2200	 */
2201#endif
2202}
2203
2204static void __init sun4v_linear_pte_xor_finalize(void)
2205{
2206	unsigned long pagecv_flag;
2207
2208	/* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
2209	 * enables MCD error. Do not set bit 9 on M7 processor.
2210	 */
2211	switch (sun4v_chip_type) {
2212	case SUN4V_CHIP_SPARC_M7:
2213	case SUN4V_CHIP_SPARC_M8:
2214	case SUN4V_CHIP_SPARC_SN:
2215		pagecv_flag = 0x00;
2216		break;
2217	default:
2218		pagecv_flag = _PAGE_CV_4V;
2219		break;
2220	}
2221#ifndef CONFIG_DEBUG_PAGEALLOC
2222	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2223		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2224			PAGE_OFFSET;
2225		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2226					   _PAGE_P_4V | _PAGE_W_4V);
2227	} else {
2228		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2229	}
2230
2231	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2232		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2233			PAGE_OFFSET;
2234		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2235					   _PAGE_P_4V | _PAGE_W_4V);
2236	} else {
2237		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2238	}
2239
2240	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2241		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2242			PAGE_OFFSET;
2243		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2244					   _PAGE_P_4V | _PAGE_W_4V);
2245	} else {
2246		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2247	}
2248#endif
2249}
2250
2251/* paging_init() sets up the page tables */
2252
2253static unsigned long last_valid_pfn;
2254
2255static void sun4u_pgprot_init(void);
2256static void sun4v_pgprot_init(void);
2257
2258#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
2259#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
2260#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2261#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2262#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2263#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2264
2265/* We need to exclude reserved regions. This exclusion will include
2266 * vmlinux and initrd. To be more precise the initrd size could be used to
2267 * compute a new lower limit because it is freed later during initialization.
2268 */
2269static void __init reduce_memory(phys_addr_t limit_ram)
2270{
2271	limit_ram += memblock_reserved_size();
2272	memblock_enforce_memory_limit(limit_ram);
2273}
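
/* Note: the limit handed to memblock above is raised by however much
 * memblock has already reserved (kernel image, initrd, ...), so that
 * roughly limit_ram bytes of usable memory remain once
 * memblock_enforce_memory_limit() trims the memory map.
 */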
2274
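/* Rough order of operations in paging_init() below:
 *
 *  1) Pick the virtual address layout (setup_page_offset) and the
 *     sun4u vs. sun4v page protection encodings.
 *  2) Patch TSB accesses to use physical addressing where supported,
 *     and the sun4v TLB miss handlers when running on the hypervisor.
 *  3) Read the physical memory layout and OBP translations from the
 *     firmware and register the available regions with memblock.
 *  4) Remap and lock the kernel image TLB entries, install our trap
 *     table, and build the device tree / machine description state.
 *  5) Finalize the linear mapping TTE bits for the supported page
 *     sizes, then set up bootmem, the kernel physical mapping, and
 *     the zone lists.
 */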
2275void __init paging_init(void)
2276{
2277	unsigned long end_pfn, shift, phys_base;
2278	unsigned long real_end, i;
2279
2280	setup_page_offset();
2281
2282	/* These build-time checks make sure that the dcache_dirty_cpu()
2283	 * page->flags usage will work.
2284	 *
2285	 * When a page gets marked as dcache-dirty, we store the
2286	 * cpu number starting at bit 32 in the page->flags.  Also,
2287	 * functions like clear_dcache_dirty_cpu use the cpu mask
2288	 * in 13-bit signed-immediate instruction fields.
2289	 */
2290
2291	/*
2292	 * Page flags must not reach into upper 32 bits that are used
2293	 * for the cpu number
2294	 */
2295	BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2296
2297	/*
2298	 * The bit fields placed in the high range must not reach below
2299	 * the 32 bit boundary. Otherwise we cannot place the cpu field
2300	 * at the 32 bit boundary.
2301	 */
2302	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2303		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2304
2305	BUILD_BUG_ON(NR_CPUS > 4096);
2306
2307	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2308	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2309
2310	/* Invalidate both kernel TSBs.  */
2311	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2312#ifndef CONFIG_DEBUG_PAGEALLOC
2313	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2314#endif
2315
2316	/* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2317	 * bit on M7 processor. This is a conflicting usage of the same
2318	 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
2319	 * Detection error on all pages and this will lead to problems
2320	 * later.  The kernel does not run with MCD enabled, so the rest
2321	 * of the steps required to fully configure memory corruption
2322	 * detection are not taken.  We need to ensure TTE.mcde is not
2323	 * set on M7 processor. Compute the value of cacheability
2324	 * flag for use later taking this into consideration.
2325	 */
2326	switch (sun4v_chip_type) {
2327	case SUN4V_CHIP_SPARC_M7:
2328	case SUN4V_CHIP_SPARC_M8:
2329	case SUN4V_CHIP_SPARC_SN:
2330		page_cache4v_flag = _PAGE_CP_4V;
2331		break;
2332	default:
2333		page_cache4v_flag = _PAGE_CACHE_4V;
2334		break;
2335	}
2336
2337	if (tlb_type == hypervisor)
2338		sun4v_pgprot_init();
2339	else
2340		sun4u_pgprot_init();
2341
2342	if (tlb_type == cheetah_plus ||
2343	    tlb_type == hypervisor) {
2344		tsb_phys_patch();
2345		ktsb_phys_patch();
2346	}
2347
2348	if (tlb_type == hypervisor)
2349		sun4v_patch_tlb_handlers();
2350
2351	/* Find available physical memory...
2352	 *
2353	 * Read it twice in order to work around a bug in openfirmware.
2354	 * The call to grab this table itself can cause openfirmware to
2355	 * allocate memory, which in turn can take away some space from
2356	 * the list of available memory.  Reading it twice makes sure
2357	 * we really do get the final value.
2358	 */
2359	read_obp_translations();
2360	read_obp_memory("reg", &pall[0], &pall_ents);
2361	read_obp_memory("available", &pavail[0], &pavail_ents);
2362	read_obp_memory("available", &pavail[0], &pavail_ents);
2363
2364	phys_base = 0xffffffffffffffffUL;
2365	for (i = 0; i < pavail_ents; i++) {
2366		phys_base = min(phys_base, pavail[i].phys_addr);
2367		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2368	}
2369
2370	memblock_reserve(kern_base, kern_size);
2371
2372	find_ramdisk(phys_base);
2373
2374	if (cmdline_memory_size)
2375		reduce_memory(cmdline_memory_size);
2376
2377	memblock_allow_resize();
2378	memblock_dump_all();
2379
2380	set_bit(0, mmu_context_bmap);
2381
2382	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2383
2384	real_end = (unsigned long)_end;
2385	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2386	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2387	       num_kernel_image_mappings);
2388
2389	/* Set kernel pgd to upper alias so physical page computations
2390	 * work.
2391	 */
2392	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
2393	
2394	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2395
2396	inherit_prom_mappings();
2397	
2398	/* Ok, we can use our TLB miss and window trap handlers safely.  */
2399	setup_tba();
2400
2401	__flush_tlb_all();
2402
2403	prom_build_devicetree();
2404	of_populate_present_mask();
2405#ifndef CONFIG_SMP
2406	of_fill_in_cpu_data();
2407#endif
2408
2409	if (tlb_type == hypervisor) {
2410		sun4v_mdesc_init();
2411		mdesc_populate_present_mask(cpu_all_mask);
2412#ifndef CONFIG_SMP
2413		mdesc_fill_in_cpu_data(cpu_all_mask);
2414#endif
2415		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2416
2417		sun4v_linear_pte_xor_finalize();
2418
2419		sun4v_ktsb_init();
2420		sun4v_ktsb_register();
2421	} else {
2422		unsigned long impl, ver;
2423
2424		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2425				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2426
2427		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2428		impl = ((ver >> 32) & 0xffff);
2429		if (impl == PANTHER_IMPL)
2430			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2431					  HV_PGSZ_MASK_256MB);
2432
2433		sun4u_linear_pte_xor_finalize();
2434	}
2435
2436	/* Flush the TLBs and the 4M TSB so that the updated linear
2437	 * pte XOR settings are realized for all mappings.
2438	 */
2439	__flush_tlb_all();
2440#ifndef CONFIG_DEBUG_PAGEALLOC
2441	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2442#endif
2443	__flush_tlb_all();
2444
2445	/* Setup bootmem... */
2446	last_valid_pfn = end_pfn = bootmem_init(phys_base);
2447
2448	kernel_physical_mapping_init();
2449
2450	{
2451		unsigned long max_zone_pfns[MAX_NR_ZONES];
2452
2453		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2454
2455		max_zone_pfns[ZONE_NORMAL] = end_pfn;
2456
2457		free_area_init(max_zone_pfns);
2458	}
2459
2460	printk("Booting Linux...\n");
2461}
2462
2463int page_in_phys_avail(unsigned long paddr)
2464{
2465	int i;
2466
2467	paddr &= PAGE_MASK;
2468
2469	for (i = 0; i < pavail_ents; i++) {
2470		unsigned long start, end;
2471
2472		start = pavail[i].phys_addr;
2473		end = start + pavail[i].reg_size;
2474
2475		if (paddr >= start && paddr < end)
2476			return 1;
2477	}
2478	if (paddr >= kern_base && paddr < (kern_base + kern_size))
2479		return 1;
2480#ifdef CONFIG_BLK_DEV_INITRD
2481	if (paddr >= __pa(initrd_start) &&
2482	    paddr < __pa(PAGE_ALIGN(initrd_end)))
2483		return 1;
2484#endif
2485
2486	return 0;
2487}
2488
2489static void __init register_page_bootmem_info(void)
2490{
2491#ifdef CONFIG_NUMA
2492	int i;
2493
2494	for_each_online_node(i)
2495		if (NODE_DATA(i)->node_spanned_pages)
2496			register_page_bootmem_info_node(NODE_DATA(i));
2497#endif
2498}
2499void __init mem_init(void)
2500{
2501	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2502
2503	memblock_free_all();
2504
2505	/*
2506	 * Must be done after boot memory is put on freelist, because here we
2507	 * might set fields in deferred struct pages that have not yet been
2508	 * initialized, and memblock_free_all() initializes all the reserved
2509	 * deferred pages for us.
2510	 */
2511	register_page_bootmem_info();
2512
2513	/*
2514	 * Set up the zero page, mark it reserved, so that page count
2515	 * is not manipulated when freeing the page from user ptes.
2516	 */
2517	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2518	if (mem_map_zero == NULL) {
2519		prom_printf("paging_init: Cannot alloc zero page.\n");
2520		prom_halt();
2521	}
2522	mark_page_reserved(mem_map_zero);
2523
2524
2525	if (tlb_type == cheetah || tlb_type == cheetah_plus)
2526		cheetah_ecache_flush_init();
2527}
2528
2529void free_initmem(void)
2530{
2531	unsigned long addr, initend;
2532	int do_free = 1;
2533
2534	/* If the physical memory maps were trimmed by kernel command
2535	 * line options, don't even try freeing this initmem stuff up.
2536	 * The kernel image could have been in the trimmed out region
2537	 * and if so the freeing below will free invalid page structs.
2538	 */
2539	if (cmdline_memory_size)
2540		do_free = 0;
2541
2542	/*
2543	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
2544	 */
2545	addr = PAGE_ALIGN((unsigned long)(__init_begin));
2546	initend = (unsigned long)(__init_end) & PAGE_MASK;
2547	for (; addr < initend; addr += PAGE_SIZE) {
2548		unsigned long page;
2549
2550		page = (addr +
2551			((unsigned long) __va(kern_base)) -
2552			((unsigned long) KERNBASE));
2553		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2554
2555		if (do_free)
2556			free_reserved_page(virt_to_page(page));
2557	}
2558}
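
/* Note: in the loop above, 'addr' walks the init section through its
 * KERNBASE-based mapping while 'page' rebases the same address into the
 * kernel linear mapping (__va(kern_base) + offset from KERNBASE), so
 * that virt_to_page() resolves to the correct struct page before the
 * page is returned to the allocator.
 */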
2559
2560pgprot_t PAGE_KERNEL __read_mostly;
2561EXPORT_SYMBOL(PAGE_KERNEL);
2562
2563pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2564pgprot_t PAGE_COPY __read_mostly;
2565
2566pgprot_t PAGE_SHARED __read_mostly;
2567EXPORT_SYMBOL(PAGE_SHARED);
2568
2569unsigned long pg_iobits __read_mostly;
2570
2571unsigned long _PAGE_IE __read_mostly;
2572EXPORT_SYMBOL(_PAGE_IE);
2573
2574unsigned long _PAGE_E __read_mostly;
2575EXPORT_SYMBOL(_PAGE_E);
2576
2577unsigned long _PAGE_CACHE __read_mostly;
2578EXPORT_SYMBOL(_PAGE_CACHE);
2579
2580#ifdef CONFIG_SPARSEMEM_VMEMMAP
2581int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2582			       int node, struct vmem_altmap *altmap)
2583{
2584	unsigned long pte_base;
2585
2586	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2587		    _PAGE_CP_4U | _PAGE_CV_4U |
2588		    _PAGE_P_4U | _PAGE_W_4U);
2589	if (tlb_type == hypervisor)
2590		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2591			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2592
2593	pte_base |= _PAGE_PMD_HUGE;
2594
2595	vstart = vstart & PMD_MASK;
2596	vend = ALIGN(vend, PMD_SIZE);
2597	for (; vstart < vend; vstart += PMD_SIZE) {
2598		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
2599		unsigned long pte;
2600		p4d_t *p4d;
2601		pud_t *pud;
2602		pmd_t *pmd;
2603
2604		if (!pgd)
2605			return -ENOMEM;
2606
2607		p4d = vmemmap_p4d_populate(pgd, vstart, node);
2608		if (!p4d)
2609			return -ENOMEM;
2610
2611		pud = vmemmap_pud_populate(p4d, vstart, node);
2612		if (!pud)
2613			return -ENOMEM;
2614
2615		pmd = pmd_offset(pud, vstart);
2616		pte = pmd_val(*pmd);
2617		if (!(pte & _PAGE_VALID)) {
2618			void *block = vmemmap_alloc_block(PMD_SIZE, node);
2619
2620			if (!block)
2621				return -ENOMEM;
2622
2623			pmd_val(*pmd) = pte_base | __pa(block);
2624		}
2625	}
2626
2627	return 0;
2628}
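
/* Note: the vmemmap is backed entirely by PMD-sized (4MB) mappings.
 * pte_base carries the 4MB size bits plus _PAGE_PMD_HUGE, and for each
 * PMD-aligned chunk of [vstart, vend) we either find an already valid
 * entry or allocate a fresh PMD_SIZE block and point the pmd at it.
 */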
2629
2630void vmemmap_free(unsigned long start, unsigned long end,
2631		struct vmem_altmap *altmap)
2632{
2633}
2634#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2635
2636/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
2637static pgprot_t protection_map[16] __ro_after_init;
2638
2639static void prot_init_common(unsigned long page_none,
2640			     unsigned long page_shared,
2641			     unsigned long page_copy,
2642			     unsigned long page_readonly,
2643			     unsigned long page_exec_bit)
2644{
2645	PAGE_COPY = __pgprot(page_copy);
2646	PAGE_SHARED = __pgprot(page_shared);
2647
2648	protection_map[0x0] = __pgprot(page_none);
2649	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2650	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2651	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2652	protection_map[0x4] = __pgprot(page_readonly);
2653	protection_map[0x5] = __pgprot(page_readonly);
2654	protection_map[0x6] = __pgprot(page_copy);
2655	protection_map[0x7] = __pgprot(page_copy);
2656	protection_map[0x8] = __pgprot(page_none);
2657	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2658	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2659	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2660	protection_map[0xc] = __pgprot(page_readonly);
2661	protection_map[0xd] = __pgprot(page_readonly);
2662	protection_map[0xe] = __pgprot(page_shared);
2663	protection_map[0xf] = __pgprot(page_shared);
2664}
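
/* Note: the protection_map[] index is the low four vm_flags bits as
 * used by vm_get_page_prot() later in this file: VM_READ (0x1),
 * VM_WRITE (0x2), VM_EXEC (0x4) and VM_SHARED (0x8).  Private writable
 * mappings get page_copy so they start out copy-on-write, shared
 * writable mappings get page_shared, and readable/writable entries
 * lacking VM_EXEC have page_exec_bit cleared.
 */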
2665
2666static void __init sun4u_pgprot_init(void)
2667{
2668	unsigned long page_none, page_shared, page_copy, page_readonly;
2669	unsigned long page_exec_bit;
2670	int i;
2671
2672	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2673				_PAGE_CACHE_4U | _PAGE_P_4U |
2674				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
2675				_PAGE_EXEC_4U);
2676	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2677				       _PAGE_CACHE_4U | _PAGE_P_4U |
2678				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2679				       _PAGE_EXEC_4U | _PAGE_L_4U);
2680
2681	_PAGE_IE = _PAGE_IE_4U;
2682	_PAGE_E = _PAGE_E_4U;
2683	_PAGE_CACHE = _PAGE_CACHE_4U;
2684
2685	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2686		     __ACCESS_BITS_4U | _PAGE_E_4U);
2687
2688#ifdef CONFIG_DEBUG_PAGEALLOC
2689	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2690#else
2691	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2692		PAGE_OFFSET;
2693#endif
2694	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2695				   _PAGE_P_4U | _PAGE_W_4U);
2696
2697	for (i = 1; i < 4; i++)
2698		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2699
2700	_PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2701			      _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2702			      _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2703
2704
2705	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2706	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2707		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2708	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2709		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2710	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2711			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2712
2713	page_exec_bit = _PAGE_EXEC_4U;
2714
2715	prot_init_common(page_none, page_shared, page_copy, page_readonly,
2716			 page_exec_bit);
2717}
2718
2719static void __init sun4v_pgprot_init(void)
2720{
2721	unsigned long page_none, page_shared, page_copy, page_readonly;
2722	unsigned long page_exec_bit;
2723	int i;
2724
2725	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2726				page_cache4v_flag | _PAGE_P_4V |
2727				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
2728				_PAGE_EXEC_4V);
2729	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2730
2731	_PAGE_IE = _PAGE_IE_4V;
2732	_PAGE_E = _PAGE_E_4V;
2733	_PAGE_CACHE = page_cache4v_flag;
2734
2735#ifdef CONFIG_DEBUG_PAGEALLOC
2736	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2737#else
2738	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2739		PAGE_OFFSET;
2740#endif
2741	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2742				   _PAGE_W_4V);
2743
2744	for (i = 1; i < 4; i++)
2745		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2746
2747	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2748		     __ACCESS_BITS_4V | _PAGE_E_4V);
2749
2750	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2751			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2752			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2753			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2754
2755	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2756	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2757		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2758	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2759		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2760	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2761			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2762
2763	page_exec_bit = _PAGE_EXEC_4V;
2764
2765	prot_init_common(page_none, page_shared, page_copy, page_readonly,
2766			 page_exec_bit);
2767}
2768
2769unsigned long pte_sz_bits(unsigned long sz)
2770{
2771	if (tlb_type == hypervisor) {
2772		switch (sz) {
2773		case 8 * 1024:
2774		default:
2775			return _PAGE_SZ8K_4V;
2776		case 64 * 1024:
2777			return _PAGE_SZ64K_4V;
2778		case 512 * 1024:
2779			return _PAGE_SZ512K_4V;
2780		case 4 * 1024 * 1024:
2781			return _PAGE_SZ4MB_4V;
2782		}
2783	} else {
2784		switch (sz) {
2785		case 8 * 1024:
2786		default:
2787			return _PAGE_SZ8K_4U;
2788		case 64 * 1024:
2789			return _PAGE_SZ64K_4U;
2790		case 512 * 1024:
2791			return _PAGE_SZ512K_4U;
2792		case 4 * 1024 * 1024:
2793			return _PAGE_SZ4MB_4U;
2794		}
2795	}
2796}
2797
2798pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2799{
2800	pte_t pte;
2801
2802	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
2803	pte_val(pte) |= (((unsigned long)space) << 32);
2804	pte_val(pte) |= pte_sz_bits(page_size);
2805
2806	return pte;
2807}
2808
2809static unsigned long kern_large_tte(unsigned long paddr)
2810{
2811	unsigned long val;
2812
2813	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2814	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2815	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2816	if (tlb_type == hypervisor)
2817		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2818		       page_cache4v_flag | _PAGE_P_4V |
2819		       _PAGE_EXEC_4V | _PAGE_W_4V);
2820
2821	return val | paddr;
2822}
2823
2824/* If not locked, zap it. */
2825void __flush_tlb_all(void)
2826{
2827	unsigned long pstate;
2828	int i;
2829
2830	__asm__ __volatile__("flushw\n\t"
2831			     "rdpr	%%pstate, %0\n\t"
2832			     "wrpr	%0, %1, %%pstate"
2833			     : "=r" (pstate)
2834			     : "i" (PSTATE_IE));
2835	if (tlb_type == hypervisor) {
2836		sun4v_mmu_demap_all();
2837	} else if (tlb_type == spitfire) {
2838		for (i = 0; i < 64; i++) {
2839			/* Spitfire Errata #32 workaround */
2840			/* NOTE: Always runs on spitfire, so no
2841			 *       cheetah+ page size encodings.
2842			 */
2843			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
2844					     "flush	%%g6"
2845					     : /* No outputs */
2846					     : "r" (0),
2847					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2848
2849			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2850				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2851						     "membar #Sync"
2852						     : /* no outputs */
2853						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2854				spitfire_put_dtlb_data(i, 0x0UL);
2855			}
2856
2857			/* Spitfire Errata #32 workaround */
2858			/* NOTE: Always runs on spitfire, so no
2859			 *       cheetah+ page size encodings.
2860			 */
2861			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
2862					     "flush	%%g6"
2863					     : /* No outputs */
2864					     : "r" (0),
2865					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2866
2867			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2868				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2869						     "membar #Sync"
2870						     : /* no outputs */
2871						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2872				spitfire_put_itlb_data(i, 0x0UL);
2873			}
2874		}
2875	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2876		cheetah_flush_dtlb_all();
2877		cheetah_flush_itlb_all();
2878	}
2879	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
2880			     : : "r" (pstate));
2881}
2882
2883pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
2884{
2885	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2886	pte_t *pte = NULL;
2887
2888	if (page)
2889		pte = (pte_t *) page_address(page);
2890
2891	return pte;
2892}
2893
2894pgtable_t pte_alloc_one(struct mm_struct *mm)
2895{
2896	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2897	if (!page)
2898		return NULL;
2899	if (!pgtable_pte_page_ctor(page)) {
2900		__free_page(page);
2901		return NULL;
2902	}
2903	return (pte_t *) page_address(page);
2904}
2905
2906void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2907{
2908	free_page((unsigned long)pte);
2909}
2910
2911static void __pte_free(pgtable_t pte)
2912{
2913	struct page *page = virt_to_page(pte);
2914
2915	pgtable_pte_page_dtor(page);
2916	__free_page(page);
2917}
2918
2919void pte_free(struct mm_struct *mm, pgtable_t pte)
2920{
2921	__pte_free(pte);
2922}
2923
2924void pgtable_free(void *table, bool is_page)
2925{
2926	if (is_page)
2927		__pte_free(table);
2928	else
2929		kmem_cache_free(pgtable_cache, table);
2930}
2931
2932#ifdef CONFIG_TRANSPARENT_HUGEPAGE
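/* An 8MB "huge" PMD is backed by two real 4MB hardware TLB entries, so
 * update_mmu_cache_pmd() folds bit REAL_HPAGE_SHIFT of the faulting
 * address into the TTE before inserting it, making the TSB entry
 * describe the correct 4MB half of the mapping.
 */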
2933void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2934			  pmd_t *pmd)
2935{
2936	unsigned long pte, flags;
2937	struct mm_struct *mm;
2938	pmd_t entry = *pmd;
2939
2940	if (!pmd_large(entry) || !pmd_young(entry))
2941		return;
2942
2943	pte = pmd_val(entry);
2944
2945	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
2946	if (!(pte & _PAGE_VALID))
2947		return;
2948
2949	/* We are fabricating 8MB pages using 4MB real hw pages.  */
2950	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2951
2952	mm = vma->vm_mm;
2953
2954	spin_lock_irqsave(&mm->context.lock, flags);
2955
2956	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2957		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2958					addr, pte);
2959
2960	spin_unlock_irqrestore(&mm->context.lock, flags);
2961}
2962#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2963
2964#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2965static void context_reload(void *__data)
2966{
2967	struct mm_struct *mm = __data;
2968
2969	if (mm == current->mm)
2970		load_secondary_context(mm);
2971}
2972
2973void hugetlb_setup(struct pt_regs *regs)
2974{
2975	struct mm_struct *mm = current->mm;
2976	struct tsb_config *tp;
2977
2978	if (faulthandler_disabled() || !mm) {
2979		const struct exception_table_entry *entry;
2980
2981		entry = search_exception_tables(regs->tpc);
2982		if (entry) {
2983			regs->tpc = entry->fixup;
2984			regs->tnpc = regs->tpc + 4;
2985			return;
2986		}
2987		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
2988		die_if_kernel("HugeTSB in atomic", regs);
2989	}
2990
2991	tp = &mm->context.tsb_block[MM_TSB_HUGE];
2992	if (likely(tp->tsb == NULL))
2993		tsb_grow(mm, MM_TSB_HUGE, 0);
2994
2995	tsb_context_switch(mm);
2996	smp_tsb_sync(mm);
2997
2998	/* On UltraSPARC-III+ and later, configure the second half of
2999	 * the Data-TLB for huge pages.
3000	 */
3001	if (tlb_type == cheetah_plus) {
3002		bool need_context_reload = false;
3003		unsigned long ctx;
3004
3005		spin_lock_irq(&ctx_alloc_lock);
3006		ctx = mm->context.sparc64_ctx_val;
3007		ctx &= ~CTX_PGSZ_MASK;
3008		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
3009		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
3010
3011		if (ctx != mm->context.sparc64_ctx_val) {
3012			/* When changing the page size fields, we
3013			 * must perform a context flush so that no
3014			 * stale entries match.  This flush must
3015			 * occur with the original context register
3016			 * settings.
3017			 */
3018			do_flush_tlb_mm(mm);
3019
3020			/* Reload the context register of all processors
3021			 * also executing in this address space.
3022			 */
3023			mm->context.sparc64_ctx_val = ctx;
3024			need_context_reload = true;
3025		}
3026		spin_unlock_irq(&ctx_alloc_lock);
3027
3028		if (need_context_reload)
3029			on_each_cpu(context_reload, mm, 0);
3030	}
3031}
3032#endif
3033
3034static struct resource code_resource = {
3035	.name	= "Kernel code",
3036	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3037};
3038
3039static struct resource data_resource = {
3040	.name	= "Kernel data",
3041	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3042};
3043
3044static struct resource bss_resource = {
3045	.name	= "Kernel bss",
3046	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3047};
3048
3049static inline resource_size_t compute_kern_paddr(void *addr)
3050{
3051	return (resource_size_t) (addr - KERNBASE + kern_base);
3052}
3053
3054static void __init kernel_lds_init(void)
3055{
3056	code_resource.start = compute_kern_paddr(_text);
3057	code_resource.end   = compute_kern_paddr(_etext - 1);
3058	data_resource.start = compute_kern_paddr(_etext);
3059	data_resource.end   = compute_kern_paddr(_edata - 1);
3060	bss_resource.start  = compute_kern_paddr(__bss_start);
3061	bss_resource.end    = compute_kern_paddr(_end - 1);
3062}
3063
3064static int __init report_memory(void)
3065{
3066	int i;
3067	struct resource *res;
3068
3069	kernel_lds_init();
3070
3071	for (i = 0; i < pavail_ents; i++) {
3072		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
3073
3074		if (!res) {
3075			pr_warn("Failed to allocate resource.\n");
3076			break;
3077		}
3078
3079		res->name = "System RAM";
3080		res->start = pavail[i].phys_addr;
3081		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
3082		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
3083
3084		if (insert_resource(&iomem_resource, res) < 0) {
3085			pr_warn("Resource insertion failed.\n");
3086			break;
3087		}
3088
3089		insert_resource(res, &code_resource);
3090		insert_resource(res, &data_resource);
3091		insert_resource(res, &bss_resource);
3092	}
3093
3094	return 0;
3095}
3096arch_initcall(report_memory);
3097
3098#ifdef CONFIG_SMP
3099#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
3100#else
3101#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
3102#endif
3103
3104void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3105{
3106	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3107		if (start < LOW_OBP_ADDRESS) {
3108			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
3109			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
3110		}
3111		if (end > HI_OBP_ADDRESS) {
3112			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3113			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3114		}
3115	} else {
3116		flush_tsb_kernel_range(start, end);
3117		do_flush_tlb_kernel_range(start, end);
3118	}
3119}
3120
3121void copy_user_highpage(struct page *to, struct page *from,
3122	unsigned long vaddr, struct vm_area_struct *vma)
3123{
3124	char *vfrom, *vto;
3125
3126	vfrom = kmap_atomic(from);
3127	vto = kmap_atomic(to);
3128	copy_user_page(vto, vfrom, vaddr, to);
3129	kunmap_atomic(vto);
3130	kunmap_atomic(vfrom);
3131
3132	/* If this page has ADI enabled, copy over any ADI tags
3133	 * as well
3134	 */
3135	if (vma->vm_flags & VM_SPARC_ADI) {
3136		unsigned long pfrom, pto, i, adi_tag;
3137
3138		pfrom = page_to_phys(from);
3139		pto = page_to_phys(to);
3140
3141		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3142			asm volatile("ldxa [%1] %2, %0\n\t"
3143					: "=r" (adi_tag)
3144					:  "r" (i), "i" (ASI_MCD_REAL));
3145			asm volatile("stxa %0, [%1] %2\n\t"
3146					:
3147					: "r" (adi_tag), "r" (pto),
3148					  "i" (ASI_MCD_REAL));
3149			pto += adi_blksize();
3150		}
3151		asm volatile("membar #Sync\n\t");
3152	}
3153}
3154EXPORT_SYMBOL(copy_user_highpage);
3155
3156void copy_highpage(struct page *to, struct page *from)
3157{
3158	char *vfrom, *vto;
3159
3160	vfrom = kmap_atomic(from);
3161	vto = kmap_atomic(to);
3162	copy_page(vto, vfrom);
3163	kunmap_atomic(vto);
3164	kunmap_atomic(vfrom);
3165
3166	/* If this platform is ADI enabled, copy any ADI tags
3167	 * as well
3168	 */
3169	if (adi_capable()) {
3170		unsigned long pfrom, pto, i, adi_tag;
3171
3172		pfrom = page_to_phys(from);
3173		pto = page_to_phys(to);
3174
3175		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3176			asm volatile("ldxa [%1] %2, %0\n\t"
3177					: "=r" (adi_tag)
3178					:  "r" (i), "i" (ASI_MCD_REAL));
3179			asm volatile("stxa %0, [%1] %2\n\t"
3180					:
3181					: "r" (adi_tag), "r" (pto),
3182					  "i" (ASI_MCD_REAL));
3183			pto += adi_blksize();
3184		}
3185		asm volatile("membar #Sync\n\t");
3186	}
3187}
3188EXPORT_SYMBOL(copy_highpage);
3189
3190pgprot_t vm_get_page_prot(unsigned long vm_flags)
3191{
3192	unsigned long prot = pgprot_val(protection_map[vm_flags &
3193					(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
3194
3195	if (vm_flags & VM_SPARC_ADI)
3196		prot |= _PAGE_MCD_4V;
3197
3198	return __pgprot(prot);
3199}
3200EXPORT_SYMBOL(vm_get_page_prot);
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *  arch/sparc64/mm/init.c
   4 *
   5 *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
   6 *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
   7 */
   8 
   9#include <linux/extable.h>
  10#include <linux/kernel.h>
  11#include <linux/sched.h>
  12#include <linux/string.h>
  13#include <linux/init.h>
  14#include <linux/memblock.h>
  15#include <linux/mm.h>
  16#include <linux/hugetlb.h>
  17#include <linux/initrd.h>
  18#include <linux/swap.h>
  19#include <linux/pagemap.h>
  20#include <linux/poison.h>
  21#include <linux/fs.h>
  22#include <linux/seq_file.h>
  23#include <linux/kprobes.h>
  24#include <linux/cache.h>
  25#include <linux/sort.h>
  26#include <linux/ioport.h>
  27#include <linux/percpu.h>
  28#include <linux/mmzone.h>
  29#include <linux/gfp.h>
  30#include <linux/bootmem_info.h>
  31
  32#include <asm/head.h>
  33#include <asm/page.h>
  34#include <asm/pgalloc.h>
  35#include <asm/oplib.h>
  36#include <asm/iommu.h>
  37#include <asm/io.h>
  38#include <linux/uaccess.h>
  39#include <asm/mmu_context.h>
  40#include <asm/tlbflush.h>
  41#include <asm/dma.h>
  42#include <asm/starfire.h>
  43#include <asm/tlb.h>
  44#include <asm/spitfire.h>
  45#include <asm/sections.h>
  46#include <asm/tsb.h>
  47#include <asm/hypervisor.h>
  48#include <asm/prom.h>
  49#include <asm/mdesc.h>
  50#include <asm/cpudata.h>
  51#include <asm/setup.h>
  52#include <asm/irq.h>
  53
  54#include "init_64.h"
  55
  56unsigned long kern_linear_pte_xor[4] __read_mostly;
  57static unsigned long page_cache4v_flag;
  58
  59/* A bitmap, two bits for every 256MB of physical memory.  These two
  60 * bits determine what page size we use for kernel linear
  61 * translations.  They form an index into kern_linear_pte_xor[].  The
  62 * value in the indexed slot is XOR'd with the TLB miss virtual
  63 * address to form the resulting TTE.  The mapping is:
  64 *
  65 *	0	==>	4MB
  66 *	1	==>	256MB
  67 *	2	==>	2GB
  68 *	3	==>	16GB
  69 *
  70 * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
  71 * support 2GB pages, and hopefully future cpus will support the 16GB
  72 * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
  73 * if these larger page sizes are not supported by the cpu.
  74 *
  75 * It would be nice to determine this from the machine description
  76 * 'cpu' properties, but we need to have this table setup before the
  77 * MDESC is initialized.
  78 */
  79
  80#ifndef CONFIG_DEBUG_PAGEALLOC
  81/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
  82 * Space is allocated for this right after the trap table in
  83 * arch/sparc64/kernel/head.S
  84 */
  85extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
  86#endif
  87extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
  88
  89static unsigned long cpu_pgsz_mask;
  90
  91#define MAX_BANKS	1024
  92
  93static struct linux_prom64_registers pavail[MAX_BANKS];
  94static int pavail_ents;
  95
  96u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
  97
  98static int cmp_p64(const void *a, const void *b)
  99{
 100	const struct linux_prom64_registers *x = a, *y = b;
 101
 102	if (x->phys_addr > y->phys_addr)
 103		return 1;
 104	if (x->phys_addr < y->phys_addr)
 105		return -1;
 106	return 0;
 107}
 108
 109static void __init read_obp_memory(const char *property,
 110				   struct linux_prom64_registers *regs,
 111				   int *num_ents)
 112{
 113	phandle node = prom_finddevice("/memory");
 114	int prop_size = prom_getproplen(node, property);
 115	int ents, ret, i;
 116
 117	ents = prop_size / sizeof(struct linux_prom64_registers);
 118	if (ents > MAX_BANKS) {
 119		prom_printf("The machine has more %s property entries than "
 120			    "this kernel can support (%d).\n",
 121			    property, MAX_BANKS);
 122		prom_halt();
 123	}
 124
 125	ret = prom_getproperty(node, property, (char *) regs, prop_size);
 126	if (ret == -1) {
 127		prom_printf("Couldn't get %s property from /memory.\n",
 128				property);
 129		prom_halt();
 130	}
 131
 132	/* Sanitize what we got from the firmware, by page aligning
 133	 * everything.
 134	 */
 135	for (i = 0; i < ents; i++) {
 136		unsigned long base, size;
 137
 138		base = regs[i].phys_addr;
 139		size = regs[i].reg_size;
 140
 141		size &= PAGE_MASK;
 142		if (base & ~PAGE_MASK) {
 143			unsigned long new_base = PAGE_ALIGN(base);
 144
 145			size -= new_base - base;
 146			if ((long) size < 0L)
 147				size = 0UL;
 148			base = new_base;
 149		}
 150		if (size == 0UL) {
 151			/* If it is empty, simply get rid of it.
 152			 * This simplifies the logic of the other
 153			 * functions that process these arrays.
 154			 */
 155			memmove(&regs[i], &regs[i + 1],
 156				(ents - i - 1) * sizeof(regs[0]));
 157			i--;
 158			ents--;
 159			continue;
 160		}
 161		regs[i].phys_addr = base;
 162		regs[i].reg_size = size;
 163	}
 164
 165	*num_ents = ents;
 166
 167	sort(regs, ents, sizeof(struct linux_prom64_registers),
 168	     cmp_p64, NULL);
 169}
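
/* Note: read_obp_memory() sanitizes whatever the firmware hands back by
 * page-aligning every bank, dropping banks that become empty, and then
 * sorting the survivors by physical address, leaving an ordered,
 * page-aligned list for the rest of the boot code.
 */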
 170
 171/* Kernel physical address base and size in bytes.  */
 172unsigned long kern_base __read_mostly;
 173unsigned long kern_size __read_mostly;
 174
 175/* Initial ramdisk setup */
 176extern unsigned long sparc_ramdisk_image64;
 177extern unsigned int sparc_ramdisk_image;
 178extern unsigned int sparc_ramdisk_size;
 179
 180struct page *mem_map_zero __read_mostly;
 181EXPORT_SYMBOL(mem_map_zero);
 182
 183unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
 184
 185unsigned long sparc64_kern_pri_context __read_mostly;
 186unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 187unsigned long sparc64_kern_sec_context __read_mostly;
 188
 189int num_kernel_image_mappings;
 190
 191#ifdef CONFIG_DEBUG_DCFLUSH
 192atomic_t dcpage_flushes = ATOMIC_INIT(0);
 193#ifdef CONFIG_SMP
 194atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
 195#endif
 196#endif
 197
 198inline void flush_dcache_folio_impl(struct folio *folio)
 199{
 200	unsigned int i, nr = folio_nr_pages(folio);
 201
 202	BUG_ON(tlb_type == hypervisor);
 203#ifdef CONFIG_DEBUG_DCFLUSH
 204	atomic_inc(&dcpage_flushes);
 205#endif
 206
 207#ifdef DCACHE_ALIASING_POSSIBLE
 208	for (i = 0; i < nr; i++)
 209		__flush_dcache_page(folio_address(folio) + i * PAGE_SIZE,
 210				    ((tlb_type == spitfire) &&
 211				     folio_flush_mapping(folio) != NULL));
 212#else
 213	if (folio_flush_mapping(folio) != NULL &&
 214	    tlb_type == spitfire) {
 215		for (i = 0; i < nr; i++)
 216			__flush_icache_page((folio_pfn(folio) + i) * PAGE_SIZE);
 217	}
 218#endif
 219}
 220
 221#define PG_dcache_dirty		PG_arch_1
 222#define PG_dcache_cpu_shift	32UL
 223#define PG_dcache_cpu_mask	\
 224	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
 225
 226#define dcache_dirty_cpu(folio) \
 227	(((folio)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 228
 229static inline void set_dcache_dirty(struct folio *folio, int this_cpu)
 230{
 231	unsigned long mask = this_cpu;
 232	unsigned long non_cpu_bits;
 233
 234	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
 235	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
 236
 237	__asm__ __volatile__("1:\n\t"
 238			     "ldx	[%2], %%g7\n\t"
 239			     "and	%%g7, %1, %%g1\n\t"
 240			     "or	%%g1, %0, %%g1\n\t"
 241			     "casx	[%2], %%g7, %%g1\n\t"
 242			     "cmp	%%g7, %%g1\n\t"
 243			     "bne,pn	%%xcc, 1b\n\t"
 244			     " nop"
 245			     : /* no outputs */
 246			     : "r" (mask), "r" (non_cpu_bits), "r" (&folio->flags)
 247			     : "g1", "g7");
 248}
 249
 250static inline void clear_dcache_dirty_cpu(struct folio *folio, unsigned long cpu)
 251{
 252	unsigned long mask = (1UL << PG_dcache_dirty);
 253
 254	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
 255			     "1:\n\t"
 256			     "ldx	[%2], %%g7\n\t"
 257			     "srlx	%%g7, %4, %%g1\n\t"
 258			     "and	%%g1, %3, %%g1\n\t"
 259			     "cmp	%%g1, %0\n\t"
 260			     "bne,pn	%%icc, 2f\n\t"
 261			     " andn	%%g7, %1, %%g1\n\t"
 262			     "casx	[%2], %%g7, %%g1\n\t"
 263			     "cmp	%%g7, %%g1\n\t"
 264			     "bne,pn	%%xcc, 1b\n\t"
 265			     " nop\n"
 266			     "2:"
 267			     : /* no outputs */
 268			     : "r" (cpu), "r" (mask), "r" (&folio->flags),
 269			       "i" (PG_dcache_cpu_mask),
 270			       "i" (PG_dcache_cpu_shift)
 271			     : "g1", "g7");
 272}
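
/* Note: both helpers above update folio->flags with a casx
 * (compare-and-swap) retry loop instead of taking a lock.
 * set_dcache_dirty() installs the dirty bit together with the owning
 * cpu number, while clear_dcache_dirty_cpu() clears the dirty bit only
 * if the recorded cpu still matches the one passed in.
 */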
 273
 274static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
 275{
 276	unsigned long tsb_addr = (unsigned long) ent;
 277
 278	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 279		tsb_addr = __pa(tsb_addr);
 280
 281	__tsb_insert(tsb_addr, tag, pte);
 282}
 283
 284unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
 285
 286static void flush_dcache(unsigned long pfn)
 287{
 288	struct page *page;
 289
 290	page = pfn_to_page(pfn);
 291	if (page) {
 292		struct folio *folio = page_folio(page);
 293		unsigned long pg_flags;
 294
 295		pg_flags = folio->flags;
 296		if (pg_flags & (1UL << PG_dcache_dirty)) {
 297			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
 298				   PG_dcache_cpu_mask);
 299			int this_cpu = get_cpu();
 300
 301			/* This is just to optimize away some function calls
 302			 * in the SMP case.
 303			 */
 304			if (cpu == this_cpu)
 305				flush_dcache_folio_impl(folio);
 306			else
 307				smp_flush_dcache_folio_impl(folio, cpu);
 308
 309			clear_dcache_dirty_cpu(folio, cpu);
 310
 311			put_cpu();
 312		}
 313	}
 314}
 315
 316/* mm->context.lock must be held */
 317static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
 318				    unsigned long tsb_hash_shift, unsigned long address,
 319				    unsigned long tte)
 320{
 321	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
 322	unsigned long tag;
 323
 324	if (unlikely(!tsb))
 325		return;
 326
 327	tsb += ((address >> tsb_hash_shift) &
 328		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
 329	tag = (address >> 22UL);
 330	tsb_insert(tsb, tag, tte);
 331}
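
/* Note: the TSB is a direct-mapped hash table.  The slot is chosen by
 * the address bits above tsb_hash_shift masked with (nentries - 1), and
 * the tag stored next to the TTE is simply the address shifted down by
 * 22 bits.
 */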
 332
 333#ifdef CONFIG_HUGETLB_PAGE
 334static int __init hugetlbpage_init(void)
 335{
 336	hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
 337	hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
 338	hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
 339	hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
 340
 341	return 0;
 342}
 343
 344arch_initcall(hugetlbpage_init);
 345
 346static void __init pud_huge_patch(void)
 347{
 348	struct pud_huge_patch_entry *p;
 349	unsigned long addr;
 350
 351	p = &__pud_huge_patch;
 352	addr = p->addr;
 353	*(unsigned int *)addr = p->insn;
 354
 355	__asm__ __volatile__("flush %0" : : "r" (addr));
 356}
 357
 358bool __init arch_hugetlb_valid_size(unsigned long size)
 359{
 360	unsigned int hugepage_shift = ilog2(size);
 361	unsigned short hv_pgsz_idx;
 362	unsigned int hv_pgsz_mask;
 363
 364	switch (hugepage_shift) {
 365	case HPAGE_16GB_SHIFT:
 366		hv_pgsz_mask = HV_PGSZ_MASK_16GB;
 367		hv_pgsz_idx = HV_PGSZ_IDX_16GB;
 368		pud_huge_patch();
 369		break;
 370	case HPAGE_2GB_SHIFT:
 371		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
 372		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
 373		break;
 374	case HPAGE_256MB_SHIFT:
 375		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
 376		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
 377		break;
 378	case HPAGE_SHIFT:
 379		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
 380		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
 381		break;
 382	case HPAGE_64K_SHIFT:
 383		hv_pgsz_mask = HV_PGSZ_MASK_64K;
 384		hv_pgsz_idx = HV_PGSZ_IDX_64K;
 385		break;
 386	default:
 387		hv_pgsz_mask = 0;
 388	}
 389
 390	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U)
 391		return false;
 392
 393	return true;
 394}
 395#endif	/* CONFIG_HUGETLB_PAGE */
 396
 397void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
 398		unsigned long address, pte_t *ptep, unsigned int nr)
 399{
 400	struct mm_struct *mm;
 401	unsigned long flags;
 402	bool is_huge_tsb;
 403	pte_t pte = *ptep;
 404	unsigned int i;
 405
 406	if (tlb_type != hypervisor) {
 407		unsigned long pfn = pte_pfn(pte);
 408
 409		if (pfn_valid(pfn))
 410			flush_dcache(pfn);
 411	}
 412
 413	mm = vma->vm_mm;
 414
 415	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
 416	if (!pte_accessible(mm, pte))
 417		return;
 418
 419	spin_lock_irqsave(&mm->context.lock, flags);
 420
 421	is_huge_tsb = false;
 422#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 423	if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
 424		unsigned long hugepage_size = PAGE_SIZE;
 425
 426		if (is_vm_hugetlb_page(vma))
 427			hugepage_size = huge_page_size(hstate_vma(vma));
 428
 429		if (hugepage_size >= PUD_SIZE) {
 430			unsigned long mask = 0x1ffc00000UL;
 431
 432			/* Transfer bits [32:22] from address to resolve
 433			 * at 4M granularity.
 434			 */
 435			pte_val(pte) &= ~mask;
 436			pte_val(pte) |= (address & mask);
 437		} else if (hugepage_size >= PMD_SIZE) {
 438			/* We are fabricating 8MB pages using 4MB
 439			 * real hw pages.
 440			 */
 441			pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
 442		}
 443
 444		if (hugepage_size >= PMD_SIZE) {
 445			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
 446				REAL_HPAGE_SHIFT, address, pte_val(pte));
 447			is_huge_tsb = true;
 448		}
 449	}
 450#endif
 451	if (!is_huge_tsb) {
 452		for (i = 0; i < nr; i++) {
 453			__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
 454						address, pte_val(pte));
 455			address += PAGE_SIZE;
 456			pte_val(pte) += PAGE_SIZE;
 457		}
 458	}
 459
 460	spin_unlock_irqrestore(&mm->context.lock, flags);
 461}
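
/* Note: for ordinary pages the loop above inserts one TSB entry per
 * page in the batch, advancing both the virtual address and the
 * physical address encoded in the TTE by PAGE_SIZE on each iteration;
 * huge mappings instead get a single entry in the MM_TSB_HUGE TSB.
 */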
 462
 463void flush_dcache_folio(struct folio *folio)
 464{
 465	unsigned long pfn = folio_pfn(folio);
 466	struct address_space *mapping;
 467	int this_cpu;
 468
 469	if (tlb_type == hypervisor)
 470		return;
 471
 472	/* Do not bother with the expensive D-cache flush if it
 473	 * is merely the zero page.  The 'bigcore' testcase in GDB
 474	 * causes this case to run millions of times.
 475	 */
 476	if (is_zero_pfn(pfn))
 477		return;
 478
 479	this_cpu = get_cpu();
 480
 481	mapping = folio_flush_mapping(folio);
 482	if (mapping && !mapping_mapped(mapping)) {
 483		bool dirty = test_bit(PG_dcache_dirty, &folio->flags);
 484		if (dirty) {
 485			int dirty_cpu = dcache_dirty_cpu(folio);
 486
 487			if (dirty_cpu == this_cpu)
 488				goto out;
 489			smp_flush_dcache_folio_impl(folio, dirty_cpu);
 490		}
 491		set_dcache_dirty(folio, this_cpu);
 492	} else {
 493		/* We could delay the flush for the !page_mapping
 494		 * case too.  But that case is for exec env/arg
 495	 * pages and those are 99% certainly going to get
 496		 * faulted into the tlb (and thus flushed) anyways.
 497		 */
 498		flush_dcache_folio_impl(folio);
 499	}
 500
 501out:
 502	put_cpu();
 503}
 504EXPORT_SYMBOL(flush_dcache_folio);
 505
 506void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 507{
 508	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
 509	if (tlb_type == spitfire) {
 510		unsigned long kaddr;
 511
 512		/* This code only runs on Spitfire cpus so this is
 513		 * why we can assume _PAGE_PADDR_4U.
 514		 */
 515		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
 516			unsigned long paddr, mask = _PAGE_PADDR_4U;
 517
 518			if (kaddr >= PAGE_OFFSET)
 519				paddr = kaddr & mask;
 520			else {
 521				pte_t *ptep = virt_to_kpte(kaddr);
 522
 523				paddr = pte_val(*ptep) & mask;
 524			}
 525			__flush_icache_page(paddr);
 526		}
 527	}
 528}
 529EXPORT_SYMBOL(flush_icache_range);
 530
 531void mmu_info(struct seq_file *m)
 532{
 533	static const char *pgsz_strings[] = {
 534		"8K", "64K", "512K", "4MB", "32MB",
 535		"256MB", "2GB", "16GB",
 536	};
 537	int i, printed;
 538
 539	if (tlb_type == cheetah)
 540		seq_printf(m, "MMU Type\t: Cheetah\n");
 541	else if (tlb_type == cheetah_plus)
 542		seq_printf(m, "MMU Type\t: Cheetah+\n");
 543	else if (tlb_type == spitfire)
 544		seq_printf(m, "MMU Type\t: Spitfire\n");
 545	else if (tlb_type == hypervisor)
 546		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
 547	else
 548		seq_printf(m, "MMU Type\t: ???\n");
 549
 550	seq_printf(m, "MMU PGSZs\t: ");
 551	printed = 0;
 552	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
 553		if (cpu_pgsz_mask & (1UL << i)) {
 554			seq_printf(m, "%s%s",
 555				   printed ? "," : "", pgsz_strings[i]);
 556			printed++;
 557		}
 558	}
 559	seq_putc(m, '\n');
 560
 561#ifdef CONFIG_DEBUG_DCFLUSH
 562	seq_printf(m, "DCPageFlushes\t: %d\n",
 563		   atomic_read(&dcpage_flushes));
 564#ifdef CONFIG_SMP
 565	seq_printf(m, "DCPageFlushesXC\t: %d\n",
 566		   atomic_read(&dcpage_flushes_xcall));
 567#endif /* CONFIG_SMP */
 568#endif /* CONFIG_DEBUG_DCFLUSH */
 569}
 570
 571struct linux_prom_translation prom_trans[512] __read_mostly;
 572unsigned int prom_trans_ents __read_mostly;
 573
 574unsigned long kern_locked_tte_data;
 575
 576/* The obp translations are saved based on 8k pagesize, since obp can
 577 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 578 * HI_OBP_ADDRESS range are handled in ktlb.S.
 579 */
 580static inline int in_obp_range(unsigned long vaddr)
 581{
 582	return (vaddr >= LOW_OBP_ADDRESS &&
 583		vaddr < HI_OBP_ADDRESS);
 584}
 585
 586static int cmp_ptrans(const void *a, const void *b)
 587{
 588	const struct linux_prom_translation *x = a, *y = b;
 589
 590	if (x->virt > y->virt)
 591		return 1;
 592	if (x->virt < y->virt)
 593		return -1;
 594	return 0;
 595}
 596
 597/* Read OBP translations property into 'prom_trans[]'.  */
 598static void __init read_obp_translations(void)
 599{
 600	int n, node, ents, first, last, i;
 601
 602	node = prom_finddevice("/virtual-memory");
 603	n = prom_getproplen(node, "translations");
 604	if (unlikely(n == 0 || n == -1)) {
 605		prom_printf("prom_mappings: Couldn't get size.\n");
 606		prom_halt();
 607	}
 608	if (unlikely(n > sizeof(prom_trans))) {
 609		prom_printf("prom_mappings: Size %d is too big.\n", n);
 610		prom_halt();
 611	}
 612
 613	if ((n = prom_getproperty(node, "translations",
 614				  (char *)&prom_trans[0],
 615				  sizeof(prom_trans))) == -1) {
 616		prom_printf("prom_mappings: Couldn't get property.\n");
 617		prom_halt();
 618	}
 619
 620	n = n / sizeof(struct linux_prom_translation);
 621
 622	ents = n;
 623
 624	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
 625	     cmp_ptrans, NULL);
 626
 627	/* Now kick out all the non-OBP entries.  */
 628	for (i = 0; i < ents; i++) {
 629		if (in_obp_range(prom_trans[i].virt))
 630			break;
 631	}
 632	first = i;
 633	for (; i < ents; i++) {
 634		if (!in_obp_range(prom_trans[i].virt))
 635			break;
 636	}
 637	last = i;
 638
 639	for (i = 0; i < (last - first); i++) {
 640		struct linux_prom_translation *src = &prom_trans[i + first];
 641		struct linux_prom_translation *dest = &prom_trans[i];
 642
 643		*dest = *src;
 644	}
 645	for (; i < ents; i++) {
 646		struct linux_prom_translation *dest = &prom_trans[i];
 647		dest->virt = dest->size = dest->data = 0x0UL;
 648	}
 649
 650	prom_trans_ents = last - first;
 651
 652	if (tlb_type == spitfire) {
 653		/* Clear diag TTE bits. */
 654		for (i = 0; i < prom_trans_ents; i++)
 655			prom_trans[i].data &= ~0x0003fe0000000000UL;
 656	}
 657
 658	/* Force execute bit on.  */
 659	for (i = 0; i < prom_trans_ents; i++)
 660		prom_trans[i].data |= (tlb_type == hypervisor ?
 661				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
 662}
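
/* Note: after the sort by virtual address, only the entries inside the
 * LOW_OBP_ADDRESS..HI_OBP_ADDRESS window are kept (compacted to the
 * front of prom_trans[]); diag TTE bits are cleared on spitfire and the
 * execute permission is forced on for every surviving entry.
 */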
 663
 664static void __init hypervisor_tlb_lock(unsigned long vaddr,
 665				       unsigned long pte,
 666				       unsigned long mmu)
 667{
 668	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
 669
 670	if (ret != 0) {
 671		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
 672			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
 673		prom_halt();
 674	}
 675}
 676
 677static unsigned long kern_large_tte(unsigned long paddr);
 678
 679static void __init remap_kernel(void)
 680{
 681	unsigned long phys_page, tte_vaddr, tte_data;
 682	int i, tlb_ent = sparc64_highest_locked_tlbent();
 683
 684	tte_vaddr = (unsigned long) KERNBASE;
 685	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
 686	tte_data = kern_large_tte(phys_page);
 687
 688	kern_locked_tte_data = tte_data;
 689
 690	/* Now lock us into the TLBs via Hypervisor or OBP. */
 691	if (tlb_type == hypervisor) {
 692		for (i = 0; i < num_kernel_image_mappings; i++) {
 693			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
 694			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
 695			tte_vaddr += 0x400000;
 696			tte_data += 0x400000;
 697		}
 698	} else {
 699		for (i = 0; i < num_kernel_image_mappings; i++) {
 700			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
 701			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
 702			tte_vaddr += 0x400000;
 703			tte_data += 0x400000;
 704		}
 705		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
 706	}
 707	if (tlb_type == cheetah_plus) {
 708		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
 709					    CTX_CHEETAH_PLUS_NUC);
 710		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
 711		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
 712	}
 713}
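
/* Note: remap_kernel() locks the kernel image into both the I- and
 * D-TLBs in 4MB steps, using hypervisor permanent mappings on sun4v and
 * OBP's dtlb/itlb load services on sun4u; on sun4u it also records the
 * highest TLB entry index left unlocked.
 */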
 714
 715
 716static void __init inherit_prom_mappings(void)
 717{
 718	/* Now fixup OBP's idea about where we really are mapped. */
 719	printk("Remapping the kernel... ");
 720	remap_kernel();
 721	printk("done.\n");
 722}
 723
 724void prom_world(int enter)
 725{
 726	/*
 727	 * No need to change the address space any more, just flush
 728	 * the register windows
 729	 */
 730	__asm__ __volatile__("flushw");
 731}
 732
 733void __flush_dcache_range(unsigned long start, unsigned long end)
 734{
 735	unsigned long va;
 736
 737	if (tlb_type == spitfire) {
 738		int n = 0;
 739
 740		for (va = start; va < end; va += 32) {
 741			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
 742			if (++n >= 512)
 743				break;
 744		}
 745	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
 746		start = __pa(start);
 747		end = __pa(end);
 748		for (va = start; va < end; va += 32)
 749			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
 750					     "membar #Sync"
 751					     : /* no outputs */
 752					     : "r" (va),
 753					       "i" (ASI_DCACHE_INVALIDATE));
 754	}
 755}
 756EXPORT_SYMBOL(__flush_dcache_range);
 757
 758/* get_new_mmu_context() uses "cache + 1".  */
 759DEFINE_SPINLOCK(ctx_alloc_lock);
 760unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 761#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 762#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 763DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
 764DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
 765
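/* Handle MMU context version wrap-around: reset the context bitmap, start a
 * new version epoch, and move the contexts that are currently loaded as
 * secondary contexts on some CPU over to the new version so those mms do
 * not need a full reallocation.
 */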
 766static void mmu_context_wrap(void)
 767{
 768	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
 769	unsigned long new_ver, new_ctx, old_ctx;
 770	struct mm_struct *mm;
 771	int cpu;
 772
 773	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
 774
 775	/* Reserve kernel context */
 776	set_bit(0, mmu_context_bmap);
 777
 778	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
 779	if (unlikely(new_ver == 0))
 780		new_ver = CTX_FIRST_VERSION;
 781	tlb_context_cache = new_ver;
 782
  783	/*
  784	 * Make sure that any new mm added into per_cpu_secondary_mm will go
  785	 * through the get_new_mmu_context() path.
  786	 */
 787	mb();
 788
  789	/*
  790	 * Update the version to current on those CPUs that had valid secondary
  791	 * contexts.
  792	 */
 793	for_each_online_cpu(cpu) {
 794		/*
 795		 * If a new mm is stored after we took this mm from the array,
 796		 * it will go into get_new_mmu_context() path, because we
 797		 * already bumped the version in tlb_context_cache.
 798		 */
 799		mm = per_cpu(per_cpu_secondary_mm, cpu);
 800
 801		if (unlikely(!mm || mm == &init_mm))
 802			continue;
 803
 804		old_ctx = mm->context.sparc64_ctx_val;
 805		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
 806			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
 807			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
 808			mm->context.sparc64_ctx_val = new_ctx;
 809		}
 810	}
 811}
 812
 813/* Caller does TLB context flushing on local CPU if necessary.
 814 * The caller also ensures that CTX_VALID(mm->context) is false.
 815 *
 816 * We must be careful about boundary cases so that we never
 817 * let the user have CTX 0 (nucleus) or we ever use a CTX
 818 * version of zero (and thus NO_CONTEXT would not be caught
 819 * by version mis-match tests in mmu_context.h).
 820 *
 821 * Always invoked with interrupts disabled.
 822 */
 823void get_new_mmu_context(struct mm_struct *mm)
 824{
 825	unsigned long ctx, new_ctx;
 826	unsigned long orig_pgsz_bits;
 827
 828	spin_lock(&ctx_alloc_lock);
 829retry:
 830	/* wrap might have happened, test again if our context became valid */
 831	if (unlikely(CTX_VALID(mm->context)))
 832		goto out;
 833	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 834	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 835	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
 836	if (new_ctx >= (1 << CTX_NR_BITS)) {
 837		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 838		if (new_ctx >= ctx) {
 839			mmu_context_wrap();
 840			goto retry;
 841		}
 842	}
 843	if (mm->context.sparc64_ctx_val)
 844		cpumask_clear(mm_cpumask(mm));
 845	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 846	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
 847	tlb_context_cache = new_ctx;
 848	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
 849out:
 850	spin_unlock(&ctx_alloc_lock);
 851}
 852
 853static int numa_enabled = 1;
 854static int numa_debug;
 855
 856static int __init early_numa(char *p)
 857{
 858	if (!p)
 859		return 0;
 860
 861	if (strstr(p, "off"))
 862		numa_enabled = 0;
 863
 864	if (strstr(p, "debug"))
 865		numa_debug = 1;
 866
 867	return 0;
 868}
 869early_param("numa", early_numa);
 870
 871#define numadbg(f, a...) \
 872do {	if (numa_debug) \
 873		printk(KERN_INFO f, ## a); \
 874} while (0)
 875
 876static void __init find_ramdisk(unsigned long phys_base)
 877{
 878#ifdef CONFIG_BLK_DEV_INITRD
 879	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
 880		unsigned long ramdisk_image;
 881
 882		/* Older versions of the bootloader only supported a
 883		 * 32-bit physical address for the ramdisk image
 884		 * location, stored at sparc_ramdisk_image.  Newer
 885		 * SILO versions set sparc_ramdisk_image to zero and
 886		 * provide a full 64-bit physical address at
 887		 * sparc_ramdisk_image64.
 888		 */
 889		ramdisk_image = sparc_ramdisk_image;
 890		if (!ramdisk_image)
 891			ramdisk_image = sparc_ramdisk_image64;
 892
 893		/* Another bootloader quirk.  The bootloader normalizes
 894		 * the physical address to KERNBASE, so we have to
 895		 * factor that back out and add in the lowest valid
 896		 * physical page address to get the true physical address.
 897		 */
 898		ramdisk_image -= KERNBASE;
 899		ramdisk_image += phys_base;
 900
 901		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
 902			ramdisk_image, sparc_ramdisk_size);
 903
 904		initrd_start = ramdisk_image;
 905		initrd_end = ramdisk_image + sparc_ramdisk_size;
 906
 907		memblock_reserve(initrd_start, sparc_ramdisk_size);
 908
 909		initrd_start += PAGE_OFFSET;
 910		initrd_end += PAGE_OFFSET;
 911	}
 912#endif
 913}
 914
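/* Physical-address-to-node rule: an address belongs to node i when
 * (addr & node_masks[i].mask) == node_masks[i].match.
 */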
 915struct node_mem_mask {
 916	unsigned long mask;
 917	unsigned long match;
 918};
 919static struct node_mem_mask node_masks[MAX_NUMNODES];
 920static int num_node_masks;
 921
 922#ifdef CONFIG_NUMA
 923
 924struct mdesc_mlgroup {
 925	u64	node;
 926	u64	latency;
 927	u64	match;
 928	u64	mask;
 929};
 930
 931static struct mdesc_mlgroup *mlgroups;
 932static int num_mlgroups;
 933
 934int numa_cpu_lookup_table[NR_CPUS];
 935cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 936
 937struct mdesc_mblock {
 938	u64	base;
 939	u64	size;
 940	u64	offset; /* RA-to-PA */
 941};
 942static struct mdesc_mblock *mblocks;
 943static int num_mblocks;
 944
 945static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
 946{
 947	struct mdesc_mblock *m = NULL;
 948	int i;
 949
 950	for (i = 0; i < num_mblocks; i++) {
 951		m = &mblocks[i];
 952
 953		if (addr >= m->base &&
 954		    addr < (m->base + m->size)) {
 955			break;
 956		}
 957	}
 958
 959	return m;
 960}
 961
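/* Walk [start, end) page by page applying the node_masks rules and return
 * the address at which the owning node changes.  *nid is set to the node
 * of the leading portion, or 0 if no rule matches.
 */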
 962static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
 963{
 964	int prev_nid, new_nid;
 965
 966	prev_nid = NUMA_NO_NODE;
 967	for ( ; start < end; start += PAGE_SIZE) {
 968		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
 969			struct node_mem_mask *p = &node_masks[new_nid];
 970
 971			if ((start & p->mask) == p->match) {
 972				if (prev_nid == NUMA_NO_NODE)
 973					prev_nid = new_nid;
 974				break;
 975			}
 976		}
 977
 978		if (new_nid == num_node_masks) {
 979			prev_nid = 0;
 980			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
 981				  start);
 982			break;
 983		}
 984
 985		if (prev_nid != new_nid)
 986			break;
 987	}
 988	*nid = prev_nid;
 989
 990	return start > end ? end : start;
 991}
 992
 993static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 994{
 995	u64 ret_end, pa_start, m_mask, m_match, m_end;
 996	struct mdesc_mblock *mblock;
 997	int _nid, i;
 998
 999	if (tlb_type != hypervisor)
1000		return memblock_nid_range_sun4u(start, end, nid);
1001
1002	mblock = addr_to_mblock(start);
1003	if (!mblock) {
1004		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
1005			  start);
1006
1007		_nid = 0;
1008		ret_end = end;
1009		goto done;
1010	}
1011
1012	pa_start = start + mblock->offset;
1013	m_match = 0;
1014	m_mask = 0;
1015
1016	for (_nid = 0; _nid < num_node_masks; _nid++) {
1017		struct node_mem_mask *const m = &node_masks[_nid];
1018
1019		if ((pa_start & m->mask) == m->match) {
1020			m_match = m->match;
1021			m_mask = m->mask;
1022			break;
1023		}
1024	}
1025
1026	if (num_node_masks == _nid) {
 1027		/* We could not find a NUMA group, so default to 0, but let's
 1028		 * search for a latency group so we can calculate the correct
 1029		 * end address to return.
 1030		 */
1031		_nid = 0;
1032
1033		for (i = 0; i < num_mlgroups; i++) {
1034			struct mdesc_mlgroup *const m = &mlgroups[i];
1035
1036			if ((pa_start & m->mask) == m->match) {
1037				m_match = m->match;
1038				m_mask = m->mask;
1039				break;
1040			}
1041		}
1042
1043		if (i == num_mlgroups) {
1044			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
1045				  start);
1046
1047			ret_end = end;
1048			goto done;
1049		}
1050	}
1051
1052	/*
1053	 * Each latency group has match and mask, and each memory block has an
 1054	 * offset.  An address belongs to a latency group if it satisfies
 1055	 * the following formula: ((addr + offset) & mask) == match
 1056	 * It is, however, slow to check every single page against a
 1057	 * particular latency group.  As an optimization we calculate the end
 1058	 * value using bit arithmetic.
1059	 */
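	/* For illustration only (assumed values, not from a real machine
	 * description): with mask == 0x180000000 and match == 0x080000000,
	 * blocks are 2GB (1ul << __ffs(mask)) and repeat every 8GB
	 * (1ul << fls64(mask)).  A pa_start of 0x292345678 matches, its
	 * block ends at PA 0x300000000, and m_end is that value minus
	 * mblock->offset, i.e. converted back into a real address.
	 */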
1060	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
1061	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
1062	ret_end = m_end > end ? end : m_end;
1063
1064done:
1065	*nid = _nid;
1066	return ret_end;
1067}
1068#endif
1069
1070/* This must be invoked after performing all of the necessary
1071 * memblock_set_node() calls for 'nid'.  We need to be able to get
1072 * correct data from get_pfn_range_for_nid().
1073 */
1074static void __init allocate_node_data(int nid)
1075{
1076	struct pglist_data *p;
1077	unsigned long start_pfn, end_pfn;
1078#ifdef CONFIG_NUMA
1079
1080	NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
1081					     SMP_CACHE_BYTES, nid);
1082	if (!NODE_DATA(nid)) {
1083		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
1084		prom_halt();
1085	}
1086
1087	NODE_DATA(nid)->node_id = nid;
1088#endif
1089
1090	p = NODE_DATA(nid);
1091
1092	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1093	p->node_start_pfn = start_pfn;
1094	p->node_spanned_pages = end_pfn - start_pfn;
1095}
1096
1097static void init_node_masks_nonnuma(void)
1098{
1099#ifdef CONFIG_NUMA
1100	int i;
1101#endif
1102
1103	numadbg("Initializing tables for non-numa.\n");
1104
1105	node_masks[0].mask = 0;
1106	node_masks[0].match = 0;
1107	num_node_masks = 1;
1108
1109#ifdef CONFIG_NUMA
1110	for (i = 0; i < NR_CPUS; i++)
1111		numa_cpu_lookup_table[i] = 0;
1112
1113	cpumask_setall(&numa_cpumask_lookup_table[0]);
1114#endif
1115}
1116
1117#ifdef CONFIG_NUMA
1118struct pglist_data *node_data[MAX_NUMNODES];
1119
1120EXPORT_SYMBOL(numa_cpu_lookup_table);
1121EXPORT_SYMBOL(numa_cpumask_lookup_table);
1122EXPORT_SYMBOL(node_data);
1123
1124static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
1125				   u32 cfg_handle)
1126{
1127	u64 arc;
1128
1129	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
1130		u64 target = mdesc_arc_target(md, arc);
1131		const u64 *val;
1132
1133		val = mdesc_get_property(md, target,
1134					 "cfg-handle", NULL);
1135		if (val && *val == cfg_handle)
1136			return 0;
1137	}
1138	return -ENODEV;
1139}
1140
1141static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
1142				    u32 cfg_handle)
1143{
1144	u64 arc, candidate, best_latency = ~(u64)0;
1145
1146	candidate = MDESC_NODE_NULL;
1147	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1148		u64 target = mdesc_arc_target(md, arc);
1149		const char *name = mdesc_node_name(md, target);
1150		const u64 *val;
1151
1152		if (strcmp(name, "pio-latency-group"))
1153			continue;
1154
1155		val = mdesc_get_property(md, target, "latency", NULL);
1156		if (!val)
1157			continue;
1158
1159		if (*val < best_latency) {
1160			candidate = target;
1161			best_latency = *val;
1162		}
1163	}
1164
1165	if (candidate == MDESC_NODE_NULL)
1166		return -ENODEV;
1167
1168	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
1169}
1170
1171int of_node_to_nid(struct device_node *dp)
1172{
1173	const struct linux_prom64_registers *regs;
1174	struct mdesc_handle *md;
1175	u32 cfg_handle;
1176	int count, nid;
1177	u64 grp;
1178
1179	/* This is the right thing to do on currently supported
1180	 * SUN4U NUMA platforms as well, as the PCI controller does
1181	 * not sit behind any particular memory controller.
1182	 */
1183	if (!mlgroups)
1184		return -1;
1185
1186	regs = of_get_property(dp, "reg", NULL);
1187	if (!regs)
1188		return -1;
1189
1190	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
1191
1192	md = mdesc_grab();
1193
1194	count = 0;
1195	nid = NUMA_NO_NODE;
1196	mdesc_for_each_node_by_name(md, grp, "group") {
1197		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
1198			nid = count;
1199			break;
1200		}
1201		count++;
1202	}
1203
1204	mdesc_release(md);
1205
1206	return nid;
1207}
1208
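/* Assign every memblock memory range to a NUMA node using
 * memblock_nid_range().  memblock_set_node() may resize memblock's
 * internal arrays; if that happens, restart the walk from the beginning.
 */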
1209static void __init add_node_ranges(void)
1210{
1211	phys_addr_t start, end;
1212	unsigned long prev_max;
1213	u64 i;
1214
1215memblock_resized:
1216	prev_max = memblock.memory.max;
1217
1218	for_each_mem_range(i, &start, &end) {
1219		while (start < end) {
1220			unsigned long this_end;
1221			int nid;
1222
1223			this_end = memblock_nid_range(start, end, &nid);
1224
1225			numadbg("Setting memblock NUMA node nid[%d] "
1226				"start[%llx] end[%lx]\n",
1227				nid, start, this_end);
1228
1229			memblock_set_node(start, this_end - start,
1230					  &memblock.memory, nid);
1231			if (memblock.memory.max != prev_max)
1232				goto memblock_resized;
1233			start = this_end;
1234		}
1235	}
1236}
1237
1238static int __init grab_mlgroups(struct mdesc_handle *md)
1239{
1240	unsigned long paddr;
1241	int count = 0;
1242	u64 node;
1243
1244	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
1245		count++;
1246	if (!count)
1247		return -ENOENT;
1248
1249	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mlgroup),
1250				    SMP_CACHE_BYTES);
1251	if (!paddr)
1252		return -ENOMEM;
1253
1254	mlgroups = __va(paddr);
1255	num_mlgroups = count;
1256
1257	count = 0;
1258	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
1259		struct mdesc_mlgroup *m = &mlgroups[count++];
1260		const u64 *val;
1261
1262		m->node = node;
1263
1264		val = mdesc_get_property(md, node, "latency", NULL);
1265		m->latency = *val;
1266		val = mdesc_get_property(md, node, "address-match", NULL);
1267		m->match = *val;
1268		val = mdesc_get_property(md, node, "address-mask", NULL);
1269		m->mask = *val;
1270
1271		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
1272			"match[%llx] mask[%llx]\n",
1273			count - 1, m->node, m->latency, m->match, m->mask);
1274	}
1275
1276	return 0;
1277}
1278
1279static int __init grab_mblocks(struct mdesc_handle *md)
1280{
1281	unsigned long paddr;
1282	int count = 0;
1283	u64 node;
1284
1285	mdesc_for_each_node_by_name(md, node, "mblock")
1286		count++;
1287	if (!count)
1288		return -ENOENT;
1289
1290	paddr = memblock_phys_alloc(count * sizeof(struct mdesc_mblock),
1291				    SMP_CACHE_BYTES);
1292	if (!paddr)
1293		return -ENOMEM;
1294
1295	mblocks = __va(paddr);
1296	num_mblocks = count;
1297
1298	count = 0;
1299	mdesc_for_each_node_by_name(md, node, "mblock") {
1300		struct mdesc_mblock *m = &mblocks[count++];
1301		const u64 *val;
1302
1303		val = mdesc_get_property(md, node, "base", NULL);
1304		m->base = *val;
1305		val = mdesc_get_property(md, node, "size", NULL);
1306		m->size = *val;
1307		val = mdesc_get_property(md, node,
1308					 "address-congruence-offset", NULL);
1309
 1310		/* The address-congruence-offset property is optional.
 1311		 * Explicitly zero it to identify this case.
 1312		 */
1313		if (val)
1314			m->offset = *val;
1315		else
1316			m->offset = 0UL;
1317
1318		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
1319			count - 1, m->base, m->size, m->offset);
1320	}
1321
1322	return 0;
1323}
1324
1325static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
1326					       u64 grp, cpumask_t *mask)
1327{
1328	u64 arc;
1329
1330	cpumask_clear(mask);
1331
1332	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
1333		u64 target = mdesc_arc_target(md, arc);
1334		const char *name = mdesc_node_name(md, target);
1335		const u64 *id;
1336
1337		if (strcmp(name, "cpu"))
1338			continue;
1339		id = mdesc_get_property(md, target, "id", NULL);
1340		if (*id < nr_cpu_ids)
1341			cpumask_set_cpu(*id, mask);
1342	}
1343}
1344
1345static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
1346{
1347	int i;
1348
1349	for (i = 0; i < num_mlgroups; i++) {
1350		struct mdesc_mlgroup *m = &mlgroups[i];
1351		if (m->node == node)
1352			return m;
1353	}
1354	return NULL;
1355}
1356
1357int __node_distance(int from, int to)
1358{
1359	if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
1360		pr_warn("Returning default NUMA distance value for %d->%d\n",
1361			from, to);
1362		return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
1363	}
1364	return numa_latency[from][to];
1365}
1366EXPORT_SYMBOL(__node_distance);
1367
1368static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
1369{
1370	int i;
1371
1372	for (i = 0; i < MAX_NUMNODES; i++) {
1373		struct node_mem_mask *n = &node_masks[i];
1374
1375		if ((grp->mask == n->mask) && (grp->match == n->match))
1376			break;
1377	}
1378	return i;
1379}
1380
1381static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
1382						 u64 grp, int index)
1383{
1384	u64 arc;
1385
1386	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1387		int tnode;
1388		u64 target = mdesc_arc_target(md, arc);
1389		struct mdesc_mlgroup *m = find_mlgroup(target);
1390
1391		if (!m)
1392			continue;
1393		tnode = find_best_numa_node_for_mlgroup(m);
1394		if (tnode == MAX_NUMNODES)
1395			continue;
1396		numa_latency[index][tnode] = m->latency;
1397	}
1398}
1399
1400static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
1401				      int index)
1402{
1403	struct mdesc_mlgroup *candidate = NULL;
1404	u64 arc, best_latency = ~(u64)0;
1405	struct node_mem_mask *n;
1406
1407	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
1408		u64 target = mdesc_arc_target(md, arc);
1409		struct mdesc_mlgroup *m = find_mlgroup(target);
1410		if (!m)
1411			continue;
1412		if (m->latency < best_latency) {
1413			candidate = m;
1414			best_latency = m->latency;
1415		}
1416	}
1417	if (!candidate)
1418		return -ENOENT;
1419
1420	if (num_node_masks != index) {
1421		printk(KERN_ERR "Inconsistent NUMA state, "
1422		       "index[%d] != num_node_masks[%d]\n",
1423		       index, num_node_masks);
1424		return -EINVAL;
1425	}
1426
1427	n = &node_masks[num_node_masks++];
1428
1429	n->mask = candidate->mask;
1430	n->match = candidate->match;
1431
1432	numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
1433		index, n->mask, n->match, candidate->latency);
1434
1435	return 0;
1436}
1437
1438static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
1439					 int index)
1440{
1441	cpumask_t mask;
1442	int cpu;
1443
1444	numa_parse_mdesc_group_cpus(md, grp, &mask);
1445
1446	for_each_cpu(cpu, &mask)
1447		numa_cpu_lookup_table[cpu] = index;
1448	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
1449
1450	if (numa_debug) {
1451		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
1452		for_each_cpu(cpu, &mask)
1453			printk("%d ", cpu);
1454		printk("]\n");
1455	}
1456
1457	return numa_attach_mlgroup(md, grp, index);
1458}
1459
1460static int __init numa_parse_mdesc(void)
1461{
1462	struct mdesc_handle *md = mdesc_grab();
1463	int i, j, err, count;
1464	u64 node;
1465
1466	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
1467	if (node == MDESC_NODE_NULL) {
1468		mdesc_release(md);
1469		return -ENOENT;
1470	}
1471
1472	err = grab_mblocks(md);
1473	if (err < 0)
1474		goto out;
1475
1476	err = grab_mlgroups(md);
1477	if (err < 0)
1478		goto out;
1479
1480	count = 0;
1481	mdesc_for_each_node_by_name(md, node, "group") {
1482		err = numa_parse_mdesc_group(md, node, count);
1483		if (err < 0)
1484			break;
1485		count++;
1486	}
1487
1488	count = 0;
1489	mdesc_for_each_node_by_name(md, node, "group") {
1490		find_numa_latencies_for_group(md, node, count);
1491		count++;
1492	}
1493
1494	/* Normalize numa latency matrix according to ACPI SLIT spec. */
1495	for (i = 0; i < MAX_NUMNODES; i++) {
1496		u64 self_latency = numa_latency[i][i];
1497
1498		for (j = 0; j < MAX_NUMNODES; j++) {
1499			numa_latency[i][j] =
1500				(numa_latency[i][j] * LOCAL_DISTANCE) /
1501				self_latency;
1502		}
1503	}
1504
1505	add_node_ranges();
1506
1507	for (i = 0; i < num_node_masks; i++) {
1508		allocate_node_data(i);
1509		node_set_online(i);
1510	}
1511
1512	err = 0;
1513out:
1514	mdesc_release(md);
1515	return err;
1516}
1517
1518static int __init numa_parse_jbus(void)
1519{
1520	unsigned long cpu, index;
1521
1522	/* NUMA node id is encoded in bits 36 and higher, and there is
1523	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
1524	 */
1525	index = 0;
1526	for_each_present_cpu(cpu) {
1527		numa_cpu_lookup_table[cpu] = index;
1528		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
1529		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
1530		node_masks[index].match = cpu << 36UL;
1531
1532		index++;
1533	}
1534	num_node_masks = index;
1535
1536	add_node_ranges();
1537
1538	for (index = 0; index < num_node_masks; index++) {
1539		allocate_node_data(index);
1540		node_set_online(index);
1541	}
1542
1543	return 0;
1544}
1545
1546static int __init numa_parse_sun4u(void)
1547{
1548	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1549		unsigned long ver;
1550
1551		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
1552		if ((ver >> 32UL) == __JALAPENO_ID ||
1553		    (ver >> 32UL) == __SERRANO_ID)
1554			return numa_parse_jbus();
1555	}
1556	return -1;
1557}
1558
1559static int __init bootmem_init_numa(void)
1560{
1561	int i, j;
1562	int err = -1;
1563
1564	numadbg("bootmem_init_numa()\n");
1565
1566	/* Some sane defaults for numa latency values */
1567	for (i = 0; i < MAX_NUMNODES; i++) {
1568		for (j = 0; j < MAX_NUMNODES; j++)
1569			numa_latency[i][j] = (i == j) ?
1570				LOCAL_DISTANCE : REMOTE_DISTANCE;
1571	}
1572
1573	if (numa_enabled) {
1574		if (tlb_type == hypervisor)
1575			err = numa_parse_mdesc();
1576		else
1577			err = numa_parse_sun4u();
1578	}
1579	return err;
1580}
1581
1582#else
1583
1584static int bootmem_init_numa(void)
1585{
1586	return -1;
1587}
1588
1589#endif
1590
1591static void __init bootmem_init_nonnuma(void)
1592{
1593	unsigned long top_of_ram = memblock_end_of_DRAM();
1594	unsigned long total_ram = memblock_phys_mem_size();
1595
1596	numadbg("bootmem_init_nonnuma()\n");
1597
1598	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
1599	       top_of_ram, total_ram);
1600	printk(KERN_INFO "Memory hole size: %ldMB\n",
1601	       (top_of_ram - total_ram) >> 20);
1602
1603	init_node_masks_nonnuma();
1604	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
1605	allocate_node_data(0);
1606	node_set_online(0);
1607}
1608
1609static unsigned long __init bootmem_init(unsigned long phys_base)
1610{
1611	unsigned long end_pfn;
1612
1613	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1614	max_pfn = max_low_pfn = end_pfn;
1615	min_low_pfn = (phys_base >> PAGE_SHIFT);
1616
1617	if (bootmem_init_numa() < 0)
1618		bootmem_init_nonnuma();
1619
1620	/* Dump memblock with node info. */
1621	memblock_dump_all();
1622
1623	/* XXX cpu notifier XXX */
1624
1625	sparse_init();
1626
1627	return end_pfn;
1628}
1629
1630static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
1631static int pall_ents __initdata;
1632
1633static unsigned long max_phys_bits = 40;
1634
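/* Return whether a kernel virtual address is backed by a valid page:
 * linear-mapped (negative) addresses are checked against max_phys_bits and
 * pfn_valid(), the kernel image range is always valid, and anything else
 * is resolved by walking the kernel page tables.
 */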
1635bool kern_addr_valid(unsigned long addr)
1636{
1637	pgd_t *pgd;
1638	p4d_t *p4d;
1639	pud_t *pud;
1640	pmd_t *pmd;
1641	pte_t *pte;
1642
1643	if ((long)addr < 0L) {
1644		unsigned long pa = __pa(addr);
1645
1646		if ((pa >> max_phys_bits) != 0UL)
1647			return false;
1648
1649		return pfn_valid(pa >> PAGE_SHIFT);
1650	}
1651
1652	if (addr >= (unsigned long) KERNBASE &&
1653	    addr < (unsigned long)&_end)
1654		return true;
1655
1656	pgd = pgd_offset_k(addr);
1657	if (pgd_none(*pgd))
1658		return false;
1659
1660	p4d = p4d_offset(pgd, addr);
1661	if (p4d_none(*p4d))
1662		return false;
1663
1664	pud = pud_offset(p4d, addr);
1665	if (pud_none(*pud))
1666		return false;
1667
1668	if (pud_leaf(*pud))
1669		return pfn_valid(pud_pfn(*pud));
1670
1671	pmd = pmd_offset(pud, addr);
1672	if (pmd_none(*pmd))
1673		return false;
1674
1675	if (pmd_leaf(*pmd))
1676		return pfn_valid(pmd_pfn(*pmd));
1677
1678	pte = pte_offset_kernel(pmd, addr);
1679	if (pte_none(*pte))
1680		return false;
1681
1682	return pfn_valid(pte_pfn(*pte));
1683}
1684
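/* Map [vstart, vend) at the PUD level.  A range that is not 16GB aligned
 * and sized gets a single PUD entry using the 2GB TTE xor value; otherwise
 * the next 16GB worth of PUD entries are filled using the 16GB TTE xor
 * value.  Returns the updated vstart.
 */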
1685static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
1686					      unsigned long vend,
1687					      pud_t *pud)
1688{
1689	const unsigned long mask16gb = (1UL << 34) - 1UL;
1690	u64 pte_val = vstart;
1691
1692	/* Each PUD is 8GB */
1693	if ((vstart & mask16gb) ||
1694	    (vend - vstart <= mask16gb)) {
1695		pte_val ^= kern_linear_pte_xor[2];
1696		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
1697
1698		return vstart + PUD_SIZE;
1699	}
1700
1701	pte_val ^= kern_linear_pte_xor[3];
1702	pte_val |= _PAGE_PUD_HUGE;
1703
1704	vend = vstart + mask16gb + 1UL;
1705	while (vstart < vend) {
1706		pud_val(*pud) = pte_val;
1707
1708		pte_val += PUD_SIZE;
1709		vstart += PUD_SIZE;
1710		pud++;
1711	}
1712	return vstart;
1713}
1714
1715static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
1716				   bool guard)
1717{
1718	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
1719		return true;
1720
1721	return false;
1722}
1723
1724static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
1725					      unsigned long vend,
1726					      pmd_t *pmd)
1727{
1728	const unsigned long mask256mb = (1UL << 28) - 1UL;
1729	const unsigned long mask2gb = (1UL << 31) - 1UL;
1730	u64 pte_val = vstart;
1731
1732	/* Each PMD is 8MB */
1733	if ((vstart & mask256mb) ||
1734	    (vend - vstart <= mask256mb)) {
1735		pte_val ^= kern_linear_pte_xor[0];
1736		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
1737
1738		return vstart + PMD_SIZE;
1739	}
1740
1741	if ((vstart & mask2gb) ||
1742	    (vend - vstart <= mask2gb)) {
1743		pte_val ^= kern_linear_pte_xor[1];
1744		pte_val |= _PAGE_PMD_HUGE;
1745		vend = vstart + mask256mb + 1UL;
1746	} else {
1747		pte_val ^= kern_linear_pte_xor[2];
1748		pte_val |= _PAGE_PMD_HUGE;
1749		vend = vstart + mask2gb + 1UL;
1750	}
1751
1752	while (vstart < vend) {
1753		pmd_val(*pmd) = pte_val;
1754
1755		pte_val += PMD_SIZE;
1756		vstart += PMD_SIZE;
1757		pmd++;
1758	}
1759
1760	return vstart;
1761}
1762
1763static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
1764				   bool guard)
1765{
1766	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
1767		return true;
1768
1769	return false;
1770}
1771
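/* Map the physical range [pstart, pend) into the kernel linear mapping,
 * allocating page-table levels from memblock as needed and using huge
 * PUD/PMD entries where use_huge permits.  Returns the number of bytes
 * allocated for page tables.
 */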
1772static unsigned long __ref kernel_map_range(unsigned long pstart,
1773					    unsigned long pend, pgprot_t prot,
1774					    bool use_huge)
1775{
1776	unsigned long vstart = PAGE_OFFSET + pstart;
1777	unsigned long vend = PAGE_OFFSET + pend;
1778	unsigned long alloc_bytes = 0UL;
1779
1780	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
1781		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
1782			    vstart, vend);
1783		prom_halt();
1784	}
1785
1786	while (vstart < vend) {
1787		unsigned long this_end, paddr = __pa(vstart);
1788		pgd_t *pgd = pgd_offset_k(vstart);
1789		p4d_t *p4d;
1790		pud_t *pud;
1791		pmd_t *pmd;
1792		pte_t *pte;
1793
1794		if (pgd_none(*pgd)) {
1795			pud_t *new;
1796
1797			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1798						  PAGE_SIZE);
1799			if (!new)
1800				goto err_alloc;
1801			alloc_bytes += PAGE_SIZE;
1802			pgd_populate(&init_mm, pgd, new);
1803		}
1804
1805		p4d = p4d_offset(pgd, vstart);
1806		if (p4d_none(*p4d)) {
1807			pud_t *new;
1808
1809			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1810						  PAGE_SIZE);
1811			if (!new)
1812				goto err_alloc;
1813			alloc_bytes += PAGE_SIZE;
1814			p4d_populate(&init_mm, p4d, new);
1815		}
1816
1817		pud = pud_offset(p4d, vstart);
1818		if (pud_none(*pud)) {
1819			pmd_t *new;
1820
1821			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
1822				vstart = kernel_map_hugepud(vstart, vend, pud);
1823				continue;
1824			}
1825			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1826						  PAGE_SIZE);
1827			if (!new)
1828				goto err_alloc;
1829			alloc_bytes += PAGE_SIZE;
1830			pud_populate(&init_mm, pud, new);
1831		}
1832
1833		pmd = pmd_offset(pud, vstart);
1834		if (pmd_none(*pmd)) {
1835			pte_t *new;
1836
1837			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
1838				vstart = kernel_map_hugepmd(vstart, vend, pmd);
1839				continue;
1840			}
1841			new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE,
1842						  PAGE_SIZE);
1843			if (!new)
1844				goto err_alloc;
1845			alloc_bytes += PAGE_SIZE;
1846			pmd_populate_kernel(&init_mm, pmd, new);
1847		}
1848
1849		pte = pte_offset_kernel(pmd, vstart);
1850		this_end = (vstart + PMD_SIZE) & PMD_MASK;
1851		if (this_end > vend)
1852			this_end = vend;
1853
1854		while (vstart < this_end) {
1855			pte_val(*pte) = (paddr | pgprot_val(prot));
1856
1857			vstart += PAGE_SIZE;
1858			paddr += PAGE_SIZE;
1859			pte++;
1860		}
1861	}
1862
1863	return alloc_bytes;
1864
1865err_alloc:
1866	panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
1867	      __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1868	return -ENOMEM;
1869}
1870
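/* Invalidate every entry in the kernel TSB(s) by setting the
 * TSB_TAG_INVALID_BIT in each tag.
 */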
1871static void __init flush_all_kernel_tsbs(void)
1872{
1873	int i;
1874
1875	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
1876		struct tsb *ent = &swapper_tsb[i];
1877
1878		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1879	}
1880#ifndef CONFIG_DEBUG_PAGEALLOC
1881	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
1882		struct tsb *ent = &swapper_4m_tsb[i];
1883
1884		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
1885	}
1886#endif
1887}
1888
1889extern unsigned int kvmap_linear_patch[1];
1890
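/* Build kernel page tables covering the linear mapping of all physical
 * memory described by pall[], then patch kvmap_linear_patch[0] with a nop
 * and flush the kernel TSBs and TLBs so the new mappings take effect.
 */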
1891static void __init kernel_physical_mapping_init(void)
1892{
1893	unsigned long i, mem_alloced = 0UL;
1894	bool use_huge = true;
1895
1896#ifdef CONFIG_DEBUG_PAGEALLOC
1897	use_huge = false;
1898#endif
1899	for (i = 0; i < pall_ents; i++) {
1900		unsigned long phys_start, phys_end;
1901
1902		phys_start = pall[i].phys_addr;
1903		phys_end = phys_start + pall[i].reg_size;
1904
1905		mem_alloced += kernel_map_range(phys_start, phys_end,
1906						PAGE_KERNEL, use_huge);
1907	}
1908
1909	printk("Allocated %ld bytes for kernel page tables.\n",
1910	       mem_alloced);
1911
1912	kvmap_linear_patch[0] = 0x01000000; /* nop */
1913	flushi(&kvmap_linear_patch[0]);
1914
1915	flush_all_kernel_tsbs();
1916
1917	__flush_tlb_all();
1918}
1919
1920#ifdef CONFIG_DEBUG_PAGEALLOC
1921void __kernel_map_pages(struct page *page, int numpages, int enable)
1922{
1923	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
1924	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
1925
1926	kernel_map_range(phys_start, phys_end,
1927			 (enable ? PAGE_KERNEL : __pgprot(0)), false);
1928
1929	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
1930			       PAGE_OFFSET + phys_end);
1931
 1932	/* We should perform an IPI and flush all TLBs,
 1933	 * but that can deadlock, so flush only the current cpu's TLB.
 1934	 */
1935	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
1936				 PAGE_OFFSET + phys_end);
1937}
1938#endif
1939
1940unsigned long __init find_ecache_flush_span(unsigned long size)
1941{
1942	int i;
1943
1944	for (i = 0; i < pavail_ents; i++) {
1945		if (pavail[i].reg_size >= size)
1946			return pavail[i].phys_addr;
1947	}
1948
1949	return ~0UL;
1950}
1951
1952unsigned long PAGE_OFFSET;
1953EXPORT_SYMBOL(PAGE_OFFSET);
1954
1955unsigned long VMALLOC_END   = 0x0000010000000000UL;
1956EXPORT_SYMBOL(VMALLOC_END);
1957
1958unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
1959unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
1960
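/* Determine the virtual address hole boundaries and the maximum number of
 * physical address bits for this cpu, then derive PAGE_OFFSET and
 * VMALLOC_END from them.
 */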
1961static void __init setup_page_offset(void)
1962{
1963	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1964		/* Cheetah/Panther support a full 64-bit virtual
1965		 * address, so we can use all that our page tables
1966		 * support.
1967		 */
1968		sparc64_va_hole_top =    0xfff0000000000000UL;
1969		sparc64_va_hole_bottom = 0x0010000000000000UL;
1970
1971		max_phys_bits = 42;
1972	} else if (tlb_type == hypervisor) {
1973		switch (sun4v_chip_type) {
1974		case SUN4V_CHIP_NIAGARA1:
1975		case SUN4V_CHIP_NIAGARA2:
1976			/* T1 and T2 support 48-bit virtual addresses.  */
1977			sparc64_va_hole_top =    0xffff800000000000UL;
1978			sparc64_va_hole_bottom = 0x0000800000000000UL;
1979
1980			max_phys_bits = 39;
1981			break;
1982		case SUN4V_CHIP_NIAGARA3:
1983			/* T3 supports 48-bit virtual addresses.  */
1984			sparc64_va_hole_top =    0xffff800000000000UL;
1985			sparc64_va_hole_bottom = 0x0000800000000000UL;
1986
1987			max_phys_bits = 43;
1988			break;
1989		case SUN4V_CHIP_NIAGARA4:
1990		case SUN4V_CHIP_NIAGARA5:
1991		case SUN4V_CHIP_SPARC64X:
1992		case SUN4V_CHIP_SPARC_M6:
1993			/* T4 and later support 52-bit virtual addresses.  */
1994			sparc64_va_hole_top =    0xfff8000000000000UL;
1995			sparc64_va_hole_bottom = 0x0008000000000000UL;
1996			max_phys_bits = 47;
1997			break;
1998		case SUN4V_CHIP_SPARC_M7:
1999		case SUN4V_CHIP_SPARC_SN:
2000			/* M7 and later support 52-bit virtual addresses.  */
2001			sparc64_va_hole_top =    0xfff8000000000000UL;
2002			sparc64_va_hole_bottom = 0x0008000000000000UL;
2003			max_phys_bits = 49;
2004			break;
2005		case SUN4V_CHIP_SPARC_M8:
2006		default:
 2007			/* M8 and later support 54-bit virtual addresses.
 2008			 * However, we restrict M8 and above to 53 VA bits
 2009			 * because a 4-level page table cannot support more
 2010			 * than 53 VA bits.
 2011			 */
2012			sparc64_va_hole_top =    0xfff0000000000000UL;
2013			sparc64_va_hole_bottom = 0x0010000000000000UL;
2014			max_phys_bits = 51;
2015			break;
2016		}
2017	}
2018
2019	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
2020		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
2021			    max_phys_bits);
2022		prom_halt();
2023	}
2024
2025	PAGE_OFFSET = sparc64_va_hole_top;
2026	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
2027		       (sparc64_va_hole_bottom >> 2));
2028
2029	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
2030		PAGE_OFFSET, max_phys_bits);
2031	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
2032		VMALLOC_START, VMALLOC_END);
2033	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
2034		VMEMMAP_BASE, VMEMMAP_BASE << 1);
2035}
2036
2037static void __init tsb_phys_patch(void)
2038{
2039	struct tsb_ldquad_phys_patch_entry *pquad;
2040	struct tsb_phys_patch_entry *p;
2041
2042	pquad = &__tsb_ldquad_phys_patch;
2043	while (pquad < &__tsb_ldquad_phys_patch_end) {
2044		unsigned long addr = pquad->addr;
2045
2046		if (tlb_type == hypervisor)
2047			*(unsigned int *) addr = pquad->sun4v_insn;
2048		else
2049			*(unsigned int *) addr = pquad->sun4u_insn;
2050		wmb();
2051		__asm__ __volatile__("flush	%0"
2052				     : /* no outputs */
2053				     : "r" (addr));
2054
2055		pquad++;
2056	}
2057
2058	p = &__tsb_phys_patch;
2059	while (p < &__tsb_phys_patch_end) {
2060		unsigned long addr = p->addr;
2061
2062		*(unsigned int *) addr = p->insn;
2063		wmb();
2064		__asm__ __volatile__("flush	%0"
2065				     : /* no outputs */
2066				     : "r" (addr));
2067
2068		p++;
2069	}
2070}
2071
2072/* Don't mark as init, we give this to the Hypervisor.  */
2073#ifndef CONFIG_DEBUG_PAGEALLOC
2074#define NUM_KTSB_DESCR	2
2075#else
2076#define NUM_KTSB_DESCR	1
2077#endif
2078static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
2079
2080/* The swapper TSBs are loaded with a base sequence of:
2081 *
2082 *	sethi	%uhi(SYMBOL), REG1
2083 *	sethi	%hi(SYMBOL), REG2
2084 *	or	REG1, %ulo(SYMBOL), REG1
2085 *	or	REG2, %lo(SYMBOL), REG2
2086 *	sllx	REG1, 32, REG1
2087 *	or	REG1, REG2, REG1
2088 *
2089 * When we use physical addressing for the TSB accesses, we patch the
2090 * first four instructions in the above sequence.
2091 */
2092
2093static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
2094{
2095	unsigned long high_bits, low_bits;
2096
2097	high_bits = (pa >> 32) & 0xffffffff;
2098	low_bits = (pa >> 0) & 0xffffffff;
2099
2100	while (start < end) {
2101		unsigned int *ia = (unsigned int *)(unsigned long)*start;
2102
2103		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
2104		__asm__ __volatile__("flush	%0" : : "r" (ia));
2105
2106		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
2107		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));
2108
2109		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
2110		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));
2111
2112		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
2113		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));
2114
2115		start++;
2116	}
2117}
2118
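/* Patch the swapper TSB address-forming sequences so that they use the
 * physical addresses of the kernel TSBs.
 */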
2119static void ktsb_phys_patch(void)
2120{
2121	extern unsigned int __swapper_tsb_phys_patch;
2122	extern unsigned int __swapper_tsb_phys_patch_end;
2123	unsigned long ktsb_pa;
2124
2125	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2126	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
2127			    &__swapper_tsb_phys_patch_end, ktsb_pa);
2128#ifndef CONFIG_DEBUG_PAGEALLOC
2129	{
2130	extern unsigned int __swapper_4m_tsb_phys_patch;
2131	extern unsigned int __swapper_4m_tsb_phys_patch_end;
2132	ktsb_pa = (kern_base +
2133		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2134	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
2135			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
2136	}
2137#endif
2138}
2139
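/* Fill in the hypervisor TSB descriptors for the kernel: descriptor 0
 * covers PAGE_SIZE translations via swapper_tsb and, unless
 * CONFIG_DEBUG_PAGEALLOC is set, descriptor 1 covers the large
 * linear-mapping page sizes via swapper_4m_tsb.
 */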
2140static void __init sun4v_ktsb_init(void)
2141{
2142	unsigned long ktsb_pa;
2143
2144	/* First KTSB for PAGE_SIZE mappings.  */
2145	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
2146
2147	switch (PAGE_SIZE) {
2148	case 8 * 1024:
2149	default:
2150		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
2151		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
2152		break;
2153
2154	case 64 * 1024:
2155		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
2156		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
2157		break;
2158
2159	case 512 * 1024:
2160		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
2161		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
2162		break;
2163
2164	case 4 * 1024 * 1024:
2165		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
2166		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
2167		break;
2168	}
2169
2170	ktsb_descr[0].assoc = 1;
2171	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
2172	ktsb_descr[0].ctx_idx = 0;
2173	ktsb_descr[0].tsb_base = ktsb_pa;
2174	ktsb_descr[0].resv = 0;
2175
2176#ifndef CONFIG_DEBUG_PAGEALLOC
2177	/* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
2178	ktsb_pa = (kern_base +
2179		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
2180
2181	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
2182	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
2183				    HV_PGSZ_MASK_256MB |
2184				    HV_PGSZ_MASK_2GB |
2185				    HV_PGSZ_MASK_16GB) &
2186				   cpu_pgsz_mask);
2187	ktsb_descr[1].assoc = 1;
2188	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
2189	ktsb_descr[1].ctx_idx = 0;
2190	ktsb_descr[1].tsb_base = ktsb_pa;
2191	ktsb_descr[1].resv = 0;
2192#endif
2193}
2194
2195void sun4v_ktsb_register(void)
2196{
2197	unsigned long pa, ret;
2198
2199	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
2200
2201	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
2202	if (ret != 0) {
2203		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
2204			    "errors with %lx\n", pa, ret);
2205		prom_halt();
2206	}
2207}
2208
2209static void __init sun4u_linear_pte_xor_finalize(void)
2210{
2211#ifndef CONFIG_DEBUG_PAGEALLOC
2212	/* This is where we would add Panther support for
2213	 * 32MB and 256MB pages.
2214	 */
2215#endif
2216}
2217
2218static void __init sun4v_linear_pte_xor_finalize(void)
2219{
2220	unsigned long pagecv_flag;
2221
 2222	/* Bit 9 of the TTE is no longer the CV bit on the M7 processor; it
 2223	 * instead enables MCD errors.  Do not set bit 9 on M7 and later.
 2224	 */
2225	switch (sun4v_chip_type) {
2226	case SUN4V_CHIP_SPARC_M7:
2227	case SUN4V_CHIP_SPARC_M8:
2228	case SUN4V_CHIP_SPARC_SN:
2229		pagecv_flag = 0x00;
2230		break;
2231	default:
2232		pagecv_flag = _PAGE_CV_4V;
2233		break;
2234	}
2235#ifndef CONFIG_DEBUG_PAGEALLOC
2236	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
2237		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
2238			PAGE_OFFSET;
2239		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
2240					   _PAGE_P_4V | _PAGE_W_4V);
2241	} else {
2242		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
2243	}
2244
2245	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
2246		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
2247			PAGE_OFFSET;
2248		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
2249					   _PAGE_P_4V | _PAGE_W_4V);
2250	} else {
2251		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
2252	}
2253
2254	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
2255		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
2256			PAGE_OFFSET;
2257		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
2258					   _PAGE_P_4V | _PAGE_W_4V);
2259	} else {
2260		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
2261	}
2262#endif
2263}
2264
2265/* paging_init() sets up the page tables */
2266
2267static unsigned long last_valid_pfn;
2268
2269static void sun4u_pgprot_init(void);
2270static void sun4v_pgprot_init(void);
2271
2272#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
2273#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
2274#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
2275#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
2276#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
2277#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
2278
 2279	/* We need to exclude reserved regions. This exclusion will include
 2280	 * vmlinux and the initrd. To be more precise, the initrd size could be used
 2281	 * to compute a new lower limit because it is freed later during initialization.
 2282	 */
2283static void __init reduce_memory(phys_addr_t limit_ram)
2284{
2285	limit_ram += memblock_reserved_size();
2286	memblock_enforce_memory_limit(limit_ram);
2287}
2288
2289void __init paging_init(void)
2290{
2291	unsigned long end_pfn, shift, phys_base;
2292	unsigned long real_end, i;
2293
2294	setup_page_offset();
2295
 2296	/* These build time checks make sure that the dcache_dirty_cpu()
2297	 * folio->flags usage will work.
2298	 *
2299	 * When a page gets marked as dcache-dirty, we store the
2300	 * cpu number starting at bit 32 in the folio->flags.  Also,
2301	 * functions like clear_dcache_dirty_cpu use the cpu mask
2302	 * in 13-bit signed-immediate instruction fields.
2303	 */
2304
2305	/*
2306	 * Page flags must not reach into upper 32 bits that are used
2307	 * for the cpu number
2308	 */
2309	BUILD_BUG_ON(NR_PAGEFLAGS > 32);
2310
2311	/*
2312	 * The bit fields placed in the high range must not reach below
2313	 * the 32 bit boundary. Otherwise we cannot place the cpu field
2314	 * at the 32 bit boundary.
2315	 */
2316	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
2317		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
2318
2319	BUILD_BUG_ON(NR_CPUS > 4096);
2320
2321	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
2322	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
2323
2324	/* Invalidate both kernel TSBs.  */
2325	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
2326#ifndef CONFIG_DEBUG_PAGEALLOC
2327	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2328#endif
2329
2330	/* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
2331	 * bit on M7 processor. This is a conflicting usage of the same
 2332	 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
 2333	 * Detection errors on all pages and this will lead to problems
 2334	 * later. The kernel does not run with MCD enabled, and hence the
 2335	 * rest of the steps required to fully configure memory corruption
 2336	 * detection are not taken. We need to ensure TTE.mcde is not
 2337	 * set on the M7 processor. Compute the value of the cacheability
 2338	 * flag for later use, taking this into consideration.
2339	 */
2340	switch (sun4v_chip_type) {
2341	case SUN4V_CHIP_SPARC_M7:
2342	case SUN4V_CHIP_SPARC_M8:
2343	case SUN4V_CHIP_SPARC_SN:
2344		page_cache4v_flag = _PAGE_CP_4V;
2345		break;
2346	default:
2347		page_cache4v_flag = _PAGE_CACHE_4V;
2348		break;
2349	}
2350
2351	if (tlb_type == hypervisor)
2352		sun4v_pgprot_init();
2353	else
2354		sun4u_pgprot_init();
2355
2356	if (tlb_type == cheetah_plus ||
2357	    tlb_type == hypervisor) {
2358		tsb_phys_patch();
2359		ktsb_phys_patch();
2360	}
2361
2362	if (tlb_type == hypervisor)
2363		sun4v_patch_tlb_handlers();
2364
2365	/* Find available physical memory...
2366	 *
2367	 * Read it twice in order to work around a bug in openfirmware.
2368	 * The call to grab this table itself can cause openfirmware to
2369	 * allocate memory, which in turn can take away some space from
2370	 * the list of available memory.  Reading it twice makes sure
2371	 * we really do get the final value.
2372	 */
2373	read_obp_translations();
2374	read_obp_memory("reg", &pall[0], &pall_ents);
2375	read_obp_memory("available", &pavail[0], &pavail_ents);
2376	read_obp_memory("available", &pavail[0], &pavail_ents);
2377
2378	phys_base = 0xffffffffffffffffUL;
2379	for (i = 0; i < pavail_ents; i++) {
2380		phys_base = min(phys_base, pavail[i].phys_addr);
2381		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
2382	}
2383
2384	memblock_reserve(kern_base, kern_size);
2385
2386	find_ramdisk(phys_base);
2387
2388	if (cmdline_memory_size)
2389		reduce_memory(cmdline_memory_size);
2390
2391	memblock_allow_resize();
2392	memblock_dump_all();
2393
2394	set_bit(0, mmu_context_bmap);
2395
2396	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
2397
2398	real_end = (unsigned long)_end;
2399	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
2400	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
2401	       num_kernel_image_mappings);
2402
2403	/* Set kernel pgd to upper alias so physical page computations
2404	 * work.
2405	 */
2406	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
 2407
2408	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
2409
2410	inherit_prom_mappings();
 2411
2412	/* Ok, we can use our TLB miss and window trap handlers safely.  */
2413	setup_tba();
2414
2415	__flush_tlb_all();
2416
2417	prom_build_devicetree();
2418	of_populate_present_mask();
2419#ifndef CONFIG_SMP
2420	of_fill_in_cpu_data();
2421#endif
2422
2423	if (tlb_type == hypervisor) {
2424		sun4v_mdesc_init();
2425		mdesc_populate_present_mask(cpu_all_mask);
2426#ifndef CONFIG_SMP
2427		mdesc_fill_in_cpu_data(cpu_all_mask);
2428#endif
2429		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
2430
2431		sun4v_linear_pte_xor_finalize();
2432
2433		sun4v_ktsb_init();
2434		sun4v_ktsb_register();
2435	} else {
2436		unsigned long impl, ver;
2437
2438		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
2439				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
2440
2441		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
2442		impl = ((ver >> 32) & 0xffff);
2443		if (impl == PANTHER_IMPL)
2444			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
2445					  HV_PGSZ_MASK_256MB);
2446
2447		sun4u_linear_pte_xor_finalize();
2448	}
2449
2450	/* Flush the TLBs and the 4M TSB so that the updated linear
2451	 * pte XOR settings are realized for all mappings.
2452	 */
2453	__flush_tlb_all();
2454#ifndef CONFIG_DEBUG_PAGEALLOC
2455	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
2456#endif
2457	__flush_tlb_all();
2458
2459	/* Setup bootmem... */
2460	last_valid_pfn = end_pfn = bootmem_init(phys_base);
2461
2462	kernel_physical_mapping_init();
2463
2464	{
2465		unsigned long max_zone_pfns[MAX_NR_ZONES];
2466
2467		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
2468
2469		max_zone_pfns[ZONE_NORMAL] = end_pfn;
2470
2471		free_area_init(max_zone_pfns);
2472	}
2473
2474	printk("Booting Linux...\n");
2475}
2476
2477int page_in_phys_avail(unsigned long paddr)
2478{
2479	int i;
2480
2481	paddr &= PAGE_MASK;
2482
2483	for (i = 0; i < pavail_ents; i++) {
2484		unsigned long start, end;
2485
2486		start = pavail[i].phys_addr;
2487		end = start + pavail[i].reg_size;
2488
2489		if (paddr >= start && paddr < end)
2490			return 1;
2491	}
2492	if (paddr >= kern_base && paddr < (kern_base + kern_size))
2493		return 1;
2494#ifdef CONFIG_BLK_DEV_INITRD
2495	if (paddr >= __pa(initrd_start) &&
2496	    paddr < __pa(PAGE_ALIGN(initrd_end)))
2497		return 1;
2498#endif
2499
2500	return 0;
2501}
2502
2503static void __init register_page_bootmem_info(void)
2504{
2505#ifdef CONFIG_NUMA
2506	int i;
2507
2508	for_each_online_node(i)
2509		if (NODE_DATA(i)->node_spanned_pages)
2510			register_page_bootmem_info_node(NODE_DATA(i));
2511#endif
2512}
2513void __init mem_init(void)
2514{
2515	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
2516
2517	memblock_free_all();
2518
2519	/*
2520	 * Must be done after boot memory is put on freelist, because here we
2521	 * might set fields in deferred struct pages that have not yet been
2522	 * initialized, and memblock_free_all() initializes all the reserved
2523	 * deferred pages for us.
2524	 */
2525	register_page_bootmem_info();
2526
2527	/*
2528	 * Set up the zero page, mark it reserved, so that page count
2529	 * is not manipulated when freeing the page from user ptes.
2530	 */
2531	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
2532	if (mem_map_zero == NULL) {
 2533		prom_printf("mem_init: Cannot alloc zero page.\n");
2534		prom_halt();
2535	}
2536	mark_page_reserved(mem_map_zero);
2537
2538
2539	if (tlb_type == cheetah || tlb_type == cheetah_plus)
2540		cheetah_ecache_flush_init();
2541}
2542
2543void free_initmem(void)
2544{
2545	unsigned long addr, initend;
2546	int do_free = 1;
2547
2548	/* If the physical memory maps were trimmed by kernel command
2549	 * line options, don't even try freeing this initmem stuff up.
2550	 * The kernel image could have been in the trimmed out region
2551	 * and if so the freeing below will free invalid page structs.
2552	 */
2553	if (cmdline_memory_size)
2554		do_free = 0;
2555
2556	/*
 2557	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k page sizes.
2558	 */
2559	addr = PAGE_ALIGN((unsigned long)(__init_begin));
2560	initend = (unsigned long)(__init_end) & PAGE_MASK;
2561	for (; addr < initend; addr += PAGE_SIZE) {
2562		unsigned long page;
2563
2564		page = (addr +
2565			((unsigned long) __va(kern_base)) -
2566			((unsigned long) KERNBASE));
2567		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
2568
2569		if (do_free)
2570			free_reserved_page(virt_to_page(page));
2571	}
2572}
2573
2574pgprot_t PAGE_KERNEL __read_mostly;
2575EXPORT_SYMBOL(PAGE_KERNEL);
2576
2577pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
2578pgprot_t PAGE_COPY __read_mostly;
2579
2580pgprot_t PAGE_SHARED __read_mostly;
2581EXPORT_SYMBOL(PAGE_SHARED);
2582
2583unsigned long pg_iobits __read_mostly;
2584
2585unsigned long _PAGE_IE __read_mostly;
2586EXPORT_SYMBOL(_PAGE_IE);
2587
2588unsigned long _PAGE_E __read_mostly;
2589EXPORT_SYMBOL(_PAGE_E);
2590
2591unsigned long _PAGE_CACHE __read_mostly;
2592EXPORT_SYMBOL(_PAGE_CACHE);
2593
2594#ifdef CONFIG_SPARSEMEM_VMEMMAP
2595int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
2596			       int node, struct vmem_altmap *altmap)
2597{
2598	unsigned long pte_base;
2599
2600	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2601		    _PAGE_CP_4U | _PAGE_CV_4U |
2602		    _PAGE_P_4U | _PAGE_W_4U);
2603	if (tlb_type == hypervisor)
2604		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2605			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
2606
2607	pte_base |= _PAGE_PMD_HUGE;
2608
2609	vstart = vstart & PMD_MASK;
2610	vend = ALIGN(vend, PMD_SIZE);
2611	for (; vstart < vend; vstart += PMD_SIZE) {
2612		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
2613		unsigned long pte;
2614		p4d_t *p4d;
2615		pud_t *pud;
2616		pmd_t *pmd;
2617
2618		if (!pgd)
2619			return -ENOMEM;
2620
2621		p4d = vmemmap_p4d_populate(pgd, vstart, node);
2622		if (!p4d)
2623			return -ENOMEM;
2624
2625		pud = vmemmap_pud_populate(p4d, vstart, node);
2626		if (!pud)
2627			return -ENOMEM;
2628
2629		pmd = pmd_offset(pud, vstart);
2630		pte = pmd_val(*pmd);
2631		if (!(pte & _PAGE_VALID)) {
2632			void *block = vmemmap_alloc_block(PMD_SIZE, node);
2633
2634			if (!block)
2635				return -ENOMEM;
2636
2637			pmd_val(*pmd) = pte_base | __pa(block);
2638		}
2639	}
2640
2641	return 0;
2642}
2643
2644void vmemmap_free(unsigned long start, unsigned long end,
2645		struct vmem_altmap *altmap)
2646{
2647}
2648#endif /* CONFIG_SPARSEMEM_VMEMMAP */
2649
2650/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
2651static pgprot_t protection_map[16] __ro_after_init;
2652
2653static void prot_init_common(unsigned long page_none,
2654			     unsigned long page_shared,
2655			     unsigned long page_copy,
2656			     unsigned long page_readonly,
2657			     unsigned long page_exec_bit)
2658{
2659	PAGE_COPY = __pgprot(page_copy);
2660	PAGE_SHARED = __pgprot(page_shared);
2661
2662	protection_map[0x0] = __pgprot(page_none);
2663	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
2664	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
2665	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
2666	protection_map[0x4] = __pgprot(page_readonly);
2667	protection_map[0x5] = __pgprot(page_readonly);
2668	protection_map[0x6] = __pgprot(page_copy);
2669	protection_map[0x7] = __pgprot(page_copy);
2670	protection_map[0x8] = __pgprot(page_none);
2671	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
2672	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
2673	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
2674	protection_map[0xc] = __pgprot(page_readonly);
2675	protection_map[0xd] = __pgprot(page_readonly);
2676	protection_map[0xe] = __pgprot(page_shared);
2677	protection_map[0xf] = __pgprot(page_shared);
2678}
2679
2680static void __init sun4u_pgprot_init(void)
2681{
2682	unsigned long page_none, page_shared, page_copy, page_readonly;
2683	unsigned long page_exec_bit;
2684	int i;
2685
2686	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2687				_PAGE_CACHE_4U | _PAGE_P_4U |
2688				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
2689				_PAGE_EXEC_4U);
2690	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
2691				       _PAGE_CACHE_4U | _PAGE_P_4U |
2692				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
2693				       _PAGE_EXEC_4U | _PAGE_L_4U);
2694
2695	_PAGE_IE = _PAGE_IE_4U;
2696	_PAGE_E = _PAGE_E_4U;
2697	_PAGE_CACHE = _PAGE_CACHE_4U;
2698
2699	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
2700		     __ACCESS_BITS_4U | _PAGE_E_4U);
2701
2702#ifdef CONFIG_DEBUG_PAGEALLOC
2703	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2704#else
2705	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
2706		PAGE_OFFSET;
2707#endif
2708	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
2709				   _PAGE_P_4U | _PAGE_W_4U);
2710
2711	for (i = 1; i < 4; i++)
2712		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2713
2714	_PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
2715			      _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
2716			      _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
2717
2718
2719	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
2720	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2721		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
2722	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2723		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2724	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
2725			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);
2726
2727	page_exec_bit = _PAGE_EXEC_4U;
2728
2729	prot_init_common(page_none, page_shared, page_copy, page_readonly,
2730			 page_exec_bit);
2731}
2732
2733static void __init sun4v_pgprot_init(void)
2734{
2735	unsigned long page_none, page_shared, page_copy, page_readonly;
2736	unsigned long page_exec_bit;
2737	int i;
2738
2739	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
2740				page_cache4v_flag | _PAGE_P_4V |
2741				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
2742				_PAGE_EXEC_4V);
2743	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
2744
2745	_PAGE_IE = _PAGE_IE_4V;
2746	_PAGE_E = _PAGE_E_4V;
2747	_PAGE_CACHE = page_cache4v_flag;
2748
2749#ifdef CONFIG_DEBUG_PAGEALLOC
2750	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
2751#else
2752	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
2753		PAGE_OFFSET;
2754#endif
2755	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
2756				   _PAGE_W_4V);
2757
2758	for (i = 1; i < 4; i++)
2759		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
2760
2761	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
2762		     __ACCESS_BITS_4V | _PAGE_E_4V);
2763
2764	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
2765			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
2766			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
2767			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
2768
2769	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
2770	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2771		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
2772	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2773		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2774	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
2775			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
2776
2777	page_exec_bit = _PAGE_EXEC_4V;
2778
2779	prot_init_common(page_none, page_shared, page_copy, page_readonly,
2780			 page_exec_bit);
2781}
2782
2783unsigned long pte_sz_bits(unsigned long sz)
2784{
2785	if (tlb_type == hypervisor) {
2786		switch (sz) {
2787		case 8 * 1024:
2788		default:
2789			return _PAGE_SZ8K_4V;
2790		case 64 * 1024:
2791			return _PAGE_SZ64K_4V;
2792		case 512 * 1024:
2793			return _PAGE_SZ512K_4V;
2794		case 4 * 1024 * 1024:
2795			return _PAGE_SZ4MB_4V;
2796		}
2797	} else {
2798		switch (sz) {
2799		case 8 * 1024:
2800		default:
2801			return _PAGE_SZ8K_4U;
2802		case 64 * 1024:
2803			return _PAGE_SZ64K_4U;
2804		case 512 * 1024:
2805			return _PAGE_SZ512K_4U;
2806		case 4 * 1024 * 1024:
2807			return _PAGE_SZ4MB_4U;
2808		}
2809	}
2810}
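/*
 * For example, pte_sz_bits(512 * 1024) returns _PAGE_SZ512K_4V on a
 * sun4v (hypervisor) system and _PAGE_SZ512K_4U on sun4u; any size
 * without an exact match falls back to the 8K encoding.
 */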
2811
2812pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
2813{
2814	pte_t pte;
2815
2816	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
2817	pte_val(pte) |= (((unsigned long)space) << 32);
2818	pte_val(pte) |= pte_sz_bits(page_size);
2819
2820	return pte;
2821}
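/*
 * mk_pte_io() composes an uncacheable I/O PTE from the physical page
 * address, the non-cached protection bits, the 'space' identifier
 * shifted up by 32 bits, and the page-size encoding returned by
 * pte_sz_bits() above.
 */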
2822
2823static unsigned long kern_large_tte(unsigned long paddr)
2824{
2825	unsigned long val;
2826
2827	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
2828	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
2829	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
2830	if (tlb_type == hypervisor)
2831		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
2832		       page_cache4v_flag | _PAGE_P_4V |
2833		       _PAGE_EXEC_4V | _PAGE_W_4V);
2834
2835	return val | paddr;
2836}
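/*
 * kern_large_tte() builds the TTE used for the kernel's 4MB linear
 * mappings: valid, privileged, writable, executable and cacheable
 * (and TLB-locked on sun4u), with the physical address OR'd in.
 */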
2837
2838/* If not locked, zap it. */
2839void __flush_tlb_all(void)
2840{
2841	unsigned long pstate;
2842	int i;
2843
2844	__asm__ __volatile__("flushw\n\t"
2845			     "rdpr	%%pstate, %0\n\t"
2846			     "wrpr	%0, %1, %%pstate"
2847			     : "=r" (pstate)
2848			     : "i" (PSTATE_IE));
2849	if (tlb_type == hypervisor) {
2850		sun4v_mmu_demap_all();
2851	} else if (tlb_type == spitfire) {
2852		for (i = 0; i < 64; i++) {
2853			/* Spitfire Errata #32 workaround */
2854			/* NOTE: Always runs on spitfire, so no
2855			 *       cheetah+ page size encodings.
2856			 */
2857			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
2858					     "flush	%%g6"
2859					     : /* No outputs */
2860					     : "r" (0),
2861					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2862
2863			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
2864				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2865						     "membar #Sync"
2866						     : /* no outputs */
2867						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
2868				spitfire_put_dtlb_data(i, 0x0UL);
2869			}
2870
2871			/* Spitfire Errata #32 workaround */
2872			/* NOTE: Always runs on spitfire, so no
2873			 *       cheetah+ page size encodings.
2874			 */
2875			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
2876					     "flush	%%g6"
2877					     : /* No outputs */
2878					     : "r" (0),
2879					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
2880
2881			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
2882				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
2883						     "membar #Sync"
2884						     : /* no outputs */
2885						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
2886				spitfire_put_itlb_data(i, 0x0UL);
2887			}
2888		}
2889	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
2890		cheetah_flush_dtlb_all();
2891		cheetah_flush_itlb_all();
2892	}
2893	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
2894			     : : "r" (pstate));
2895}
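/*
 * __flush_tlb_all() masks interrupts via PSTATE.IE for the duration:
 * sun4v asks the hypervisor to demap everything, spitfire walks all
 * 64 D-TLB and I-TLB entries and zaps every one that is not locked
 * (_PAGE_L_4U), and cheetah/cheetah+ use their dedicated flush-all
 * routines.
 */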
2896
2897pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
2898{
2899	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2900	pte_t *pte = NULL;
2901
2902	if (page)
2903		pte = (pte_t *) page_address(page);
2904
2905	return pte;
2906}
2907
2908pgtable_t pte_alloc_one(struct mm_struct *mm)
2909{
2910	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);
2911
2912	if (!ptdesc)
2913		return NULL;
2914	if (!pagetable_pte_ctor(ptdesc)) {
2915		pagetable_free(ptdesc);
2916		return NULL;
2917	}
2918	return ptdesc_address(ptdesc);
2919}
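/*
 * pte_alloc_one() pairs pagetable_alloc()/pagetable_pte_ctor() with
 * the pagetable_pte_dtor()/pagetable_free() calls in __pte_free()
 * below, while pte_alloc_one_kernel() above hands out a plain zeroed
 * page that pte_free_kernel() releases with free_page().
 */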
2920
2921void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
2922{
2923	free_page((unsigned long)pte);
2924}
2925
2926static void __pte_free(pgtable_t pte)
2927{
2928	struct ptdesc *ptdesc = virt_to_ptdesc(pte);
2929
2930	pagetable_pte_dtor(ptdesc);
2931	pagetable_free(ptdesc);
2932}
2933
2934void pte_free(struct mm_struct *mm, pgtable_t pte)
2935{
2936	__pte_free(pte);
2937}
2938
2939void pgtable_free(void *table, bool is_page)
2940{
2941	if (is_page)
2942		__pte_free(table);
2943	else
2944		kmem_cache_free(pgtable_cache, table);
2945}
2946
2947#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2948static void pte_free_now(struct rcu_head *head)
2949{
2950	struct page *page;
2951
2952	page = container_of(head, struct page, rcu_head);
2953	__pte_free((pgtable_t)page_address(page));
2954}
2955
2956void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
2957{
2958	struct page *page;
2959
2960	page = virt_to_page(pgtable);
2961	call_rcu(&page->rcu_head, pte_free_now);
2962}
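/*
 * pte_free_defer() postpones the actual __pte_free() until after an
 * RCU grace period, using the page's rcu_head with pte_free_now() as
 * the callback.
 */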
2963
2964void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
2965			  pmd_t *pmd)
2966{
2967	unsigned long pte, flags;
2968	struct mm_struct *mm;
2969	pmd_t entry = *pmd;
2970
2971	if (!pmd_leaf(entry) || !pmd_young(entry))
2972		return;
2973
2974	pte = pmd_val(entry);
2975
2976	/* Don't insert a non-valid PMD into the TSB; we'll deadlock.  */
2977	if (!(pte & _PAGE_VALID))
2978		return;
2979
2980	/* We are fabricating 8MB pages using 4MB real hw pages.  */
2981	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
2982
2983	mm = vma->vm_mm;
2984
2985	spin_lock_irqsave(&mm->context.lock, flags);
2986
2987	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
2988		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
2989					addr, pte);
2990
2991	spin_unlock_irqrestore(&mm->context.lock, flags);
2992}
2993#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
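/*
 * Because an 8MB "huge" page is built from two adjacent 4MB hardware
 * pages, the REAL_HPAGE_SHIFT bit of the faulting address selects
 * which 4MB half the TSB entry describes; update_mmu_cache_pmd()
 * above OR's that bit into the pte before the huge-TSB insert.
 */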
2994
2995#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
2996static void context_reload(void *__data)
2997{
2998	struct mm_struct *mm = __data;
2999
3000	if (mm == current->mm)
3001		load_secondary_context(mm);
3002}
3003
3004void hugetlb_setup(struct pt_regs *regs)
3005{
3006	struct mm_struct *mm = current->mm;
3007	struct tsb_config *tp;
3008
3009	if (faulthandler_disabled() || !mm) {
3010		const struct exception_table_entry *entry;
3011
3012		entry = search_exception_tables(regs->tpc);
3013		if (entry) {
3014			regs->tpc = entry->fixup;
3015			regs->tnpc = regs->tpc + 4;
3016			return;
3017		}
3018		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
3019		die_if_kernel("HugeTSB in atomic", regs);
3020	}
3021
3022	tp = &mm->context.tsb_block[MM_TSB_HUGE];
3023	if (likely(tp->tsb == NULL))
3024		tsb_grow(mm, MM_TSB_HUGE, 0);
3025
3026	tsb_context_switch(mm);
3027	smp_tsb_sync(mm);
3028
3029	/* On UltraSPARC-III+ and later, configure the second half of
3030	 * the Data-TLB for huge pages.
3031	 */
3032	if (tlb_type == cheetah_plus) {
3033		bool need_context_reload = false;
3034		unsigned long ctx;
3035
3036		spin_lock_irq(&ctx_alloc_lock);
3037		ctx = mm->context.sparc64_ctx_val;
3038		ctx &= ~CTX_PGSZ_MASK;
3039		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
3040		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
3041
3042		if (ctx != mm->context.sparc64_ctx_val) {
3043			/* When changing the page size fields, we
3044			 * must perform a context flush so that no
3045			 * stale entries match.  This flush must
3046			 * occur with the original context register
3047			 * settings.
3048			 */
3049			do_flush_tlb_mm(mm);
3050
3051			/* Reload the context register of all processors
3052			 * also executing in this address space.
3053			 */
3054			mm->context.sparc64_ctx_val = ctx;
3055			need_context_reload = true;
3056		}
3057		spin_unlock_irq(&ctx_alloc_lock);
3058
3059		if (need_context_reload)
3060			on_each_cpu(context_reload, mm, 0);
3061	}
3062}
3063#endif
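/*
 * On cheetah_plus the context register carries two page-size fields;
 * hugetlb_setup() above programs PGSZ0 with the base size and PGSZ1
 * with the huge size, flushing the old context first and then
 * reloading the context register on the CPUs currently running this
 * address space.
 */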
3064
3065static struct resource code_resource = {
3066	.name	= "Kernel code",
3067	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3068};
3069
3070static struct resource data_resource = {
3071	.name	= "Kernel data",
3072	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3073};
3074
3075static struct resource bss_resource = {
3076	.name	= "Kernel bss",
3077	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
3078};
3079
3080static inline resource_size_t compute_kern_paddr(void *addr)
3081{
3082	return (resource_size_t) (addr - KERNBASE + kern_base);
3083}
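/*
 * compute_kern_paddr() converts a kernel virtual (link-time) address
 * into the physical address the kernel was loaded at, i.e.
 * addr - KERNBASE + kern_base.
 */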
3084
3085static void __init kernel_lds_init(void)
3086{
3087	code_resource.start = compute_kern_paddr(_text);
3088	code_resource.end   = compute_kern_paddr(_etext - 1);
3089	data_resource.start = compute_kern_paddr(_etext);
3090	data_resource.end   = compute_kern_paddr(_edata - 1);
3091	bss_resource.start  = compute_kern_paddr(__bss_start);
3092	bss_resource.end    = compute_kern_paddr(_end - 1);
3093}
3094
3095static int __init report_memory(void)
3096{
3097	int i;
3098	struct resource *res;
3099
3100	kernel_lds_init();
3101
3102	for (i = 0; i < pavail_ents; i++) {
3103		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
3104
3105		if (!res) {
3106			pr_warn("Failed to allocate resource.\n");
3107			break;
3108		}
3109
3110		res->name = "System RAM";
3111		res->start = pavail[i].phys_addr;
3112		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
3113		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
3114
3115		if (insert_resource(&iomem_resource, res) < 0) {
3116			pr_warn("Resource insertion failed.\n");
3117			break;
3118		}
3119
3120		insert_resource(res, &code_resource);
3121		insert_resource(res, &data_resource);
3122		insert_resource(res, &bss_resource);
3123	}
3124
3125	return 0;
3126}
3127arch_initcall(report_memory);
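/*
 * report_memory() registers each pavail[] memory bank as a
 * "System RAM" resource and nests the kernel code, data and bss
 * resources inside it, so they show up under /proc/iomem.
 */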
3128
3129#ifdef CONFIG_SMP
3130#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
3131#else
3132#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
3133#endif
3134
3135void flush_tlb_kernel_range(unsigned long start, unsigned long end)
3136{
3137	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
3138		if (start < LOW_OBP_ADDRESS) {
3139			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
3140			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
3141		}
3142		if (end > HI_OBP_ADDRESS) {
3143			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
3144			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
3145		}
3146	} else {
3147		flush_tsb_kernel_range(start, end);
3148		do_flush_tlb_kernel_range(start, end);
3149	}
3150}
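/*
 * Kernel TLB range flushes are split around the OBP (OpenBoot PROM)
 * window: when the requested range overlaps it, only the portions
 * below LOW_OBP_ADDRESS and above HI_OBP_ADDRESS are flushed, leaving
 * the firmware's translations untouched.
 */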
3151
3152void copy_user_highpage(struct page *to, struct page *from,
3153	unsigned long vaddr, struct vm_area_struct *vma)
3154{
3155	char *vfrom, *vto;
3156
3157	vfrom = kmap_atomic(from);
3158	vto = kmap_atomic(to);
3159	copy_user_page(vto, vfrom, vaddr, to);
3160	kunmap_atomic(vto);
3161	kunmap_atomic(vfrom);
3162
3163	/* If ADI is enabled on this mapping, copy over any ADI tags
3164	 * as well
3165	 */
3166	if (vma->vm_flags & VM_SPARC_ADI) {
3167		unsigned long pfrom, pto, i, adi_tag;
3168
3169		pfrom = page_to_phys(from);
3170		pto = page_to_phys(to);
3171
3172		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3173			asm volatile("ldxa [%1] %2, %0\n\t"
3174					: "=r" (adi_tag)
3175					:  "r" (i), "i" (ASI_MCD_REAL));
3176			asm volatile("stxa %0, [%1] %2\n\t"
3177					:
3178					: "r" (adi_tag), "r" (pto),
3179					  "i" (ASI_MCD_REAL));
3180			pto += adi_blksize();
3181		}
3182		asm volatile("membar #Sync\n\t");
3183	}
3184}
3185EXPORT_SYMBOL(copy_user_highpage);
3186
3187void copy_highpage(struct page *to, struct page *from)
3188{
3189	char *vfrom, *vto;
3190
3191	vfrom = kmap_atomic(from);
3192	vto = kmap_atomic(to);
3193	copy_page(vto, vfrom);
3194	kunmap_atomic(vto);
3195	kunmap_atomic(vfrom);
3196
3197	/* If this platform is ADI capable, copy any ADI tags
3198	 * as well
3199	 */
3200	if (adi_capable()) {
3201		unsigned long pfrom, pto, i, adi_tag;
3202
3203		pfrom = page_to_phys(from);
3204		pto = page_to_phys(to);
3205
3206		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
3207			asm volatile("ldxa [%1] %2, %0\n\t"
3208					: "=r" (adi_tag)
3209					:  "r" (i), "i" (ASI_MCD_REAL));
3210			asm volatile("stxa %0, [%1] %2\n\t"
3211					:
3212					: "r" (adi_tag), "r" (pto),
3213					  "i" (ASI_MCD_REAL));
3214			pto += adi_blksize();
3215		}
3216		asm volatile("membar #Sync\n\t");
3217	}
3218}
3219EXPORT_SYMBOL(copy_highpage);
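/*
 * The ADI loops in the two copy routines above walk the source page
 * in adi_blksize() steps, loading each MCD version tag with ldxa from
 * ASI_MCD_REAL and storing it at the matching offset of the
 * destination page, finishing with a membar #Sync.
 */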
3220
3221pgprot_t vm_get_page_prot(unsigned long vm_flags)
3222{
3223	unsigned long prot = pgprot_val(protection_map[vm_flags &
3224					(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
3225
3226	if (vm_flags & VM_SPARC_ADI)
3227		prot |= _PAGE_MCD_4V;
3228
3229	return __pgprot(prot);
3230}
3231EXPORT_SYMBOL(vm_get_page_prot);
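/*
 * vm_get_page_prot() indexes protection_map[] (set up in
 * prot_init_common() above) with the low four vm_flags bits and, for
 * ADI-enabled VMAs, also sets the sun4v MCD bit used by ADI tag
 * checking.
 */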