   1/*
   2 *  linux/arch/arm/mm/mmu.c
   3 *
   4 *  Copyright (C) 1995-2005 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/module.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/init.h>
  14#include <linux/mman.h>
  15#include <linux/nodemask.h>
  16#include <linux/memblock.h>
  17#include <linux/fs.h>
  18#include <linux/vmalloc.h>
  19#include <linux/sizes.h>
  20
  21#include <asm/cp15.h>
  22#include <asm/cputype.h>
  23#include <asm/sections.h>
  24#include <asm/cachetype.h>
  25#include <asm/fixmap.h>
  26#include <asm/sections.h>
  27#include <asm/setup.h>
  28#include <asm/smp_plat.h>
  29#include <asm/tlb.h>
  30#include <asm/highmem.h>
  31#include <asm/system_info.h>
  32#include <asm/traps.h>
  33#include <asm/procinfo.h>
  34#include <asm/memory.h>
  35
  36#include <asm/mach/arch.h>
  37#include <asm/mach/map.h>
  38#include <asm/mach/pci.h>
  39#include <asm/fixmap.h>
  40
  41#include "fault.h"
  42#include "mm.h"
  43#include "tcm.h"
  44
  45/*
  46 * empty_zero_page is a special page that is used for
  47 * zero-initialized data and COW.
  48 */
  49struct page *empty_zero_page;
  50EXPORT_SYMBOL(empty_zero_page);
  51
  52/*
  53 * The pmd table for the upper-most set of pages.
  54 */
  55pmd_t *top_pmd;
  56
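     /*
      * Descriptor bits used for user page-table (PMD table) entries.
      * build_mem_type_table() below ORs in PMD_PXNTABLE when the CPU
      * supports the PXN (privileged execute-never) bit.
      */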
  57pmdval_t user_pmd_table = _PAGE_USER_TABLE;
  58
  59#define CPOLICY_UNCACHED	0
  60#define CPOLICY_BUFFERED	1
  61#define CPOLICY_WRITETHROUGH	2
  62#define CPOLICY_WRITEBACK	3
  63#define CPOLICY_WRITEALLOC	4
  64
  65static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
  66static unsigned int ecc_mask __initdata = 0;
  67pgprot_t pgprot_user;
  68pgprot_t pgprot_kernel;
  69pgprot_t pgprot_hyp_device;
  70pgprot_t pgprot_s2;
  71pgprot_t pgprot_s2_device;
  72
  73EXPORT_SYMBOL(pgprot_user);
  74EXPORT_SYMBOL(pgprot_kernel);
  75
  76struct cachepolicy {
  77	const char	policy[16];
  78	unsigned int	cr_mask;
  79	pmdval_t	pmd;
  80	pteval_t	pte;
  81	pteval_t	pte_s2;
  82};
  83
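     /*
      * Stage-2 (hypervisor/KVM) memory attributes only exist in the LPAE
      * page-table format; without CONFIG_ARM_LPAE the s2 fields collapse
      * to zero.
      */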
  84#ifdef CONFIG_ARM_LPAE
  85#define s2_policy(policy)	policy
  86#else
  87#define s2_policy(policy)	0
  88#endif
  89
  90unsigned long kimage_voffset __ro_after_init;
  91
  92static struct cachepolicy cache_policies[] __initdata = {
  93	{
  94		.policy		= "uncached",
  95		.cr_mask	= CR_W|CR_C,
  96		.pmd		= PMD_SECT_UNCACHED,
  97		.pte		= L_PTE_MT_UNCACHED,
  98		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
  99	}, {
 100		.policy		= "buffered",
 101		.cr_mask	= CR_C,
 102		.pmd		= PMD_SECT_BUFFERED,
 103		.pte		= L_PTE_MT_BUFFERABLE,
 104		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 105	}, {
 106		.policy		= "writethrough",
 107		.cr_mask	= 0,
 108		.pmd		= PMD_SECT_WT,
 109		.pte		= L_PTE_MT_WRITETHROUGH,
 110		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 111	}, {
 112		.policy		= "writeback",
 113		.cr_mask	= 0,
 114		.pmd		= PMD_SECT_WB,
 115		.pte		= L_PTE_MT_WRITEBACK,
 116		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 117	}, {
 118		.policy		= "writealloc",
 119		.cr_mask	= 0,
 120		.pmd		= PMD_SECT_WBWA,
 121		.pte		= L_PTE_MT_WRITEALLOC,
 122		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 123	}
 124};
 125
 126#ifdef CONFIG_CPU_CP15
 127static unsigned long initial_pmd_value __initdata = 0;
 128
 129/*
  130 * Initialise the cachepolicy variable with the initial state specified
 131 * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
 132 * the C code sets the page tables up with the same policy as the head
 133 * assembly code, which avoids an illegal state where the TLBs can get
 134 * confused.  See comments in early_cachepolicy() for more information.
 135 */
 136void __init init_default_cache_policy(unsigned long pmd)
 137{
 138	int i;
 139
 140	initial_pmd_value = pmd;
 141
 142	pmd &= PMD_SECT_CACHE_MASK;
 143
 144	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
 145		if (cache_policies[i].pmd == pmd) {
 146			cachepolicy = i;
 147			break;
 148		}
 149
 150	if (i == ARRAY_SIZE(cache_policies))
 151		pr_err("ERROR: could not find cache policy\n");
 152}
 153
 154/*
 155 * These are useful for identifying cache coherency problems by allowing
 156 * the cache or the cache and writebuffer to be turned off.  (Note: the
 157 * write buffer should not be on and the cache off).
 158 */
 159static int __init early_cachepolicy(char *p)
 160{
 161	int i, selected = -1;
 162
 163	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 164		int len = strlen(cache_policies[i].policy);
 165
 166		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 167			selected = i;
 168			break;
 169		}
 170	}
 171
 172	if (selected == -1)
 173		pr_err("ERROR: unknown or unsupported cache policy\n");
 174
 175	/*
 176	 * This restriction is partly to do with the way we boot; it is
 177	 * unpredictable to have memory mapped using two different sets of
  178	 * memory attributes (shared, type, and cache attribs).  We cannot
  179	 * change these attributes once the initial assembly has set up the
 180	 * page tables.
 181	 */
 182	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
 183		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
 184			cache_policies[cachepolicy].policy);
 185		return 0;
 186	}
 187
 188	if (selected != cachepolicy) {
 189		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
 190		cachepolicy = selected;
 191		flush_cache_all();
 192		set_cr(cr);
 193	}
 194	return 0;
 195}
 196early_param("cachepolicy", early_cachepolicy);
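     /*
      * Example (illustrative): booting with "cachepolicy=writethrough"
      * selects the write-through entry from the table above; on ARMv6 and
      * later only the policy already chosen by the head assembly code is
      * accepted.
      */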
 197
 198static int __init early_nocache(char *__unused)
 199{
 200	char *p = "buffered";
 201	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 202	early_cachepolicy(p);
 203	return 0;
 204}
 205early_param("nocache", early_nocache);
 206
 207static int __init early_nowrite(char *__unused)
 208{
 209	char *p = "uncached";
 210	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 211	early_cachepolicy(p);
 212	return 0;
 213}
 214early_param("nowb", early_nowrite);
 215
 216#ifndef CONFIG_ARM_LPAE
 217static int __init early_ecc(char *p)
 218{
 219	if (memcmp(p, "on", 2) == 0)
 220		ecc_mask = PMD_PROTECTION;
 221	else if (memcmp(p, "off", 3) == 0)
 222		ecc_mask = 0;
 223	return 0;
 224}
 225early_param("ecc", early_ecc);
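     /*
      * Example (illustrative): "ecc=on" sets PMD_PROTECTION in ecc_mask,
      * which build_mem_type_table() later ORs into the L1 entries for the
      * vectors and kernel memory mappings.
      */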
 226#endif
 227
 228#else /* ifdef CONFIG_CPU_CP15 */
 229
 230static int __init early_cachepolicy(char *p)
 231{
 232	pr_warn("cachepolicy kernel parameter not supported without cp15\n");
 233}
 234early_param("cachepolicy", early_cachepolicy);
 235
 236static int __init noalign_setup(char *__unused)
 237{
 238	pr_warn("noalign kernel parameter not supported without cp15\n");
 239}
 240__setup("noalign", noalign_setup);
 241
 242#endif /* ifdef CONFIG_CPU_CP15 / else */
 243
 244#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 245#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 246#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 247
 248static struct mem_type mem_types[] __ro_after_init = {
 249	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 250		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 251				  L_PTE_SHARED,
 252		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
 253				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
 254				  L_PTE_SHARED,
 255		.prot_l1	= PMD_TYPE_TABLE,
 256		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 257		.domain		= DOMAIN_IO,
 258	},
 259	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 260		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 261		.prot_l1	= PMD_TYPE_TABLE,
 262		.prot_sect	= PROT_SECT_DEVICE,
 263		.domain		= DOMAIN_IO,
 264	},
 265	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
 266		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 267		.prot_l1	= PMD_TYPE_TABLE,
 268		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 269		.domain		= DOMAIN_IO,
 270	},
 271	[MT_DEVICE_WC] = {	/* ioremap_wc */
 272		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 273		.prot_l1	= PMD_TYPE_TABLE,
 274		.prot_sect	= PROT_SECT_DEVICE,
 275		.domain		= DOMAIN_IO,
 276	},
 277	[MT_UNCACHED] = {
 278		.prot_pte	= PROT_PTE_DEVICE,
 279		.prot_l1	= PMD_TYPE_TABLE,
 280		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 281		.domain		= DOMAIN_IO,
 282	},
 283	[MT_CACHECLEAN] = {
 284		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 285		.domain    = DOMAIN_KERNEL,
 286	},
 287#ifndef CONFIG_ARM_LPAE
 288	[MT_MINICLEAN] = {
 289		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 290		.domain    = DOMAIN_KERNEL,
 291	},
 292#endif
 293	[MT_LOW_VECTORS] = {
 294		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 295				L_PTE_RDONLY,
 296		.prot_l1   = PMD_TYPE_TABLE,
 297		.domain    = DOMAIN_VECTORS,
 298	},
 299	[MT_HIGH_VECTORS] = {
 300		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 301				L_PTE_USER | L_PTE_RDONLY,
 302		.prot_l1   = PMD_TYPE_TABLE,
 303		.domain    = DOMAIN_VECTORS,
 304	},
 305	[MT_MEMORY_RWX] = {
 306		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 307		.prot_l1   = PMD_TYPE_TABLE,
 308		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 309		.domain    = DOMAIN_KERNEL,
 310	},
 311	[MT_MEMORY_RW] = {
 312		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 313			     L_PTE_XN,
 314		.prot_l1   = PMD_TYPE_TABLE,
 315		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 316		.domain    = DOMAIN_KERNEL,
 317	},
 318	[MT_ROM] = {
 319		.prot_sect = PMD_TYPE_SECT,
 320		.domain    = DOMAIN_KERNEL,
 321	},
 322	[MT_MEMORY_RWX_NONCACHED] = {
 323		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 324				L_PTE_MT_BUFFERABLE,
 325		.prot_l1   = PMD_TYPE_TABLE,
 326		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 327		.domain    = DOMAIN_KERNEL,
 328	},
 329	[MT_MEMORY_RW_DTCM] = {
 330		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 331				L_PTE_XN,
 332		.prot_l1   = PMD_TYPE_TABLE,
 333		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 334		.domain    = DOMAIN_KERNEL,
 335	},
 336	[MT_MEMORY_RWX_ITCM] = {
 337		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 338		.prot_l1   = PMD_TYPE_TABLE,
 339		.domain    = DOMAIN_KERNEL,
 340	},
 341	[MT_MEMORY_RW_SO] = {
 342		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 343				L_PTE_MT_UNCACHED | L_PTE_XN,
 344		.prot_l1   = PMD_TYPE_TABLE,
 345		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
 346				PMD_SECT_UNCACHED | PMD_SECT_XN,
 347		.domain    = DOMAIN_KERNEL,
 348	},
 349	[MT_MEMORY_DMA_READY] = {
 350		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 351				L_PTE_XN,
 352		.prot_l1   = PMD_TYPE_TABLE,
 353		.domain    = DOMAIN_KERNEL,
 354	},
 355};
 356
 357const struct mem_type *get_mem_type(unsigned int type)
 358{
 359	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 360}
 361EXPORT_SYMBOL(get_mem_type);
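     /*
      * get_mem_type() is how callers such as the ioremap code translate an
      * MT_* index into the pte/pmd protection values assembled by
      * build_mem_type_table().
      */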
 362
 363static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
 364
 365static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
 366	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
 367
 368static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
 369{
 370	return &bm_pte[pte_index(addr)];
 371}
 372
 373static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
 374{
 375	return pte_offset_kernel(dir, addr);
 376}
 377
 378static inline pmd_t * __init fixmap_pmd(unsigned long addr)
 379{
 380	pgd_t *pgd = pgd_offset_k(addr);
 381	pud_t *pud = pud_offset(pgd, addr);
 382	pmd_t *pmd = pmd_offset(pud, addr);
 383
 384	return pmd;
 385}
 386
 387void __init early_fixmap_init(void)
 388{
 389	pmd_t *pmd;
 390
 391	/*
 392	 * The early fixmap range spans multiple pmds, for which
 393	 * we are not prepared:
 394	 */
 395	BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
 396		     != FIXADDR_TOP >> PMD_SHIFT);
 397
 398	pmd = fixmap_pmd(FIXADDR_TOP);
 399	pmd_populate_kernel(&init_mm, pmd, bm_pte);
 400
 401	pte_offset_fixmap = pte_offset_early_fixmap;
 402}
 403
 404/*
 405 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 406 * As a result, this can only be called with preemption disabled, as under
 407 * stop_machine().
 408 */
 409void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 410{
 411	unsigned long vaddr = __fix_to_virt(idx);
 412	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 413
 414	/* Make sure fixmap region does not exceed available allocation. */
 415	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
 416		     FIXADDR_END);
 417	BUG_ON(idx >= __end_of_fixed_addresses);
 418
 419	/* we only support device mappings until pgprot_kernel has been set */
 420	if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
 421		    pgprot_val(pgprot_kernel) == 0))
 422		return;
 423
 424	if (pgprot_val(prot))
 425		set_pte_at(NULL, vaddr, pte,
 426			pfn_pte(phys >> PAGE_SHIFT, prot));
 427	else
 428		pte_clear(NULL, vaddr, pte);
 429	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 430}
 431
 432/*
 433 * Adjust the PMD section entries according to the CPU in use.
 434 */
 435static void __init build_mem_type_table(void)
 436{
 437	struct cachepolicy *cp;
 438	unsigned int cr = get_cr();
 439	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
 440	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 441	int cpu_arch = cpu_architecture();
 442	int i;
 443
 444	if (cpu_arch < CPU_ARCH_ARMv6) {
 445#if defined(CONFIG_CPU_DCACHE_DISABLE)
 446		if (cachepolicy > CPOLICY_BUFFERED)
 447			cachepolicy = CPOLICY_BUFFERED;
 448#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
 449		if (cachepolicy > CPOLICY_WRITETHROUGH)
 450			cachepolicy = CPOLICY_WRITETHROUGH;
 451#endif
 452	}
 453	if (cpu_arch < CPU_ARCH_ARMv5) {
 454		if (cachepolicy >= CPOLICY_WRITEALLOC)
 455			cachepolicy = CPOLICY_WRITEBACK;
 456		ecc_mask = 0;
 457	}
 458
 459	if (is_smp()) {
 460		if (cachepolicy != CPOLICY_WRITEALLOC) {
 461			pr_warn("Forcing write-allocate cache policy for SMP\n");
 462			cachepolicy = CPOLICY_WRITEALLOC;
 463		}
 464		if (!(initial_pmd_value & PMD_SECT_S)) {
 465			pr_warn("Forcing shared mappings for SMP\n");
 466			initial_pmd_value |= PMD_SECT_S;
 467		}
 468	}
 469
 470	/*
 471	 * Strip out features not present on earlier architectures.
 472	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
 473	 * without extended page tables don't have the 'Shared' bit.
 474	 */
 475	if (cpu_arch < CPU_ARCH_ARMv5)
 476		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 477			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
 478	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
 479		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 480			mem_types[i].prot_sect &= ~PMD_SECT_S;
 481
 482	/*
 483	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
 484	 * "update-able on write" bit on ARM610).  However, Xscale and
 485	 * Xscale3 require this bit to be cleared.
 486	 */
 487	if (cpu_is_xscale_family()) {
 488		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 489			mem_types[i].prot_sect &= ~PMD_BIT4;
 490			mem_types[i].prot_l1 &= ~PMD_BIT4;
 491		}
 492	} else if (cpu_arch < CPU_ARCH_ARMv6) {
 493		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 494			if (mem_types[i].prot_l1)
 495				mem_types[i].prot_l1 |= PMD_BIT4;
 496			if (mem_types[i].prot_sect)
 497				mem_types[i].prot_sect |= PMD_BIT4;
 498		}
 499	}
 500
 501	/*
 502	 * Mark the device areas according to the CPU/architecture.
 503	 */
 504	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
 505		if (!cpu_is_xsc3()) {
 506			/*
 507			 * Mark device regions on ARMv6+ as execute-never
 508			 * to prevent speculative instruction fetches.
 509			 */
 510			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
 511			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 512			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 513			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
 514
 515			/* Also setup NX memory mapping */
 516			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
 517		}
 518		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 519			/*
 520			 * For ARMv7 with TEX remapping,
 521			 * - shared device is SXCB=1100
 522			 * - nonshared device is SXCB=0100
 523			 * - write combine device mem is SXCB=0001
 524			 * (Uncached Normal memory)
 525			 */
 526			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
 527			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
 528			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 529		} else if (cpu_is_xsc3()) {
 530			/*
 531			 * For Xscale3,
 532			 * - shared device is TEXCB=00101
 533			 * - nonshared device is TEXCB=01000
 534			 * - write combine device mem is TEXCB=00100
 535			 * (Inner/Outer Uncacheable in xsc3 parlance)
 536			 */
 537			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
 538			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 539			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 540		} else {
 541			/*
 542			 * For ARMv6 and ARMv7 without TEX remapping,
 543			 * - shared device is TEXCB=00001
 544			 * - nonshared device is TEXCB=01000
 545			 * - write combine device mem is TEXCB=00100
 546			 * (Uncached Normal in ARMv6 parlance).
 547			 */
 548			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 549			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 550			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 551		}
 552	} else {
 553		/*
 554		 * On others, write combining is "Uncached/Buffered"
 555		 */
 556		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 557	}
 558
 559	/*
 560	 * Now deal with the memory-type mappings
 561	 */
 562	cp = &cache_policies[cachepolicy];
 563	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 564	s2_pgprot = cp->pte_s2;
 565	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 566	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 567
 568#ifndef CONFIG_ARM_LPAE
 569	/*
 570	 * We don't use domains on ARMv6 (since this causes problems with
 571	 * v6/v7 kernels), so we must use a separate memory type for user
 572	 * r/o, kernel r/w to map the vectors page.
 573	 */
 574	if (cpu_arch == CPU_ARCH_ARMv6)
 575		vecs_pgprot |= L_PTE_MT_VECTORS;
 576
 577	/*
  578	 * Check whether the CPU supports the PXN bit in the
  579	 * Short-descriptor translation table format descriptors.
 580	 */
 581	if (cpu_arch == CPU_ARCH_ARMv7 &&
 582		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
 583		user_pmd_table |= PMD_PXNTABLE;
 584	}
 585#endif
 586
 587	/*
 588	 * ARMv6 and above have extended page tables.
 589	 */
 590	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 591#ifndef CONFIG_ARM_LPAE
 592		/*
 593		 * Mark cache clean areas and XIP ROM read only
 594		 * from SVC mode and no access from userspace.
 595		 */
 596		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 597		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 598		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 599#endif
 600
 601		/*
 602		 * If the initial page tables were created with the S bit
 603		 * set, then we need to do the same here for the same
 604		 * reasons given in early_cachepolicy().
 605		 */
 606		if (initial_pmd_value & PMD_SECT_S) {
 607			user_pgprot |= L_PTE_SHARED;
 608			kern_pgprot |= L_PTE_SHARED;
 609			vecs_pgprot |= L_PTE_SHARED;
 610			s2_pgprot |= L_PTE_SHARED;
 611			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 612			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 613			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 614			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 615			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
 616			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
 617			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
 618			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
 619			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 620			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
 621			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
 622		}
 623	}
 624
 625	/*
 626	 * Non-cacheable Normal - intended for memory areas that must
 627	 * not cause dirty cache line writebacks when used
 628	 */
 629	if (cpu_arch >= CPU_ARCH_ARMv6) {
 630		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 631			/* Non-cacheable Normal is XCB = 001 */
 632			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 633				PMD_SECT_BUFFERED;
 634		} else {
 635			/* For both ARMv6 and non-TEX-remapping ARMv7 */
 636			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 637				PMD_SECT_TEX(1);
 638		}
 639	} else {
 640		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 641	}
 642
 643#ifdef CONFIG_ARM_LPAE
 644	/*
 645	 * Do not generate access flag faults for the kernel mappings.
 646	 */
 647	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 648		mem_types[i].prot_pte |= PTE_EXT_AF;
 649		if (mem_types[i].prot_sect)
 650			mem_types[i].prot_sect |= PMD_SECT_AF;
 651	}
 652	kern_pgprot |= PTE_EXT_AF;
 653	vecs_pgprot |= PTE_EXT_AF;
 654
 655	/*
 656	 * Set PXN for user mappings
 657	 */
 658	user_pgprot |= PTE_EXT_PXN;
 659#endif
 660
 661	for (i = 0; i < 16; i++) {
 662		pteval_t v = pgprot_val(protection_map[i]);
 663		protection_map[i] = __pgprot(v | user_pgprot);
 664	}
 665
 666	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 667	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 668
 669	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 670	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 671				 L_PTE_DIRTY | kern_pgprot);
 672	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
 673	pgprot_s2_device  = __pgprot(s2_device_pgprot);
 674	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 675
 676	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 677	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 678	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
 679	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 680	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 681	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
 682	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 683	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 684	mem_types[MT_ROM].prot_sect |= cp->pmd;
 685
 686	switch (cp->pmd) {
 687	case PMD_SECT_WT:
 688		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
 689		break;
 690	case PMD_SECT_WB:
 691	case PMD_SECT_WBWA:
 692		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
 693		break;
 694	}
 695	pr_info("Memory policy: %sData cache %s\n",
 696		ecc_mask ? "ECC enabled, " : "", cp->policy);
 697
 698	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 699		struct mem_type *t = &mem_types[i];
 700		if (t->prot_l1)
 701			t->prot_l1 |= PMD_DOMAIN(t->domain);
 702		if (t->prot_sect)
 703			t->prot_sect |= PMD_DOMAIN(t->domain);
 704	}
 705}
 706
 707#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
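     /*
      * Used when user space maps physical memory (e.g. /dev/mem): pfns
      * outside RAM are mapped uncached, and O_SYNC mappings of RAM are
      * downgraded to write-combining.
      */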
 708pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 709			      unsigned long size, pgprot_t vma_prot)
 710{
 711	if (!pfn_valid(pfn))
 712		return pgprot_noncached(vma_prot);
 713	else if (file->f_flags & O_SYNC)
 714		return pgprot_writecombine(vma_prot);
 715	return vma_prot;
 716}
 717EXPORT_SYMBOL(phys_mem_access_prot);
 718#endif
 719
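     /* Vector page base: 0xffff0000 when high vectors are enabled, else 0. */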
 720#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 721
 722static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 723{
 724	void *ptr = __va(memblock_alloc(sz, align));
 725	memset(ptr, 0, sz);
 726	return ptr;
 727}
 728
 729static void __init *early_alloc(unsigned long sz)
 730{
 731	return early_alloc_aligned(sz, sz);
 732}
 733
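     /*
      * early_alloc() hands out zeroed memblock memory for boot-time page
      * tables; late_alloc() below uses the page allocator instead, and is
      * the allocator passed in by create_mapping_late().
      */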
 734static void *__init late_alloc(unsigned long sz)
 735{
 736	void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
 737
 738	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
 739		BUG();
 740	return ptr;
 741}
 742
 743static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
 744				unsigned long prot,
 745				void *(*alloc)(unsigned long sz))
 746{
 747	if (pmd_none(*pmd)) {
 748		pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 749		__pmd_populate(pmd, __pa(pte), prot);
 750	}
 751	BUG_ON(pmd_bad(*pmd));
 752	return pte_offset_kernel(pmd, addr);
 753}
 754
 755static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
 756				      unsigned long prot)
 757{
 758	return arm_pte_alloc(pmd, addr, prot, early_alloc);
 759}
 760
 761static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 762				  unsigned long end, unsigned long pfn,
 763				  const struct mem_type *type,
 764				  void *(*alloc)(unsigned long sz),
 765				  bool ng)
 766{
 767	pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
 768	do {
 769		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
 770			    ng ? PTE_EXT_NG : 0);
 771		pfn++;
 772	} while (pte++, addr += PAGE_SIZE, addr != end);
 773}
 774
 775static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 776			unsigned long end, phys_addr_t phys,
 777			const struct mem_type *type, bool ng)
 778{
 779	pmd_t *p = pmd;
 780
 781#ifndef CONFIG_ARM_LPAE
 782	/*
  783	 * In classic MMU format, puds and pmds are folded into
  784	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
  785	 * group of L1 entries making up one logical pointer to
  786	 * an L2 table (2MB), whereas PMDs refer to the individual
 787	 * L1 entries (1MB). Hence increment to get the correct
 788	 * offset for odd 1MB sections.
 789	 * (See arch/arm/include/asm/pgtable-2level.h)
 790	 */
 791	if (addr & SECTION_SIZE)
 792		pmd++;
 793#endif
 794	do {
 795		*pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
 796		phys += SECTION_SIZE;
 797	} while (pmd++, addr += SECTION_SIZE, addr != end);
 798
 799	flush_pmd_entry(p);
 800}
 801
 802static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 803				      unsigned long end, phys_addr_t phys,
 804				      const struct mem_type *type,
 805				      void *(*alloc)(unsigned long sz), bool ng)
 806{
 807	pmd_t *pmd = pmd_offset(pud, addr);
 808	unsigned long next;
 809
 810	do {
 811		/*
 812		 * With LPAE, we must loop over to map
 813		 * all the pmds for the given range.
 814		 */
 815		next = pmd_addr_end(addr, end);
 816
 817		/*
 818		 * Try a section mapping - addr, next and phys must all be
 819		 * aligned to a section boundary.
 820		 */
 821		if (type->prot_sect &&
 822				((addr | next | phys) & ~SECTION_MASK) == 0) {
 823			__map_init_section(pmd, addr, next, phys, type, ng);
 824		} else {
 825			alloc_init_pte(pmd, addr, next,
 826				       __phys_to_pfn(phys), type, alloc, ng);
 827		}
 828
 829		phys += next - addr;
 830
 831	} while (pmd++, addr = next, addr != end);
 832}
 833
 834static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 835				  unsigned long end, phys_addr_t phys,
 836				  const struct mem_type *type,
 837				  void *(*alloc)(unsigned long sz), bool ng)
 838{
 839	pud_t *pud = pud_offset(pgd, addr);
 840	unsigned long next;
 841
 842	do {
 843		next = pud_addr_end(addr, end);
 844		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
 845		phys += next - addr;
 846	} while (pud++, addr = next, addr != end);
 847}
 848
 849#ifndef CONFIG_ARM_LPAE
 850static void __init create_36bit_mapping(struct mm_struct *mm,
 851					struct map_desc *md,
 852					const struct mem_type *type,
 853					bool ng)
 854{
 855	unsigned long addr, length, end;
 856	phys_addr_t phys;
 857	pgd_t *pgd;
 858
 859	addr = md->virtual;
 860	phys = __pfn_to_phys(md->pfn);
 861	length = PAGE_ALIGN(md->length);
 862
 863	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 864		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
 865		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 866		return;
 867	}
 868
 869	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
 870	 *	Since domain assignments can in fact be arbitrary, the
  871	 *	'domain == 0' check below is required to ensure that ARMv6
 872	 *	supersections are only allocated for domain 0 regardless
 873	 *	of the actual domain assignments in use.
 874	 */
 875	if (type->domain) {
 876		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
 877		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 878		return;
 879	}
 880
 881	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
 882		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
 883		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 884		return;
 885	}
 886
 887	/*
 888	 * Shift bits [35:32] of address into bits [23:20] of PMD
 889	 * (See ARMv6 spec).
 890	 */
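     	/*
     	 * Illustrative example: pfn 0x180000 is physical 0x1_8000_0000,
     	 * so bits [35:32] = 0x1 and PMD bits [23:20] become 0b0001.
     	 */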
 891	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
 892
 893	pgd = pgd_offset(mm, addr);
 894	end = addr + length;
 895	do {
 896		pud_t *pud = pud_offset(pgd, addr);
 897		pmd_t *pmd = pmd_offset(pud, addr);
 898		int i;
 899
 900		for (i = 0; i < 16; i++)
 901			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
 902				       (ng ? PMD_SECT_nG : 0));
 903
 904		addr += SUPERSECTION_SIZE;
 905		phys += SUPERSECTION_SIZE;
 906		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 907	} while (addr != end);
 908}
 909#endif	/* !CONFIG_ARM_LPAE */
 910
 911static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
 912				    void *(*alloc)(unsigned long sz),
 913				    bool ng)
 914{
 915	unsigned long addr, length, end;
 916	phys_addr_t phys;
 917	const struct mem_type *type;
 918	pgd_t *pgd;
 919
 920	type = &mem_types[md->type];
 921
 922#ifndef CONFIG_ARM_LPAE
 923	/*
 924	 * Catch 36-bit addresses
 925	 */
 926	if (md->pfn >= 0x100000) {
 927		create_36bit_mapping(mm, md, type, ng);
 928		return;
 929	}
 930#endif
 931
 932	addr = md->virtual & PAGE_MASK;
 933	phys = __pfn_to_phys(md->pfn);
 934	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 935
 936	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 937		pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
 938			(long long)__pfn_to_phys(md->pfn), addr);
 939		return;
 940	}
 941
 942	pgd = pgd_offset(mm, addr);
 943	end = addr + length;
 944	do {
 945		unsigned long next = pgd_addr_end(addr, end);
 946
 947		alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
 948
 949		phys += next - addr;
 950		addr = next;
 951	} while (pgd++, addr != end);
 952}
 953
 954/*
 955 * Create the page directory entries and any necessary
 956 * page tables for the mapping specified by `md'.  We
 957 * are able to cope here with varying sizes and address
 958 * offsets, and we take full advantage of sections and
 959 * supersections.
 960 */
 961static void __init create_mapping(struct map_desc *md)
 962{
 963	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 964		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
 965			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 966		return;
 967	}
 968
 969	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 970	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 971	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 972		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 973			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 974	}
 975
 976	__create_mapping(&init_mm, md, early_alloc, false);
 977}
 978
 979void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
 980				bool ng)
 981{
 982#ifdef CONFIG_ARM_LPAE
 983	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
 984	if (WARN_ON(!pud))
 985		return;
 986	pmd_alloc(mm, pud, 0);
 987#endif
 988	__create_mapping(mm, md, late_alloc, ng);
 989}
 990
 991/*
 992 * Create the architecture specific mappings
 993 */
 994void __init iotable_init(struct map_desc *io_desc, int nr)
 995{
 996	struct map_desc *md;
 997	struct vm_struct *vm;
 998	struct static_vm *svm;
 999
1000	if (!nr)
1001		return;
1002
1003	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
1004
1005	for (md = io_desc; nr; md++, nr--) {
1006		create_mapping(md);
1007
1008		vm = &svm->vm;
1009		vm->addr = (void *)(md->virtual & PAGE_MASK);
1010		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
1011		vm->phys_addr = __pfn_to_phys(md->pfn);
1012		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
1013		vm->flags |= VM_ARM_MTYPE(md->type);
1014		vm->caller = iotable_init;
1015		add_static_vm_early(svm++);
1016	}
1017}
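     /*
      * Typical (illustrative) use from a machine's ->map_io() callback;
      * the device names and addresses below are made up:
      *
      *	static struct map_desc foo_io_desc[] __initdata = {
      *		{
      *			.virtual = 0xf8000000,
      *			.pfn     = __phys_to_pfn(0x10000000),
      *			.length  = SZ_1M,
      *			.type    = MT_DEVICE,
      *		},
      *	};
      *	iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
      */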
1018
1019void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
1020				  void *caller)
1021{
1022	struct vm_struct *vm;
1023	struct static_vm *svm;
1024
1025	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
1026
1027	vm = &svm->vm;
1028	vm->addr = (void *)addr;
1029	vm->size = size;
1030	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
1031	vm->caller = caller;
1032	add_static_vm_early(svm);
1033}
1034
1035#ifndef CONFIG_ARM_LPAE
1036
1037/*
1038 * The Linux PMD is made of two consecutive section entries covering 2MB
1039 * (see definition in include/asm/pgtable-2level.h).  However a call to
1040 * create_mapping() may optimize static mappings by using individual
1041 * 1MB section mappings.  This leaves the actual PMD potentially half
1042 * initialized if the top or bottom section entry isn't used, leaving it
1043 * open to problems if a subsequent ioremap() or vmalloc() tries to use
1044 * the virtual space left free by that unused section entry.
1045 *
1046 * Let's avoid the issue by inserting dummy vm entries covering the unused
1047 * PMD halves once the static mappings are in place.
1048 */
1049
1050static void __init pmd_empty_section_gap(unsigned long addr)
1051{
1052	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
1053}
1054
1055static void __init fill_pmd_gaps(void)
1056{
1057	struct static_vm *svm;
1058	struct vm_struct *vm;
1059	unsigned long addr, next = 0;
1060	pmd_t *pmd;
1061
1062	list_for_each_entry(svm, &static_vmlist, list) {
1063		vm = &svm->vm;
1064		addr = (unsigned long)vm->addr;
1065		if (addr < next)
1066			continue;
1067
1068		/*
1069		 * Check if this vm starts on an odd section boundary.
1070		 * If so and the first section entry for this PMD is free
1071		 * then we block the corresponding virtual address.
1072		 */
1073		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1074			pmd = pmd_off_k(addr);
1075			if (pmd_none(*pmd))
1076				pmd_empty_section_gap(addr & PMD_MASK);
1077		}
1078
1079		/*
1080		 * Then check if this vm ends on an odd section boundary.
1081		 * If so and the second section entry for this PMD is empty
1082		 * then we block the corresponding virtual address.
1083		 */
1084		addr += vm->size;
1085		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1086			pmd = pmd_off_k(addr) + 1;
1087			if (pmd_none(*pmd))
1088				pmd_empty_section_gap(addr);
1089		}
1090
1091		/* no need to look at any vm entry until we hit the next PMD */
1092		next = (addr + PMD_SIZE - 1) & PMD_MASK;
1093	}
1094}
1095
1096#else
1097#define fill_pmd_gaps() do { } while (0)
1098#endif
1099
1100#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
1101static void __init pci_reserve_io(void)
1102{
1103	struct static_vm *svm;
1104
1105	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
1106	if (svm)
1107		return;
1108
1109	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
1110}
1111#else
1112#define pci_reserve_io() do { } while (0)
1113#endif
1114
1115#ifdef CONFIG_DEBUG_LL
1116void __init debug_ll_io_init(void)
1117{
1118	struct map_desc map;
1119
1120	debug_ll_addr(&map.pfn, &map.virtual);
1121	if (!map.pfn || !map.virtual)
1122		return;
1123	map.pfn = __phys_to_pfn(map.pfn);
1124	map.virtual &= PAGE_MASK;
1125	map.length = PAGE_SIZE;
1126	map.type = MT_DEVICE;
1127	iotable_init(&map, 1);
1128}
1129#endif
1130
1131static void * __initdata vmalloc_min =
1132	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1133
1134/*
1135 * vmalloc=size forces the vmalloc area to be exactly 'size'
1136 * bytes. This can be used to increase (or decrease) the vmalloc
1137 * area - the default is 240m.
1138 */
1139static int __init early_vmalloc(char *arg)
1140{
1141	unsigned long vmalloc_reserve = memparse(arg, NULL);
1142
1143	if (vmalloc_reserve < SZ_16M) {
1144		vmalloc_reserve = SZ_16M;
1145		pr_warn("vmalloc area too small, limiting to %luMB\n",
1146			vmalloc_reserve >> 20);
1147	}
1148
1149	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
1150		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
1151		pr_warn("vmalloc area is too big, limiting to %luMB\n",
1152			vmalloc_reserve >> 20);
1153	}
1154
1155	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
1156	return 0;
1157}
1158early_param("vmalloc", early_vmalloc);
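     /*
      * Example (illustrative): "vmalloc=512M" on the command line moves
      * vmalloc_min down so 512MB of virtual space is kept for vmalloc,
      * clamped to the [16MB, VMALLOC_END - PAGE_OFFSET - 32MB] range above.
      */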
1159
1160phys_addr_t arm_lowmem_limit __initdata = 0;
1161
1162void __init adjust_lowmem_bounds(void)
1163{
1164	phys_addr_t memblock_limit = 0;
1165	u64 vmalloc_limit;
1166	struct memblock_region *reg;
1167	phys_addr_t lowmem_limit = 0;
1168
1169	/*
1170	 * Let's use our own (unoptimized) equivalent of __pa() that is
1171	 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
1172	 * The result is used as the upper bound on physical memory address
1173	 * and may itself be outside the valid range for which phys_addr_t
1174	 * and therefore __pa() is defined.
1175	 */
1176	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
1177
1178	for_each_memblock(memory, reg) {
1179		phys_addr_t block_start = reg->base;
1180		phys_addr_t block_end = reg->base + reg->size;
1181
1182		if (reg->base < vmalloc_limit) {
1183			if (block_end > lowmem_limit)
1184				/*
1185				 * Compare as u64 to ensure vmalloc_limit does
1186				 * not get truncated. block_end should always
1187				 * fit in phys_addr_t so there should be no
1188				 * issue with assignment.
1189				 */
1190				lowmem_limit = min_t(u64,
1191							 vmalloc_limit,
1192							 block_end);
1193
1194			/*
1195			 * Find the first non-pmd-aligned page, and point
1196			 * memblock_limit at it. This relies on rounding the
1197			 * limit down to be pmd-aligned, which happens at the
1198			 * end of this function.
1199			 *
1200			 * With this algorithm, the start or end of almost any
1201			 * bank can be non-pmd-aligned. The only exception is
1202			 * that the start of the bank 0 must be section-
1203			 * aligned, since otherwise memory would need to be
1204			 * allocated when mapping the start of bank 0, which
1205			 * occurs before any free memory is mapped.
1206			 */
1207			if (!memblock_limit) {
1208				if (!IS_ALIGNED(block_start, PMD_SIZE))
1209					memblock_limit = block_start;
1210				else if (!IS_ALIGNED(block_end, PMD_SIZE))
1211					memblock_limit = lowmem_limit;
1212			}
1213
1214		}
1215	}
1216
1217	arm_lowmem_limit = lowmem_limit;
1218
1219	high_memory = __va(arm_lowmem_limit - 1) + 1;
1220
1221	if (!memblock_limit)
1222		memblock_limit = arm_lowmem_limit;
1223
1224	/*
1225	 * Round the memblock limit down to a pmd size.  This
1226	 * helps to ensure that we will allocate memory from the
1227	 * last full pmd, which should be mapped.
1228	 */
1229	memblock_limit = round_down(memblock_limit, PMD_SIZE);
1230
1231	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
1232		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
1233			phys_addr_t end = memblock_end_of_DRAM();
1234
1235			pr_notice("Ignoring RAM at %pa-%pa\n",
1236				  &memblock_limit, &end);
1237			pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1238
1239			memblock_remove(memblock_limit, end - memblock_limit);
1240		}
1241	}
1242
1243	memblock_set_current_limit(memblock_limit);
1244}
1245
1246static inline void prepare_page_table(void)
1247{
1248	unsigned long addr;
1249	phys_addr_t end;
1250
1251	/*
1252	 * Clear out all the mappings below the kernel image.
1253	 */
1254	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1255		pmd_clear(pmd_off_k(addr));
1256
1257#ifdef CONFIG_XIP_KERNEL
1258	/* The XIP kernel is mapped in the module area -- skip over it */
1259	addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
1260#endif
1261	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1262		pmd_clear(pmd_off_k(addr));
1263
1264	/*
1265	 * Find the end of the first block of lowmem.
1266	 */
1267	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1268	if (end >= arm_lowmem_limit)
1269		end = arm_lowmem_limit;
1270
1271	/*
1272	 * Clear out all the kernel space mappings, except for the first
1273	 * memory bank, up to the vmalloc region.
1274	 */
1275	for (addr = __phys_to_virt(end);
1276	     addr < VMALLOC_START; addr += PMD_SIZE)
1277		pmd_clear(pmd_off_k(addr));
1278}
1279
1280#ifdef CONFIG_ARM_LPAE
1281/* the first page is reserved for pgd */
1282#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
1283				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1284#else
1285#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
1286#endif
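     /*
      * With the usual 2-level values (2048 pgd entries of 8 bytes) this is
      * 16KB; the LPAE variant adds one page for the top-level pgd to the
      * 4 * 512 * 8 = 16KB of pmd tables (about 20KB in total).
      */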
1287
1288/*
1289 * Reserve the special regions of memory
1290 */
1291void __init arm_mm_memblock_reserve(void)
1292{
1293	/*
1294	 * Reserve the page tables.  These are already in use,
1295	 * and can only be in node 0.
1296	 */
1297	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1298
1299#ifdef CONFIG_SA1111
1300	/*
1301	 * Because of the SA1111 DMA bug, we want to preserve our
1302	 * precious DMA-able memory...
1303	 */
1304	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1305#endif
1306}
1307
1308/*
1309 * Set up the device mappings.  Since we clear out the page tables for all
1310 * mappings above VMALLOC_START, except early fixmap, we might remove debug
 1311 * device mappings.  This means earlycon can be used to debug this function.
1312 * Any other function or debugging method which may touch any device _will_
1313 * crash the kernel.
1314 */
1315static void __init devicemaps_init(const struct machine_desc *mdesc)
1316{
1317	struct map_desc map;
1318	unsigned long addr;
1319	void *vectors;
1320
1321	/*
1322	 * Allocate the vector page early.
1323	 */
1324	vectors = early_alloc(PAGE_SIZE * 2);
1325
1326	early_trap_init(vectors);
1327
1328	/*
1329	 * Clear page table except top pmd used by early fixmaps
1330	 */
1331	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
1332		pmd_clear(pmd_off_k(addr));
1333
1334	/*
1335	 * Map the kernel if it is XIP.
 1336	 * It is always first in the module area.
1337	 */
1338#ifdef CONFIG_XIP_KERNEL
1339	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1340	map.virtual = MODULES_VADDR;
1341	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1342	map.type = MT_ROM;
1343	create_mapping(&map);
1344#endif
1345
1346	/*
1347	 * Map the cache flushing regions.
1348	 */
1349#ifdef FLUSH_BASE
1350	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1351	map.virtual = FLUSH_BASE;
1352	map.length = SZ_1M;
1353	map.type = MT_CACHECLEAN;
1354	create_mapping(&map);
1355#endif
1356#ifdef FLUSH_BASE_MINICACHE
1357	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1358	map.virtual = FLUSH_BASE_MINICACHE;
1359	map.length = SZ_1M;
1360	map.type = MT_MINICLEAN;
1361	create_mapping(&map);
1362#endif
1363
1364	/*
1365	 * Create a mapping for the machine vectors at the high-vectors
1366	 * location (0xffff0000).  If we aren't using high-vectors, also
1367	 * create a mapping at the low-vectors virtual address.
1368	 */
1369	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1370	map.virtual = 0xffff0000;
1371	map.length = PAGE_SIZE;
1372#ifdef CONFIG_KUSER_HELPERS
1373	map.type = MT_HIGH_VECTORS;
1374#else
1375	map.type = MT_LOW_VECTORS;
1376#endif
1377	create_mapping(&map);
1378
1379	if (!vectors_high()) {
1380		map.virtual = 0;
1381		map.length = PAGE_SIZE * 2;
1382		map.type = MT_LOW_VECTORS;
1383		create_mapping(&map);
1384	}
1385
1386	/* Now create a kernel read-only mapping */
1387	map.pfn += 1;
1388	map.virtual = 0xffff0000 + PAGE_SIZE;
1389	map.length = PAGE_SIZE;
1390	map.type = MT_LOW_VECTORS;
1391	create_mapping(&map);
1392
1393	/*
1394	 * Ask the machine support to map in the statically mapped devices.
1395	 */
1396	if (mdesc->map_io)
1397		mdesc->map_io();
1398	else
1399		debug_ll_io_init();
1400	fill_pmd_gaps();
1401
1402	/* Reserve fixed i/o space in VMALLOC region */
1403	pci_reserve_io();
1404
1405	/*
1406	 * Finally flush the caches and tlb to ensure that we're in a
1407	 * consistent state wrt the writebuffer.  This also ensures that
1408	 * any write-allocated cache lines in the vector page are written
1409	 * back.  After this point, we can start to touch devices again.
1410	 */
1411	local_flush_tlb_all();
1412	flush_cache_all();
1413
1414	/* Enable asynchronous aborts */
1415	early_abt_enable();
1416}
1417
1418static void __init kmap_init(void)
1419{
1420#ifdef CONFIG_HIGHMEM
1421	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1422		PKMAP_BASE, _PAGE_KERNEL_TABLE);
1423#endif
1424
1425	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
1426			_PAGE_KERNEL_TABLE);
1427}
1428
1429static void __init map_lowmem(void)
1430{
1431	struct memblock_region *reg;
1432	phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
1433	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
1434
1435	/* Map all the lowmem memory banks. */
1436	for_each_memblock(memory, reg) {
1437		phys_addr_t start = reg->base;
1438		phys_addr_t end = start + reg->size;
1439		struct map_desc map;
1440
1441		if (memblock_is_nomap(reg))
1442			continue;
1443
1444		if (end > arm_lowmem_limit)
1445			end = arm_lowmem_limit;
1446		if (start >= end)
1447			break;
1448
1449		if (end < kernel_x_start) {
1450			map.pfn = __phys_to_pfn(start);
1451			map.virtual = __phys_to_virt(start);
1452			map.length = end - start;
1453			map.type = MT_MEMORY_RWX;
1454
1455			create_mapping(&map);
1456		} else if (start >= kernel_x_end) {
1457			map.pfn = __phys_to_pfn(start);
1458			map.virtual = __phys_to_virt(start);
1459			map.length = end - start;
1460			map.type = MT_MEMORY_RW;
1461
1462			create_mapping(&map);
1463		} else {
1464			/* This better cover the entire kernel */
1465			if (start < kernel_x_start) {
1466				map.pfn = __phys_to_pfn(start);
1467				map.virtual = __phys_to_virt(start);
1468				map.length = kernel_x_start - start;
1469				map.type = MT_MEMORY_RW;
1470
1471				create_mapping(&map);
1472			}
1473
1474			map.pfn = __phys_to_pfn(kernel_x_start);
1475			map.virtual = __phys_to_virt(kernel_x_start);
1476			map.length = kernel_x_end - kernel_x_start;
1477			map.type = MT_MEMORY_RWX;
1478
1479			create_mapping(&map);
1480
1481			if (kernel_x_end < end) {
1482				map.pfn = __phys_to_pfn(kernel_x_end);
1483				map.virtual = __phys_to_virt(kernel_x_end);
1484				map.length = end - kernel_x_end;
1485				map.type = MT_MEMORY_RW;
1486
1487				create_mapping(&map);
1488			}
1489		}
1490	}
1491}
1492
1493#ifdef CONFIG_ARM_PV_FIXUP
1494extern unsigned long __atags_pointer;
1495typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
1496pgtables_remap lpae_pgtables_remap_asm;
1497
1498/*
1499 * early_paging_init() recreates boot time page table setup, allowing machines
1500 * to switch over to a high (>4G) address space on LPAE systems
1501 */
1502static void __init early_paging_init(const struct machine_desc *mdesc)
1503{
1504	pgtables_remap *lpae_pgtables_remap;
1505	unsigned long pa_pgd;
1506	unsigned int cr, ttbcr;
1507	long long offset;
1508	void *boot_data;
1509
1510	if (!mdesc->pv_fixup)
1511		return;
1512
1513	offset = mdesc->pv_fixup();
1514	if (offset == 0)
1515		return;
1516
1517	/*
1518	 * Get the address of the remap function in the 1:1 identity
1519	 * mapping setup by the early page table assembly code.  We
1520	 * must get this prior to the pv update.  The following barrier
1521	 * ensures that this is complete before we fixup any P:V offsets.
1522	 */
1523	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
1524	pa_pgd = __pa(swapper_pg_dir);
1525	boot_data = __va(__atags_pointer);
1526	barrier();
1527
1528	pr_info("Switching physical address space to 0x%08llx\n",
1529		(u64)PHYS_OFFSET + offset);
1530
1531	/* Re-set the phys pfn offset, and the pv offset */
1532	__pv_offset += offset;
1533	__pv_phys_pfn_offset += PFN_DOWN(offset);
1534
1535	/* Run the patch stub to update the constants */
1536	fixup_pv_table(&__pv_table_begin,
1537		(&__pv_table_end - &__pv_table_begin) << 2);
1538
1539	/*
 1540	 * We are changing not only the virtual to physical mapping, but also
1541	 * the physical addresses used to access memory.  We need to flush
1542	 * all levels of cache in the system with caching disabled to
1543	 * ensure that all data is written back, and nothing is prefetched
1544	 * into the caches.  We also need to prevent the TLB walkers
1545	 * allocating into the caches too.  Note that this is ARMv7 LPAE
1546	 * specific.
1547	 */
1548	cr = get_cr();
1549	set_cr(cr & ~(CR_I | CR_C));
1550	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
1551	asm volatile("mcr p15, 0, %0, c2, c0, 2"
1552		: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
1553	flush_cache_all();
1554
1555	/*
1556	 * Fixup the page tables - this must be in the idmap region as
1557	 * we need to disable the MMU to do this safely, and hence it
1558	 * needs to be assembly.  It's fairly simple, as we're using the
1559	 * temporary tables setup by the initial assembly code.
1560	 */
1561	lpae_pgtables_remap(offset, pa_pgd, boot_data);
1562
1563	/* Re-enable the caches and cacheable TLB walks */
1564	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
1565	set_cr(cr);
1566}
1567
1568#else
1569
1570static void __init early_paging_init(const struct machine_desc *mdesc)
1571{
1572	long long offset;
1573
1574	if (!mdesc->pv_fixup)
1575		return;
1576
1577	offset = mdesc->pv_fixup();
1578	if (offset == 0)
1579		return;
1580
1581	pr_crit("Physical address space modification is only to support Keystone2.\n");
1582	pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
1583	pr_crit("feature. Your kernel may crash now, have a good day.\n");
1584	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1585}
1586
1587#endif
1588
1589static void __init early_fixmap_shutdown(void)
1590{
1591	int i;
1592	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
1593
1594	pte_offset_fixmap = pte_offset_late_fixmap;
1595	pmd_clear(fixmap_pmd(va));
1596	local_flush_tlb_kernel_page(va);
1597
1598	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
1599		pte_t *pte;
1600		struct map_desc map;
1601
1602		map.virtual = fix_to_virt(i);
1603		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
1604
1605		/* Only i/o device mappings are supported ATM */
1606		if (pte_none(*pte) ||
1607		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
1608			continue;
1609
1610		map.pfn = pte_pfn(*pte);
1611		map.type = MT_DEVICE;
1612		map.length = PAGE_SIZE;
1613
1614		create_mapping(&map);
1615	}
1616}
1617
1618/*
1619 * paging_init() sets up the page tables, initialises the zone memory
1620 * maps, and sets up the zero page, bad page and bad page tables.
1621 */
1622void __init paging_init(const struct machine_desc *mdesc)
1623{
1624	void *zero_page;
1625
1626	prepare_page_table();
1627	map_lowmem();
1628	memblock_set_current_limit(arm_lowmem_limit);
1629	dma_contiguous_remap();
1630	early_fixmap_shutdown();
1631	devicemaps_init(mdesc);
1632	kmap_init();
1633	tcm_init();
1634
1635	top_pmd = pmd_off_k(0xffff0000);
1636
1637	/* allocate the zero page. */
1638	zero_page = early_alloc(PAGE_SIZE);
1639
1640	bootmem_init();
1641
1642	empty_zero_page = virt_to_page(zero_page);
1643	__flush_dcache_page(NULL, empty_zero_page);
1644
1645	/* Compute the virt/idmap offset, mostly for the sake of KVM */
1646	kimage_voffset = (unsigned long)&kimage_voffset - virt_to_idmap(&kimage_voffset);
1647}
1648
1649void __init early_mm_init(const struct machine_desc *mdesc)
1650{
1651	build_mem_type_table();
1652	early_paging_init(mdesc);
1653}
v3.1
   1/*
   2 *  linux/arch/arm/mm/mmu.c
   3 *
   4 *  Copyright (C) 1995-2005 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/module.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/init.h>
  14#include <linux/mman.h>
  15#include <linux/nodemask.h>
  16#include <linux/memblock.h>
  17#include <linux/fs.h>
 
 
  18
 
  19#include <asm/cputype.h>
  20#include <asm/sections.h>
  21#include <asm/cachetype.h>
 
 
  22#include <asm/setup.h>
  23#include <asm/sizes.h>
  24#include <asm/smp_plat.h>
  25#include <asm/tlb.h>
  26#include <asm/highmem.h>
 
  27#include <asm/traps.h>
 
 
  28
  29#include <asm/mach/arch.h>
  30#include <asm/mach/map.h>
 
 
  31
 
  32#include "mm.h"
 
  33
  34/*
  35 * empty_zero_page is a special page that is used for
  36 * zero-initialized data and COW.
  37 */
  38struct page *empty_zero_page;
  39EXPORT_SYMBOL(empty_zero_page);
  40
  41/*
  42 * The pmd table for the upper-most set of pages.
  43 */
  44pmd_t *top_pmd;
  45
 
 
  46#define CPOLICY_UNCACHED	0
  47#define CPOLICY_BUFFERED	1
  48#define CPOLICY_WRITETHROUGH	2
  49#define CPOLICY_WRITEBACK	3
  50#define CPOLICY_WRITEALLOC	4
  51
  52static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
  53static unsigned int ecc_mask __initdata = 0;
  54pgprot_t pgprot_user;
  55pgprot_t pgprot_kernel;
 
 
 
  56
  57EXPORT_SYMBOL(pgprot_user);
  58EXPORT_SYMBOL(pgprot_kernel);
  59
  60struct cachepolicy {
  61	const char	policy[16];
  62	unsigned int	cr_mask;
  63	unsigned int	pmd;
  64	pteval_t	pte;
 
  65};
  66
 
 
 
 
 
 
 
 
  67static struct cachepolicy cache_policies[] __initdata = {
  68	{
  69		.policy		= "uncached",
  70		.cr_mask	= CR_W|CR_C,
  71		.pmd		= PMD_SECT_UNCACHED,
  72		.pte		= L_PTE_MT_UNCACHED,
 
  73	}, {
  74		.policy		= "buffered",
  75		.cr_mask	= CR_C,
  76		.pmd		= PMD_SECT_BUFFERED,
  77		.pte		= L_PTE_MT_BUFFERABLE,
 
  78	}, {
  79		.policy		= "writethrough",
  80		.cr_mask	= 0,
  81		.pmd		= PMD_SECT_WT,
  82		.pte		= L_PTE_MT_WRITETHROUGH,
 
  83	}, {
  84		.policy		= "writeback",
  85		.cr_mask	= 0,
  86		.pmd		= PMD_SECT_WB,
  87		.pte		= L_PTE_MT_WRITEBACK,
 
  88	}, {
  89		.policy		= "writealloc",
  90		.cr_mask	= 0,
  91		.pmd		= PMD_SECT_WBWA,
  92		.pte		= L_PTE_MT_WRITEALLOC,
 
  93	}
  94};
  95
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  96/*
  97 * These are useful for identifying cache coherency
  98 * problems by allowing the cache or the cache and
  99 * writebuffer to be turned off.  (Note: the write
 100 * buffer should not be on and the cache off).
 101 */
 102static int __init early_cachepolicy(char *p)
 103{
 104	int i;
 105
 106	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 107		int len = strlen(cache_policies[i].policy);
 108
 109		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 110			cachepolicy = i;
 111			cr_alignment &= ~cache_policies[i].cr_mask;
 112			cr_no_alignment &= ~cache_policies[i].cr_mask;
 113			break;
 114		}
 115	}
 116	if (i == ARRAY_SIZE(cache_policies))
 117		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
 
 
 118	/*
 119	 * This restriction is partly to do with the way we boot; it is
 120	 * unpredictable to have memory mapped using two different sets of
 121	 * memory attributes (shared, type, and cache attribs).  We can not
 122	 * change these attributes once the initial assembly has setup the
 123	 * page tables.
 124	 */
 125	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
 126		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
 127		cachepolicy = CPOLICY_WRITEBACK;
 
 
 
 
 
 
 
 
 128	}
 129	flush_cache_all();
 130	set_cr(cr_alignment);
 131	return 0;
 132}
 133early_param("cachepolicy", early_cachepolicy);
 134
 135static int __init early_nocache(char *__unused)
 136{
 137	char *p = "buffered";
 138	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
 139	early_cachepolicy(p);
 140	return 0;
 141}
 142early_param("nocache", early_nocache);
 143
 144static int __init early_nowrite(char *__unused)
 145{
 146	char *p = "uncached";
 147	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
 148	early_cachepolicy(p);
 149	return 0;
 150}
 151early_param("nowb", early_nowrite);
 152
 153static int __init early_ecc(char *p)
 154{
 155	if (memcmp(p, "on", 2) == 0)
 156		ecc_mask = PMD_PROTECTION;
 157	else if (memcmp(p, "off", 3) == 0)
 158		ecc_mask = 0;
 159	return 0;
 160}
 161early_param("ecc", early_ecc);
 162
 163static int __init noalign_setup(char *__unused)
 164{
 165	cr_alignment &= ~CR_A;
 166	cr_no_alignment &= ~CR_A;
 167	set_cr(cr_alignment);
 168	return 1;
 169}
 170__setup("noalign", noalign_setup);
 171
 172#ifndef CONFIG_SMP
 173void adjust_cr(unsigned long mask, unsigned long set)
 174{
 175	unsigned long flags;
 176
 177	mask &= ~CR_A;
 178
 179	set &= mask;
 180
 181	local_irq_save(flags);
 182
 183	cr_no_alignment = (cr_no_alignment & ~mask) | set;
 184	cr_alignment = (cr_alignment & ~mask) | set;
 185
 186	set_cr((get_cr() & ~mask) | set);
 187
 188	local_irq_restore(flags);
 189}
 190#endif
 191
 192#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 193#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 194
 195static struct mem_type mem_types[] = {
 196	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 197		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 198				  L_PTE_SHARED,
 199		.prot_l1	= PMD_TYPE_TABLE,
 200		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 201		.domain		= DOMAIN_IO,
 202	},
 203	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 204		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 205		.prot_l1	= PMD_TYPE_TABLE,
 206		.prot_sect	= PROT_SECT_DEVICE,
 207		.domain		= DOMAIN_IO,
 208	},
 209	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
 210		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 211		.prot_l1	= PMD_TYPE_TABLE,
 212		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 213		.domain		= DOMAIN_IO,
  214	},
 215	[MT_DEVICE_WC] = {	/* ioremap_wc */
 216		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 217		.prot_l1	= PMD_TYPE_TABLE,
 218		.prot_sect	= PROT_SECT_DEVICE,
 219		.domain		= DOMAIN_IO,
 220	},
 221	[MT_UNCACHED] = {
 222		.prot_pte	= PROT_PTE_DEVICE,
 223		.prot_l1	= PMD_TYPE_TABLE,
 224		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 225		.domain		= DOMAIN_IO,
 226	},
 227	[MT_CACHECLEAN] = {
 228		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 229		.domain    = DOMAIN_KERNEL,
 230	},
 231	[MT_MINICLEAN] = {
 232		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 233		.domain    = DOMAIN_KERNEL,
 234	},
 235	[MT_LOW_VECTORS] = {
 236		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 237				L_PTE_RDONLY,
 238		.prot_l1   = PMD_TYPE_TABLE,
 239		.domain    = DOMAIN_USER,
 240	},
 241	[MT_HIGH_VECTORS] = {
 242		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 243				L_PTE_USER | L_PTE_RDONLY,
 244		.prot_l1   = PMD_TYPE_TABLE,
 245		.domain    = DOMAIN_USER,
 246	},
 247	[MT_MEMORY] = {
 248		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 249		.prot_l1   = PMD_TYPE_TABLE,
 250		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 251		.domain    = DOMAIN_KERNEL,
 252	},
 253	[MT_ROM] = {
 254		.prot_sect = PMD_TYPE_SECT,
 255		.domain    = DOMAIN_KERNEL,
 256	},
 257	[MT_MEMORY_NONCACHED] = {
 258		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 259				L_PTE_MT_BUFFERABLE,
 260		.prot_l1   = PMD_TYPE_TABLE,
 261		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 262		.domain    = DOMAIN_KERNEL,
 263	},
 264	[MT_MEMORY_DTCM] = {
 265		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 266				L_PTE_XN,
 267		.prot_l1   = PMD_TYPE_TABLE,
 268		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 269		.domain    = DOMAIN_KERNEL,
 270	},
 271	[MT_MEMORY_ITCM] = {
 272		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 273		.prot_l1   = PMD_TYPE_TABLE,
 274		.domain    = DOMAIN_KERNEL,
 275	},
 276};
 277
 278const struct mem_type *get_mem_type(unsigned int type)
 279{
 280	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 281}
 282EXPORT_SYMBOL(get_mem_type);
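/*
 * Usage sketch (editor's illustration, not from the original source):
 * mapping code can look up the protection bits of one of the MT_*
 * types through get_mem_type(), e.g.
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE_WC);
 *	pte_t pte;
 *
 *	if (mt)
 *		pte = pfn_pte(pfn, __pgprot(mt->prot_pte));
 *
 * 'pfn' is a hypothetical local here; get_mem_type() returns NULL for
 * an out-of-range type, so callers are expected to check the result.
 */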
 283
 284/*
 285 * Adjust the PMD section entries according to the CPU in use.
 286 */
 287static void __init build_mem_type_table(void)
 288{
 289	struct cachepolicy *cp;
 290	unsigned int cr = get_cr();
 291	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 292	int cpu_arch = cpu_architecture();
 293	int i;
 294
 295	if (cpu_arch < CPU_ARCH_ARMv6) {
 296#if defined(CONFIG_CPU_DCACHE_DISABLE)
 297		if (cachepolicy > CPOLICY_BUFFERED)
 298			cachepolicy = CPOLICY_BUFFERED;
 299#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
 300		if (cachepolicy > CPOLICY_WRITETHROUGH)
 301			cachepolicy = CPOLICY_WRITETHROUGH;
 302#endif
 303	}
 304	if (cpu_arch < CPU_ARCH_ARMv5) {
 305		if (cachepolicy >= CPOLICY_WRITEALLOC)
 306			cachepolicy = CPOLICY_WRITEBACK;
 307		ecc_mask = 0;
 308	}
 309	if (is_smp())
 310		cachepolicy = CPOLICY_WRITEALLOC;
 311
 312	/*
 313	 * Strip out features not present on earlier architectures.
 314	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
 315	 * without extended page tables don't have the 'Shared' bit.
 316	 */
 317	if (cpu_arch < CPU_ARCH_ARMv5)
 318		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 319			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
 320	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
 321		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 322			mem_types[i].prot_sect &= ~PMD_SECT_S;
 323
 324	/*
 325	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
 326	 * "update-able on write" bit on ARM610).  However, Xscale and
 327	 * Xscale3 require this bit to be cleared.
 328	 */
 329	if (cpu_is_xscale() || cpu_is_xsc3()) {
 330		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 331			mem_types[i].prot_sect &= ~PMD_BIT4;
 332			mem_types[i].prot_l1 &= ~PMD_BIT4;
 333		}
 334	} else if (cpu_arch < CPU_ARCH_ARMv6) {
 335		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 336			if (mem_types[i].prot_l1)
 337				mem_types[i].prot_l1 |= PMD_BIT4;
 338			if (mem_types[i].prot_sect)
 339				mem_types[i].prot_sect |= PMD_BIT4;
 340		}
 341	}
 342
 343	/*
 344	 * Mark the device areas according to the CPU/architecture.
 345	 */
 346	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
 347		if (!cpu_is_xsc3()) {
 348			/*
 349			 * Mark device regions on ARMv6+ as execute-never
 350			 * to prevent speculative instruction fetches.
 351			 */
 352			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
 353			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 354			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 355			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
 356		}
 357		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 358			/*
 359			 * For ARMv7 with TEX remapping,
 360			 * - shared device is SXCB=1100
 361			 * - nonshared device is SXCB=0100
 362			 * - write combine device mem is SXCB=0001
 363			 * (Uncached Normal memory)
 364			 */
 365			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
 366			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
 367			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 368		} else if (cpu_is_xsc3()) {
 369			/*
 370			 * For Xscale3,
 371			 * - shared device is TEXCB=00101
 372			 * - nonshared device is TEXCB=01000
 373			 * - write combine device mem is TEXCB=00100
 374			 * (Inner/Outer Uncacheable in xsc3 parlance)
 375			 */
 376			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
 377			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 378			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 379		} else {
 380			/*
 381			 * For ARMv6 and ARMv7 without TEX remapping,
 382			 * - shared device is TEXCB=00001
 383			 * - nonshared device is TEXCB=01000
 384			 * - write combine device mem is TEXCB=00100
 385			 * (Uncached Normal in ARMv6 parlance).
 386			 */
 387			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 388			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 389			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 390		}
 391	} else {
 392		/*
 393		 * On others, write combining is "Uncached/Buffered"
 394		 */
 395		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 396	}
 397
 398	/*
 399	 * Now deal with the memory-type mappings
 400	 */
 401	cp = &cache_policies[cachepolicy];
 402	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 403
 404	/*
 405	 * Only use write-through for non-SMP systems
 406	 */
 407	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 408		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
 409
 410	/*
 411	 * Enable CPU-specific coherency if supported.
 412	 * (Only available on XSC3 at the moment.)
 413	 */
 414	if (arch_is_coherent() && cpu_is_xsc3()) {
 415		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 416		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 417		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 418		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 419	}
 420	/*
 421	 * ARMv6 and above have extended page tables.
 422	 */
 423	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 424		/*
 425		 * Mark cache clean areas and XIP ROM read only
 426		 * from SVC mode and no access from userspace.
 427		 */
 428		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 429		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 430		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 431
 432		if (is_smp()) {
 433			/*
 434			 * Mark memory with the "shared" attribute
 435			 * for SMP systems
 436			 */
 437			user_pgprot |= L_PTE_SHARED;
 438			kern_pgprot |= L_PTE_SHARED;
 439			vecs_pgprot |= L_PTE_SHARED;
 440			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 441			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 442			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 443			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 444			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 445			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 446			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 447			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 448		}
 449	}
 450
 451	/*
 452	 * Non-cacheable Normal - intended for memory areas that must
 453	 * not cause dirty cache line writebacks when used
 454	 */
 455	if (cpu_arch >= CPU_ARCH_ARMv6) {
 456		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 457			/* Non-cacheable Normal is XCB = 001 */
 458			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
 459				PMD_SECT_BUFFERED;
 460		} else {
 461			/* For both ARMv6 and non-TEX-remapping ARMv7 */
 462			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
 463				PMD_SECT_TEX(1);
 464		}
 465	} else {
 466		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 467	}
 468
 469	for (i = 0; i < 16; i++) {
 470		unsigned long v = pgprot_val(protection_map[i]);
 471		protection_map[i] = __pgprot(v | user_pgprot);
 472	}
 473
 474	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 475	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 476
 477	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 478	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 479				 L_PTE_DIRTY | kern_pgprot);
 480
 481	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 482	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 483	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 484	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
 485	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 486	mem_types[MT_ROM].prot_sect |= cp->pmd;
 487
 488	switch (cp->pmd) {
 489	case PMD_SECT_WT:
 490		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
 491		break;
 492	case PMD_SECT_WB:
 493	case PMD_SECT_WBWA:
 494		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
 495		break;
 496	}
 497	printk("Memory policy: ECC %sabled, Data cache %s\n",
 498		ecc_mask ? "en" : "dis", cp->policy);
 499
 500	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 501		struct mem_type *t = &mem_types[i];
 502		if (t->prot_l1)
 503			t->prot_l1 |= PMD_DOMAIN(t->domain);
 504		if (t->prot_sect)
 505			t->prot_sect |= PMD_DOMAIN(t->domain);
 506	}
 507}
 508
 509#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 510pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 511			      unsigned long size, pgprot_t vma_prot)
 512{
 513	if (!pfn_valid(pfn))
 514		return pgprot_noncached(vma_prot);
 515	else if (file->f_flags & O_SYNC)
 516		return pgprot_writecombine(vma_prot);
 517	return vma_prot;
 518}
 519EXPORT_SYMBOL(phys_mem_access_prot);
 520#endif
 521
 522#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 523
 524static void __init *early_alloc(unsigned long sz)
 525{
 526	void *ptr = __va(memblock_alloc(sz, sz));
 527	memset(ptr, 0, sz);
 528	return ptr;
 529}
 530
 531static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 532{
 533	if (pmd_none(*pmd)) {
 534		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 535		__pmd_populate(pmd, __pa(pte), prot);
 536	}
 537	BUG_ON(pmd_bad(*pmd));
 538	return pte_offset_kernel(pmd, addr);
 539}
 540
 541static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 542				  unsigned long end, unsigned long pfn,
 543				  const struct mem_type *type)
 544{
 545	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
 546	do {
 547		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 548		pfn++;
 549	} while (pte++, addr += PAGE_SIZE, addr != end);
 550}
 551
 552static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 553				      unsigned long end, phys_addr_t phys,
 554				      const struct mem_type *type)
 555{
 556	pmd_t *pmd = pmd_offset(pud, addr);
 557
 558	/*
 559	 * Try a section mapping - end, addr and phys must all be aligned
 560	 * to a section boundary.  Note that PMDs refer to the individual
 561	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 562	 * up one logical pointer to an L2 table.
 563	 */
 564	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
 565		pmd_t *p = pmd;
 566
 567		if (addr & SECTION_SIZE)
 568			pmd++;
 569
 570		do {
 571			*pmd = __pmd(phys | type->prot_sect);
 572			phys += SECTION_SIZE;
 573		} while (pmd++, addr += SECTION_SIZE, addr != end);
 574
 575		flush_pmd_entry(p);
 576	} else {
 577		/*
 578		 * No need to loop; pte's aren't interested in the
 579		 * individual L1 entries.
 580		 */
 581		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
 582	}
 583}
 584
 585static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 586	unsigned long phys, const struct mem_type *type)
 587{
 588	pud_t *pud = pud_offset(pgd, addr);
 589	unsigned long next;
 590
 591	do {
 592		next = pud_addr_end(addr, end);
 593		alloc_init_section(pud, addr, next, phys, type);
 594		phys += next - addr;
 595	} while (pud++, addr = next, addr != end);
 596}
 597
 598static void __init create_36bit_mapping(struct map_desc *md,
 599					const struct mem_type *type)
 600{
 601	unsigned long addr, length, end;
 602	phys_addr_t phys;
 603	pgd_t *pgd;
 604
 605	addr = md->virtual;
 606	phys = __pfn_to_phys(md->pfn);
 607	length = PAGE_ALIGN(md->length);
 608
 609	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 610		printk(KERN_ERR "MM: CPU does not support supersection "
 611		       "mapping for 0x%08llx at 0x%08lx\n",
 612		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 613		return;
 614	}
 615
 616	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
 617	 *	Since domain assignments can in fact be arbitrary, the
  618	 *	'domain == 0' check below is required to ensure that ARMv6
 619	 *	supersections are only allocated for domain 0 regardless
 620	 *	of the actual domain assignments in use.
 621	 */
 622	if (type->domain) {
 623		printk(KERN_ERR "MM: invalid domain in supersection "
 624		       "mapping for 0x%08llx at 0x%08lx\n",
 625		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 626		return;
 627	}
 628
 629	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
 630		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
 631		       " at 0x%08lx invalid alignment\n",
 632		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 633		return;
 634	}
 635
 636	/*
 637	 * Shift bits [35:32] of address into bits [23:20] of PMD
 638	 * (See ARMv6 spec).
 639	 */
 640	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
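	/*
	 * Worked example (editor's note): with 4K pages (PAGE_SHIFT = 12),
	 * a supersection at physical 0x2_4000_0000 has md->pfn = 0x240000,
	 * so (md->pfn >> 20) & 0xF = 0x2, and 0x2 << 20 places the extended
	 * base address bits [35:32] into PMD bits [23:20].
	 */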
 641
 642	pgd = pgd_offset_k(addr);
 643	end = addr + length;
 644	do {
 645		pud_t *pud = pud_offset(pgd, addr);
 646		pmd_t *pmd = pmd_offset(pud, addr);
 647		int i;
 648
 649		for (i = 0; i < 16; i++)
 650			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
 651
 652		addr += SUPERSECTION_SIZE;
 653		phys += SUPERSECTION_SIZE;
 654		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 655	} while (addr != end);
 656}
 657
 658/*
 659 * Create the page directory entries and any necessary
 660 * page tables for the mapping specified by `md'.  We
 661 * are able to cope here with varying sizes and address
 662 * offsets, and we take full advantage of sections and
 663 * supersections.
 664 */
 665static void __init create_mapping(struct map_desc *md)
 666{
 667	unsigned long addr, length, end;
 668	phys_addr_t phys;
 669	const struct mem_type *type;
 670	pgd_t *pgd;
 671
 672	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 673		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
 674		       " at 0x%08lx in user region\n",
 675		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 676		return;
 677	}
 678
 679	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 680	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
 681		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
 682		       " at 0x%08lx overlaps vmalloc space\n",
 683		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 684	}
 685
 686	type = &mem_types[md->type];
 687
 688	/*
 689	 * Catch 36-bit addresses
 690	 */
 691	if (md->pfn >= 0x100000) {
 692		create_36bit_mapping(md, type);
 693		return;
 694	}
 695
 696	addr = md->virtual & PAGE_MASK;
 697	phys = __pfn_to_phys(md->pfn);
 698	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 699
 700	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 701		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
 702		       "be mapped using pages, ignoring.\n",
 703		       (long long)__pfn_to_phys(md->pfn), addr);
 704		return;
 705	}
 706
 707	pgd = pgd_offset_k(addr);
 708	end = addr + length;
 709	do {
 710		unsigned long next = pgd_addr_end(addr, end);
 711
 712		alloc_init_pud(pgd, addr, next, phys, type);
 713
 714		phys += next - addr;
 715		addr = next;
 716	} while (pgd++, addr != end);
 717}
 718
 719/*
 720 * Create the architecture specific mappings
 721 */
 722void __init iotable_init(struct map_desc *io_desc, int nr)
 723{
 724	int i;
 725
 726	for (i = 0; i < nr; i++)
 727		create_mapping(io_desc + i);
 728}
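/*
 * Usage sketch (editor's illustration for a hypothetical board, not part
 * of the original source): a machine's ->map_io callback typically hands
 * iotable_init() a static array of map_desc entries, e.g.
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= BOARD_IO_VIRT,
 *			.pfn		= __phys_to_pfn(BOARD_IO_PHYS),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init board_map_io(void)
 *	{
 *		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *	}
 *
 * BOARD_IO_VIRT and BOARD_IO_PHYS are placeholder addresses; each entry
 * is passed to create_mapping() above.
 */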
 729
 730static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
 731
 732/*
 733 * vmalloc=size forces the vmalloc area to be exactly 'size'
 734 * bytes. This can be used to increase (or decrease) the vmalloc
 735 * area - the default is 128m.
 736 */
 737static int __init early_vmalloc(char *arg)
 738{
 739	unsigned long vmalloc_reserve = memparse(arg, NULL);
 740
 741	if (vmalloc_reserve < SZ_16M) {
 742		vmalloc_reserve = SZ_16M;
 743		printk(KERN_WARNING
 744			"vmalloc area too small, limiting to %luMB\n",
 745			vmalloc_reserve >> 20);
 746	}
 747
 748	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
 749		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
 750		printk(KERN_WARNING
 751			"vmalloc area is too big, limiting to %luMB\n",
 752			vmalloc_reserve >> 20);
 753	}
 754
 755	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
 756	return 0;
 757}
 758early_param("vmalloc", early_vmalloc);
 759
 760static phys_addr_t lowmem_limit __initdata = 0;
 761
 762void __init sanity_check_meminfo(void)
 763{
 764	int i, j, highmem = 0;
 765
 766	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 767		struct membank *bank = &meminfo.bank[j];
 768		*bank = meminfo.bank[i];
 769
 770#ifdef CONFIG_HIGHMEM
 771		if (__va(bank->start) >= vmalloc_min ||
 772		    __va(bank->start) < (void *)PAGE_OFFSET)
 773			highmem = 1;
 774
 775		bank->highmem = highmem;
 776
 777		/*
 778		 * Split those memory banks which are partially overlapping
  779		 * the vmalloc area, greatly simplifying things later.
 780		 */
 781		if (__va(bank->start) < vmalloc_min &&
 782		    bank->size > vmalloc_min - __va(bank->start)) {
 783			if (meminfo.nr_banks >= NR_BANKS) {
 784				printk(KERN_CRIT "NR_BANKS too low, "
 785						 "ignoring high memory\n");
 786			} else {
 787				memmove(bank + 1, bank,
 788					(meminfo.nr_banks - i) * sizeof(*bank));
 789				meminfo.nr_banks++;
 790				i++;
 791				bank[1].size -= vmalloc_min - __va(bank->start);
 792				bank[1].start = __pa(vmalloc_min - 1) + 1;
 793				bank[1].highmem = highmem = 1;
 794				j++;
 795			}
 796			bank->size = vmalloc_min - __va(bank->start);
 797		}
 798#else
 799		bank->highmem = highmem;
 800
 801		/*
 802		 * Check whether this memory bank would entirely overlap
 803		 * the vmalloc area.
 804		 */
 805		if (__va(bank->start) >= vmalloc_min ||
 806		    __va(bank->start) < (void *)PAGE_OFFSET) {
 807			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
 808			       "(vmalloc region overlap).\n",
 809			       (unsigned long long)bank->start,
 810			       (unsigned long long)bank->start + bank->size - 1);
 811			continue;
 812		}
 813
 814		/*
 815		 * Check whether this memory bank would partially overlap
 816		 * the vmalloc area.
 817		 */
 818		if (__va(bank->start + bank->size) > vmalloc_min ||
 819		    __va(bank->start + bank->size) < __va(bank->start)) {
 820			unsigned long newsize = vmalloc_min - __va(bank->start);
 821			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
 822			       "to -%.8llx (vmalloc region overlap).\n",
 823			       (unsigned long long)bank->start,
 824			       (unsigned long long)bank->start + bank->size - 1,
 825			       (unsigned long long)bank->start + newsize - 1);
 826			bank->size = newsize;
 827		}
 828#endif
 829		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
 830			lowmem_limit = bank->start + bank->size;
 831
 832		j++;
 833	}
 834#ifdef CONFIG_HIGHMEM
 835	if (highmem) {
 836		const char *reason = NULL;
 837
 838		if (cache_is_vipt_aliasing()) {
 839			/*
 840			 * Interactions between kmap and other mappings
 841			 * make highmem support with aliasing VIPT caches
 842			 * rather difficult.
 843			 */
 844			reason = "with VIPT aliasing cache";
 845		}
 846		if (reason) {
 847			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
 848				reason);
 849			while (j > 0 && meminfo.bank[j - 1].highmem)
 850				j--;
 851		}
 852	}
 853#endif
 854	meminfo.nr_banks = j;
 855	memblock_set_current_limit(lowmem_limit);
 856}
 857
 858static inline void prepare_page_table(void)
 859{
 860	unsigned long addr;
 861	phys_addr_t end;
 862
 863	/*
 864	 * Clear out all the mappings below the kernel image.
 865	 */
 866	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 867		pmd_clear(pmd_off_k(addr));
 868
 869#ifdef CONFIG_XIP_KERNEL
 870	/* The XIP kernel is mapped in the module area -- skip over it */
 871	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
 872#endif
 873	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
 874		pmd_clear(pmd_off_k(addr));
 875
 876	/*
 877	 * Find the end of the first block of lowmem.
 878	 */
 879	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
 880	if (end >= lowmem_limit)
 881		end = lowmem_limit;
 882
 883	/*
 884	 * Clear out all the kernel space mappings, except for the first
 885	 * memory bank, up to the end of the vmalloc region.
 886	 */
 887	for (addr = __phys_to_virt(end);
 888	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 889		pmd_clear(pmd_off_k(addr));
 890}
 891
 892/*
 893 * Reserve the special regions of memory
 894 */
 895void __init arm_mm_memblock_reserve(void)
 896{
 897	/*
 898	 * Reserve the page tables.  These are already in use,
 899	 * and can only be in node 0.
 900	 */
 901	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
 902
 903#ifdef CONFIG_SA1111
 904	/*
 905	 * Because of the SA1111 DMA bug, we want to preserve our
 906	 * precious DMA-able memory...
 907	 */
 908	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 909#endif
 910}
 911
 912/*
  913 * Set up the device mappings.  Since we clear out the page tables for all
 914 * mappings above VMALLOC_END, we will remove any debug device mappings.
 915 * This means you have to be careful how you debug this function, or any
 916 * called function.  This means you can't use any function or debugging
 917 * method which may touch any device, otherwise the kernel _will_ crash.
 918 */
 919static void __init devicemaps_init(struct machine_desc *mdesc)
 920{
 921	struct map_desc map;
 922	unsigned long addr;
 923
 924	/*
 925	 * Allocate the vector page early.
 926	 */
 927	vectors_page = early_alloc(PAGE_SIZE);
 928
 929	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 930		pmd_clear(pmd_off_k(addr));
 931
 932	/*
 933	 * Map the kernel if it is XIP.
  934	 * It is always first in the module area.
 935	 */
 936#ifdef CONFIG_XIP_KERNEL
 937	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
 938	map.virtual = MODULES_VADDR;
 939	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 940	map.type = MT_ROM;
 941	create_mapping(&map);
 942#endif
 943
 944	/*
 945	 * Map the cache flushing regions.
 946	 */
 947#ifdef FLUSH_BASE
 948	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
 949	map.virtual = FLUSH_BASE;
 950	map.length = SZ_1M;
 951	map.type = MT_CACHECLEAN;
 952	create_mapping(&map);
 953#endif
 954#ifdef FLUSH_BASE_MINICACHE
 955	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
 956	map.virtual = FLUSH_BASE_MINICACHE;
 957	map.length = SZ_1M;
 958	map.type = MT_MINICLEAN;
 959	create_mapping(&map);
 960#endif
 961
 962	/*
 963	 * Create a mapping for the machine vectors at the high-vectors
 964	 * location (0xffff0000).  If we aren't using high-vectors, also
 965	 * create a mapping at the low-vectors virtual address.
 966	 */
 967	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 968	map.virtual = 0xffff0000;
 969	map.length = PAGE_SIZE;
 970	map.type = MT_HIGH_VECTORS;
 971	create_mapping(&map);
 972
 973	if (!vectors_high()) {
 974		map.virtual = 0;
 975		map.type = MT_LOW_VECTORS;
 976		create_mapping(&map);
 977	}
 978
 979	/*
 980	 * Ask the machine support to map in the statically mapped devices.
 981	 */
 982	if (mdesc->map_io)
 983		mdesc->map_io();
 984
 985	/*
 986	 * Finally flush the caches and tlb to ensure that we're in a
 987	 * consistent state wrt the writebuffer.  This also ensures that
 988	 * any write-allocated cache lines in the vector page are written
 989	 * back.  After this point, we can start to touch devices again.
 990	 */
 991	local_flush_tlb_all();
 992	flush_cache_all();
 993}
 994
 995static void __init kmap_init(void)
 996{
 997#ifdef CONFIG_HIGHMEM
 998	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 999		PKMAP_BASE, _PAGE_KERNEL_TABLE);
1000#endif
1001}
1002
1003static void __init map_lowmem(void)
1004{
1005	struct memblock_region *reg;
1006
1007	/* Map all the lowmem memory banks. */
1008	for_each_memblock(memory, reg) {
1009		phys_addr_t start = reg->base;
1010		phys_addr_t end = start + reg->size;
1011		struct map_desc map;
1012
1013		if (end > lowmem_limit)
1014			end = lowmem_limit;
1015		if (start >= end)
1016			break;
1017
1018		map.pfn = __phys_to_pfn(start);
1019		map.virtual = __phys_to_virt(start);
1020		map.length = end - start;
1021		map.type = MT_MEMORY;
1022
1023		create_mapping(&map);
1024	}
1025}
1026
1027/*
1028 * paging_init() sets up the page tables, initialises the zone memory
1029 * maps, and sets up the zero page, bad page and bad page tables.
1030 */
1031void __init paging_init(struct machine_desc *mdesc)
1032{
1033	void *zero_page;
1034
1035	memblock_set_current_limit(lowmem_limit);
1036
1037	build_mem_type_table();
1038	prepare_page_table();
1039	map_lowmem();
1040	devicemaps_init(mdesc);
1041	kmap_init();
1042
1043	top_pmd = pmd_off_k(0xffff0000);
1044
1045	/* allocate the zero page. */
1046	zero_page = early_alloc(PAGE_SIZE);
1047
1048	bootmem_init();
1049
1050	empty_zero_page = virt_to_page(zero_page);
1051	__flush_dcache_page(NULL, empty_zero_page);
1052}