v3.1
   1/*
   2 *  linux/arch/arm/mm/mmu.c
   3 *
   4 *  Copyright (C) 1995-2005 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/module.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/init.h>
  14#include <linux/mman.h>
  15#include <linux/nodemask.h>
  16#include <linux/memblock.h>
  17#include <linux/fs.h>
  18
  19#include <asm/cputype.h>
  20#include <asm/sections.h>
  21#include <asm/cachetype.h>
  22#include <asm/setup.h>
  23#include <asm/sizes.h>
  24#include <asm/smp_plat.h>
  25#include <asm/tlb.h>
  26#include <asm/highmem.h>
  27#include <asm/traps.h>
  28
  29#include <asm/mach/arch.h>
  30#include <asm/mach/map.h>
  31
  32#include "mm.h"
  33
  34/*
  35 * empty_zero_page is a special page that is used for
  36 * zero-initialized data and COW.
  37 */
  38struct page *empty_zero_page;
  39EXPORT_SYMBOL(empty_zero_page);
  40
  41/*
  42 * The pmd table for the upper-most set of pages.
  43 */
  44pmd_t *top_pmd;
  45
  46#define CPOLICY_UNCACHED	0
  47#define CPOLICY_BUFFERED	1
  48#define CPOLICY_WRITETHROUGH	2
  49#define CPOLICY_WRITEBACK	3
  50#define CPOLICY_WRITEALLOC	4
  51
  52static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
  53static unsigned int ecc_mask __initdata = 0;
  54pgprot_t pgprot_user;
  55pgprot_t pgprot_kernel;
  56
  57EXPORT_SYMBOL(pgprot_user);
  58EXPORT_SYMBOL(pgprot_kernel);
  59
  60struct cachepolicy {
  61	const char	policy[16];
  62	unsigned int	cr_mask;
  63	unsigned int	pmd;
  64	pteval_t	pte;
  65};
  66
  67static struct cachepolicy cache_policies[] __initdata = {
  68	{
  69		.policy		= "uncached",
  70		.cr_mask	= CR_W|CR_C,
  71		.pmd		= PMD_SECT_UNCACHED,
  72		.pte		= L_PTE_MT_UNCACHED,
  73	}, {
  74		.policy		= "buffered",
  75		.cr_mask	= CR_C,
  76		.pmd		= PMD_SECT_BUFFERED,
  77		.pte		= L_PTE_MT_BUFFERABLE,
  78	}, {
  79		.policy		= "writethrough",
  80		.cr_mask	= 0,
  81		.pmd		= PMD_SECT_WT,
  82		.pte		= L_PTE_MT_WRITETHROUGH,
  83	}, {
  84		.policy		= "writeback",
  85		.cr_mask	= 0,
  86		.pmd		= PMD_SECT_WB,
  87		.pte		= L_PTE_MT_WRITEBACK,
  88	}, {
  89		.policy		= "writealloc",
  90		.cr_mask	= 0,
  91		.pmd		= PMD_SECT_WBWA,
  92		.pte		= L_PTE_MT_WRITEALLOC,
  93	}
  94};
  95
  96/*
  97 * These are useful for identifying cache coherency
  98 * problems by allowing the cache or the cache and
  99 * writebuffer to be turned off.  (Note: the write
 100 * buffer should not be on and the cache off).
 101 */
 102static int __init early_cachepolicy(char *p)
 103{
 104	int i;
 105
 106	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 107		int len = strlen(cache_policies[i].policy);
 108
 109		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 110			cachepolicy = i;
 111			cr_alignment &= ~cache_policies[i].cr_mask;
 112			cr_no_alignment &= ~cache_policies[i].cr_mask;
 113			break;
 114		}
 115	}
 116	if (i == ARRAY_SIZE(cache_policies))
 117		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
 118	/*
 119	 * This restriction is partly to do with the way we boot; it is
 120	 * unpredictable to have memory mapped using two different sets of
 121	 * memory attributes (shared, type, and cache attribs).  We can not
 122	 * change these attributes once the initial assembly has setup the
 123	 * page tables.
 124	 */
 125	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
 126		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
 127		cachepolicy = CPOLICY_WRITEBACK;
 128	}
 129	flush_cache_all();
 130	set_cr(cr_alignment);
 131	return 0;
 132}
 133early_param("cachepolicy", early_cachepolicy);
 134
 135static int __init early_nocache(char *__unused)
 136{
 137	char *p = "buffered";
 138	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
 139	early_cachepolicy(p);
 140	return 0;
 141}
 142early_param("nocache", early_nocache);
 143
 144static int __init early_nowrite(char *__unused)
 145{
 146	char *p = "uncached";
 147	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
 148	early_cachepolicy(p);
 149	return 0;
 150}
 151early_param("nowb", early_nowrite);
 152
 153static int __init early_ecc(char *p)
 154{
 155	if (memcmp(p, "on", 2) == 0)
 156		ecc_mask = PMD_PROTECTION;
 157	else if (memcmp(p, "off", 3) == 0)
 158		ecc_mask = 0;
 159	return 0;
 160}
 161early_param("ecc", early_ecc);
 162
 163static int __init noalign_setup(char *__unused)
 164{
 165	cr_alignment &= ~CR_A;
 166	cr_no_alignment &= ~CR_A;
 167	set_cr(cr_alignment);
 168	return 1;
 169}
 170__setup("noalign", noalign_setup);
 171
 172#ifndef CONFIG_SMP
 173void adjust_cr(unsigned long mask, unsigned long set)
 174{
 175	unsigned long flags;
 176
 177	mask &= ~CR_A;
 178
 179	set &= mask;
 180
 181	local_irq_save(flags);
 182
 183	cr_no_alignment = (cr_no_alignment & ~mask) | set;
 184	cr_alignment = (cr_alignment & ~mask) | set;
 185
 186	set_cr((get_cr() & ~mask) | set);
 187
 188	local_irq_restore(flags);
 189}
 190#endif
 191
 192#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 193#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 194
 195static struct mem_type mem_types[] = {
 196	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 197		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 198				  L_PTE_SHARED,
 199		.prot_l1	= PMD_TYPE_TABLE,
 200		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 201		.domain		= DOMAIN_IO,
 202	},
 203	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 204		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 205		.prot_l1	= PMD_TYPE_TABLE,
 206		.prot_sect	= PROT_SECT_DEVICE,
 207		.domain		= DOMAIN_IO,
 208	},
 209	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
 210		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 211		.prot_l1	= PMD_TYPE_TABLE,
 212		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 213		.domain		= DOMAIN_IO,
  214	},
 215	[MT_DEVICE_WC] = {	/* ioremap_wc */
 216		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 217		.prot_l1	= PMD_TYPE_TABLE,
 218		.prot_sect	= PROT_SECT_DEVICE,
 219		.domain		= DOMAIN_IO,
 220	},
 221	[MT_UNCACHED] = {
 222		.prot_pte	= PROT_PTE_DEVICE,
 223		.prot_l1	= PMD_TYPE_TABLE,
 224		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 225		.domain		= DOMAIN_IO,
 226	},
 227	[MT_CACHECLEAN] = {
 228		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 229		.domain    = DOMAIN_KERNEL,
 230	},
 231	[MT_MINICLEAN] = {
 232		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 233		.domain    = DOMAIN_KERNEL,
 234	},
 235	[MT_LOW_VECTORS] = {
 236		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 237				L_PTE_RDONLY,
 238		.prot_l1   = PMD_TYPE_TABLE,
 239		.domain    = DOMAIN_USER,
 240	},
 241	[MT_HIGH_VECTORS] = {
 242		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 243				L_PTE_USER | L_PTE_RDONLY,
 244		.prot_l1   = PMD_TYPE_TABLE,
 245		.domain    = DOMAIN_USER,
 246	},
 247	[MT_MEMORY] = {
 248		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 249		.prot_l1   = PMD_TYPE_TABLE,
 250		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 251		.domain    = DOMAIN_KERNEL,
 252	},
 253	[MT_ROM] = {
 254		.prot_sect = PMD_TYPE_SECT,
 255		.domain    = DOMAIN_KERNEL,
 256	},
 257	[MT_MEMORY_NONCACHED] = {
 258		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 259				L_PTE_MT_BUFFERABLE,
 260		.prot_l1   = PMD_TYPE_TABLE,
 261		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 262		.domain    = DOMAIN_KERNEL,
 263	},
 264	[MT_MEMORY_DTCM] = {
 265		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 266				L_PTE_XN,
 267		.prot_l1   = PMD_TYPE_TABLE,
 268		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 269		.domain    = DOMAIN_KERNEL,
 270	},
 271	[MT_MEMORY_ITCM] = {
 272		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 273		.prot_l1   = PMD_TYPE_TABLE,
 274		.domain    = DOMAIN_KERNEL,
 275	},
 276};
 277
 278const struct mem_type *get_mem_type(unsigned int type)
 279{
 280	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 281}
 282EXPORT_SYMBOL(get_mem_type);
 283
 284/*
 285 * Adjust the PMD section entries according to the CPU in use.
 286 */
 287static void __init build_mem_type_table(void)
 288{
 289	struct cachepolicy *cp;
 290	unsigned int cr = get_cr();
 291	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
 292	int cpu_arch = cpu_architecture();
 293	int i;
 294
 295	if (cpu_arch < CPU_ARCH_ARMv6) {
 296#if defined(CONFIG_CPU_DCACHE_DISABLE)
 297		if (cachepolicy > CPOLICY_BUFFERED)
 298			cachepolicy = CPOLICY_BUFFERED;
 299#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
 300		if (cachepolicy > CPOLICY_WRITETHROUGH)
 301			cachepolicy = CPOLICY_WRITETHROUGH;
 302#endif
 303	}
 304	if (cpu_arch < CPU_ARCH_ARMv5) {
 305		if (cachepolicy >= CPOLICY_WRITEALLOC)
 306			cachepolicy = CPOLICY_WRITEBACK;
 307		ecc_mask = 0;
 308	}
 309	if (is_smp())
 310		cachepolicy = CPOLICY_WRITEALLOC;
 311
 312	/*
 313	 * Strip out features not present on earlier architectures.
 314	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
 315	 * without extended page tables don't have the 'Shared' bit.
 316	 */
 317	if (cpu_arch < CPU_ARCH_ARMv5)
 318		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 319			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
 320	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
 321		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 322			mem_types[i].prot_sect &= ~PMD_SECT_S;
 323
 324	/*
 325	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
 326	 * "update-able on write" bit on ARM610).  However, Xscale and
 327	 * Xscale3 require this bit to be cleared.
 328	 */
 329	if (cpu_is_xscale() || cpu_is_xsc3()) {
 330		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 331			mem_types[i].prot_sect &= ~PMD_BIT4;
 332			mem_types[i].prot_l1 &= ~PMD_BIT4;
 333		}
 334	} else if (cpu_arch < CPU_ARCH_ARMv6) {
 335		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 336			if (mem_types[i].prot_l1)
 337				mem_types[i].prot_l1 |= PMD_BIT4;
 338			if (mem_types[i].prot_sect)
 339				mem_types[i].prot_sect |= PMD_BIT4;
 340		}
 341	}
 342
 343	/*
 344	 * Mark the device areas according to the CPU/architecture.
 345	 */
 346	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
 347		if (!cpu_is_xsc3()) {
 348			/*
 349			 * Mark device regions on ARMv6+ as execute-never
 350			 * to prevent speculative instruction fetches.
 351			 */
 352			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
 353			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 354			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 355			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
 356		}
 357		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 358			/*
 359			 * For ARMv7 with TEX remapping,
 360			 * - shared device is SXCB=1100
 361			 * - nonshared device is SXCB=0100
 362			 * - write combine device mem is SXCB=0001
 363			 * (Uncached Normal memory)
 364			 */
 365			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
 366			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
 367			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 368		} else if (cpu_is_xsc3()) {
 369			/*
 370			 * For Xscale3,
 371			 * - shared device is TEXCB=00101
 372			 * - nonshared device is TEXCB=01000
 373			 * - write combine device mem is TEXCB=00100
 374			 * (Inner/Outer Uncacheable in xsc3 parlance)
 375			 */
 376			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
 377			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 378			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 379		} else {
 380			/*
 381			 * For ARMv6 and ARMv7 without TEX remapping,
 382			 * - shared device is TEXCB=00001
 383			 * - nonshared device is TEXCB=01000
 384			 * - write combine device mem is TEXCB=00100
 385			 * (Uncached Normal in ARMv6 parlance).
 386			 */
 387			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 388			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 389			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 390		}
 391	} else {
 392		/*
 393		 * On others, write combining is "Uncached/Buffered"
 394		 */
 395		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 396	}
 397
 398	/*
 399	 * Now deal with the memory-type mappings
 400	 */
 401	cp = &cache_policies[cachepolicy];
 402	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 403
 404	/*
 405	 * Only use write-through for non-SMP systems
 406	 */
 407	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
 408		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
 409
 410	/*
 411	 * Enable CPU-specific coherency if supported.
 412	 * (Only available on XSC3 at the moment.)
 413	 */
 414	if (arch_is_coherent() && cpu_is_xsc3()) {
 415		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 416		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 417		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 418		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 419	}
 420	/*
 421	 * ARMv6 and above have extended page tables.
 422	 */
 423	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 424		/*
 425		 * Mark cache clean areas and XIP ROM read only
 426		 * from SVC mode and no access from userspace.
 427		 */
 428		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 429		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 430		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 431
 432		if (is_smp()) {
 433			/*
 434			 * Mark memory with the "shared" attribute
 435			 * for SMP systems
 436			 */
 437			user_pgprot |= L_PTE_SHARED;
 438			kern_pgprot |= L_PTE_SHARED;
 439			vecs_pgprot |= L_PTE_SHARED;
 440			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 441			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 442			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 443			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 444			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
 445			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
 446			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 447			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
 448		}
 449	}
 450
 451	/*
 452	 * Non-cacheable Normal - intended for memory areas that must
 453	 * not cause dirty cache line writebacks when used
 454	 */
 455	if (cpu_arch >= CPU_ARCH_ARMv6) {
 456		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 457			/* Non-cacheable Normal is XCB = 001 */
 458			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
 459				PMD_SECT_BUFFERED;
 460		} else {
 461			/* For both ARMv6 and non-TEX-remapping ARMv7 */
 462			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
 463				PMD_SECT_TEX(1);
 464		}
 465	} else {
 466		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 467	}
 468
 469	for (i = 0; i < 16; i++) {
 470		unsigned long v = pgprot_val(protection_map[i]);
 471		protection_map[i] = __pgprot(v | user_pgprot);
 472	}
 473
 474	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 475	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 476
 477	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 478	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 479				 L_PTE_DIRTY | kern_pgprot);
 480
 481	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 482	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 483	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
 484	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
 485	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
 486	mem_types[MT_ROM].prot_sect |= cp->pmd;
 487
 488	switch (cp->pmd) {
 489	case PMD_SECT_WT:
 490		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
 491		break;
 492	case PMD_SECT_WB:
 493	case PMD_SECT_WBWA:
 494		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
 495		break;
 496	}
 497	printk("Memory policy: ECC %sabled, Data cache %s\n",
 498		ecc_mask ? "en" : "dis", cp->policy);
 499
 500	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 501		struct mem_type *t = &mem_types[i];
 502		if (t->prot_l1)
 503			t->prot_l1 |= PMD_DOMAIN(t->domain);
 504		if (t->prot_sect)
 505			t->prot_sect |= PMD_DOMAIN(t->domain);
 506	}
 507}
 508
 509#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 510pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 511			      unsigned long size, pgprot_t vma_prot)
 512{
 513	if (!pfn_valid(pfn))
 514		return pgprot_noncached(vma_prot);
 515	else if (file->f_flags & O_SYNC)
 516		return pgprot_writecombine(vma_prot);
 517	return vma_prot;
 518}
 519EXPORT_SYMBOL(phys_mem_access_prot);
 520#endif
 521
 522#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 523
 524static void __init *early_alloc(unsigned long sz)
 525{
 526	void *ptr = __va(memblock_alloc(sz, sz));
 527	memset(ptr, 0, sz);
 528	return ptr;
 529}
 530
 531static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 532{
 533	if (pmd_none(*pmd)) {
 534		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 535		__pmd_populate(pmd, __pa(pte), prot);
 536	}
 537	BUG_ON(pmd_bad(*pmd));
 538	return pte_offset_kernel(pmd, addr);
 539}
 540
 541static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 542				  unsigned long end, unsigned long pfn,
 543				  const struct mem_type *type)
 544{
 545	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
 546	do {
 547		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 548		pfn++;
 549	} while (pte++, addr += PAGE_SIZE, addr != end);
 550}
 551
 552static void __init alloc_init_section(pud_t *pud, unsigned long addr,
 553				      unsigned long end, phys_addr_t phys,
 554				      const struct mem_type *type)
 555{
 556	pmd_t *pmd = pmd_offset(pud, addr);
 557
 558	/*
 559	 * Try a section mapping - end, addr and phys must all be aligned
 560	 * to a section boundary.  Note that PMDs refer to the individual
 561	 * L1 entries, whereas PGDs refer to a group of L1 entries making
 562	 * up one logical pointer to an L2 table.
 563	 */
 564	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
 565		pmd_t *p = pmd;
 566
 567		if (addr & SECTION_SIZE)
 568			pmd++;
 569
 570		do {
 571			*pmd = __pmd(phys | type->prot_sect);
 572			phys += SECTION_SIZE;
 573		} while (pmd++, addr += SECTION_SIZE, addr != end);
 574
 575		flush_pmd_entry(p);
 576	} else {
 577		/*
 578		 * No need to loop; pte's aren't interested in the
 579		 * individual L1 entries.
 580		 */
 581		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
 582	}
 583}
 584
 585static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 586	unsigned long phys, const struct mem_type *type)
 587{
 588	pud_t *pud = pud_offset(pgd, addr);
 589	unsigned long next;
 590
 591	do {
 592		next = pud_addr_end(addr, end);
 593		alloc_init_section(pud, addr, next, phys, type);
 594		phys += next - addr;
 595	} while (pud++, addr = next, addr != end);
 596}
 597
 598static void __init create_36bit_mapping(struct map_desc *md,
 599					const struct mem_type *type)
 600{
 601	unsigned long addr, length, end;
 602	phys_addr_t phys;
 603	pgd_t *pgd;
 604
 605	addr = md->virtual;
 606	phys = __pfn_to_phys(md->pfn);
 607	length = PAGE_ALIGN(md->length);
 608
 609	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 610		printk(KERN_ERR "MM: CPU does not support supersection "
 611		       "mapping for 0x%08llx at 0x%08lx\n",
 612		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 613		return;
 614	}
 615
 616	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
 617	 *	Since domain assignments can in fact be arbitrary, the
 618	 *	'domain == 0' check below is required to insure that ARMv6
 619	 *	supersections are only allocated for domain 0 regardless
 620	 *	of the actual domain assignments in use.
 621	 */
 622	if (type->domain) {
 623		printk(KERN_ERR "MM: invalid domain in supersection "
 624		       "mapping for 0x%08llx at 0x%08lx\n",
 625		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 626		return;
 627	}
 628
 629	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
 630		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
 631		       " at 0x%08lx invalid alignment\n",
 632		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 633		return;
 634	}
 635
 636	/*
 637	 * Shift bits [35:32] of address into bits [23:20] of PMD
 638	 * (See ARMv6 spec).
 639	 */
 640	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
 641
 642	pgd = pgd_offset_k(addr);
 643	end = addr + length;
 644	do {
 645		pud_t *pud = pud_offset(pgd, addr);
 646		pmd_t *pmd = pmd_offset(pud, addr);
 647		int i;
 648
 649		for (i = 0; i < 16; i++)
 650			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);
 651
 652		addr += SUPERSECTION_SIZE;
 653		phys += SUPERSECTION_SIZE;
 654		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 655	} while (addr != end);
 656}
 657
 658/*
 659 * Create the page directory entries and any necessary
 660 * page tables for the mapping specified by `md'.  We
 661 * are able to cope here with varying sizes and address
 662 * offsets, and we take full advantage of sections and
 663 * supersections.
 664 */
 665static void __init create_mapping(struct map_desc *md)
 666{
 667	unsigned long addr, length, end;
 668	phys_addr_t phys;
 669	const struct mem_type *type;
 670	pgd_t *pgd;
 671
 672	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 673		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
 674		       " at 0x%08lx in user region\n",
 675		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 676		return;
 677	}
 678
 679	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 680	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
 681		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
 682		       " at 0x%08lx overlaps vmalloc space\n",
 683		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 684	}
 685
 686	type = &mem_types[md->type];
 687
 688	/*
 689	 * Catch 36-bit addresses
 690	 */
 691	if (md->pfn >= 0x100000) {
 692		create_36bit_mapping(md, type);
 693		return;
 694	}
 695
 696	addr = md->virtual & PAGE_MASK;
 697	phys = __pfn_to_phys(md->pfn);
 698	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 699
 700	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 701		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
 702		       "be mapped using pages, ignoring.\n",
 703		       (long long)__pfn_to_phys(md->pfn), addr);
 704		return;
 705	}
 706
 707	pgd = pgd_offset_k(addr);
 708	end = addr + length;
 709	do {
 710		unsigned long next = pgd_addr_end(addr, end);
 711
 712		alloc_init_pud(pgd, addr, next, phys, type);
 713
 714		phys += next - addr;
 715		addr = next;
 716	} while (pgd++, addr != end);
 717}
 718
 719/*
 720 * Create the architecture specific mappings
 721 */
 722void __init iotable_init(struct map_desc *io_desc, int nr)
 723{
 724	int i;
 725
 726	for (i = 0; i < nr; i++)
 727		create_mapping(io_desc + i);
 728}
 729
 730static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
 731
 732/*
 733 * vmalloc=size forces the vmalloc area to be exactly 'size'
 734 * bytes. This can be used to increase (or decrease) the vmalloc
 735 * area - the default is 128m.
 736 */
 737static int __init early_vmalloc(char *arg)
 738{
 739	unsigned long vmalloc_reserve = memparse(arg, NULL);
 740
 741	if (vmalloc_reserve < SZ_16M) {
 742		vmalloc_reserve = SZ_16M;
 743		printk(KERN_WARNING
 744			"vmalloc area too small, limiting to %luMB\n",
 745			vmalloc_reserve >> 20);
 746	}
 747
 748	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
 749		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
 750		printk(KERN_WARNING
 751			"vmalloc area is too big, limiting to %luMB\n",
 752			vmalloc_reserve >> 20);
 753	}
 754
 755	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
 756	return 0;
 757}
 758early_param("vmalloc", early_vmalloc);
 759
 760static phys_addr_t lowmem_limit __initdata = 0;
 761
 762void __init sanity_check_meminfo(void)
 763{
 764	int i, j, highmem = 0;
 765
 766	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 767		struct membank *bank = &meminfo.bank[j];
 768		*bank = meminfo.bank[i];
 769
 770#ifdef CONFIG_HIGHMEM
 771		if (__va(bank->start) >= vmalloc_min ||
 772		    __va(bank->start) < (void *)PAGE_OFFSET)
 773			highmem = 1;
 774
 775		bank->highmem = highmem;
 776
 777		/*
 778		 * Split those memory banks which are partially overlapping
 779		 * the vmalloc area greatly simplifying things later.
 780		 */
 781		if (__va(bank->start) < vmalloc_min &&
 782		    bank->size > vmalloc_min - __va(bank->start)) {
 783			if (meminfo.nr_banks >= NR_BANKS) {
 784				printk(KERN_CRIT "NR_BANKS too low, "
 785						 "ignoring high memory\n");
 786			} else {
 787				memmove(bank + 1, bank,
 788					(meminfo.nr_banks - i) * sizeof(*bank));
 789				meminfo.nr_banks++;
 790				i++;
 791				bank[1].size -= vmalloc_min - __va(bank->start);
 792				bank[1].start = __pa(vmalloc_min - 1) + 1;
 793				bank[1].highmem = highmem = 1;
 794				j++;
 795			}
 796			bank->size = vmalloc_min - __va(bank->start);
 797		}
 798#else
 799		bank->highmem = highmem;
 800
 801		/*
 802		 * Check whether this memory bank would entirely overlap
 803		 * the vmalloc area.
 804		 */
 805		if (__va(bank->start) >= vmalloc_min ||
 806		    __va(bank->start) < (void *)PAGE_OFFSET) {
 807			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
 808			       "(vmalloc region overlap).\n",
 809			       (unsigned long long)bank->start,
 810			       (unsigned long long)bank->start + bank->size - 1);
 811			continue;
 812		}
 813
 814		/*
 815		 * Check whether this memory bank would partially overlap
 816		 * the vmalloc area.
 817		 */
 818		if (__va(bank->start + bank->size) > vmalloc_min ||
 819		    __va(bank->start + bank->size) < __va(bank->start)) {
 820			unsigned long newsize = vmalloc_min - __va(bank->start);
 821			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
 822			       "to -%.8llx (vmalloc region overlap).\n",
 823			       (unsigned long long)bank->start,
 824			       (unsigned long long)bank->start + bank->size - 1,
 825			       (unsigned long long)bank->start + newsize - 1);
 826			bank->size = newsize;
 827		}
 828#endif
 829		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
 830			lowmem_limit = bank->start + bank->size;
 831
 832		j++;
 833	}
 834#ifdef CONFIG_HIGHMEM
 835	if (highmem) {
 836		const char *reason = NULL;
 837
 838		if (cache_is_vipt_aliasing()) {
 839			/*
 840			 * Interactions between kmap and other mappings
 841			 * make highmem support with aliasing VIPT caches
 842			 * rather difficult.
 843			 */
 844			reason = "with VIPT aliasing cache";
 845		}
 846		if (reason) {
 847			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
 848				reason);
 849			while (j > 0 && meminfo.bank[j - 1].highmem)
 850				j--;
 851		}
 852	}
 853#endif
 854	meminfo.nr_banks = j;
 855	memblock_set_current_limit(lowmem_limit);
 856}
 857
 858static inline void prepare_page_table(void)
 859{
 860	unsigned long addr;
 861	phys_addr_t end;
 862
 863	/*
 864	 * Clear out all the mappings below the kernel image.
 865	 */
 866	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
 867		pmd_clear(pmd_off_k(addr));
 868
 869#ifdef CONFIG_XIP_KERNEL
 870	/* The XIP kernel is mapped in the module area -- skip over it */
 871	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
 872#endif
 873	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
 874		pmd_clear(pmd_off_k(addr));
 875
 876	/*
 877	 * Find the end of the first block of lowmem.
 878	 */
 879	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
 880	if (end >= lowmem_limit)
 881		end = lowmem_limit;
 882
 883	/*
 884	 * Clear out all the kernel space mappings, except for the first
 885	 * memory bank, up to the end of the vmalloc region.
 886	 */
 887	for (addr = __phys_to_virt(end);
 888	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 889		pmd_clear(pmd_off_k(addr));
 890}
 891
 892/*
 893 * Reserve the special regions of memory
 894 */
 895void __init arm_mm_memblock_reserve(void)
 896{
 897	/*
 898	 * Reserve the page tables.  These are already in use,
 899	 * and can only be in node 0.
 900	 */
 901	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
 902
 903#ifdef CONFIG_SA1111
 904	/*
 905	 * Because of the SA1111 DMA bug, we want to preserve our
 906	 * precious DMA-able memory...
 907	 */
 908	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 909#endif
 910}
 911
 912/*
 913 * Set up device the mappings.  Since we clear out the page tables for all
 914 * mappings above VMALLOC_END, we will remove any debug device mappings.
 915 * This means you have to be careful how you debug this function, or any
 916 * called function.  This means you can't use any function or debugging
 917 * method which may touch any device, otherwise the kernel _will_ crash.
 918 */
 919static void __init devicemaps_init(struct machine_desc *mdesc)
 920{
 921	struct map_desc map;
 922	unsigned long addr;
 923
 924	/*
 925	 * Allocate the vector page early.
 926	 */
 927	vectors_page = early_alloc(PAGE_SIZE);
 928
 929	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 930		pmd_clear(pmd_off_k(addr));
 931
 932	/*
 933	 * Map the kernel if it is XIP.
 934	 * It is always first in the modulearea.
 935	 */
 936#ifdef CONFIG_XIP_KERNEL
 937	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
 938	map.virtual = MODULES_VADDR;
 939	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
 940	map.type = MT_ROM;
 941	create_mapping(&map);
 942#endif
 943
 944	/*
 945	 * Map the cache flushing regions.
 946	 */
 947#ifdef FLUSH_BASE
 948	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
 949	map.virtual = FLUSH_BASE;
 950	map.length = SZ_1M;
 951	map.type = MT_CACHECLEAN;
 952	create_mapping(&map);
 953#endif
 954#ifdef FLUSH_BASE_MINICACHE
 955	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
 956	map.virtual = FLUSH_BASE_MINICACHE;
 957	map.length = SZ_1M;
 958	map.type = MT_MINICLEAN;
 959	create_mapping(&map);
 960#endif
 961
 962	/*
 963	 * Create a mapping for the machine vectors at the high-vectors
 964	 * location (0xffff0000).  If we aren't using high-vectors, also
 965	 * create a mapping at the low-vectors virtual address.
 966	 */
 967	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 968	map.virtual = 0xffff0000;
 969	map.length = PAGE_SIZE;
 970	map.type = MT_HIGH_VECTORS;
 971	create_mapping(&map);
 972
 973	if (!vectors_high()) {
 974		map.virtual = 0;
 975		map.type = MT_LOW_VECTORS;
 976		create_mapping(&map);
 977	}
 978
 979	/*
 980	 * Ask the machine support to map in the statically mapped devices.
 981	 */
 982	if (mdesc->map_io)
 983		mdesc->map_io();
 984
 985	/*
 986	 * Finally flush the caches and tlb to ensure that we're in a
 987	 * consistent state wrt the writebuffer.  This also ensures that
 988	 * any write-allocated cache lines in the vector page are written
 989	 * back.  After this point, we can start to touch devices again.
 990	 */
 991	local_flush_tlb_all();
 992	flush_cache_all();
 993}
 994
 995static void __init kmap_init(void)
 996{
 997#ifdef CONFIG_HIGHMEM
 998	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 999		PKMAP_BASE, _PAGE_KERNEL_TABLE);
1000#endif
1001}
1002
1003static void __init map_lowmem(void)
1004{
1005	struct memblock_region *reg;
1006
1007	/* Map all the lowmem memory banks. */
1008	for_each_memblock(memory, reg) {
1009		phys_addr_t start = reg->base;
1010		phys_addr_t end = start + reg->size;
1011		struct map_desc map;
1012
1013		if (end > lowmem_limit)
1014			end = lowmem_limit;
1015		if (start >= end)
1016			break;
1017
1018		map.pfn = __phys_to_pfn(start);
1019		map.virtual = __phys_to_virt(start);
1020		map.length = end - start;
1021		map.type = MT_MEMORY;
1022
1023		create_mapping(&map);
1024	}
1025}
1026
1027/*
1028 * paging_init() sets up the page tables, initialises the zone memory
1029 * maps, and sets up the zero page, bad page and bad page tables.
1030 */
1031void __init paging_init(struct machine_desc *mdesc)
1032{
1033	void *zero_page;
1034
1035	memblock_set_current_limit(lowmem_limit);
1036
1037	build_mem_type_table();
1038	prepare_page_table();
1039	map_lowmem();
1040	devicemaps_init(mdesc);
1041	kmap_init();
1042
1043	top_pmd = pmd_off_k(0xffff0000);
1044
1045	/* allocate the zero page. */
1046	zero_page = early_alloc(PAGE_SIZE);
1047
1048	bootmem_init();
1049
1050	empty_zero_page = virt_to_page(zero_page);
1051	__flush_dcache_page(NULL, empty_zero_page);
1052}
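The v3.1 listing above ends with paging_init() wiring everything together: build_mem_type_table(), prepare_page_table(), map_lowmem(), devicemaps_init() and kmap_init(). For context, a machine port consumes this code through its ->map_io() callback, which hands a static struct map_desc table to iotable_init() so that create_mapping() installs the static device mappings. The sketch below is illustrative only; it is not part of mmu.c, and the BOARD_* names and addresses are made up:

	#include <linux/sizes.h>
	#include <asm/mach/map.h>

	#define BOARD_UART_PHYS	0x10009000	/* hypothetical SoC UART base */
	#define BOARD_UART_VIRT	0xf8009000	/* hypothetical fixed virtual address */

	static struct map_desc board_io_desc[] __initdata = {
		{
			.virtual	= BOARD_UART_VIRT,
			.pfn		= __phys_to_pfn(BOARD_UART_PHYS),
			.length		= SZ_4K,
			.type		= MT_DEVICE,	/* strongly ordered / shared device */
		},
	};

	/* Called from paging_init() -> devicemaps_init() via mdesc->map_io */
	static void __init board_map_io(void)
	{
		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
	}

Because devicemaps_init() clears every mapping above VMALLOC_END before calling ->map_io(), an early debug device must be re-established through such a table rather than relied on from the boot-time page tables, as the comment above devicemaps_init() warns.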
v4.10.11
   1/*
   2 *  linux/arch/arm/mm/mmu.c
   3 *
   4 *  Copyright (C) 1995-2005 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/module.h>
  11#include <linux/kernel.h>
  12#include <linux/errno.h>
  13#include <linux/init.h>
  14#include <linux/mman.h>
  15#include <linux/nodemask.h>
  16#include <linux/memblock.h>
  17#include <linux/fs.h>
  18#include <linux/vmalloc.h>
  19#include <linux/sizes.h>
  20
  21#include <asm/cp15.h>
  22#include <asm/cputype.h>
  23#include <asm/sections.h>
  24#include <asm/cachetype.h>
  25#include <asm/fixmap.h>
  26#include <asm/sections.h>
  27#include <asm/setup.h>
  28#include <asm/smp_plat.h>
  29#include <asm/tlb.h>
  30#include <asm/highmem.h>
  31#include <asm/system_info.h>
  32#include <asm/traps.h>
  33#include <asm/procinfo.h>
  34#include <asm/memory.h>
  35
  36#include <asm/mach/arch.h>
  37#include <asm/mach/map.h>
  38#include <asm/mach/pci.h>
  39#include <asm/fixmap.h>
  40
  41#include "fault.h"
  42#include "mm.h"
  43#include "tcm.h"
  44
  45/*
  46 * empty_zero_page is a special page that is used for
  47 * zero-initialized data and COW.
  48 */
  49struct page *empty_zero_page;
  50EXPORT_SYMBOL(empty_zero_page);
  51
  52/*
  53 * The pmd table for the upper-most set of pages.
  54 */
  55pmd_t *top_pmd;
  56
  57pmdval_t user_pmd_table = _PAGE_USER_TABLE;
  58
  59#define CPOLICY_UNCACHED	0
  60#define CPOLICY_BUFFERED	1
  61#define CPOLICY_WRITETHROUGH	2
  62#define CPOLICY_WRITEBACK	3
  63#define CPOLICY_WRITEALLOC	4
  64
  65static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
  66static unsigned int ecc_mask __initdata = 0;
  67pgprot_t pgprot_user;
  68pgprot_t pgprot_kernel;
  69pgprot_t pgprot_hyp_device;
  70pgprot_t pgprot_s2;
  71pgprot_t pgprot_s2_device;
  72
  73EXPORT_SYMBOL(pgprot_user);
  74EXPORT_SYMBOL(pgprot_kernel);
  75
  76struct cachepolicy {
  77	const char	policy[16];
  78	unsigned int	cr_mask;
  79	pmdval_t	pmd;
  80	pteval_t	pte;
  81	pteval_t	pte_s2;
  82};
  83
  84#ifdef CONFIG_ARM_LPAE
  85#define s2_policy(policy)	policy
  86#else
  87#define s2_policy(policy)	0
  88#endif
  89
  90static struct cachepolicy cache_policies[] __initdata = {
  91	{
  92		.policy		= "uncached",
  93		.cr_mask	= CR_W|CR_C,
  94		.pmd		= PMD_SECT_UNCACHED,
  95		.pte		= L_PTE_MT_UNCACHED,
  96		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
  97	}, {
  98		.policy		= "buffered",
  99		.cr_mask	= CR_C,
 100		.pmd		= PMD_SECT_BUFFERED,
 101		.pte		= L_PTE_MT_BUFFERABLE,
 102		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 103	}, {
 104		.policy		= "writethrough",
 105		.cr_mask	= 0,
 106		.pmd		= PMD_SECT_WT,
 107		.pte		= L_PTE_MT_WRITETHROUGH,
 108		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 109	}, {
 110		.policy		= "writeback",
 111		.cr_mask	= 0,
 112		.pmd		= PMD_SECT_WB,
 113		.pte		= L_PTE_MT_WRITEBACK,
 114		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 115	}, {
 116		.policy		= "writealloc",
 117		.cr_mask	= 0,
 118		.pmd		= PMD_SECT_WBWA,
 119		.pte		= L_PTE_MT_WRITEALLOC,
 120		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 121	}
 122};
 123
 124#ifdef CONFIG_CPU_CP15
 125static unsigned long initial_pmd_value __initdata = 0;
 126
 127/*
 128 * Initialise the cache_policy variable with the initial state specified
 129 * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
 130 * the C code sets the page tables up with the same policy as the head
 131 * assembly code, which avoids an illegal state where the TLBs can get
 132 * confused.  See comments in early_cachepolicy() for more information.
 133 */
 134void __init init_default_cache_policy(unsigned long pmd)
 135{
 136	int i;
 137
 138	initial_pmd_value = pmd;
 139
 140	pmd &= PMD_SECT_CACHE_MASK;
 141
 142	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
 143		if (cache_policies[i].pmd == pmd) {
 144			cachepolicy = i;
 145			break;
 146		}
 147
 148	if (i == ARRAY_SIZE(cache_policies))
 149		pr_err("ERROR: could not find cache policy\n");
 150}
 151
 152/*
 153 * These are useful for identifying cache coherency problems by allowing
 154 * the cache or the cache and writebuffer to be turned off.  (Note: the
 155 * write buffer should not be on and the cache off).
 156 */
 157static int __init early_cachepolicy(char *p)
 158{
 159	int i, selected = -1;
 160
 161	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
 162		int len = strlen(cache_policies[i].policy);
 163
 164		if (memcmp(p, cache_policies[i].policy, len) == 0) {
 165			selected = i;
 166			break;
 167		}
 168	}
 169
 170	if (selected == -1)
 171		pr_err("ERROR: unknown or unsupported cache policy\n");
 172
 173	/*
 174	 * This restriction is partly to do with the way we boot; it is
 175	 * unpredictable to have memory mapped using two different sets of
 176	 * memory attributes (shared, type, and cache attribs).  We can not
 177	 * change these attributes once the initial assembly has setup the
 178	 * page tables.
 179	 */
 180	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
 181		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
 182			cache_policies[cachepolicy].policy);
 183		return 0;
 184	}
 185
 186	if (selected != cachepolicy) {
 187		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
 188		cachepolicy = selected;
 189		flush_cache_all();
 190		set_cr(cr);
 191	}
 192	return 0;
 193}
 194early_param("cachepolicy", early_cachepolicy);
 195
 196static int __init early_nocache(char *__unused)
 197{
 198	char *p = "buffered";
 199	pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
 200	early_cachepolicy(p);
 201	return 0;
 202}
 203early_param("nocache", early_nocache);
 204
 205static int __init early_nowrite(char *__unused)
 206{
 207	char *p = "uncached";
 208	pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
 209	early_cachepolicy(p);
 210	return 0;
 211}
 212early_param("nowb", early_nowrite);
 213
 214#ifndef CONFIG_ARM_LPAE
 215static int __init early_ecc(char *p)
 216{
 217	if (memcmp(p, "on", 2) == 0)
 218		ecc_mask = PMD_PROTECTION;
 219	else if (memcmp(p, "off", 3) == 0)
 220		ecc_mask = 0;
 221	return 0;
 222}
 223early_param("ecc", early_ecc);
 224#endif
 225
 226#else /* ifdef CONFIG_CPU_CP15 */
 227
 228static int __init early_cachepolicy(char *p)
 229{
 230	pr_warn("cachepolicy kernel parameter not supported without cp15\n");
 231}
 232early_param("cachepolicy", early_cachepolicy);
 233
 234static int __init noalign_setup(char *__unused)
 235{
 236	pr_warn("noalign kernel parameter not supported without cp15\n");
 237}
 238__setup("noalign", noalign_setup);
 239
 240#endif /* ifdef CONFIG_CPU_CP15 / else */
 241
 242#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 243#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
 244#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 245
 246static struct mem_type mem_types[] __ro_after_init = {
 247	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 248		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 249				  L_PTE_SHARED,
 250		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
 251				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
 252				  L_PTE_SHARED,
 253		.prot_l1	= PMD_TYPE_TABLE,
 254		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
 255		.domain		= DOMAIN_IO,
 256	},
 257	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
 258		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
 259		.prot_l1	= PMD_TYPE_TABLE,
 260		.prot_sect	= PROT_SECT_DEVICE,
 261		.domain		= DOMAIN_IO,
 262	},
 263	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
 264		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
 265		.prot_l1	= PMD_TYPE_TABLE,
 266		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
 267		.domain		= DOMAIN_IO,
 268	},
 269	[MT_DEVICE_WC] = {	/* ioremap_wc */
 270		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
 271		.prot_l1	= PMD_TYPE_TABLE,
 272		.prot_sect	= PROT_SECT_DEVICE,
 273		.domain		= DOMAIN_IO,
 274	},
 275	[MT_UNCACHED] = {
 276		.prot_pte	= PROT_PTE_DEVICE,
 277		.prot_l1	= PMD_TYPE_TABLE,
 278		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
 279		.domain		= DOMAIN_IO,
 280	},
 281	[MT_CACHECLEAN] = {
 282		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 283		.domain    = DOMAIN_KERNEL,
 284	},
 285#ifndef CONFIG_ARM_LPAE
 286	[MT_MINICLEAN] = {
 287		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
 288		.domain    = DOMAIN_KERNEL,
 289	},
 290#endif
 291	[MT_LOW_VECTORS] = {
 292		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 293				L_PTE_RDONLY,
 294		.prot_l1   = PMD_TYPE_TABLE,
 295		.domain    = DOMAIN_VECTORS,
 296	},
 297	[MT_HIGH_VECTORS] = {
 298		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 299				L_PTE_USER | L_PTE_RDONLY,
 300		.prot_l1   = PMD_TYPE_TABLE,
 301		.domain    = DOMAIN_VECTORS,
 302	},
 303	[MT_MEMORY_RWX] = {
 304		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 305		.prot_l1   = PMD_TYPE_TABLE,
 306		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 307		.domain    = DOMAIN_KERNEL,
 308	},
 309	[MT_MEMORY_RW] = {
 310		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 311			     L_PTE_XN,
 312		.prot_l1   = PMD_TYPE_TABLE,
 313		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 314		.domain    = DOMAIN_KERNEL,
 315	},
 316	[MT_ROM] = {
 317		.prot_sect = PMD_TYPE_SECT,
 318		.domain    = DOMAIN_KERNEL,
 319	},
 320	[MT_MEMORY_RWX_NONCACHED] = {
 321		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 322				L_PTE_MT_BUFFERABLE,
 323		.prot_l1   = PMD_TYPE_TABLE,
 324		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 325		.domain    = DOMAIN_KERNEL,
 326	},
 327	[MT_MEMORY_RW_DTCM] = {
 328		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 329				L_PTE_XN,
 330		.prot_l1   = PMD_TYPE_TABLE,
 331		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
 332		.domain    = DOMAIN_KERNEL,
 333	},
 334	[MT_MEMORY_RWX_ITCM] = {
 335		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 336		.prot_l1   = PMD_TYPE_TABLE,
 337		.domain    = DOMAIN_KERNEL,
 338	},
 339	[MT_MEMORY_RW_SO] = {
 340		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 341				L_PTE_MT_UNCACHED | L_PTE_XN,
 342		.prot_l1   = PMD_TYPE_TABLE,
 343		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
 344				PMD_SECT_UNCACHED | PMD_SECT_XN,
 345		.domain    = DOMAIN_KERNEL,
 346	},
 347	[MT_MEMORY_DMA_READY] = {
 348		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 349				L_PTE_XN,
 350		.prot_l1   = PMD_TYPE_TABLE,
 351		.domain    = DOMAIN_KERNEL,
 352	},
 353};
 354
 355const struct mem_type *get_mem_type(unsigned int type)
 356{
 357	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 358}
 359EXPORT_SYMBOL(get_mem_type);
 360
 361static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
 362
 363static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
 364	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
 365
 366static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
 367{
 368	return &bm_pte[pte_index(addr)];
 369}
 370
 371static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
 372{
 373	return pte_offset_kernel(dir, addr);
 374}
 375
 376static inline pmd_t * __init fixmap_pmd(unsigned long addr)
 377{
 378	pgd_t *pgd = pgd_offset_k(addr);
 379	pud_t *pud = pud_offset(pgd, addr);
 380	pmd_t *pmd = pmd_offset(pud, addr);
 381
 382	return pmd;
 383}
 384
 385void __init early_fixmap_init(void)
 386{
 387	pmd_t *pmd;
 388
 389	/*
 390	 * The early fixmap range spans multiple pmds, for which
 391	 * we are not prepared:
 392	 */
 393	BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
 394		     != FIXADDR_TOP >> PMD_SHIFT);
 395
 396	pmd = fixmap_pmd(FIXADDR_TOP);
 397	pmd_populate_kernel(&init_mm, pmd, bm_pte);
 398
 399	pte_offset_fixmap = pte_offset_early_fixmap;
 400}
 401
 402/*
 403 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
 404 * As a result, this can only be called with preemption disabled, as under
 405 * stop_machine().
 406 */
 407void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 408{
 409	unsigned long vaddr = __fix_to_virt(idx);
 410	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 411
 412	/* Make sure fixmap region does not exceed available allocation. */
 413	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
 414		     FIXADDR_END);
 415	BUG_ON(idx >= __end_of_fixed_addresses);
 416
 417	if (pgprot_val(prot))
 418		set_pte_at(NULL, vaddr, pte,
 419			pfn_pte(phys >> PAGE_SHIFT, prot));
 420	else
 421		pte_clear(NULL, vaddr, pte);
 422	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
 423}
 424
 425/*
 426 * Adjust the PMD section entries according to the CPU in use.
 427 */
 428static void __init build_mem_type_table(void)
 429{
 430	struct cachepolicy *cp;
 431	unsigned int cr = get_cr();
 432	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
 433	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 434	int cpu_arch = cpu_architecture();
 435	int i;
 436
 437	if (cpu_arch < CPU_ARCH_ARMv6) {
 438#if defined(CONFIG_CPU_DCACHE_DISABLE)
 439		if (cachepolicy > CPOLICY_BUFFERED)
 440			cachepolicy = CPOLICY_BUFFERED;
 441#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
 442		if (cachepolicy > CPOLICY_WRITETHROUGH)
 443			cachepolicy = CPOLICY_WRITETHROUGH;
 444#endif
 445	}
 446	if (cpu_arch < CPU_ARCH_ARMv5) {
 447		if (cachepolicy >= CPOLICY_WRITEALLOC)
 448			cachepolicy = CPOLICY_WRITEBACK;
 449		ecc_mask = 0;
 450	}
 451
 452	if (is_smp()) {
 453		if (cachepolicy != CPOLICY_WRITEALLOC) {
 454			pr_warn("Forcing write-allocate cache policy for SMP\n");
 455			cachepolicy = CPOLICY_WRITEALLOC;
 456		}
 457		if (!(initial_pmd_value & PMD_SECT_S)) {
 458			pr_warn("Forcing shared mappings for SMP\n");
 459			initial_pmd_value |= PMD_SECT_S;
 460		}
 461	}
 462
 463	/*
 464	 * Strip out features not present on earlier architectures.
 465	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
 466	 * without extended page tables don't have the 'Shared' bit.
 467	 */
 468	if (cpu_arch < CPU_ARCH_ARMv5)
 469		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 470			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
 471	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
 472		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
 473			mem_types[i].prot_sect &= ~PMD_SECT_S;
 474
 475	/*
 476	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
 477	 * "update-able on write" bit on ARM610).  However, Xscale and
 478	 * Xscale3 require this bit to be cleared.
 479	 */
 480	if (cpu_is_xscale_family()) {
 481		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 482			mem_types[i].prot_sect &= ~PMD_BIT4;
 483			mem_types[i].prot_l1 &= ~PMD_BIT4;
 484		}
 485	} else if (cpu_arch < CPU_ARCH_ARMv6) {
 486		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 487			if (mem_types[i].prot_l1)
 488				mem_types[i].prot_l1 |= PMD_BIT4;
 489			if (mem_types[i].prot_sect)
 490				mem_types[i].prot_sect |= PMD_BIT4;
 491		}
 492	}
 493
 494	/*
 495	 * Mark the device areas according to the CPU/architecture.
 496	 */
 497	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
 498		if (!cpu_is_xsc3()) {
 499			/*
 500			 * Mark device regions on ARMv6+ as execute-never
 501			 * to prevent speculative instruction fetches.
 502			 */
 503			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
 504			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
 505			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
 506			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
 507
 508			/* Also setup NX memory mapping */
 509			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
 510		}
 511		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 512			/*
 513			 * For ARMv7 with TEX remapping,
 514			 * - shared device is SXCB=1100
 515			 * - nonshared device is SXCB=0100
 516			 * - write combine device mem is SXCB=0001
 517			 * (Uncached Normal memory)
 518			 */
 519			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
 520			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
 521			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 522		} else if (cpu_is_xsc3()) {
 523			/*
 524			 * For Xscale3,
 525			 * - shared device is TEXCB=00101
 526			 * - nonshared device is TEXCB=01000
 527			 * - write combine device mem is TEXCB=00100
 528			 * (Inner/Outer Uncacheable in xsc3 parlance)
 529			 */
 530			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
 531			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 532			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 533		} else {
 534			/*
 535			 * For ARMv6 and ARMv7 without TEX remapping,
 536			 * - shared device is TEXCB=00001
 537			 * - nonshared device is TEXCB=01000
 538			 * - write combine device mem is TEXCB=00100
 539			 * (Uncached Normal in ARMv6 parlance).
 540			 */
 541			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
 542			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
 543			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
 544		}
 545	} else {
 546		/*
 547		 * On others, write combining is "Uncached/Buffered"
 548		 */
 549		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
 550	}
 551
 552	/*
 553	 * Now deal with the memory-type mappings
 554	 */
 555	cp = &cache_policies[cachepolicy];
 556	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
 557	s2_pgprot = cp->pte_s2;
 558	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 559	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
 560
 561#ifndef CONFIG_ARM_LPAE
 562	/*
 563	 * We don't use domains on ARMv6 (since this causes problems with
 564	 * v6/v7 kernels), so we must use a separate memory type for user
 565	 * r/o, kernel r/w to map the vectors page.
 566	 */
 567	if (cpu_arch == CPU_ARCH_ARMv6)
 568		vecs_pgprot |= L_PTE_MT_VECTORS;
 569
 570	/*
 571	 * Check is it with support for the PXN bit
 572	 * in the Short-descriptor translation table format descriptors.
 573	 */
 574	if (cpu_arch == CPU_ARCH_ARMv7 &&
 575		(read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
 576		user_pmd_table |= PMD_PXNTABLE;
 577	}
 578#endif
 579
 580	/*
 581	 * ARMv6 and above have extended page tables.
 582	 */
 583	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
 584#ifndef CONFIG_ARM_LPAE
 585		/*
 586		 * Mark cache clean areas and XIP ROM read only
 587		 * from SVC mode and no access from userspace.
 588		 */
 589		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 590		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 591		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 592#endif
 593
 594		/*
 595		 * If the initial page tables were created with the S bit
 596		 * set, then we need to do the same here for the same
 597		 * reasons given in early_cachepolicy().
 598		 */
 599		if (initial_pmd_value & PMD_SECT_S) {
 600			user_pgprot |= L_PTE_SHARED;
 601			kern_pgprot |= L_PTE_SHARED;
 602			vecs_pgprot |= L_PTE_SHARED;
 603			s2_pgprot |= L_PTE_SHARED;
 604			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 605			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 606			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 607			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
 608			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
 609			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
 610			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
 611			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
 612			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
 613			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
 614			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
 615		}
 616	}
 617
 618	/*
 619	 * Non-cacheable Normal - intended for memory areas that must
 620	 * not cause dirty cache line writebacks when used
 621	 */
 622	if (cpu_arch >= CPU_ARCH_ARMv6) {
 623		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 624			/* Non-cacheable Normal is XCB = 001 */
 625			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 626				PMD_SECT_BUFFERED;
 627		} else {
 628			/* For both ARMv6 and non-TEX-remapping ARMv7 */
 629			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
 630				PMD_SECT_TEX(1);
 631		}
 632	} else {
 633		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
 634	}
 635
 636#ifdef CONFIG_ARM_LPAE
 637	/*
 638	 * Do not generate access flag faults for the kernel mappings.
 639	 */
 640	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 641		mem_types[i].prot_pte |= PTE_EXT_AF;
 642		if (mem_types[i].prot_sect)
 643			mem_types[i].prot_sect |= PMD_SECT_AF;
 644	}
 645	kern_pgprot |= PTE_EXT_AF;
 646	vecs_pgprot |= PTE_EXT_AF;
 647
 648	/*
 649	 * Set PXN for user mappings
 650	 */
 651	user_pgprot |= PTE_EXT_PXN;
 652#endif
 653
 654	for (i = 0; i < 16; i++) {
 655		pteval_t v = pgprot_val(protection_map[i]);
 656		protection_map[i] = __pgprot(v | user_pgprot);
 657	}
 658
 659	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
 660	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
 661
 662	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 663	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 664				 L_PTE_DIRTY | kern_pgprot);
 665	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
 666	pgprot_s2_device  = __pgprot(s2_device_pgprot);
 667	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 668
 669	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 670	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
 671	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
 672	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 673	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 674	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
 675	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 676	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 677	mem_types[MT_ROM].prot_sect |= cp->pmd;
 678
 679	switch (cp->pmd) {
 680	case PMD_SECT_WT:
 681		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
 682		break;
 683	case PMD_SECT_WB:
 684	case PMD_SECT_WBWA:
 685		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
 686		break;
 687	}
 688	pr_info("Memory policy: %sData cache %s\n",
 689		ecc_mask ? "ECC enabled, " : "", cp->policy);
 690
 691	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
 692		struct mem_type *t = &mem_types[i];
 693		if (t->prot_l1)
 694			t->prot_l1 |= PMD_DOMAIN(t->domain);
 695		if (t->prot_sect)
 696			t->prot_sect |= PMD_DOMAIN(t->domain);
 697	}
 698}
 699
 700#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 701pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 702			      unsigned long size, pgprot_t vma_prot)
 703{
 704	if (!pfn_valid(pfn))
 705		return pgprot_noncached(vma_prot);
 706	else if (file->f_flags & O_SYNC)
 707		return pgprot_writecombine(vma_prot);
 708	return vma_prot;
 709}
 710EXPORT_SYMBOL(phys_mem_access_prot);
 711#endif
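/*
 * Usage note (editorial, not part of the original source): this hook
 * selects the attributes used when userspace mmap()s physical memory,
 * e.g. through /dev/mem.  Roughly:
 *
 *   non-RAM (pfn_valid() false)  -> pgprot_noncached()
 *   RAM opened with O_SYNC       -> pgprot_writecombine()
 *   RAM without O_SYNC           -> cached, vma_prot unchanged
 */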
 712
 713#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 714
 715static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
 716{
 717	void *ptr = __va(memblock_alloc(sz, align));
 718	memset(ptr, 0, sz);
 719	return ptr;
 720}
 721
 722static void __init *early_alloc(unsigned long sz)
 723{
 724	return early_alloc_aligned(sz, sz);
 725}
 726
 727static void *__init late_alloc(unsigned long sz)
 728{
 729	void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
 730
 731	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
 732		BUG();
 733	return ptr;
 734}
 735
 736static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
 737				unsigned long prot,
 738				void *(*alloc)(unsigned long sz))
 739{
 740	if (pmd_none(*pmd)) {
 741		pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
 742		__pmd_populate(pmd, __pa(pte), prot);
 743	}
 744	BUG_ON(pmd_bad(*pmd));
 745	return pte_offset_kernel(pmd, addr);
 746}
 747
 748static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
 749				      unsigned long prot)
 750{
 751	return arm_pte_alloc(pmd, addr, prot, early_alloc);
 752}
 753
 754static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 755				  unsigned long end, unsigned long pfn,
 756				  const struct mem_type *type,
 757				  void *(*alloc)(unsigned long sz),
 758				  bool ng)
 759{
 760	pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
 761	do {
 762		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
 763			    ng ? PTE_EXT_NG : 0);
 764		pfn++;
 765	} while (pte++, addr += PAGE_SIZE, addr != end);
 766}
 767
 768static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 769			unsigned long end, phys_addr_t phys,
 770			const struct mem_type *type, bool ng)
 771{
 772	pmd_t *p = pmd;
 773
 774#ifndef CONFIG_ARM_LPAE
 775	/*
 776	 * In classic MMU format, puds and pmds are folded into
 777	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
 778	 * group of L1 entries making up one logical pointer to
 779	 * an L2 table (2MB), whereas PMDs refer to the individual
 780	 * L1 entries (1MB). Hence increment to get the correct
 781	 * offset for odd 1MB sections.
 782	 * (See arch/arm/include/asm/pgtable-2level.h)
 783	 */
 784	if (addr & SECTION_SIZE)
 785		pmd++;
 786#endif
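	/*
	 * Illustrative example (editorial): with the usual 1MB SECTION_SIZE
	 * of the 2-level format, a mapping at virtual address 0xc0100000 has
	 * bit 20 set, so it belongs in the second entry of the pmd pair
	 * covering 0xc0000000-0xc0200000, which the increment above selects.
	 */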
 787	do {
 788		*pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
 789		phys += SECTION_SIZE;
 790	} while (pmd++, addr += SECTION_SIZE, addr != end);
 791
 792	flush_pmd_entry(p);
 793}
 794
 795static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 796				      unsigned long end, phys_addr_t phys,
 797				      const struct mem_type *type,
 798				      void *(*alloc)(unsigned long sz), bool ng)
 799{
 800	pmd_t *pmd = pmd_offset(pud, addr);
 801	unsigned long next;
 802
 803	do {
 804		/*
 805		 * With LPAE, we must loop over the range to map
 806		 * each of its pmds.
 807		 */
 808		next = pmd_addr_end(addr, end);
 809
 810		/*
 811		 * Try a section mapping - addr, next and phys must all be
 812		 * aligned to a section boundary.
 813		 */
 814		if (type->prot_sect &&
 815				((addr | next | phys) & ~SECTION_MASK) == 0) {
 816			__map_init_section(pmd, addr, next, phys, type, ng);
 817		} else {
 818			alloc_init_pte(pmd, addr, next,
 819				       __phys_to_pfn(phys), type, alloc, ng);
 820		}
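		/*
		 * Editorial sketch of the decision above: a lowmem chunk such
		 * as 0xc0000000-0xc0200000 backed by section-aligned physical
		 * memory takes the section path, whereas a mapping whose
		 * start, end or physical base is not 1MB aligned (a 4KB
		 * device window, say) falls back to a second-level table.
		 */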
 821
 822		phys += next - addr;
 823
 824	} while (pmd++, addr = next, addr != end);
 825}
 826
 827static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
 828				  unsigned long end, phys_addr_t phys,
 829				  const struct mem_type *type,
 830				  void *(*alloc)(unsigned long sz), bool ng)
 831{
 832	pud_t *pud = pud_offset(pgd, addr);
 833	unsigned long next;
 834
 835	do {
 836		next = pud_addr_end(addr, end);
 837		alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
 838		phys += next - addr;
 839	} while (pud++, addr = next, addr != end);
 840}
 841
 842#ifndef CONFIG_ARM_LPAE
 843static void __init create_36bit_mapping(struct mm_struct *mm,
 844					struct map_desc *md,
 845					const struct mem_type *type,
 846					bool ng)
 847{
 848	unsigned long addr, length, end;
 849	phys_addr_t phys;
 850	pgd_t *pgd;
 851
 852	addr = md->virtual;
 853	phys = __pfn_to_phys(md->pfn);
 854	length = PAGE_ALIGN(md->length);
 855
 856	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
 857		pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
 858		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 859		return;
 860	}
 861
 862	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
 863	 *	Since domain assignments can in fact be arbitrary, the
 864	 *	'domain == 0' check below is required to ensure that ARMv6
 865	 *	supersections are only allocated for domain 0 regardless
 866	 *	of the actual domain assignments in use.
 867	 */
 868	if (type->domain) {
 869		pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
 870		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 871		return;
 872	}
 873
 874	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
 875		pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
 876		       (long long)__pfn_to_phys((u64)md->pfn), addr);
 877		return;
 878	}
 879
 880	/*
 881	 * Shift bits [35:32] of address into bits [23:20] of PMD
 882	 * (See ARMv6 spec).
 883	 */
 884	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
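	/*
	 * Worked example (editorial, assuming the usual PAGE_SHIFT of 12):
	 * pfn 0x180000 corresponds to physical address 0x1_8000_0000, and
	 * (0x180000 >> 20) & 0xF = 0x1; shifted left by 20 this places
	 * physical address bits [35:32] into PMD bits [23:20], as the
	 * supersection descriptor format requires.
	 */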
 885
 886	pgd = pgd_offset(mm, addr);
 887	end = addr + length;
 888	do {
 889		pud_t *pud = pud_offset(pgd, addr);
 890		pmd_t *pmd = pmd_offset(pud, addr);
 891		int i;
 892
 893		for (i = 0; i < 16; i++)
 894			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
 895				       (ng ? PMD_SECT_nG : 0));
 896
 897		addr += SUPERSECTION_SIZE;
 898		phys += SUPERSECTION_SIZE;
 899		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
 900	} while (addr != end);
 901}
 902#endif	/* !CONFIG_ARM_LPAE */
 903
 904static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
 905				    void *(*alloc)(unsigned long sz),
 906				    bool ng)
 907{
 908	unsigned long addr, length, end;
 909	phys_addr_t phys;
 910	const struct mem_type *type;
 911	pgd_t *pgd;
 912
 913	type = &mem_types[md->type];
 914
 915#ifndef CONFIG_ARM_LPAE
 916	/*
 917	 * Catch 36-bit addresses
 918	 */
 919	if (md->pfn >= 0x100000) {
 920		create_36bit_mapping(mm, md, type, ng);
 921		return;
 922	}
 923#endif
 924
 925	addr = md->virtual & PAGE_MASK;
 926	phys = __pfn_to_phys(md->pfn);
 927	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 928
 929	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
 930		pr_warn("BUG: map for 0x%08llx at 0x%08lx cannot be mapped using pages, ignoring.\n",
 931			(long long)__pfn_to_phys(md->pfn), addr);
 932		return;
 933	}
 934
 935	pgd = pgd_offset(mm, addr);
 936	end = addr + length;
 937	do {
 938		unsigned long next = pgd_addr_end(addr, end);
 939
 940		alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
 941
 942		phys += next - addr;
 943		addr = next;
 944	} while (pgd++, addr != end);
 945}
 946
 947/*
 948 * Create the page directory entries and any necessary
 949 * page tables for the mapping specified by `md'.  We
 950 * are able to cope here with varying sizes and address
 951 * offsets, and we take full advantage of sections and
 952 * supersections.
 953 */
 954static void __init create_mapping(struct map_desc *md)
 955{
 956	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
 957		pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
 958			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 959		return;
 960	}
 961
 962	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
 963	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 964	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 965		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 966			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
 967	}
 968
 969	__create_mapping(&init_mm, md, early_alloc, false);
 970}
 971
 972void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
 973				bool ng)
 974{
 975#ifdef CONFIG_ARM_LPAE
 976	pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
 977	if (WARN_ON(!pud))
 978		return;
 979	pmd_alloc(mm, pud, 0);
 980#endif
 981	__create_mapping(mm, md, late_alloc, ng);
 982}
 983
 984/*
 985 * Create the architecture specific mappings
 986 */
 987void __init iotable_init(struct map_desc *io_desc, int nr)
 988{
 989	struct map_desc *md;
 990	struct vm_struct *vm;
 991	struct static_vm *svm;
 992
 993	if (!nr)
 994		return;
 995
 996	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 997
 998	for (md = io_desc; nr; md++, nr--) {
 999		create_mapping(md);
1000
1001		vm = &svm->vm;
1002		vm->addr = (void *)(md->virtual & PAGE_MASK);
1003		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
1004		vm->phys_addr = __pfn_to_phys(md->pfn);
1005		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
1006		vm->flags |= VM_ARM_MTYPE(md->type);
1007		vm->caller = iotable_init;
1008		add_static_vm_early(svm++);
1009	}
1010}
1011
1012void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
1013				  void *caller)
1014{
1015	struct vm_struct *vm;
1016	struct static_vm *svm;
1017
1018	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
1019
1020	vm = &svm->vm;
1021	vm->addr = (void *)addr;
1022	vm->size = size;
1023	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
1024	vm->caller = caller;
1025	add_static_vm_early(svm);
1026}
1027
1028#ifndef CONFIG_ARM_LPAE
1029
1030/*
1031 * The Linux PMD is made of two consecutive section entries covering 2MB
1032 * (see definition in include/asm/pgtable-2level.h).  However a call to
1033 * create_mapping() may optimize static mappings by using individual
1034 * 1MB section mappings.  This leaves the actual PMD potentially half
1035 * initialized if the top or bottom section entry isn't used, leaving it
1036 * open to problems if a subsequent ioremap() or vmalloc() tries to use
1037 * the virtual space left free by that unused section entry.
1038 *
1039 * Let's avoid the issue by inserting dummy vm entries covering the unused
1040 * PMD halves once the static mappings are in place.
1041 */
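/*
 * Example (editorial): a single 1MB static mapping at 0xf0100000 only fills
 * the second section entry of the 2MB Linux PMD covering
 * 0xf0000000-0xf0200000; the helpers below reserve the unused 1MB of virtual
 * space so that a later ioremap() or vmalloc() cannot be handed it.
 */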
1042
1043static void __init pmd_empty_section_gap(unsigned long addr)
1044{
1045	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
1046}
1047
1048static void __init fill_pmd_gaps(void)
1049{
1050	struct static_vm *svm;
1051	struct vm_struct *vm;
1052	unsigned long addr, next = 0;
1053	pmd_t *pmd;
1054
1055	list_for_each_entry(svm, &static_vmlist, list) {
1056		vm = &svm->vm;
1057		addr = (unsigned long)vm->addr;
1058		if (addr < next)
1059			continue;
1060
1061		/*
1062		 * Check if this vm starts on an odd section boundary.
1063		 * If so and the first section entry for this PMD is free
1064		 * then we block the corresponding virtual address.
1065		 */
1066		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1067			pmd = pmd_off_k(addr);
1068			if (pmd_none(*pmd))
1069				pmd_empty_section_gap(addr & PMD_MASK);
1070		}
1071
1072		/*
1073		 * Then check if this vm ends on an odd section boundary.
1074		 * If so and the second section entry for this PMD is empty
1075		 * then we block the corresponding virtual address.
1076		 */
1077		addr += vm->size;
1078		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1079			pmd = pmd_off_k(addr) + 1;
1080			if (pmd_none(*pmd))
1081				pmd_empty_section_gap(addr);
1082		}
1083
1084		/* no need to look at any vm entry until we hit the next PMD */
1085		next = (addr + PMD_SIZE - 1) & PMD_MASK;
1086	}
1087}
1088
1089#else
1090#define fill_pmd_gaps() do { } while (0)
1091#endif
1092
1093#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
1094static void __init pci_reserve_io(void)
1095{
1096	struct static_vm *svm;
1097
1098	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
1099	if (svm)
1100		return;
1101
1102	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
1103}
1104#else
1105#define pci_reserve_io() do { } while (0)
1106#endif
1107
1108#ifdef CONFIG_DEBUG_LL
1109void __init debug_ll_io_init(void)
1110{
1111	struct map_desc map;
1112
1113	debug_ll_addr(&map.pfn, &map.virtual);
1114	if (!map.pfn || !map.virtual)
1115		return;
1116	map.pfn = __phys_to_pfn(map.pfn);
1117	map.virtual &= PAGE_MASK;
1118	map.length = PAGE_SIZE;
1119	map.type = MT_DEVICE;
1120	iotable_init(&map, 1);
1121}
1122#endif
1123
1124static void * __initdata vmalloc_min =
1125	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1126
1127/*
1128 * vmalloc=size forces the vmalloc area to be exactly 'size'
1129 * bytes. This can be used to increase (or decrease) the vmalloc
1130 * area - the default is 240MB.
1131 */
1132static int __init early_vmalloc(char *arg)
1133{
1134	unsigned long vmalloc_reserve = memparse(arg, NULL);
1135
1136	if (vmalloc_reserve < SZ_16M) {
1137		vmalloc_reserve = SZ_16M;
1138		pr_warn("vmalloc area too small, limiting to %luMB\n",
1139			vmalloc_reserve >> 20);
1140	}
1141
1142	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
1143		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
1144		pr_warn("vmalloc area is too big, limiting to %luMB\n",
1145			vmalloc_reserve >> 20);
1146	}
1147
1148	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
1149	return 0;
1150}
1151early_param("vmalloc", early_vmalloc);
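/*
 * Example usage (editorial): booting with "vmalloc=384M" on the kernel
 * command line grows the vmalloc area to 384MB, which in turn lowers the
 * lowmem/highmem boundary derived from vmalloc_min below.
 */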
1152
1153phys_addr_t arm_lowmem_limit __initdata = 0;
1154
1155void __init sanity_check_meminfo(void)
1156{
1157	phys_addr_t memblock_limit = 0;
1158	int highmem = 0;
1159	u64 vmalloc_limit;
1160	struct memblock_region *reg;
1161	bool should_use_highmem = false;
1162
1163	/*
1164	 * Let's use our own (unoptimized) equivalent of __pa() that is
1165	 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
1166	 * The result is used as the upper bound on physical memory address
1167	 * and may itself be outside the valid range for which phys_addr_t
1168	 * and therefore __pa() is defined.
1169	 */
1170	vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
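	/*
	 * Worked example (editorial, illustrative values only): with
	 * vmalloc_min = 0xef800000, PAGE_OFFSET = 0xc0000000 and
	 * PHYS_OFFSET = 0x80000000, vmalloc_limit works out to 0xaf800000,
	 * the highest physical address that lowmem is allowed to reach.
	 */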
1171
1172	for_each_memblock(memory, reg) {
1173		phys_addr_t block_start = reg->base;
1174		phys_addr_t block_end = reg->base + reg->size;
1175		phys_addr_t size_limit = reg->size;
1176
1177		if (reg->base >= vmalloc_limit)
1178			highmem = 1;
1179		else
1180			size_limit = vmalloc_limit - reg->base;
1181
1182
1183		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
1184
1185			if (highmem) {
1186				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1187					  &block_start, &block_end);
1188				memblock_remove(reg->base, reg->size);
1189				should_use_highmem = true;
1190				continue;
1191			}
1192
1193			if (reg->size > size_limit) {
1194				phys_addr_t overlap_size = reg->size - size_limit;
1195
1196				pr_notice("Truncating RAM at %pa-%pa",
1197					  &block_start, &block_end);
1198				block_end = vmalloc_limit;
1199				pr_cont(" to -%pa", &block_end);
1200				memblock_remove(vmalloc_limit, overlap_size);
1201				should_use_highmem = true;
1202			}
1203		}
1204
1205		if (!highmem) {
1206			if (block_end > arm_lowmem_limit) {
1207				if (reg->size > size_limit)
1208					arm_lowmem_limit = vmalloc_limit;
1209				else
1210					arm_lowmem_limit = block_end;
1211			}
1212
1213			/*
1214			 * Find the first non-pmd-aligned page, and point
1215			 * memblock_limit at it. This relies on rounding the
1216			 * limit down to be pmd-aligned, which happens at the
1217			 * end of this function.
1218			 *
1219			 * With this algorithm, the start or end of almost any
1220			 * bank can be non-pmd-aligned. The only exception is
1221			 * that the start of bank 0 must be section-
1222			 * aligned, since otherwise memory would need to be
1223			 * allocated when mapping the start of bank 0, which
1224			 * occurs before any free memory is mapped.
1225			 */
1226			if (!memblock_limit) {
1227				if (!IS_ALIGNED(block_start, PMD_SIZE))
1228					memblock_limit = block_start;
1229				else if (!IS_ALIGNED(block_end, PMD_SIZE))
1230					memblock_limit = arm_lowmem_limit;
1231			}
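			/*
			 * Example (editorial): a bank spanning
			 * 0x80000000-0xbff00000 starts pmd-aligned but ends
			 * mid-pmd, so memblock_limit becomes arm_lowmem_limit
			 * and is rounded down to a 2MB boundary at the end of
			 * this function.
			 */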
1232
1233		}
1234	}
1235
1236	if (should_use_highmem)
1237		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1238
1239	high_memory = __va(arm_lowmem_limit - 1) + 1;
1240
1241	/*
1242	 * Round the memblock limit down to a pmd size.  This
1243	 * helps to ensure that we will allocate memory from the
1244	 * last full pmd, which should be mapped.
1245	 */
1246	if (memblock_limit)
1247		memblock_limit = round_down(memblock_limit, PMD_SIZE);
1248	if (!memblock_limit)
1249		memblock_limit = arm_lowmem_limit;
1250
1251	memblock_set_current_limit(memblock_limit);
1252}
1253
1254static inline void prepare_page_table(void)
1255{
1256	unsigned long addr;
1257	phys_addr_t end;
1258
1259	/*
1260	 * Clear out all the mappings below the kernel image.
1261	 */
1262	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1263		pmd_clear(pmd_off_k(addr));
1264
1265#ifdef CONFIG_XIP_KERNEL
1266	/* The XIP kernel is mapped in the module area -- skip over it */
1267	addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
1268#endif
1269	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1270		pmd_clear(pmd_off_k(addr));
1271
1272	/*
1273	 * Find the end of the first block of lowmem.
1274	 */
1275	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1276	if (end >= arm_lowmem_limit)
1277		end = arm_lowmem_limit;
1278
1279	/*
1280	 * Clear out all the kernel space mappings, except for the first
1281	 * memory bank, up to the vmalloc region.
1282	 */
1283	for (addr = __phys_to_virt(end);
1284	     addr < VMALLOC_START; addr += PMD_SIZE)
1285		pmd_clear(pmd_off_k(addr));
1286}
1287
1288#ifdef CONFIG_ARM_LPAE
1289/* the first page is reserved for pgd */
1290#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
1291				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1292#else
1293#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
1294#endif
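/*
 * Size check (editorial): with the classic 2-level format this is the
 * familiar 16KB swapper_pg_dir (2048 pgd entries of 8 bytes each); with
 * LPAE it is 20KB - one page reserved for the 4-entry top-level pgd plus
 * 16KB of pmd tables.
 */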
1295
1296/*
1297 * Reserve the special regions of memory
1298 */
1299void __init arm_mm_memblock_reserve(void)
1300{
1301	/*
1302	 * Reserve the page tables.  These are already in use,
1303	 * and can only be in node 0.
1304	 */
1305	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1306
1307#ifdef CONFIG_SA1111
1308	/*
1309	 * Because of the SA1111 DMA bug, we want to preserve our
1310	 * precious DMA-able memory...
1311	 */
1312	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1313#endif
1314}
1315
1316/*
1317 * Set up the device mappings.  Since we clear out the page tables for all
1318 * mappings above VMALLOC_START, except early fixmap, we might remove debug
1319 * device mappings.  This means earlycon can be used to debug this function.
1320 * Any other function or debugging method which may touch any device _will_
1321 * crash the kernel.
1322 */
1323static void __init devicemaps_init(const struct machine_desc *mdesc)
1324{
1325	struct map_desc map;
1326	unsigned long addr;
1327	void *vectors;
1328
1329	/*
1330	 * Allocate the vector page early.
1331	 */
1332	vectors = early_alloc(PAGE_SIZE * 2);
1333
1334	early_trap_init(vectors);
1335
1336	/*
1337	 * Clear page table except top pmd used by early fixmaps
1338	 */
1339	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
1340		pmd_clear(pmd_off_k(addr));
1341
1342	/*
1343	 * Map the kernel if it is XIP.
1344	 * It is always first in the module area.
1345	 */
1346#ifdef CONFIG_XIP_KERNEL
1347	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1348	map.virtual = MODULES_VADDR;
1349	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1350	map.type = MT_ROM;
1351	create_mapping(&map);
1352#endif
1353
1354	/*
1355	 * Map the cache flushing regions.
1356	 */
1357#ifdef FLUSH_BASE
1358	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1359	map.virtual = FLUSH_BASE;
1360	map.length = SZ_1M;
1361	map.type = MT_CACHECLEAN;
1362	create_mapping(&map);
1363#endif
1364#ifdef FLUSH_BASE_MINICACHE
1365	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1366	map.virtual = FLUSH_BASE_MINICACHE;
1367	map.length = SZ_1M;
1368	map.type = MT_MINICLEAN;
1369	create_mapping(&map);
1370#endif
1371
1372	/*
1373	 * Create a mapping for the machine vectors at the high-vectors
1374	 * location (0xffff0000).  If we aren't using high-vectors, also
1375	 * create a mapping at the low-vectors virtual address.
1376	 */
1377	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1378	map.virtual = 0xffff0000;
1379	map.length = PAGE_SIZE;
1380#ifdef CONFIG_KUSER_HELPERS
1381	map.type = MT_HIGH_VECTORS;
1382#else
1383	map.type = MT_LOW_VECTORS;
1384#endif
1385	create_mapping(&map);
1386
1387	if (!vectors_high()) {
1388		map.virtual = 0;
1389		map.length = PAGE_SIZE * 2;
1390		map.type = MT_LOW_VECTORS;
1391		create_mapping(&map);
1392	}
1393
1394	/* Now create a kernel read-only mapping */
1395	map.pfn += 1;
1396	map.virtual = 0xffff0000 + PAGE_SIZE;
1397	map.length = PAGE_SIZE;
1398	map.type = MT_LOW_VECTORS;
1399	create_mapping(&map);
1400
1401	/*
1402	 * Ask the machine support to map in the statically mapped devices.
1403	 */
1404	if (mdesc->map_io)
1405		mdesc->map_io();
1406	else
1407		debug_ll_io_init();
1408	fill_pmd_gaps();
1409
1410	/* Reserve fixed i/o space in VMALLOC region */
1411	pci_reserve_io();
1412
1413	/*
1414	 * Finally flush the caches and tlb to ensure that we're in a
1415	 * consistent state wrt the writebuffer.  This also ensures that
1416	 * any write-allocated cache lines in the vector page are written
1417	 * back.  After this point, we can start to touch devices again.
1418	 */
1419	local_flush_tlb_all();
1420	flush_cache_all();
1421
1422	/* Enable asynchronous aborts */
1423	early_abt_enable();
1424}
1425
1426static void __init kmap_init(void)
1427{
1428#ifdef CONFIG_HIGHMEM
1429	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1430		PKMAP_BASE, _PAGE_KERNEL_TABLE);
1431#endif
1432
1433	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
1434			_PAGE_KERNEL_TABLE);
1435}
1436
1437static void __init map_lowmem(void)
1438{
1439	struct memblock_region *reg;
1440#ifdef CONFIG_XIP_KERNEL
1441	phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
1442#else
1443	phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
1444#endif
1445	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
1446
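	/*
	 * Editorial sketch of the split performed below: with, say,
	 * kernel_x_start = 0x80000000 and kernel_x_end = 0x80900000, a bank
	 * 0x80000000-0xa0000000 is mapped MT_MEMORY_RWX up to kernel_x_end
	 * (kernel text and init code may execute) and MT_MEMORY_RW for the
	 * remainder, keeping ordinary lowmem non-executable.
	 */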
1447	/* Map all the lowmem memory banks. */
1448	for_each_memblock(memory, reg) {
1449		phys_addr_t start = reg->base;
1450		phys_addr_t end = start + reg->size;
1451		struct map_desc map;
1452
1453		if (memblock_is_nomap(reg))
1454			continue;
1455
1456		if (end > arm_lowmem_limit)
1457			end = arm_lowmem_limit;
1458		if (start >= end)
1459			break;
1460
1461		if (end < kernel_x_start) {
1462			map.pfn = __phys_to_pfn(start);
1463			map.virtual = __phys_to_virt(start);
1464			map.length = end - start;
1465			map.type = MT_MEMORY_RWX;
1466
1467			create_mapping(&map);
1468		} else if (start >= kernel_x_end) {
1469			map.pfn = __phys_to_pfn(start);
1470			map.virtual = __phys_to_virt(start);
1471			map.length = end - start;
1472			map.type = MT_MEMORY_RW;
1473
1474			create_mapping(&map);
1475		} else {
1476			/* This better cover the entire kernel */
1477			if (start < kernel_x_start) {
1478				map.pfn = __phys_to_pfn(start);
1479				map.virtual = __phys_to_virt(start);
1480				map.length = kernel_x_start - start;
1481				map.type = MT_MEMORY_RW;
1482
1483				create_mapping(&map);
1484			}
1485
1486			map.pfn = __phys_to_pfn(kernel_x_start);
1487			map.virtual = __phys_to_virt(kernel_x_start);
1488			map.length = kernel_x_end - kernel_x_start;
1489			map.type = MT_MEMORY_RWX;
1490
1491			create_mapping(&map);
1492
1493			if (kernel_x_end < end) {
1494				map.pfn = __phys_to_pfn(kernel_x_end);
1495				map.virtual = __phys_to_virt(kernel_x_end);
1496				map.length = end - kernel_x_end;
1497				map.type = MT_MEMORY_RW;
1498
1499				create_mapping(&map);
1500			}
1501		}
1502	}
1503}
1504
1505#ifdef CONFIG_ARM_PV_FIXUP
1506extern unsigned long __atags_pointer;
1507typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
1508pgtables_remap lpae_pgtables_remap_asm;
1509
1510/*
1511 * early_paging_init() recreates boot time page table setup, allowing machines
1512 * to switch over to a high (>4G) address space on LPAE systems
1513 */
1514void __init early_paging_init(const struct machine_desc *mdesc)
1515{
1516	pgtables_remap *lpae_pgtables_remap;
1517	unsigned long pa_pgd;
1518	unsigned int cr, ttbcr;
1519	long long offset;
1520	void *boot_data;
1521
1522	if (!mdesc->pv_fixup)
1523		return;
1524
1525	offset = mdesc->pv_fixup();
1526	if (offset == 0)
1527		return;
1528
1529	/*
1530	 * Get the address of the remap function in the 1:1 identity
1531	 * mapping setup by the early page table assembly code.  We
1532	 * must get this prior to the pv update.  The following barrier
1533	 * ensures that this is complete before we fixup any P:V offsets.
1534	 */
1535	lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
1536	pa_pgd = __pa(swapper_pg_dir);
1537	boot_data = __va(__atags_pointer);
1538	barrier();
1539
1540	pr_info("Switching physical address space to 0x%08llx\n",
1541		(u64)PHYS_OFFSET + offset);
1542
1543	/* Re-set the phys pfn offset, and the pv offset */
1544	__pv_offset += offset;
1545	__pv_phys_pfn_offset += PFN_DOWN(offset);
1546
1547	/* Run the patch stub to update the constants */
1548	fixup_pv_table(&__pv_table_begin,
1549		(&__pv_table_end - &__pv_table_begin) << 2);
1550
1551	/*
1552	 * We are changing not only the virtual to physical mapping, but also
1553	 * the physical addresses used to access memory.  We need to flush
1554	 * all levels of cache in the system with caching disabled to
1555	 * ensure that all data is written back, and nothing is prefetched
1556	 * into the caches.  We also need to prevent the TLB walkers
1557	 * from allocating into the caches.  Note that this is ARMv7 LPAE
1558	 * specific.
1559	 */
1560	cr = get_cr();
1561	set_cr(cr & ~(CR_I | CR_C));
1562	asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
1563	asm volatile("mcr p15, 0, %0, c2, c0, 2"
1564		: : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
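	/*
	 * Editorial note: with LPAE these are the TTBCR.IRGN0 (bits [9:8])
	 * and TTBCR.ORGN0 (bits [11:10]) fields; clearing them makes the
	 * TTBR0 table walks non-cacheable, matching the note above about
	 * keeping the TLB walkers out of the caches.
	 */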
1565	flush_cache_all();
1566
1567	/*
1568	 * Fixup the page tables - this must be in the idmap region as
1569	 * we need to disable the MMU to do this safely, and hence it
1570	 * needs to be assembly.  It's fairly simple, as we're using the
1571	 * temporary tables setup by the initial assembly code.
1572	 */
1573	lpae_pgtables_remap(offset, pa_pgd, boot_data);
1574
1575	/* Re-enable the caches and cacheable TLB walks */
1576	asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
1577	set_cr(cr);
1578}
1579
1580#else
1581
1582void __init early_paging_init(const struct machine_desc *mdesc)
1583{
1584	long long offset;
1585
1586	if (!mdesc->pv_fixup)
1587		return;
1588
1589	offset = mdesc->pv_fixup();
1590	if (offset == 0)
1591		return;
1592
1593	pr_crit("Physical address space modification is only to support Keystone2.\n");
1594	pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
1595	pr_crit("feature. Your kernel may crash now, have a good day.\n");
1596	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1597}
1598
1599#endif
1600
1601static void __init early_fixmap_shutdown(void)
1602{
1603	int i;
1604	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
1605
1606	pte_offset_fixmap = pte_offset_late_fixmap;
1607	pmd_clear(fixmap_pmd(va));
1608	local_flush_tlb_kernel_page(va);
1609
1610	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
1611		pte_t *pte;
1612		struct map_desc map;
1613
1614		map.virtual = fix_to_virt(i);
1615		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
1616
1617		/* Only i/o device mappings are supported ATM */
1618		if (pte_none(*pte) ||
1619		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
1620			continue;
1621
1622		map.pfn = pte_pfn(*pte);
1623		map.type = MT_DEVICE;
1624		map.length = PAGE_SIZE;
1625
1626		create_mapping(&map);
1627	}
1628}
1629
1630/*
1631 * paging_init() sets up the page tables, initialises the zone memory
1632 * maps, and sets up the zero page, bad page and bad page tables.
1633 */
1634void __init paging_init(const struct machine_desc *mdesc)
1635{
1636	void *zero_page;
1637
1638	build_mem_type_table();
1639	prepare_page_table();
1640	map_lowmem();
1641	memblock_set_current_limit(arm_lowmem_limit);
1642	dma_contiguous_remap();
1643	early_fixmap_shutdown();
1644	devicemaps_init(mdesc);
1645	kmap_init();
1646	tcm_init();
1647
1648	top_pmd = pmd_off_k(0xffff0000);
1649
1650	/* allocate the zero page. */
1651	zero_page = early_alloc(PAGE_SIZE);
1652
1653	bootmem_init();
1654
1655	empty_zero_page = virt_to_page(zero_page);
1656	__flush_dcache_page(NULL, empty_zero_page);
1657}