1/*
2 * linux/arch/arm/mm/mmu.c
3 *
4 * Copyright (C) 1995-2005 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/init.h>
14#include <linux/mman.h>
15#include <linux/nodemask.h>
16#include <linux/memblock.h>
17#include <linux/fs.h>
18#include <linux/vmalloc.h>
19#include <linux/sizes.h>
20
21#include <asm/cp15.h>
22#include <asm/cputype.h>
23#include <asm/sections.h>
24#include <asm/cachetype.h>
25#include <asm/fixmap.h>
26#include <asm/sections.h>
27#include <asm/setup.h>
28#include <asm/smp_plat.h>
29#include <asm/tlb.h>
30#include <asm/highmem.h>
31#include <asm/system_info.h>
32#include <asm/traps.h>
33#include <asm/procinfo.h>
34#include <asm/memory.h>
35
36#include <asm/mach/arch.h>
37#include <asm/mach/map.h>
38#include <asm/mach/pci.h>
39#include <asm/fixmap.h>
40
41#include "fault.h"
42#include "mm.h"
43#include "tcm.h"
44
45/*
46 * empty_zero_page is a special page that is used for
47 * zero-initialized data and COW.
48 */
49struct page *empty_zero_page;
50EXPORT_SYMBOL(empty_zero_page);
51
52/*
53 * The pmd table for the upper-most set of pages.
54 */
55pmd_t *top_pmd;
56
57pmdval_t user_pmd_table = _PAGE_USER_TABLE;
58
59#define CPOLICY_UNCACHED 0
60#define CPOLICY_BUFFERED 1
61#define CPOLICY_WRITETHROUGH 2
62#define CPOLICY_WRITEBACK 3
63#define CPOLICY_WRITEALLOC 4
64
65static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
66static unsigned int ecc_mask __initdata = 0;
67pgprot_t pgprot_user;
68pgprot_t pgprot_kernel;
69pgprot_t pgprot_hyp_device;
70pgprot_t pgprot_s2;
71pgprot_t pgprot_s2_device;
72
73EXPORT_SYMBOL(pgprot_user);
74EXPORT_SYMBOL(pgprot_kernel);
75
76struct cachepolicy {
77 const char policy[16];
78 unsigned int cr_mask;
79 pmdval_t pmd;
80 pteval_t pte;
81 pteval_t pte_s2;
82};
83
84#ifdef CONFIG_ARM_LPAE
85#define s2_policy(policy) policy
86#else
87#define s2_policy(policy) 0
88#endif
89
90static struct cachepolicy cache_policies[] __initdata = {
91 {
92 .policy = "uncached",
93 .cr_mask = CR_W|CR_C,
94 .pmd = PMD_SECT_UNCACHED,
95 .pte = L_PTE_MT_UNCACHED,
96 .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
97 }, {
98 .policy = "buffered",
99 .cr_mask = CR_C,
100 .pmd = PMD_SECT_BUFFERED,
101 .pte = L_PTE_MT_BUFFERABLE,
102 .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
103 }, {
104 .policy = "writethrough",
105 .cr_mask = 0,
106 .pmd = PMD_SECT_WT,
107 .pte = L_PTE_MT_WRITETHROUGH,
108 .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
109 }, {
110 .policy = "writeback",
111 .cr_mask = 0,
112 .pmd = PMD_SECT_WB,
113 .pte = L_PTE_MT_WRITEBACK,
114 .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
115 }, {
116 .policy = "writealloc",
117 .cr_mask = 0,
118 .pmd = PMD_SECT_WBWA,
119 .pte = L_PTE_MT_WRITEALLOC,
120 .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
121 }
122};
123
124#ifdef CONFIG_CPU_CP15
125static unsigned long initial_pmd_value __initdata = 0;
126
127/*
128 * Initialise the cache_policy variable with the initial state specified
129 * via the "pmd" value. This is used to ensure that on ARMv6 and later,
130 * the C code sets the page tables up with the same policy as the head
131 * assembly code, which avoids an illegal state where the TLBs can get
132 * confused. See comments in early_cachepolicy() for more information.
133 */
134void __init init_default_cache_policy(unsigned long pmd)
135{
136 int i;
137
138 initial_pmd_value = pmd;
139
140 pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;
141
142 for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
143 if (cache_policies[i].pmd == pmd) {
144 cachepolicy = i;
145 break;
146 }
147
148 if (i == ARRAY_SIZE(cache_policies))
149 pr_err("ERROR: could not find cache policy\n");
150}
151
152/*
153 * These are useful for identifying cache coherency problems by allowing
154 * the cache or the cache and writebuffer to be turned off. (Note: the
155 * write buffer should not be on and the cache off).
156 */
157static int __init early_cachepolicy(char *p)
158{
159 int i, selected = -1;
160
161 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
162 int len = strlen(cache_policies[i].policy);
163
164 if (memcmp(p, cache_policies[i].policy, len) == 0) {
165 selected = i;
166 break;
167 }
168 }
169
170 if (selected == -1)
171 pr_err("ERROR: unknown or unsupported cache policy\n");
172
173 /*
174 * This restriction is partly to do with the way we boot; it is
175 * unpredictable to have memory mapped using two different sets of
176 * memory attributes (shared, type, and cache attribs). We can not
177 * change these attributes once the initial assembly has setup the
178 * page tables.
179 */
180 if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
181 pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
182 cache_policies[cachepolicy].policy);
183 return 0;
184 }
185
186 if (selected != cachepolicy) {
187 unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
188 cachepolicy = selected;
189 flush_cache_all();
190 set_cr(cr);
191 }
192 return 0;
193}
194early_param("cachepolicy", early_cachepolicy);
195
196static int __init early_nocache(char *__unused)
197{
198 char *p = "buffered";
199 pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
200 early_cachepolicy(p);
201 return 0;
202}
203early_param("nocache", early_nocache);
204
205static int __init early_nowrite(char *__unused)
206{
207 char *p = "uncached";
208 pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
209 early_cachepolicy(p);
210 return 0;
211}
212early_param("nowb", early_nowrite);
213
214#ifndef CONFIG_ARM_LPAE
215static int __init early_ecc(char *p)
216{
217 if (memcmp(p, "on", 2) == 0)
218 ecc_mask = PMD_PROTECTION;
219 else if (memcmp(p, "off", 3) == 0)
220 ecc_mask = 0;
221 return 0;
222}
223early_param("ecc", early_ecc);
224#endif
225
226#else /* ifdef CONFIG_CPU_CP15 */
227
228static int __init early_cachepolicy(char *p)
229{
230 pr_warn("cachepolicy kernel parameter not supported without cp15\n");
231}
232early_param("cachepolicy", early_cachepolicy);
233
234static int __init noalign_setup(char *__unused)
235{
236 pr_warn("noalign kernel parameter not supported without cp15\n");
237}
238__setup("noalign", noalign_setup);
239
240#endif /* ifdef CONFIG_CPU_CP15 / else */
241
242#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
243#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
244#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
245
246static struct mem_type mem_types[] = {
247 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
248 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
249 L_PTE_SHARED,
250 .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
251 s2_policy(L_PTE_S2_MT_DEV_SHARED) |
252 L_PTE_SHARED,
253 .prot_l1 = PMD_TYPE_TABLE,
254 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
255 .domain = DOMAIN_IO,
256 },
257 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
258 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
259 .prot_l1 = PMD_TYPE_TABLE,
260 .prot_sect = PROT_SECT_DEVICE,
261 .domain = DOMAIN_IO,
262 },
263 [MT_DEVICE_CACHED] = { /* ioremap_cached */
264 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
265 .prot_l1 = PMD_TYPE_TABLE,
266 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
267 .domain = DOMAIN_IO,
268 },
269 [MT_DEVICE_WC] = { /* ioremap_wc */
270 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
271 .prot_l1 = PMD_TYPE_TABLE,
272 .prot_sect = PROT_SECT_DEVICE,
273 .domain = DOMAIN_IO,
274 },
275 [MT_UNCACHED] = {
276 .prot_pte = PROT_PTE_DEVICE,
277 .prot_l1 = PMD_TYPE_TABLE,
278 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
279 .domain = DOMAIN_IO,
280 },
281 [MT_CACHECLEAN] = {
282 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
283 .domain = DOMAIN_KERNEL,
284 },
285#ifndef CONFIG_ARM_LPAE
286 [MT_MINICLEAN] = {
287 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
288 .domain = DOMAIN_KERNEL,
289 },
290#endif
291 [MT_LOW_VECTORS] = {
292 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
293 L_PTE_RDONLY,
294 .prot_l1 = PMD_TYPE_TABLE,
295 .domain = DOMAIN_VECTORS,
296 },
297 [MT_HIGH_VECTORS] = {
298 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
299 L_PTE_USER | L_PTE_RDONLY,
300 .prot_l1 = PMD_TYPE_TABLE,
301 .domain = DOMAIN_VECTORS,
302 },
303 [MT_MEMORY_RWX] = {
304 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
305 .prot_l1 = PMD_TYPE_TABLE,
306 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
307 .domain = DOMAIN_KERNEL,
308 },
309 [MT_MEMORY_RW] = {
310 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
311 L_PTE_XN,
312 .prot_l1 = PMD_TYPE_TABLE,
313 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
314 .domain = DOMAIN_KERNEL,
315 },
316 [MT_ROM] = {
317 .prot_sect = PMD_TYPE_SECT,
318 .domain = DOMAIN_KERNEL,
319 },
320 [MT_MEMORY_RWX_NONCACHED] = {
321 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
322 L_PTE_MT_BUFFERABLE,
323 .prot_l1 = PMD_TYPE_TABLE,
324 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
325 .domain = DOMAIN_KERNEL,
326 },
327 [MT_MEMORY_RW_DTCM] = {
328 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
329 L_PTE_XN,
330 .prot_l1 = PMD_TYPE_TABLE,
331 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
332 .domain = DOMAIN_KERNEL,
333 },
334 [MT_MEMORY_RWX_ITCM] = {
335 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
336 .prot_l1 = PMD_TYPE_TABLE,
337 .domain = DOMAIN_KERNEL,
338 },
339 [MT_MEMORY_RW_SO] = {
340 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
341 L_PTE_MT_UNCACHED | L_PTE_XN,
342 .prot_l1 = PMD_TYPE_TABLE,
343 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
344 PMD_SECT_UNCACHED | PMD_SECT_XN,
345 .domain = DOMAIN_KERNEL,
346 },
347 [MT_MEMORY_DMA_READY] = {
348 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
349 L_PTE_XN,
350 .prot_l1 = PMD_TYPE_TABLE,
351 .domain = DOMAIN_KERNEL,
352 },
353};
354
355const struct mem_type *get_mem_type(unsigned int type)
356{
357 return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
358}
359EXPORT_SYMBOL(get_mem_type);
360
361static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
362
363static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
364 __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
365
366static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
367{
368 return &bm_pte[pte_index(addr)];
369}
370
371static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
372{
373 return pte_offset_kernel(dir, addr);
374}
375
376static inline pmd_t * __init fixmap_pmd(unsigned long addr)
377{
378 pgd_t *pgd = pgd_offset_k(addr);
379 pud_t *pud = pud_offset(pgd, addr);
380 pmd_t *pmd = pmd_offset(pud, addr);
381
382 return pmd;
383}
384
385void __init early_fixmap_init(void)
386{
387 pmd_t *pmd;
388
389 /*
390 * The early fixmap range spans multiple pmds, for which
391 * we are not prepared:
392 */
393 BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
394 != FIXADDR_TOP >> PMD_SHIFT);
395
396 pmd = fixmap_pmd(FIXADDR_TOP);
397 pmd_populate_kernel(&init_mm, pmd, bm_pte);
398
399 pte_offset_fixmap = pte_offset_early_fixmap;
400}
401
402/*
403 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
404 * As a result, this can only be called with preemption disabled, as under
405 * stop_machine().
406 */
407void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
408{
409 unsigned long vaddr = __fix_to_virt(idx);
410 pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
411
412 /* Make sure fixmap region does not exceed available allocation. */
413 BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
414 FIXADDR_END);
415 BUG_ON(idx >= __end_of_fixed_addresses);
416
417 if (pgprot_val(prot))
418 set_pte_at(NULL, vaddr, pte,
419 pfn_pte(phys >> PAGE_SHIFT, prot));
420 else
421 pte_clear(NULL, vaddr, pte);
422 local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
423}
424
425/*
426 * Adjust the PMD section entries according to the CPU in use.
427 */
428static void __init build_mem_type_table(void)
429{
430 struct cachepolicy *cp;
431 unsigned int cr = get_cr();
432 pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
433 pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
434 int cpu_arch = cpu_architecture();
435 int i;
436
437 if (cpu_arch < CPU_ARCH_ARMv6) {
438#if defined(CONFIG_CPU_DCACHE_DISABLE)
439 if (cachepolicy > CPOLICY_BUFFERED)
440 cachepolicy = CPOLICY_BUFFERED;
441#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
442 if (cachepolicy > CPOLICY_WRITETHROUGH)
443 cachepolicy = CPOLICY_WRITETHROUGH;
444#endif
445 }
446 if (cpu_arch < CPU_ARCH_ARMv5) {
447 if (cachepolicy >= CPOLICY_WRITEALLOC)
448 cachepolicy = CPOLICY_WRITEBACK;
449 ecc_mask = 0;
450 }
451
452 if (is_smp()) {
453 if (cachepolicy != CPOLICY_WRITEALLOC) {
454 pr_warn("Forcing write-allocate cache policy for SMP\n");
455 cachepolicy = CPOLICY_WRITEALLOC;
456 }
457 if (!(initial_pmd_value & PMD_SECT_S)) {
458 pr_warn("Forcing shared mappings for SMP\n");
459 initial_pmd_value |= PMD_SECT_S;
460 }
461 }
462
463 /*
464 * Strip out features not present on earlier architectures.
465 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
466 * without extended page tables don't have the 'Shared' bit.
467 */
468 if (cpu_arch < CPU_ARCH_ARMv5)
469 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
470 mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
471 if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
472 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
473 mem_types[i].prot_sect &= ~PMD_SECT_S;
474
475 /*
476 * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
477 * "update-able on write" bit on ARM610). However, Xscale and
478 * Xscale3 require this bit to be cleared.
479 */
480 if (cpu_is_xscale_family()) {
481 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
482 mem_types[i].prot_sect &= ~PMD_BIT4;
483 mem_types[i].prot_l1 &= ~PMD_BIT4;
484 }
485 } else if (cpu_arch < CPU_ARCH_ARMv6) {
486 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
487 if (mem_types[i].prot_l1)
488 mem_types[i].prot_l1 |= PMD_BIT4;
489 if (mem_types[i].prot_sect)
490 mem_types[i].prot_sect |= PMD_BIT4;
491 }
492 }
493
494 /*
495 * Mark the device areas according to the CPU/architecture.
496 */
497 if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
498 if (!cpu_is_xsc3()) {
499 /*
500 * Mark device regions on ARMv6+ as execute-never
501 * to prevent speculative instruction fetches.
502 */
503 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
504 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
505 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
506 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
507
508 /* Also setup NX memory mapping */
509 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
510 }
511 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
512 /*
513 * For ARMv7 with TEX remapping,
514 * - shared device is SXCB=1100
515 * - nonshared device is SXCB=0100
516 * - write combine device mem is SXCB=0001
517 * (Uncached Normal memory)
518 */
519 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
520 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
521 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
522 } else if (cpu_is_xsc3()) {
523 /*
524 * For Xscale3,
525 * - shared device is TEXCB=00101
526 * - nonshared device is TEXCB=01000
527 * - write combine device mem is TEXCB=00100
528 * (Inner/Outer Uncacheable in xsc3 parlance)
529 */
530 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
531 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
532 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
533 } else {
534 /*
535 * For ARMv6 and ARMv7 without TEX remapping,
536 * - shared device is TEXCB=00001
537 * - nonshared device is TEXCB=01000
538 * - write combine device mem is TEXCB=00100
539 * (Uncached Normal in ARMv6 parlance).
540 */
541 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
542 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
543 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
544 }
545 } else {
546 /*
547 * On others, write combining is "Uncached/Buffered"
548 */
549 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
550 }
551
552 /*
553 * Now deal with the memory-type mappings
554 */
555 cp = &cache_policies[cachepolicy];
556 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
557 s2_pgprot = cp->pte_s2;
558 hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
559 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
560
561#ifndef CONFIG_ARM_LPAE
562 /*
563 * We don't use domains on ARMv6 (since this causes problems with
564 * v6/v7 kernels), so we must use a separate memory type for user
565 * r/o, kernel r/w to map the vectors page.
566 */
567 if (cpu_arch == CPU_ARCH_ARMv6)
568 vecs_pgprot |= L_PTE_MT_VECTORS;
569
570 /*
571 * Check whether the CPU has support for the PXN bit
572 * in the Short-descriptor translation table format descriptors.
573 */
574 if (cpu_arch == CPU_ARCH_ARMv7 &&
575 (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
576 user_pmd_table |= PMD_PXNTABLE;
577 }
578#endif
579
580 /*
581 * ARMv6 and above have extended page tables.
582 */
583 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
584#ifndef CONFIG_ARM_LPAE
585 /*
586 * Mark cache clean areas and XIP ROM read only
587 * from SVC mode and no access from userspace.
588 */
589 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
590 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
591 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
592#endif
593
594 /*
595 * If the initial page tables were created with the S bit
596 * set, then we need to do the same here for the same
597 * reasons given in early_cachepolicy().
598 */
599 if (initial_pmd_value & PMD_SECT_S) {
600 user_pgprot |= L_PTE_SHARED;
601 kern_pgprot |= L_PTE_SHARED;
602 vecs_pgprot |= L_PTE_SHARED;
603 s2_pgprot |= L_PTE_SHARED;
604 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
605 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
606 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
607 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
608 mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
609 mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
610 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
611 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
612 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
613 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
614 mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
615 }
616 }
617
618 /*
619 * Non-cacheable Normal - intended for memory areas that must
620 * not cause dirty cache line writebacks when used
621 */
622 if (cpu_arch >= CPU_ARCH_ARMv6) {
623 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
624 /* Non-cacheable Normal is XCB = 001 */
625 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
626 PMD_SECT_BUFFERED;
627 } else {
628 /* For both ARMv6 and non-TEX-remapping ARMv7 */
629 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
630 PMD_SECT_TEX(1);
631 }
632 } else {
633 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
634 }
635
636#ifdef CONFIG_ARM_LPAE
637 /*
638 * Do not generate access flag faults for the kernel mappings.
639 */
640 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
641 mem_types[i].prot_pte |= PTE_EXT_AF;
642 if (mem_types[i].prot_sect)
643 mem_types[i].prot_sect |= PMD_SECT_AF;
644 }
645 kern_pgprot |= PTE_EXT_AF;
646 vecs_pgprot |= PTE_EXT_AF;
647
648 /*
649 * Set PXN for user mappings
650 */
651 user_pgprot |= PTE_EXT_PXN;
652#endif
653
654 for (i = 0; i < 16; i++) {
655 pteval_t v = pgprot_val(protection_map[i]);
656 protection_map[i] = __pgprot(v | user_pgprot);
657 }
658
659 mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
660 mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
661
662 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
663 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
664 L_PTE_DIRTY | kern_pgprot);
665 pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
666 pgprot_s2_device = __pgprot(s2_device_pgprot);
667 pgprot_hyp_device = __pgprot(hyp_device_pgprot);
668
669 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
670 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
671 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
672 mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
673 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
674 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
675 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
676 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
677 mem_types[MT_ROM].prot_sect |= cp->pmd;
678
679 switch (cp->pmd) {
680 case PMD_SECT_WT:
681 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
682 break;
683 case PMD_SECT_WB:
684 case PMD_SECT_WBWA:
685 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
686 break;
687 }
688 pr_info("Memory policy: %sData cache %s\n",
689 ecc_mask ? "ECC enabled, " : "", cp->policy);
690
691 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
692 struct mem_type *t = &mem_types[i];
693 if (t->prot_l1)
694 t->prot_l1 |= PMD_DOMAIN(t->domain);
695 if (t->prot_sect)
696 t->prot_sect |= PMD_DOMAIN(t->domain);
697 }
698}
699
700#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
701pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
702 unsigned long size, pgprot_t vma_prot)
703{
704 if (!pfn_valid(pfn))
705 return pgprot_noncached(vma_prot);
706 else if (file->f_flags & O_SYNC)
707 return pgprot_writecombine(vma_prot);
708 return vma_prot;
709}
710EXPORT_SYMBOL(phys_mem_access_prot);
711#endif
712
713#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
714
715static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
716{
717 void *ptr = __va(memblock_alloc(sz, align));
718 memset(ptr, 0, sz);
719 return ptr;
720}
721
722static void __init *early_alloc(unsigned long sz)
723{
724 return early_alloc_aligned(sz, sz);
725}
726
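/*
 * Allocator used by create_mapping_late(), once the core page allocator
 * is available (early_alloc() above uses memblock instead).
 */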
727static void *__init late_alloc(unsigned long sz)
728{
729 void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
730
731 BUG_ON(!ptr);
732 return ptr;
733}
734
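/*
 * Return the PTE entry covering addr, first allocating a PTE table and
 * installing it in the PMD if one is not already present.
 */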
735static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
736 unsigned long prot,
737 void *(*alloc)(unsigned long sz))
738{
739 if (pmd_none(*pmd)) {
740 pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
741 __pmd_populate(pmd, __pa(pte), prot);
742 }
743 BUG_ON(pmd_bad(*pmd));
744 return pte_offset_kernel(pmd, addr);
745}
746
747static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
748 unsigned long prot)
749{
750 return arm_pte_alloc(pmd, addr, prot, early_alloc);
751}
752
753static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
754 unsigned long end, unsigned long pfn,
755 const struct mem_type *type,
756 void *(*alloc)(unsigned long sz),
757 bool ng)
758{
759 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
760 do {
761 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
762 ng ? PTE_EXT_NG : 0);
763 pfn++;
764 } while (pte++, addr += PAGE_SIZE, addr != end);
765}
766
767static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
768 unsigned long end, phys_addr_t phys,
769 const struct mem_type *type, bool ng)
770{
771 pmd_t *p = pmd;
772
773#ifndef CONFIG_ARM_LPAE
774 /*
775 * In classic MMU format, puds and pmds are folded in to
776 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
777 * group of L1 entries making up one logical pointer to
778 * an L2 table (2MB), whereas PMDs refer to the individual
779 * L1 entries (1MB). Hence increment to get the correct
780 * offset for odd 1MB sections.
781 * (See arch/arm/include/asm/pgtable-2level.h)
782 */
783 if (addr & SECTION_SIZE)
784 pmd++;
785#endif
786 do {
787 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
788 phys += SECTION_SIZE;
789 } while (pmd++, addr += SECTION_SIZE, addr != end);
790
791 flush_pmd_entry(p);
792}
793
794static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
795 unsigned long end, phys_addr_t phys,
796 const struct mem_type *type,
797 void *(*alloc)(unsigned long sz), bool ng)
798{
799 pmd_t *pmd = pmd_offset(pud, addr);
800 unsigned long next;
801
802 do {
803 /*
804 * With LPAE, we must loop over to map
805 * all the pmds for the given range.
806 */
807 next = pmd_addr_end(addr, end);
808
809 /*
810 * Try a section mapping - addr, next and phys must all be
811 * aligned to a section boundary.
812 */
813 if (type->prot_sect &&
814 ((addr | next | phys) & ~SECTION_MASK) == 0) {
815 __map_init_section(pmd, addr, next, phys, type, ng);
816 } else {
817 alloc_init_pte(pmd, addr, next,
818 __phys_to_pfn(phys), type, alloc, ng);
819 }
820
821 phys += next - addr;
822
823 } while (pmd++, addr = next, addr != end);
824}
825
826static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
827 unsigned long end, phys_addr_t phys,
828 const struct mem_type *type,
829 void *(*alloc)(unsigned long sz), bool ng)
830{
831 pud_t *pud = pud_offset(pgd, addr);
832 unsigned long next;
833
834 do {
835 next = pud_addr_end(addr, end);
836 alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
837 phys += next - addr;
838 } while (pud++, addr = next, addr != end);
839}
840
841#ifndef CONFIG_ARM_LPAE
842static void __init create_36bit_mapping(struct mm_struct *mm,
843 struct map_desc *md,
844 const struct mem_type *type,
845 bool ng)
846{
847 unsigned long addr, length, end;
848 phys_addr_t phys;
849 pgd_t *pgd;
850
851 addr = md->virtual;
852 phys = __pfn_to_phys(md->pfn);
853 length = PAGE_ALIGN(md->length);
854
855 if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
856 pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
857 (long long)__pfn_to_phys((u64)md->pfn), addr);
858 return;
859 }
860
861 /* N.B. ARMv6 supersections are only defined to work with domain 0.
862 * Since domain assignments can in fact be arbitrary, the
863 * 'domain == 0' check below is required to ensure that ARMv6
864 * supersections are only allocated for domain 0 regardless
865 * of the actual domain assignments in use.
866 */
867 if (type->domain) {
868 pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
869 (long long)__pfn_to_phys((u64)md->pfn), addr);
870 return;
871 }
872
873 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
874 pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
875 (long long)__pfn_to_phys((u64)md->pfn), addr);
876 return;
877 }
878
879 /*
880 * Shift bits [35:32] of address into bits [23:20] of PMD
881 * (See ARMv6 spec).
882 */
883 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
884
885 pgd = pgd_offset(mm, addr);
886 end = addr + length;
887 do {
888 pud_t *pud = pud_offset(pgd, addr);
889 pmd_t *pmd = pmd_offset(pud, addr);
890 int i;
891
892 for (i = 0; i < 16; i++)
893 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
894 (ng ? PMD_SECT_nG : 0));
895
896 addr += SUPERSECTION_SIZE;
897 phys += SUPERSECTION_SIZE;
898 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
899 } while (addr != end);
900}
901#endif /* !CONFIG_ARM_LPAE */
902
903static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
904 void *(*alloc)(unsigned long sz),
905 bool ng)
906{
907 unsigned long addr, length, end;
908 phys_addr_t phys;
909 const struct mem_type *type;
910 pgd_t *pgd;
911
912 type = &mem_types[md->type];
913
914#ifndef CONFIG_ARM_LPAE
915 /*
916 * Catch 36-bit addresses
917 */
918 if (md->pfn >= 0x100000) {
919 create_36bit_mapping(mm, md, type, ng);
920 return;
921 }
922#endif
923
924 addr = md->virtual & PAGE_MASK;
925 phys = __pfn_to_phys(md->pfn);
926 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
927
928 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
929 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
930 (long long)__pfn_to_phys(md->pfn), addr);
931 return;
932 }
933
934 pgd = pgd_offset(mm, addr);
935 end = addr + length;
936 do {
937 unsigned long next = pgd_addr_end(addr, end);
938
939 alloc_init_pud(pgd, addr, next, phys, type, alloc, ng);
940
941 phys += next - addr;
942 addr = next;
943 } while (pgd++, addr != end);
944}
945
946/*
947 * Create the page directory entries and any necessary
948 * page tables for the mapping specified by `md'. We
949 * are able to cope here with varying sizes and address
950 * offsets, and we take full advantage of sections and
951 * supersections.
952 */
953static void __init create_mapping(struct map_desc *md)
954{
955 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
956 pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
957 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
958 return;
959 }
960
961 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
962 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
963 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
964 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
965 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
966 }
967
968 __create_mapping(&init_mm, md, early_alloc, false);
969}
970
971void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
972 bool ng)
973{
974#ifdef CONFIG_ARM_LPAE
975 pud_t *pud = pud_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
976 if (WARN_ON(!pud))
977 return;
978 pmd_alloc(mm, pud, 0);
979#endif
980 __create_mapping(mm, md, late_alloc, ng);
981}
982
983/*
984 * Create the architecture specific mappings
985 */
986void __init iotable_init(struct map_desc *io_desc, int nr)
987{
988 struct map_desc *md;
989 struct vm_struct *vm;
990 struct static_vm *svm;
991
992 if (!nr)
993 return;
994
995 svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
996
997 for (md = io_desc; nr; md++, nr--) {
998 create_mapping(md);
999
1000 vm = &svm->vm;
1001 vm->addr = (void *)(md->virtual & PAGE_MASK);
1002 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
1003 vm->phys_addr = __pfn_to_phys(md->pfn);
1004 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
1005 vm->flags |= VM_ARM_MTYPE(md->type);
1006 vm->caller = iotable_init;
1007 add_static_vm_early(svm++);
1008 }
1009}
1010
1011void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
1012 void *caller)
1013{
1014 struct vm_struct *vm;
1015 struct static_vm *svm;
1016
1017 svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
1018
1019 vm = &svm->vm;
1020 vm->addr = (void *)addr;
1021 vm->size = size;
1022 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
1023 vm->caller = caller;
1024 add_static_vm_early(svm);
1025}
1026
1027#ifndef CONFIG_ARM_LPAE
1028
1029/*
1030 * The Linux PMD is made of two consecutive section entries covering 2MB
1031 * (see definition in include/asm/pgtable-2level.h). However a call to
1032 * create_mapping() may optimize static mappings by using individual
1033 * 1MB section mappings. This leaves the actual PMD potentially half
1034 * initialized if the top or bottom section entry isn't used, leaving it
1035 * open to problems if a subsequent ioremap() or vmalloc() tries to use
1036 * the virtual space left free by that unused section entry.
1037 *
1038 * Let's avoid the issue by inserting dummy vm entries covering the unused
1039 * PMD halves once the static mappings are in place.
1040 */
1041
1042static void __init pmd_empty_section_gap(unsigned long addr)
1043{
1044 vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
1045}
1046
1047static void __init fill_pmd_gaps(void)
1048{
1049 struct static_vm *svm;
1050 struct vm_struct *vm;
1051 unsigned long addr, next = 0;
1052 pmd_t *pmd;
1053
1054 list_for_each_entry(svm, &static_vmlist, list) {
1055 vm = &svm->vm;
1056 addr = (unsigned long)vm->addr;
1057 if (addr < next)
1058 continue;
1059
1060 /*
1061 * Check if this vm starts on an odd section boundary.
1062 * If so and the first section entry for this PMD is free
1063 * then we block the corresponding virtual address.
1064 */
1065 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1066 pmd = pmd_off_k(addr);
1067 if (pmd_none(*pmd))
1068 pmd_empty_section_gap(addr & PMD_MASK);
1069 }
1070
1071 /*
1072 * Then check if this vm ends on an odd section boundary.
1073 * If so and the second section entry for this PMD is empty
1074 * then we block the corresponding virtual address.
1075 */
1076 addr += vm->size;
1077 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1078 pmd = pmd_off_k(addr) + 1;
1079 if (pmd_none(*pmd))
1080 pmd_empty_section_gap(addr);
1081 }
1082
1083 /* no need to look at any vm entry until we hit the next PMD */
1084 next = (addr + PMD_SIZE - 1) & PMD_MASK;
1085 }
1086}
1087
1088#else
1089#define fill_pmd_gaps() do { } while (0)
1090#endif
1091
1092#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
1093static void __init pci_reserve_io(void)
1094{
1095 struct static_vm *svm;
1096
1097 svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
1098 if (svm)
1099 return;
1100
1101 vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
1102}
1103#else
1104#define pci_reserve_io() do { } while (0)
1105#endif
1106
1107#ifdef CONFIG_DEBUG_LL
1108void __init debug_ll_io_init(void)
1109{
1110 struct map_desc map;
1111
1112 debug_ll_addr(&map.pfn, &map.virtual);
1113 if (!map.pfn || !map.virtual)
1114 return;
1115 map.pfn = __phys_to_pfn(map.pfn);
1116 map.virtual &= PAGE_MASK;
1117 map.length = PAGE_SIZE;
1118 map.type = MT_DEVICE;
1119 iotable_init(&map, 1);
1120}
1121#endif
1122
1123static void * __initdata vmalloc_min =
1124 (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1125
1126/*
1127 * vmalloc=size forces the vmalloc area to be exactly 'size'
1128 * bytes. This can be used to increase (or decrease) the vmalloc
1129 * area - the default is 240m.
1130 */
1131static int __init early_vmalloc(char *arg)
1132{
1133 unsigned long vmalloc_reserve = memparse(arg, NULL);
1134
1135 if (vmalloc_reserve < SZ_16M) {
1136 vmalloc_reserve = SZ_16M;
1137 pr_warn("vmalloc area too small, limiting to %luMB\n",
1138 vmalloc_reserve >> 20);
1139 }
1140
1141 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
1142 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
1143 pr_warn("vmalloc area is too big, limiting to %luMB\n",
1144 vmalloc_reserve >> 20);
1145 }
1146
1147 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
1148 return 0;
1149}
1150early_param("vmalloc", early_vmalloc);
1151
1152phys_addr_t arm_lowmem_limit __initdata = 0;
1153
1154void __init sanity_check_meminfo(void)
1155{
1156 phys_addr_t memblock_limit = 0;
1157 int highmem = 0;
1158 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
1159 struct memblock_region *reg;
1160 bool should_use_highmem = false;
1161
1162 for_each_memblock(memory, reg) {
1163 phys_addr_t block_start = reg->base;
1164 phys_addr_t block_end = reg->base + reg->size;
1165 phys_addr_t size_limit = reg->size;
1166
1167 if (reg->base >= vmalloc_limit)
1168 highmem = 1;
1169 else
1170 size_limit = vmalloc_limit - reg->base;
1171
1172
1173 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
1174
1175 if (highmem) {
1176 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1177 &block_start, &block_end);
1178 memblock_remove(reg->base, reg->size);
1179 should_use_highmem = true;
1180 continue;
1181 }
1182
1183 if (reg->size > size_limit) {
1184 phys_addr_t overlap_size = reg->size - size_limit;
1185
1186 pr_notice("Truncating RAM at %pa-%pa to -%pa",
1187 &block_start, &block_end, &vmalloc_limit);
1188 memblock_remove(vmalloc_limit, overlap_size);
1189 block_end = vmalloc_limit;
1190 should_use_highmem = true;
1191 }
1192 }
1193
1194 if (!highmem) {
1195 if (block_end > arm_lowmem_limit) {
1196 if (reg->size > size_limit)
1197 arm_lowmem_limit = vmalloc_limit;
1198 else
1199 arm_lowmem_limit = block_end;
1200 }
1201
1202 /*
1203 * Find the first non-pmd-aligned page, and point
1204 * memblock_limit at it. This relies on rounding the
1205 * limit down to be pmd-aligned, which happens at the
1206 * end of this function.
1207 *
1208 * With this algorithm, the start or end of almost any
1209 * bank can be non-pmd-aligned. The only exception is
1210 * that the start of the bank 0 must be section-
1211 * aligned, since otherwise memory would need to be
1212 * allocated when mapping the start of bank 0, which
1213 * occurs before any free memory is mapped.
1214 */
1215 if (!memblock_limit) {
1216 if (!IS_ALIGNED(block_start, PMD_SIZE))
1217 memblock_limit = block_start;
1218 else if (!IS_ALIGNED(block_end, PMD_SIZE))
1219 memblock_limit = arm_lowmem_limit;
1220 }
1221
1222 }
1223 }
1224
1225 if (should_use_highmem)
1226 pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1227
1228 high_memory = __va(arm_lowmem_limit - 1) + 1;
1229
1230 /*
1231 * Round the memblock limit down to a pmd size. This
1232 * helps to ensure that we will allocate memory from the
1233 * last full pmd, which should be mapped.
1234 */
1235 if (memblock_limit)
1236 memblock_limit = round_down(memblock_limit, PMD_SIZE);
1237 if (!memblock_limit)
1238 memblock_limit = arm_lowmem_limit;
1239
1240 memblock_set_current_limit(memblock_limit);
1241}
1242
1243static inline void prepare_page_table(void)
1244{
1245 unsigned long addr;
1246 phys_addr_t end;
1247
1248 /*
1249 * Clear out all the mappings below the kernel image.
1250 */
1251 for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1252 pmd_clear(pmd_off_k(addr));
1253
1254#ifdef CONFIG_XIP_KERNEL
1255 /* The XIP kernel is mapped in the module area -- skip over it */
1256 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
1257#endif
1258 for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1259 pmd_clear(pmd_off_k(addr));
1260
1261 /*
1262 * Find the end of the first block of lowmem.
1263 */
1264 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1265 if (end >= arm_lowmem_limit)
1266 end = arm_lowmem_limit;
1267
1268 /*
1269 * Clear out all the kernel space mappings, except for the first
1270 * memory bank, up to the vmalloc region.
1271 */
1272 for (addr = __phys_to_virt(end);
1273 addr < VMALLOC_START; addr += PMD_SIZE)
1274 pmd_clear(pmd_off_k(addr));
1275}
1276
1277#ifdef CONFIG_ARM_LPAE
1278/* the first page is reserved for pgd */
1279#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
1280 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1281#else
1282#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
1283#endif
1284
1285/*
1286 * Reserve the special regions of memory
1287 */
1288void __init arm_mm_memblock_reserve(void)
1289{
1290 /*
1291 * Reserve the page tables. These are already in use,
1292 * and can only be in node 0.
1293 */
1294 memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1295
1296#ifdef CONFIG_SA1111
1297 /*
1298 * Because of the SA1111 DMA bug, we want to preserve our
1299 * precious DMA-able memory...
1300 */
1301 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1302#endif
1303}
1304
1305/*
1306 * Set up the device mappings. Since we clear out the page tables for all
1307 * mappings above VMALLOC_START, except early fixmap, we might remove debug
1308 * device mappings. This means earlycon can be used to debug this function.
1309 * Any other function or debugging method which may touch any device _will_
1310 * crash the kernel.
1311 */
1312static void __init devicemaps_init(const struct machine_desc *mdesc)
1313{
1314 struct map_desc map;
1315 unsigned long addr;
1316 void *vectors;
1317
1318 /*
1319 * Allocate the vector page early.
1320 */
1321 vectors = early_alloc(PAGE_SIZE * 2);
1322
1323 early_trap_init(vectors);
1324
1325 /*
1326 * Clear page table except top pmd used by early fixmaps
1327 */
1328 for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
1329 pmd_clear(pmd_off_k(addr));
1330
1331 /*
1332 * Map the kernel if it is XIP.
1333 * It is always first in the module area.
1334 */
1335#ifdef CONFIG_XIP_KERNEL
1336 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1337 map.virtual = MODULES_VADDR;
1338 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1339 map.type = MT_ROM;
1340 create_mapping(&map);
1341#endif
1342
1343 /*
1344 * Map the cache flushing regions.
1345 */
1346#ifdef FLUSH_BASE
1347 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1348 map.virtual = FLUSH_BASE;
1349 map.length = SZ_1M;
1350 map.type = MT_CACHECLEAN;
1351 create_mapping(&map);
1352#endif
1353#ifdef FLUSH_BASE_MINICACHE
1354 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1355 map.virtual = FLUSH_BASE_MINICACHE;
1356 map.length = SZ_1M;
1357 map.type = MT_MINICLEAN;
1358 create_mapping(&map);
1359#endif
1360
1361 /*
1362 * Create a mapping for the machine vectors at the high-vectors
1363 * location (0xffff0000). If we aren't using high-vectors, also
1364 * create a mapping at the low-vectors virtual address.
1365 */
1366 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1367 map.virtual = 0xffff0000;
1368 map.length = PAGE_SIZE;
1369#ifdef CONFIG_KUSER_HELPERS
1370 map.type = MT_HIGH_VECTORS;
1371#else
1372 map.type = MT_LOW_VECTORS;
1373#endif
1374 create_mapping(&map);
1375
1376 if (!vectors_high()) {
1377 map.virtual = 0;
1378 map.length = PAGE_SIZE * 2;
1379 map.type = MT_LOW_VECTORS;
1380 create_mapping(&map);
1381 }
1382
1383 /* Now create a kernel read-only mapping */
1384 map.pfn += 1;
1385 map.virtual = 0xffff0000 + PAGE_SIZE;
1386 map.length = PAGE_SIZE;
1387 map.type = MT_LOW_VECTORS;
1388 create_mapping(&map);
1389
1390 /*
1391 * Ask the machine support to map in the statically mapped devices.
1392 */
1393 if (mdesc->map_io)
1394 mdesc->map_io();
1395 else
1396 debug_ll_io_init();
1397 fill_pmd_gaps();
1398
1399 /* Reserve fixed i/o space in VMALLOC region */
1400 pci_reserve_io();
1401
1402 /*
1403 * Finally flush the caches and tlb to ensure that we're in a
1404 * consistent state wrt the writebuffer. This also ensures that
1405 * any write-allocated cache lines in the vector page are written
1406 * back. After this point, we can start to touch devices again.
1407 */
1408 local_flush_tlb_all();
1409 flush_cache_all();
1410
1411 /* Enable asynchronous aborts */
1412 early_abt_enable();
1413}
1414
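/*
 * Allocate the PTE tables backing the fixmap region and, when
 * CONFIG_HIGHMEM is enabled, the kmap/pkmap area.
 */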
1415static void __init kmap_init(void)
1416{
1417#ifdef CONFIG_HIGHMEM
1418 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1419 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1420#endif
1421
1422 early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
1423 _PAGE_KERNEL_TABLE);
1424}
1425
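/*
 * Map all lowmem as MT_MEMORY_RW, except the region covering the kernel
 * image (from the rounded-down start of the text to __init_end), which
 * is mapped MT_MEMORY_RWX so it remains executable.
 */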
1426static void __init map_lowmem(void)
1427{
1428 struct memblock_region *reg;
1429#ifdef CONFIG_XIP_KERNEL
1430 phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE);
1431#else
1432 phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
1433#endif
1434 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
1435
1436 /* Map all the lowmem memory banks. */
1437 for_each_memblock(memory, reg) {
1438 phys_addr_t start = reg->base;
1439 phys_addr_t end = start + reg->size;
1440 struct map_desc map;
1441
1442 if (memblock_is_nomap(reg))
1443 continue;
1444
1445 if (end > arm_lowmem_limit)
1446 end = arm_lowmem_limit;
1447 if (start >= end)
1448 break;
1449
1450 if (end < kernel_x_start) {
1451 map.pfn = __phys_to_pfn(start);
1452 map.virtual = __phys_to_virt(start);
1453 map.length = end - start;
1454 map.type = MT_MEMORY_RWX;
1455
1456 create_mapping(&map);
1457 } else if (start >= kernel_x_end) {
1458 map.pfn = __phys_to_pfn(start);
1459 map.virtual = __phys_to_virt(start);
1460 map.length = end - start;
1461 map.type = MT_MEMORY_RW;
1462
1463 create_mapping(&map);
1464 } else {
1465 /* This better cover the entire kernel */
1466 if (start < kernel_x_start) {
1467 map.pfn = __phys_to_pfn(start);
1468 map.virtual = __phys_to_virt(start);
1469 map.length = kernel_x_start - start;
1470 map.type = MT_MEMORY_RW;
1471
1472 create_mapping(&map);
1473 }
1474
1475 map.pfn = __phys_to_pfn(kernel_x_start);
1476 map.virtual = __phys_to_virt(kernel_x_start);
1477 map.length = kernel_x_end - kernel_x_start;
1478 map.type = MT_MEMORY_RWX;
1479
1480 create_mapping(&map);
1481
1482 if (kernel_x_end < end) {
1483 map.pfn = __phys_to_pfn(kernel_x_end);
1484 map.virtual = __phys_to_virt(kernel_x_end);
1485 map.length = end - kernel_x_end;
1486 map.type = MT_MEMORY_RW;
1487
1488 create_mapping(&map);
1489 }
1490 }
1491 }
1492}
1493
1494#ifdef CONFIG_ARM_PV_FIXUP
1495extern unsigned long __atags_pointer;
1496typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
1497pgtables_remap lpae_pgtables_remap_asm;
1498
1499/*
1500 * early_paging_init() recreates boot time page table setup, allowing machines
1501 * to switch over to a high (>4G) address space on LPAE systems
1502 */
1503void __init early_paging_init(const struct machine_desc *mdesc)
1504{
1505 pgtables_remap *lpae_pgtables_remap;
1506 unsigned long pa_pgd;
1507 unsigned int cr, ttbcr;
1508 long long offset;
1509 void *boot_data;
1510
1511 if (!mdesc->pv_fixup)
1512 return;
1513
1514 offset = mdesc->pv_fixup();
1515 if (offset == 0)
1516 return;
1517
1518 /*
1519 * Get the address of the remap function in the 1:1 identity
1520 * mapping setup by the early page table assembly code. We
1521 * must get this prior to the pv update. The following barrier
1522 * ensures that this is complete before we fixup any P:V offsets.
1523 */
1524 lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
1525 pa_pgd = __pa(swapper_pg_dir);
1526 boot_data = __va(__atags_pointer);
1527 barrier();
1528
1529 pr_info("Switching physical address space to 0x%08llx\n",
1530 (u64)PHYS_OFFSET + offset);
1531
1532 /* Re-set the phys pfn offset, and the pv offset */
1533 __pv_offset += offset;
1534 __pv_phys_pfn_offset += PFN_DOWN(offset);
1535
1536 /* Run the patch stub to update the constants */
1537 fixup_pv_table(&__pv_table_begin,
1538 (&__pv_table_end - &__pv_table_begin) << 2);
1539
1540 /*
1541 * We are changing not only the virtual to physical mapping, but also
1542 * the physical addresses used to access memory. We need to flush
1543 * all levels of cache in the system with caching disabled to
1544 * ensure that all data is written back, and nothing is prefetched
1545 * into the caches. We also need to prevent the TLB walkers
1546 * allocating into the caches too. Note that this is ARMv7 LPAE
1547 * specific.
1548 */
1549 cr = get_cr();
1550 set_cr(cr & ~(CR_I | CR_C));
1551 asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
1552 asm volatile("mcr p15, 0, %0, c2, c0, 2"
1553 : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
1554 flush_cache_all();
1555
1556 /*
1557 * Fixup the page tables - this must be in the idmap region as
1558 * we need to disable the MMU to do this safely, and hence it
1559 * needs to be assembly. It's fairly simple, as we're using the
1560 * temporary tables setup by the initial assembly code.
1561 */
1562 lpae_pgtables_remap(offset, pa_pgd, boot_data);
1563
1564 /* Re-enable the caches and cacheable TLB walks */
1565 asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
1566 set_cr(cr);
1567}
1568
1569#else
1570
1571void __init early_paging_init(const struct machine_desc *mdesc)
1572{
1573 long long offset;
1574
1575 if (!mdesc->pv_fixup)
1576 return;
1577
1578 offset = mdesc->pv_fixup();
1579 if (offset == 0)
1580 return;
1581
1582 pr_crit("Physical address space modification is only to support Keystone2.\n");
1583 pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
1584 pr_crit("feature. Your kernel may crash now, have a good day.\n");
1585 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1586}
1587
1588#endif
1589
1590static void __init early_fixmap_shutdown(void)
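/*
 * Retire the early fixmap: switch to the normal page tables and
 * re-create any fixmap device mappings that are still in use.
 */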
1591{
1592 int i;
1593 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
1594
1595 pte_offset_fixmap = pte_offset_late_fixmap;
1596 pmd_clear(fixmap_pmd(va));
1597 local_flush_tlb_kernel_page(va);
1598
1599 for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
1600 pte_t *pte;
1601 struct map_desc map;
1602
1603 map.virtual = fix_to_virt(i);
1604 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
1605
1606 /* Only i/o device mappings are supported ATM */
1607 if (pte_none(*pte) ||
1608 (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
1609 continue;
1610
1611 map.pfn = pte_pfn(*pte);
1612 map.type = MT_DEVICE;
1613 map.length = PAGE_SIZE;
1614
1615 create_mapping(&map);
1616 }
1617}
1618
1619/*
1620 * paging_init() sets up the page tables, initialises the zone memory
1621 * maps, and sets up the zero page, bad page and bad page tables.
1622 */
1623void __init paging_init(const struct machine_desc *mdesc)
1624{
1625 void *zero_page;
1626
1627 build_mem_type_table();
1628 prepare_page_table();
1629 map_lowmem();
1630 memblock_set_current_limit(arm_lowmem_limit);
1631 dma_contiguous_remap();
1632 early_fixmap_shutdown();
1633 devicemaps_init(mdesc);
1634 kmap_init();
1635 tcm_init();
1636
1637 top_pmd = pmd_off_k(0xffff0000);
1638
1639 /* allocate the zero page. */
1640 zero_page = early_alloc(PAGE_SIZE);
1641
1642 bootmem_init();
1643
1644 empty_zero_page = virt_to_page(zero_page);
1645 __flush_dcache_page(NULL, empty_zero_page);
1646}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/arch/arm/mm/mmu.c
4 *
5 * Copyright (C) 1995-2005 Russell King
6 */
7#include <linux/module.h>
8#include <linux/kernel.h>
9#include <linux/errno.h>
10#include <linux/init.h>
11#include <linux/mman.h>
12#include <linux/nodemask.h>
13#include <linux/memblock.h>
14#include <linux/fs.h>
15#include <linux/vmalloc.h>
16#include <linux/sizes.h>
17
18#include <asm/cp15.h>
19#include <asm/cputype.h>
20#include <asm/sections.h>
21#include <asm/cachetype.h>
22#include <asm/fixmap.h>
23#include <asm/sections.h>
24#include <asm/setup.h>
25#include <asm/smp_plat.h>
26#include <asm/tlb.h>
27#include <asm/highmem.h>
28#include <asm/system_info.h>
29#include <asm/traps.h>
30#include <asm/procinfo.h>
31#include <asm/memory.h>
32#include <asm/pgalloc.h>
33
34#include <asm/mach/arch.h>
35#include <asm/mach/map.h>
36#include <asm/mach/pci.h>
37#include <asm/fixmap.h>
38
39#include "fault.h"
40#include "mm.h"
41#include "tcm.h"
42
43/*
44 * empty_zero_page is a special page that is used for
45 * zero-initialized data and COW.
46 */
47struct page *empty_zero_page;
48EXPORT_SYMBOL(empty_zero_page);
49
50/*
51 * The pmd table for the upper-most set of pages.
52 */
53pmd_t *top_pmd;
54
55pmdval_t user_pmd_table = _PAGE_USER_TABLE;
56
57#define CPOLICY_UNCACHED 0
58#define CPOLICY_BUFFERED 1
59#define CPOLICY_WRITETHROUGH 2
60#define CPOLICY_WRITEBACK 3
61#define CPOLICY_WRITEALLOC 4
62
63static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
64static unsigned int ecc_mask __initdata = 0;
65pgprot_t pgprot_user;
66pgprot_t pgprot_kernel;
67
68EXPORT_SYMBOL(pgprot_user);
69EXPORT_SYMBOL(pgprot_kernel);
70
71struct cachepolicy {
72 const char policy[16];
73 unsigned int cr_mask;
74 pmdval_t pmd;
75 pteval_t pte;
76};
77
78static struct cachepolicy cache_policies[] __initdata = {
79 {
80 .policy = "uncached",
81 .cr_mask = CR_W|CR_C,
82 .pmd = PMD_SECT_UNCACHED,
83 .pte = L_PTE_MT_UNCACHED,
84 }, {
85 .policy = "buffered",
86 .cr_mask = CR_C,
87 .pmd = PMD_SECT_BUFFERED,
88 .pte = L_PTE_MT_BUFFERABLE,
89 }, {
90 .policy = "writethrough",
91 .cr_mask = 0,
92 .pmd = PMD_SECT_WT,
93 .pte = L_PTE_MT_WRITETHROUGH,
94 }, {
95 .policy = "writeback",
96 .cr_mask = 0,
97 .pmd = PMD_SECT_WB,
98 .pte = L_PTE_MT_WRITEBACK,
99 }, {
100 .policy = "writealloc",
101 .cr_mask = 0,
102 .pmd = PMD_SECT_WBWA,
103 .pte = L_PTE_MT_WRITEALLOC,
104 }
105};
106
107#ifdef CONFIG_CPU_CP15
108static unsigned long initial_pmd_value __initdata = 0;
109
110/*
111 * Initialise the cache_policy variable with the initial state specified
112 * via the "pmd" value. This is used to ensure that on ARMv6 and later,
113 * the C code sets the page tables up with the same policy as the head
114 * assembly code, which avoids an illegal state where the TLBs can get
115 * confused. See comments in early_cachepolicy() for more information.
116 */
117void __init init_default_cache_policy(unsigned long pmd)
118{
119 int i;
120
121 initial_pmd_value = pmd;
122
123 pmd &= PMD_SECT_CACHE_MASK;
124
125 for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
126 if (cache_policies[i].pmd == pmd) {
127 cachepolicy = i;
128 break;
129 }
130
131 if (i == ARRAY_SIZE(cache_policies))
132 pr_err("ERROR: could not find cache policy\n");
133}
134
135/*
136 * These are useful for identifying cache coherency problems by allowing
137 * the cache or the cache and writebuffer to be turned off. (Note: the
138 * write buffer should not be on and the cache off).
139 */
140static int __init early_cachepolicy(char *p)
141{
142 int i, selected = -1;
143
144 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
145 int len = strlen(cache_policies[i].policy);
146
147 if (memcmp(p, cache_policies[i].policy, len) == 0) {
148 selected = i;
149 break;
150 }
151 }
152
153 if (selected == -1)
154 pr_err("ERROR: unknown or unsupported cache policy\n");
155
156 /*
157 * This restriction is partly to do with the way we boot; it is
158 * unpredictable to have memory mapped using two different sets of
159 * memory attributes (shared, type, and cache attribs). We can not
160 * change these attributes once the initial assembly has setup the
161 * page tables.
162 */
163 if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
164 pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
165 cache_policies[cachepolicy].policy);
166 return 0;
167 }
168
169 if (selected != cachepolicy) {
170 unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
171 cachepolicy = selected;
172 flush_cache_all();
173 set_cr(cr);
174 }
175 return 0;
176}
177early_param("cachepolicy", early_cachepolicy);
178
179static int __init early_nocache(char *__unused)
180{
181 char *p = "buffered";
182 pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
183 early_cachepolicy(p);
184 return 0;
185}
186early_param("nocache", early_nocache);
187
188static int __init early_nowrite(char *__unused)
189{
190 char *p = "uncached";
191 pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
192 early_cachepolicy(p);
193 return 0;
194}
195early_param("nowb", early_nowrite);
196
197#ifndef CONFIG_ARM_LPAE
198static int __init early_ecc(char *p)
199{
200 if (memcmp(p, "on", 2) == 0)
201 ecc_mask = PMD_PROTECTION;
202 else if (memcmp(p, "off", 3) == 0)
203 ecc_mask = 0;
204 return 0;
205}
206early_param("ecc", early_ecc);
207#endif
208
209#else /* ifdef CONFIG_CPU_CP15 */
210
211static int __init early_cachepolicy(char *p)
212{
213 pr_warn("cachepolicy kernel parameter not supported without cp15\n");
214}
215early_param("cachepolicy", early_cachepolicy);
216
217static int __init noalign_setup(char *__unused)
218{
219 pr_warn("noalign kernel parameter not supported without cp15\n");
220}
221__setup("noalign", noalign_setup);
222
223#endif /* ifdef CONFIG_CPU_CP15 / else */
224
225#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
226#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
227#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
228
229static struct mem_type mem_types[] __ro_after_init = {
230 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
231 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
232 L_PTE_SHARED,
233 .prot_l1 = PMD_TYPE_TABLE,
234 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
235 .domain = DOMAIN_IO,
236 },
237 [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
238 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
239 .prot_l1 = PMD_TYPE_TABLE,
240 .prot_sect = PROT_SECT_DEVICE,
241 .domain = DOMAIN_IO,
242 },
243 [MT_DEVICE_CACHED] = { /* ioremap_cache */
244 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
245 .prot_l1 = PMD_TYPE_TABLE,
246 .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
247 .domain = DOMAIN_IO,
248 },
249 [MT_DEVICE_WC] = { /* ioremap_wc */
250 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
251 .prot_l1 = PMD_TYPE_TABLE,
252 .prot_sect = PROT_SECT_DEVICE,
253 .domain = DOMAIN_IO,
254 },
255 [MT_UNCACHED] = {
256 .prot_pte = PROT_PTE_DEVICE,
257 .prot_l1 = PMD_TYPE_TABLE,
258 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
259 .domain = DOMAIN_IO,
260 },
261 [MT_CACHECLEAN] = {
262 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
263 .domain = DOMAIN_KERNEL,
264 },
265#ifndef CONFIG_ARM_LPAE
266 [MT_MINICLEAN] = {
267 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
268 .domain = DOMAIN_KERNEL,
269 },
270#endif
271 [MT_LOW_VECTORS] = {
272 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
273 L_PTE_RDONLY,
274 .prot_l1 = PMD_TYPE_TABLE,
275 .domain = DOMAIN_VECTORS,
276 },
277 [MT_HIGH_VECTORS] = {
278 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
279 L_PTE_USER | L_PTE_RDONLY,
280 .prot_l1 = PMD_TYPE_TABLE,
281 .domain = DOMAIN_VECTORS,
282 },
283 [MT_MEMORY_RWX] = {
284 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
285 .prot_l1 = PMD_TYPE_TABLE,
286 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
287 .domain = DOMAIN_KERNEL,
288 },
289 [MT_MEMORY_RW] = {
290 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
291 L_PTE_XN,
292 .prot_l1 = PMD_TYPE_TABLE,
293 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
294 .domain = DOMAIN_KERNEL,
295 },
296 [MT_ROM] = {
297 .prot_sect = PMD_TYPE_SECT,
298 .domain = DOMAIN_KERNEL,
299 },
300 [MT_MEMORY_RWX_NONCACHED] = {
301 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
302 L_PTE_MT_BUFFERABLE,
303 .prot_l1 = PMD_TYPE_TABLE,
304 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
305 .domain = DOMAIN_KERNEL,
306 },
307 [MT_MEMORY_RW_DTCM] = {
308 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
309 L_PTE_XN,
310 .prot_l1 = PMD_TYPE_TABLE,
311 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
312 .domain = DOMAIN_KERNEL,
313 },
314 [MT_MEMORY_RWX_ITCM] = {
315 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
316 .prot_l1 = PMD_TYPE_TABLE,
317 .domain = DOMAIN_KERNEL,
318 },
319 [MT_MEMORY_RW_SO] = {
320 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
321 L_PTE_MT_UNCACHED | L_PTE_XN,
322 .prot_l1 = PMD_TYPE_TABLE,
323 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
324 PMD_SECT_UNCACHED | PMD_SECT_XN,
325 .domain = DOMAIN_KERNEL,
326 },
327 [MT_MEMORY_DMA_READY] = {
328 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
329 L_PTE_XN,
330 .prot_l1 = PMD_TYPE_TABLE,
331 .domain = DOMAIN_KERNEL,
332 },
333};
334
335const struct mem_type *get_mem_type(unsigned int type)
336{
337 return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
338}
339EXPORT_SYMBOL(get_mem_type);
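/*
 * Illustrative sketch (assumed usage, not part of the original file):
 * mapping code can look up the attribute set for a mapping type before
 * building its own entries, e.g.
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE_WC);
 *
 *	if (mt)
 *		prot = __pgprot(mt->prot_pte);
 *
 * An out-of-range type returns NULL, so callers must check the result.
 */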
340
341static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
342
343static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
344 __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
345
346static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
347{
348 return &bm_pte[pte_index(addr)];
349}
350
351static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
352{
353 return pte_offset_kernel(dir, addr);
354}
355
356static inline pmd_t * __init fixmap_pmd(unsigned long addr)
357{
358 return pmd_off_k(addr);
359}
360
361void __init early_fixmap_init(void)
362{
363 pmd_t *pmd;
364
365 /*
366 * The early fixmap range spans multiple pmds, for which
367 * we are not prepared:
368 */
369 BUILD_BUG_ON((__fix_to_virt(__end_of_early_ioremap_region) >> PMD_SHIFT)
370 != FIXADDR_TOP >> PMD_SHIFT);
371
372 pmd = fixmap_pmd(FIXADDR_TOP);
373 pmd_populate_kernel(&init_mm, pmd, bm_pte);
374
375 pte_offset_fixmap = pte_offset_early_fixmap;
376}
377
378/*
379 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
380 * As a result, this can only be called with preemption disabled, as under
381 * stop_machine().
382 */
383void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
384{
385 unsigned long vaddr = __fix_to_virt(idx);
386 pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
387
388 /* Make sure fixmap region does not exceed available allocation. */
389 BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
390 FIXADDR_END);
391 BUG_ON(idx >= __end_of_fixed_addresses);
392
393 /* we only support device mappings until pgprot_kernel has been set */
394 if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) &&
395 pgprot_val(pgprot_kernel) == 0))
396 return;
397
398 if (pgprot_val(prot))
399 set_pte_at(NULL, vaddr, pte,
400 pfn_pte(phys >> PAGE_SHIFT, prot));
401 else
402 pte_clear(NULL, vaddr, pte);
403 local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
404}
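/*
 * Illustrative note (not part of the original file): a caller installs a
 * mapping by passing a non-empty pgprot and tears it down again with an
 * empty one, e.g. for some slot 'idx' from enum fixed_addresses:
 *
 *	__set_fixmap(idx, page_to_phys(page), FIXMAP_PAGE_IO);
 *	...
 *	__set_fixmap(idx, 0, __pgprot(0));
 *
 * Only the local TLB is flushed, hence the preemption requirement above.
 */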
405
406/*
407 * Adjust the PMD section entries according to the CPU in use.
408 */
409static void __init build_mem_type_table(void)
410{
411 struct cachepolicy *cp;
412 unsigned int cr = get_cr();
413 pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
414 int cpu_arch = cpu_architecture();
415 int i;
416
417 if (cpu_arch < CPU_ARCH_ARMv6) {
418#if defined(CONFIG_CPU_DCACHE_DISABLE)
419 if (cachepolicy > CPOLICY_BUFFERED)
420 cachepolicy = CPOLICY_BUFFERED;
421#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
422 if (cachepolicy > CPOLICY_WRITETHROUGH)
423 cachepolicy = CPOLICY_WRITETHROUGH;
424#endif
425 }
426 if (cpu_arch < CPU_ARCH_ARMv5) {
427 if (cachepolicy >= CPOLICY_WRITEALLOC)
428 cachepolicy = CPOLICY_WRITEBACK;
429 ecc_mask = 0;
430 }
431
432 if (is_smp()) {
433 if (cachepolicy != CPOLICY_WRITEALLOC) {
434 pr_warn("Forcing write-allocate cache policy for SMP\n");
435 cachepolicy = CPOLICY_WRITEALLOC;
436 }
437 if (!(initial_pmd_value & PMD_SECT_S)) {
438 pr_warn("Forcing shared mappings for SMP\n");
439 initial_pmd_value |= PMD_SECT_S;
440 }
441 }
442
443 /*
444 * Strip out features not present on earlier architectures.
445 * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
446 * without extended page tables don't have the 'Shared' bit.
447 */
448 if (cpu_arch < CPU_ARCH_ARMv5)
449 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
450 mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
451 if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
452 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
453 mem_types[i].prot_sect &= ~PMD_SECT_S;
454
455 /*
456 * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
457 * "update-able on write" bit on ARM610). However, Xscale and
458 * Xscale3 require this bit to be cleared.
459 */
460 if (cpu_is_xscale_family()) {
461 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
462 mem_types[i].prot_sect &= ~PMD_BIT4;
463 mem_types[i].prot_l1 &= ~PMD_BIT4;
464 }
465 } else if (cpu_arch < CPU_ARCH_ARMv6) {
466 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
467 if (mem_types[i].prot_l1)
468 mem_types[i].prot_l1 |= PMD_BIT4;
469 if (mem_types[i].prot_sect)
470 mem_types[i].prot_sect |= PMD_BIT4;
471 }
472 }
473
474 /*
475 * Mark the device areas according to the CPU/architecture.
476 */
477 if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
478 if (!cpu_is_xsc3()) {
479 /*
480 * Mark device regions on ARMv6+ as execute-never
481 * to prevent speculative instruction fetches.
482 */
483 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
484 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
485 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
486 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
487
488 /* Also set up the NX memory mapping */
489 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
490 }
491 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
492 /*
493 * For ARMv7 with TEX remapping,
494 * - shared device is SXCB=1100
495 * - nonshared device is SXCB=0100
496 * - write combine device mem is SXCB=0001
497 * (Uncached Normal memory)
498 */
499 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
500 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
501 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
502 } else if (cpu_is_xsc3()) {
503 /*
504 * For Xscale3,
505 * - shared device is TEXCB=00101
506 * - nonshared device is TEXCB=01000
507 * - write combine device mem is TEXCB=00100
508 * (Inner/Outer Uncacheable in xsc3 parlance)
509 */
510 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
511 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
512 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
513 } else {
514 /*
515 * For ARMv6 and ARMv7 without TEX remapping,
516 * - shared device is TEXCB=00001
517 * - nonshared device is TEXCB=01000
518 * - write combine device mem is TEXCB=00100
519 * (Uncached Normal in ARMv6 parlance).
520 */
521 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
522 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
523 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
524 }
525 } else {
526 /*
527 * On others, write combining is "Uncached/Buffered"
528 */
529 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
530 }
531
532 /*
533 * Now deal with the memory-type mappings
534 */
535 cp = &cache_policies[cachepolicy];
536 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
537
538#ifndef CONFIG_ARM_LPAE
539 /*
540 * We don't use domains on ARMv6 (since this causes problems with
541 * v6/v7 kernels), so we must use a separate memory type for user
542 * r/o, kernel r/w to map the vectors page.
543 */
544 if (cpu_arch == CPU_ARCH_ARMv6)
545 vecs_pgprot |= L_PTE_MT_VECTORS;
546
547 /*
548 * Check whether the CPU supports the PXN bit in the
549 * Short-descriptor translation table format descriptors.
550 */
551 if (cpu_arch == CPU_ARCH_ARMv7 &&
552 (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
553 user_pmd_table |= PMD_PXNTABLE;
554 }
555#endif
556
557 /*
558 * ARMv6 and above have extended page tables.
559 */
560 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
561#ifndef CONFIG_ARM_LPAE
562 /*
563 * Mark cache clean areas and XIP ROM read only
564 * from SVC mode and no access from userspace.
565 */
566 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
567 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
568 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
569#endif
570
571 /*
572 * If the initial page tables were created with the S bit
573 * set, then we need to do the same here for the same
574 * reasons given in early_cachepolicy().
575 */
576 if (initial_pmd_value & PMD_SECT_S) {
577 user_pgprot |= L_PTE_SHARED;
578 kern_pgprot |= L_PTE_SHARED;
579 vecs_pgprot |= L_PTE_SHARED;
580 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
581 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
582 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
583 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
584 mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
585 mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
586 mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
587 mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
588 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
589 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
590 mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
591 }
592 }
593
594 /*
595 * Non-cacheable Normal - intended for memory areas that must
596 * not cause dirty cache line writebacks when used
597 */
598 if (cpu_arch >= CPU_ARCH_ARMv6) {
599 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
600 /* Non-cacheable Normal is XCB = 001 */
601 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
602 PMD_SECT_BUFFERED;
603 } else {
604 /* For both ARMv6 and non-TEX-remapping ARMv7 */
605 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
606 PMD_SECT_TEX(1);
607 }
608 } else {
609 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
610 }
611
612#ifdef CONFIG_ARM_LPAE
613 /*
614 * Do not generate access flag faults for the kernel mappings.
615 */
616 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
617 mem_types[i].prot_pte |= PTE_EXT_AF;
618 if (mem_types[i].prot_sect)
619 mem_types[i].prot_sect |= PMD_SECT_AF;
620 }
621 kern_pgprot |= PTE_EXT_AF;
622 vecs_pgprot |= PTE_EXT_AF;
623
624 /*
625 * Set PXN for user mappings
626 */
627 user_pgprot |= PTE_EXT_PXN;
628#endif
629
630 for (i = 0; i < 16; i++) {
631 pteval_t v = pgprot_val(protection_map[i]);
632 protection_map[i] = __pgprot(v | user_pgprot);
633 }
634
635 mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
636 mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
637
638 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
639 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
640 L_PTE_DIRTY | kern_pgprot);
641
642 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
643 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
644 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
645 mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
646 mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
647 mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
648 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
649 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
650 mem_types[MT_ROM].prot_sect |= cp->pmd;
651
652 switch (cp->pmd) {
653 case PMD_SECT_WT:
654 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
655 break;
656 case PMD_SECT_WB:
657 case PMD_SECT_WBWA:
658 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
659 break;
660 }
661 pr_info("Memory policy: %sData cache %s\n",
662 ecc_mask ? "ECC enabled, " : "", cp->policy);
663
664 for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
665 struct mem_type *t = &mem_types[i];
666 if (t->prot_l1)
667 t->prot_l1 |= PMD_DOMAIN(t->domain);
668 if (t->prot_sect)
669 t->prot_sect |= PMD_DOMAIN(t->domain);
670 }
671}
672
673#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
674pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
675 unsigned long size, pgprot_t vma_prot)
676{
677 if (!pfn_valid(pfn))
678 return pgprot_noncached(vma_prot);
679 else if (file->f_flags & O_SYNC)
680 return pgprot_writecombine(vma_prot);
681 return vma_prot;
682}
683EXPORT_SYMBOL(phys_mem_access_prot);
684#endif
685
686#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
687
688static void __init *early_alloc(unsigned long sz)
689{
690 void *ptr = memblock_alloc(sz, sz);
691
692 if (!ptr)
693 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
694 __func__, sz, sz);
695
696 return ptr;
697}
698
699static void *__init late_alloc(unsigned long sz)
700{
701 void *ptr = (void *)__get_free_pages(GFP_PGTABLE_KERNEL, get_order(sz));
702
703 if (!ptr || !pgtable_pte_page_ctor(virt_to_page(ptr)))
704 BUG();
705 return ptr;
706}
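/*
 * Illustrative note (not part of the original file): two allocators back
 * the page table code depending on the boot phase. early_alloc() hands out
 * zeroed memblock memory before the page allocator is up and panics on
 * failure, while late_alloc() is used via create_mapping_late() once the
 * buddy allocator works and also runs the PTE page constructor.
 */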
707
708static pte_t * __init arm_pte_alloc(pmd_t *pmd, unsigned long addr,
709 unsigned long prot,
710 void *(*alloc)(unsigned long sz))
711{
712 if (pmd_none(*pmd)) {
713 pte_t *pte = alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
714 __pmd_populate(pmd, __pa(pte), prot);
715 }
716 BUG_ON(pmd_bad(*pmd));
717 return pte_offset_kernel(pmd, addr);
718}
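/*
 * Illustrative note (an assumption based on pgtable-2level.h, not stated
 * here): in the classic 2-level format the PTE_HWTABLE_OFF +
 * PTE_HWTABLE_SIZE allocation is a single page holding the Linux view of
 * the PTEs followed by the hardware L2 tables the MMU actually walks;
 * __pmd_populate() points the pmd entries at the hardware half. With LPAE
 * there is no separate hardware copy and PTE_HWTABLE_OFF is zero.
 */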
719
720static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr,
721 unsigned long prot)
722{
723 return arm_pte_alloc(pmd, addr, prot, early_alloc);
724}
725
726static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
727 unsigned long end, unsigned long pfn,
728 const struct mem_type *type,
729 void *(*alloc)(unsigned long sz),
730 bool ng)
731{
732 pte_t *pte = arm_pte_alloc(pmd, addr, type->prot_l1, alloc);
733 do {
734 set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)),
735 ng ? PTE_EXT_NG : 0);
736 pfn++;
737 } while (pte++, addr += PAGE_SIZE, addr != end);
738}
739
740static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
741 unsigned long end, phys_addr_t phys,
742 const struct mem_type *type, bool ng)
743{
744 pmd_t *p = pmd;
745
746#ifndef CONFIG_ARM_LPAE
747 /*
748 * In classic MMU format, puds and pmds are folded into
749 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
750 * group of L1 entries making up one logical pointer to
751 * an L2 table (2MB), whereas PMDs refer to the individual
752 * L1 entries (1MB). Hence increment to get the correct
753 * offset for odd 1MB sections.
754 * (See arch/arm/include/asm/pgtable-2level.h)
755 */
756 if (addr & SECTION_SIZE)
757 pmd++;
758#endif
759 do {
760 *pmd = __pmd(phys | type->prot_sect | (ng ? PMD_SECT_nG : 0));
761 phys += SECTION_SIZE;
762 } while (pmd++, addr += SECTION_SIZE, addr != end);
763
764 flush_pmd_entry(p);
765}
766
767static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
768 unsigned long end, phys_addr_t phys,
769 const struct mem_type *type,
770 void *(*alloc)(unsigned long sz), bool ng)
771{
772 pmd_t *pmd = pmd_offset(pud, addr);
773 unsigned long next;
774
775 do {
776 /*
777 * With LPAE, we must loop over to map
778 * all the pmds for the given range.
779 */
780 next = pmd_addr_end(addr, end);
781
782 /*
783 * Try a section mapping - addr, next and phys must all be
784 * aligned to a section boundary.
785 */
786 if (type->prot_sect &&
787 ((addr | next | phys) & ~SECTION_MASK) == 0) {
788 __map_init_section(pmd, addr, next, phys, type, ng);
789 } else {
790 alloc_init_pte(pmd, addr, next,
791 __phys_to_pfn(phys), type, alloc, ng);
792 }
793
794 phys += next - addr;
795
796 } while (pmd++, addr = next, addr != end);
797}
798
799static void __init alloc_init_pud(p4d_t *p4d, unsigned long addr,
800 unsigned long end, phys_addr_t phys,
801 const struct mem_type *type,
802 void *(*alloc)(unsigned long sz), bool ng)
803{
804 pud_t *pud = pud_offset(p4d, addr);
805 unsigned long next;
806
807 do {
808 next = pud_addr_end(addr, end);
809 alloc_init_pmd(pud, addr, next, phys, type, alloc, ng);
810 phys += next - addr;
811 } while (pud++, addr = next, addr != end);
812}
813
814static void __init alloc_init_p4d(pgd_t *pgd, unsigned long addr,
815 unsigned long end, phys_addr_t phys,
816 const struct mem_type *type,
817 void *(*alloc)(unsigned long sz), bool ng)
818{
819 p4d_t *p4d = p4d_offset(pgd, addr);
820 unsigned long next;
821
822 do {
823 next = p4d_addr_end(addr, end);
824 alloc_init_pud(p4d, addr, next, phys, type, alloc, ng);
825 phys += next - addr;
826 } while (p4d++, addr = next, addr != end);
827}
828
829#ifndef CONFIG_ARM_LPAE
830static void __init create_36bit_mapping(struct mm_struct *mm,
831 struct map_desc *md,
832 const struct mem_type *type,
833 bool ng)
834{
835 unsigned long addr, length, end;
836 phys_addr_t phys;
837 pgd_t *pgd;
838
839 addr = md->virtual;
840 phys = __pfn_to_phys(md->pfn);
841 length = PAGE_ALIGN(md->length);
842
843 if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
844 pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
845 (long long)__pfn_to_phys((u64)md->pfn), addr);
846 return;
847 }
848
849 /* N.B. ARMv6 supersections are only defined to work with domain 0.
850 * Since domain assignments can in fact be arbitrary, the
851 * 'domain == 0' check below is required to ensure that ARMv6
852 * supersections are only allocated for domain 0 regardless
853 * of the actual domain assignments in use.
854 */
855 if (type->domain) {
856 pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
857 (long long)__pfn_to_phys((u64)md->pfn), addr);
858 return;
859 }
860
861 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
862 pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
863 (long long)__pfn_to_phys((u64)md->pfn), addr);
864 return;
865 }
866
867 /*
868 * Shift bits [35:32] of address into bits [23:20] of PMD
869 * (See ARMv6 spec).
870 */
871 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
872
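	/*
	 * Worked example (illustrative, not part of the original file): with
	 * 4K pages a 36-bit physical address of 0x230000000 gives
	 * md->pfn = 0x230000; (md->pfn >> 20) & 0xF is 0x2, and shifted left
	 * by 20 it contributes 0x00200000, i.e. bits [35:32] of the physical
	 * address land in bits [23:20] of each supersection PMD entry.
	 */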
873 pgd = pgd_offset(mm, addr);
874 end = addr + length;
875 do {
876 p4d_t *p4d = p4d_offset(pgd, addr);
877 pud_t *pud = pud_offset(p4d, addr);
878 pmd_t *pmd = pmd_offset(pud, addr);
879 int i;
880
881 for (i = 0; i < 16; i++)
882 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER |
883 (ng ? PMD_SECT_nG : 0));
884
885 addr += SUPERSECTION_SIZE;
886 phys += SUPERSECTION_SIZE;
887 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
888 } while (addr != end);
889}
890#endif /* !CONFIG_ARM_LPAE */
891
892static void __init __create_mapping(struct mm_struct *mm, struct map_desc *md,
893 void *(*alloc)(unsigned long sz),
894 bool ng)
895{
896 unsigned long addr, length, end;
897 phys_addr_t phys;
898 const struct mem_type *type;
899 pgd_t *pgd;
900
901 type = &mem_types[md->type];
902
903#ifndef CONFIG_ARM_LPAE
904 /*
905 * Catch 36-bit addresses
906 */
907 if (md->pfn >= 0x100000) {
908 create_36bit_mapping(mm, md, type, ng);
909 return;
910 }
911#endif
912
913 addr = md->virtual & PAGE_MASK;
914 phys = __pfn_to_phys(md->pfn);
915 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
916
917 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
918 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
919 (long long)__pfn_to_phys(md->pfn), addr);
920 return;
921 }
922
923 pgd = pgd_offset(mm, addr);
924 end = addr + length;
925 do {
926 unsigned long next = pgd_addr_end(addr, end);
927
928 alloc_init_p4d(pgd, addr, next, phys, type, alloc, ng);
929
930 phys += next - addr;
931 addr = next;
932 } while (pgd++, addr != end);
933}
934
935/*
936 * Create the page directory entries and any necessary
937 * page tables for the mapping specified by `md'. We
938 * are able to cope here with varying sizes and address
939 * offsets, and we take full advantage of sections and
940 * supersections.
941 */
942static void __init create_mapping(struct map_desc *md)
943{
944 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
945 pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
946 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
947 return;
948 }
949
950 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
951 md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
952 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
953 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
954 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
955 }
956
957 __create_mapping(&init_mm, md, early_alloc, false);
958}
959
960void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
961 bool ng)
962{
963#ifdef CONFIG_ARM_LPAE
964 p4d_t *p4d;
965 pud_t *pud;
966
967 p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
968 if (WARN_ON(!p4d))
969 return;
970 pud = pud_alloc(mm, p4d, md->virtual);
971 if (WARN_ON(!pud))
972 return;
973 pmd_alloc(mm, pud, 0);
974#endif
975 __create_mapping(mm, md, late_alloc, ng);
976}
977
978/*
979 * Create the architecture specific mappings
980 */
981void __init iotable_init(struct map_desc *io_desc, int nr)
982{
983 struct map_desc *md;
984 struct vm_struct *vm;
985 struct static_vm *svm;
986
987 if (!nr)
988 return;
989
990 svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));
991 if (!svm)
992 panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
993 __func__, sizeof(*svm) * nr, __alignof__(*svm));
994
995 for (md = io_desc; nr; md++, nr--) {
996 create_mapping(md);
997
998 vm = &svm->vm;
999 vm->addr = (void *)(md->virtual & PAGE_MASK);
1000 vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
1001 vm->phys_addr = __pfn_to_phys(md->pfn);
1002 vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
1003 vm->flags |= VM_ARM_MTYPE(md->type);
1004 vm->caller = iotable_init;
1005 add_static_vm_early(svm++);
1006 }
1007}
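/*
 * Illustrative sketch (hypothetical board code, not part of this file): a
 * machine's ->map_io() callback typically feeds iotable_init() a static
 * array of map_desc entries, e.g.
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf8000000,			// assumed VA
 *			.pfn     = __phys_to_pfn(0x10000000),	// assumed PA
 *			.length  = SZ_1M,
 *			.type    = MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *
 * Each entry is mapped with create_mapping() and registered as a static vm
 * area so the ioremap() code can reuse the mapping later.
 */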
1008
1009void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
1010 void *caller)
1011{
1012 struct vm_struct *vm;
1013 struct static_vm *svm;
1014
1015 svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));
1016 if (!svm)
1017 panic("%s: Failed to allocate %zu bytes align=0x%zx\n",
1018 __func__, sizeof(*svm), __alignof__(*svm));
1019
1020 vm = &svm->vm;
1021 vm->addr = (void *)addr;
1022 vm->size = size;
1023 vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
1024 vm->caller = caller;
1025 add_static_vm_early(svm);
1026}
1027
1028#ifndef CONFIG_ARM_LPAE
1029
1030/*
1031 * The Linux PMD is made of two consecutive section entries covering 2MB
1032 * (see definition in include/asm/pgtable-2level.h). However a call to
1033 * create_mapping() may optimize static mappings by using individual
1034 * 1MB section mappings. This leaves the actual PMD potentially half
1035 * initialized if the top or bottom section entry isn't used, leaving it
1036 * open to problems if a subsequent ioremap() or vmalloc() tries to use
1037 * the virtual space left free by that unused section entry.
1038 *
1039 * Let's avoid the issue by inserting dummy vm entries covering the unused
1040 * PMD halves once the static mappings are in place.
1041 */
1042
1043static void __init pmd_empty_section_gap(unsigned long addr)
1044{
1045 vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
1046}
1047
1048static void __init fill_pmd_gaps(void)
1049{
1050 struct static_vm *svm;
1051 struct vm_struct *vm;
1052 unsigned long addr, next = 0;
1053 pmd_t *pmd;
1054
1055 list_for_each_entry(svm, &static_vmlist, list) {
1056 vm = &svm->vm;
1057 addr = (unsigned long)vm->addr;
1058 if (addr < next)
1059 continue;
1060
1061 /*
1062 * Check if this vm starts on an odd section boundary.
1063 * If so and the first section entry for this PMD is free
1064 * then we block the corresponding virtual address.
1065 */
1066 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1067 pmd = pmd_off_k(addr);
1068 if (pmd_none(*pmd))
1069 pmd_empty_section_gap(addr & PMD_MASK);
1070 }
1071
1072 /*
1073 * Then check if this vm ends on an odd section boundary.
1074 * If so and the second section entry for this PMD is empty
1075 * then we block the corresponding virtual address.
1076 */
1077 addr += vm->size;
1078 if ((addr & ~PMD_MASK) == SECTION_SIZE) {
1079 pmd = pmd_off_k(addr) + 1;
1080 if (pmd_none(*pmd))
1081 pmd_empty_section_gap(addr);
1082 }
1083
1084 /* no need to look at any vm entry until we hit the next PMD */
1085 next = (addr + PMD_SIZE - 1) & PMD_MASK;
1086 }
1087}
1088
1089#else
1090#define fill_pmd_gaps() do { } while (0)
1091#endif
1092
1093#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
1094static void __init pci_reserve_io(void)
1095{
1096 struct static_vm *svm;
1097
1098 svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
1099 if (svm)
1100 return;
1101
1102 vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
1103}
1104#else
1105#define pci_reserve_io() do { } while (0)
1106#endif
1107
1108#ifdef CONFIG_DEBUG_LL
1109void __init debug_ll_io_init(void)
1110{
1111 struct map_desc map;
1112
1113 debug_ll_addr(&map.pfn, &map.virtual);
1114 if (!map.pfn || !map.virtual)
1115 return;
1116 map.pfn = __phys_to_pfn(map.pfn);
1117 map.virtual &= PAGE_MASK;
1118 map.length = PAGE_SIZE;
1119 map.type = MT_DEVICE;
1120 iotable_init(&map, 1);
1121}
1122#endif
1123
1124static void * __initdata vmalloc_min =
1125 (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
1126
1127/*
1128 * vmalloc=size forces the vmalloc area to be exactly 'size'
1129 * bytes. This can be used to increase (or decrease) the vmalloc
1130 * area - the default is 240m.
1131 */
1132static int __init early_vmalloc(char *arg)
1133{
1134 unsigned long vmalloc_reserve = memparse(arg, NULL);
1135
1136 if (vmalloc_reserve < SZ_16M) {
1137 vmalloc_reserve = SZ_16M;
1138 pr_warn("vmalloc area too small, limiting to %luMB\n",
1139 vmalloc_reserve >> 20);
1140 }
1141
1142 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
1143 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
1144 pr_warn("vmalloc area is too big, limiting to %luMB\n",
1145 vmalloc_reserve >> 20);
1146 }
1147
1148 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
1149 return 0;
1150}
1151early_param("vmalloc", early_vmalloc);
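/*
 * Illustrative note (not part of the original file): booting with
 * "vmalloc=512M" requests a 512MB vmalloc area. The value is clamped to at
 * least 16MB and to leave at least 32MB of lowmem above PAGE_OFFSET, and
 * the result simply moves vmalloc_min, and with it the lowmem limit
 * computed in adjust_lowmem_bounds() below.
 */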
1152
1153phys_addr_t arm_lowmem_limit __initdata = 0;
1154
1155void __init adjust_lowmem_bounds(void)
1156{
1157 phys_addr_t memblock_limit = 0;
1158 u64 vmalloc_limit;
1159 struct memblock_region *reg;
1160 phys_addr_t lowmem_limit = 0;
1161
1162 /*
1163 * Let's use our own (unoptimized) equivalent of __pa() that is
1164 * not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
1165 * The result is used as the upper bound on physical memory address
1166 * and may itself be outside the valid range for which phys_addr_t
1167 * and therefore __pa() is defined.
1168 */
1169 vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
1170
1171 /*
1172 * The first usable region must be PMD aligned. Mark its start
1173 * as MEMBLOCK_NOMAP if it isn't.
1174 */
1175 for_each_memblock(memory, reg) {
1176 if (!memblock_is_nomap(reg)) {
1177 if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
1178 phys_addr_t len;
1179
1180 len = round_up(reg->base, PMD_SIZE) - reg->base;
1181 memblock_mark_nomap(reg->base, len);
1182 }
1183 break;
1184 }
1185 }
1186
1187 for_each_memblock(memory, reg) {
1188 phys_addr_t block_start = reg->base;
1189 phys_addr_t block_end = reg->base + reg->size;
1190
1191 if (memblock_is_nomap(reg))
1192 continue;
1193
1194 if (reg->base < vmalloc_limit) {
1195 if (block_end > lowmem_limit)
1196 /*
1197 * Compare as u64 to ensure vmalloc_limit does
1198 * not get truncated. block_end should always
1199 * fit in phys_addr_t so there should be no
1200 * issue with assignment.
1201 */
1202 lowmem_limit = min_t(u64,
1203 vmalloc_limit,
1204 block_end);
1205
1206 /*
1207 * Find the first non-pmd-aligned page, and point
1208 * memblock_limit at it. This relies on rounding the
1209 * limit down to be pmd-aligned, which happens at the
1210 * end of this function.
1211 *
1212 * With this algorithm, the start or end of almost any
1213 * bank can be non-pmd-aligned. The only exception is
1214 * that the start of bank 0 must be section-
1215 * aligned, since otherwise memory would need to be
1216 * allocated when mapping the start of bank 0, which
1217 * occurs before any free memory is mapped.
1218 */
1219 if (!memblock_limit) {
1220 if (!IS_ALIGNED(block_start, PMD_SIZE))
1221 memblock_limit = block_start;
1222 else if (!IS_ALIGNED(block_end, PMD_SIZE))
1223 memblock_limit = lowmem_limit;
1224 }
1225
1226 }
1227 }
1228
1229 arm_lowmem_limit = lowmem_limit;
1230
1231 high_memory = __va(arm_lowmem_limit - 1) + 1;
1232
1233 if (!memblock_limit)
1234 memblock_limit = arm_lowmem_limit;
1235
1236 /*
1237 * Round the memblock limit down to a pmd size. This
1238 * helps to ensure that we will allocate memory from the
1239 * last full pmd, which should be mapped.
1240 */
1241 memblock_limit = round_down(memblock_limit, PMD_SIZE);
1242
1243 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
1244 if (memblock_end_of_DRAM() > arm_lowmem_limit) {
1245 phys_addr_t end = memblock_end_of_DRAM();
1246
1247 pr_notice("Ignoring RAM at %pa-%pa\n",
1248 &memblock_limit, &end);
1249 pr_notice("Consider using a HIGHMEM enabled kernel.\n");
1250
1251 memblock_remove(memblock_limit, end - memblock_limit);
1252 }
1253 }
1254
1255 memblock_set_current_limit(memblock_limit);
1256}
1257
1258static inline void prepare_page_table(void)
1259{
1260 unsigned long addr;
1261 phys_addr_t end;
1262
1263 /*
1264 * Clear out all the mappings below the kernel image.
1265 */
1266 for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
1267 pmd_clear(pmd_off_k(addr));
1268
1269#ifdef CONFIG_XIP_KERNEL
1270 /* The XIP kernel is mapped in the module area -- skip over it */
1271 addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;
1272#endif
1273 for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
1274 pmd_clear(pmd_off_k(addr));
1275
1276 /*
1277 * Find the end of the first block of lowmem.
1278 */
1279 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
1280 if (end >= arm_lowmem_limit)
1281 end = arm_lowmem_limit;
1282
1283 /*
1284 * Clear out all the kernel space mappings, except for the first
1285 * memory bank, up to the vmalloc region.
1286 */
1287 for (addr = __phys_to_virt(end);
1288 addr < VMALLOC_START; addr += PMD_SIZE)
1289 pmd_clear(pmd_off_k(addr));
1290}
1291
1292#ifdef CONFIG_ARM_LPAE
1293/* the first page is reserved for pgd */
1294#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
1295 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1296#else
1297#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
1298#endif
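/*
 * Illustrative note (an assumption about the usual configuration, not
 * stated here): for the classic format this is the 16KB L1 table
 * (2048 pgd entries of 8 bytes each); with LPAE it is the 4KB pgd page
 * plus 4 * 512 pmd entries of 8 bytes, i.e. 20KB in total.
 */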
1299
1300/*
1301 * Reserve the special regions of memory
1302 */
1303void __init arm_mm_memblock_reserve(void)
1304{
1305 /*
1306 * Reserve the page tables. These are already in use,
1307 * and can only be in node 0.
1308 */
1309 memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
1310
1311#ifdef CONFIG_SA1111
1312 /*
1313 * Because of the SA1111 DMA bug, we want to preserve our
1314 * precious DMA-able memory...
1315 */
1316 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
1317#endif
1318}
1319
1320/*
1321 * Set up the device mappings. Since we clear out the page tables for all
1322 * mappings above VMALLOC_START, except early fixmap, we might remove debug
1323 * device mappings. This means earlycon can be used to debug this function.
1324 * Any other function or debugging method which may touch any device _will_
1325 * crash the kernel.
1326 */
1327static void __init devicemaps_init(const struct machine_desc *mdesc)
1328{
1329 struct map_desc map;
1330 unsigned long addr;
1331 void *vectors;
1332
1333 /*
1334 * Allocate the vector page early.
1335 */
1336 vectors = early_alloc(PAGE_SIZE * 2);
1337
1338 early_trap_init(vectors);
1339
1340 /*
1341 * Clear page table except top pmd used by early fixmaps
1342 */
1343 for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
1344 pmd_clear(pmd_off_k(addr));
1345
1346 /*
1347 * Map the kernel if it is XIP.
1348 * It is always first in the modulearea.
1349 * It is always first in the module area.
1350#ifdef CONFIG_XIP_KERNEL
1351 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
1352 map.virtual = MODULES_VADDR;
1353 map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
1354 map.type = MT_ROM;
1355 create_mapping(&map);
1356#endif
1357
1358 /*
1359 * Map the cache flushing regions.
1360 */
1361#ifdef FLUSH_BASE
1362 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1363 map.virtual = FLUSH_BASE;
1364 map.length = SZ_1M;
1365 map.type = MT_CACHECLEAN;
1366 create_mapping(&map);
1367#endif
1368#ifdef FLUSH_BASE_MINICACHE
1369 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1370 map.virtual = FLUSH_BASE_MINICACHE;
1371 map.length = SZ_1M;
1372 map.type = MT_MINICLEAN;
1373 create_mapping(&map);
1374#endif
1375
1376 /*
1377 * Create a mapping for the machine vectors at the high-vectors
1378 * location (0xffff0000). If we aren't using high-vectors, also
1379 * create a mapping at the low-vectors virtual address.
1380 */
1381 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
1382 map.virtual = 0xffff0000;
1383 map.length = PAGE_SIZE;
1384#ifdef CONFIG_KUSER_HELPERS
1385 map.type = MT_HIGH_VECTORS;
1386#else
1387 map.type = MT_LOW_VECTORS;
1388#endif
1389 create_mapping(&map);
1390
1391 if (!vectors_high()) {
1392 map.virtual = 0;
1393 map.length = PAGE_SIZE * 2;
1394 map.type = MT_LOW_VECTORS;
1395 create_mapping(&map);
1396 }
1397
1398 /* Now create a kernel read-only mapping */
1399 map.pfn += 1;
1400 map.virtual = 0xffff0000 + PAGE_SIZE;
1401 map.length = PAGE_SIZE;
1402 map.type = MT_LOW_VECTORS;
1403 create_mapping(&map);
1404
1405 /*
1406 * Ask the machine support to map in the statically mapped devices.
1407 */
1408 if (mdesc->map_io)
1409 mdesc->map_io();
1410 else
1411 debug_ll_io_init();
1412 fill_pmd_gaps();
1413
1414 /* Reserve fixed i/o space in VMALLOC region */
1415 pci_reserve_io();
1416
1417 /*
1418 * Finally flush the caches and tlb to ensure that we're in a
1419 * consistent state wrt the writebuffer. This also ensures that
1420 * any write-allocated cache lines in the vector page are written
1421 * back. After this point, we can start to touch devices again.
1422 */
1423 local_flush_tlb_all();
1424 flush_cache_all();
1425
1426 /* Enable asynchronous aborts */
1427 early_abt_enable();
1428}
1429
1430static void __init kmap_init(void)
1431{
1432#ifdef CONFIG_HIGHMEM
1433 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1434 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1435#endif
1436
1437 early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
1438 _PAGE_KERNEL_TABLE);
1439}
1440
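/*
 * Illustrative summary (not part of the original file): map_lowmem() below
 * maps all usable lowmem RAM. The range covering the kernel image up to
 * __init_end is mapped MT_MEMORY_RWX so the text stays executable, while
 * the rest of lowmem is mapped MT_MEMORY_RW, which on ARMv6 and later
 * carries the execute-never attribute.
 */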
1441static void __init map_lowmem(void)
1442{
1443 struct memblock_region *reg;
1444 phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
1445 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
1446
1447 /* Map all the lowmem memory banks. */
1448 for_each_memblock(memory, reg) {
1449 phys_addr_t start = reg->base;
1450 phys_addr_t end = start + reg->size;
1451 struct map_desc map;
1452
1453 if (memblock_is_nomap(reg))
1454 continue;
1455
1456 if (end > arm_lowmem_limit)
1457 end = arm_lowmem_limit;
1458 if (start >= end)
1459 break;
1460
1461 if (end < kernel_x_start) {
1462 map.pfn = __phys_to_pfn(start);
1463 map.virtual = __phys_to_virt(start);
1464 map.length = end - start;
1465 map.type = MT_MEMORY_RWX;
1466
1467 create_mapping(&map);
1468 } else if (start >= kernel_x_end) {
1469 map.pfn = __phys_to_pfn(start);
1470 map.virtual = __phys_to_virt(start);
1471 map.length = end - start;
1472 map.type = MT_MEMORY_RW;
1473
1474 create_mapping(&map);
1475 } else {
1476 /* This better cover the entire kernel */
1477 if (start < kernel_x_start) {
1478 map.pfn = __phys_to_pfn(start);
1479 map.virtual = __phys_to_virt(start);
1480 map.length = kernel_x_start - start;
1481 map.type = MT_MEMORY_RW;
1482
1483 create_mapping(&map);
1484 }
1485
1486 map.pfn = __phys_to_pfn(kernel_x_start);
1487 map.virtual = __phys_to_virt(kernel_x_start);
1488 map.length = kernel_x_end - kernel_x_start;
1489 map.type = MT_MEMORY_RWX;
1490
1491 create_mapping(&map);
1492
1493 if (kernel_x_end < end) {
1494 map.pfn = __phys_to_pfn(kernel_x_end);
1495 map.virtual = __phys_to_virt(kernel_x_end);
1496 map.length = end - kernel_x_end;
1497 map.type = MT_MEMORY_RW;
1498
1499 create_mapping(&map);
1500 }
1501 }
1502 }
1503}
1504
1505#ifdef CONFIG_ARM_PV_FIXUP
1506extern unsigned long __atags_pointer;
1507typedef void pgtables_remap(long long offset, unsigned long pgd, void *bdata);
1508pgtables_remap lpae_pgtables_remap_asm;
1509
1510/*
1511 * early_paging_init() recreates boot time page table setup, allowing machines
1512 * to switch over to a high (>4G) address space on LPAE systems.
1513 */
1514static void __init early_paging_init(const struct machine_desc *mdesc)
1515{
1516 pgtables_remap *lpae_pgtables_remap;
1517 unsigned long pa_pgd;
1518 unsigned int cr, ttbcr;
1519 long long offset;
1520 void *boot_data;
1521
1522 if (!mdesc->pv_fixup)
1523 return;
1524
1525 offset = mdesc->pv_fixup();
1526 if (offset == 0)
1527 return;
1528
1529 /*
1530 * Get the address of the remap function in the 1:1 identity
1531 * mapping set up by the early page table assembly code. We
1532 * must get this prior to the pv update. The following barrier
1533 * ensures that this is complete before we fixup any P:V offsets.
1534 */
1535 lpae_pgtables_remap = (pgtables_remap *)(unsigned long)__pa(lpae_pgtables_remap_asm);
1536 pa_pgd = __pa(swapper_pg_dir);
1537 boot_data = __va(__atags_pointer);
1538 barrier();
1539
1540 pr_info("Switching physical address space to 0x%08llx\n",
1541 (u64)PHYS_OFFSET + offset);
1542
1543 /* Re-set the phys pfn offset, and the pv offset */
1544 __pv_offset += offset;
1545 __pv_phys_pfn_offset += PFN_DOWN(offset);
1546
1547 /* Run the patch stub to update the constants */
1548 fixup_pv_table(&__pv_table_begin,
1549 (&__pv_table_end - &__pv_table_begin) << 2);
1550
1551 /*
1552 * We are changing not only the virtual-to-physical mapping, but also
1553 * the physical addresses used to access memory. We need to flush
1554 * all levels of cache in the system with caching disabled to
1555 * ensure that all data is written back, and nothing is prefetched
1556 * into the caches. We also need to prevent the TLB walkers from
1557 * allocating into the caches. Note that this is ARMv7 LPAE
1558 * specific.
1559 */
1560 cr = get_cr();
1561 set_cr(cr & ~(CR_I | CR_C));
1562 asm("mrc p15, 0, %0, c2, c0, 2" : "=r" (ttbcr));
1563 asm volatile("mcr p15, 0, %0, c2, c0, 2"
1564 : : "r" (ttbcr & ~(3 << 8 | 3 << 10)));
1565 flush_cache_all();
1566
1567 /*
1568 * Fixup the page tables - this must be in the idmap region as
1569 * we need to disable the MMU to do this safely, and hence it
1570 * needs to be assembly. It's fairly simple, as we're using the
1571 * temporary tables set up by the initial assembly code.
1572 */
1573 lpae_pgtables_remap(offset, pa_pgd, boot_data);
1574
1575 /* Re-enable the caches and cacheable TLB walks */
1576 asm volatile("mcr p15, 0, %0, c2, c0, 2" : : "r" (ttbcr));
1577 set_cr(cr);
1578}
1579
1580#else
1581
1582static void __init early_paging_init(const struct machine_desc *mdesc)
1583{
1584 long long offset;
1585
1586 if (!mdesc->pv_fixup)
1587 return;
1588
1589 offset = mdesc->pv_fixup();
1590 if (offset == 0)
1591 return;
1592
1593 pr_crit("Physical address space modification is only to support Keystone2.\n");
1594 pr_crit("Please enable ARM_LPAE and ARM_PATCH_PHYS_VIRT support to use this\n");
1595 pr_crit("feature. Your kernel may crash now, have a good day.\n");
1596 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
1597}
1598
1599#endif
1600
1601static void __init early_fixmap_shutdown(void)
1602{
1603 int i;
1604 unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
1605
1606 pte_offset_fixmap = pte_offset_late_fixmap;
1607 pmd_clear(fixmap_pmd(va));
1608 local_flush_tlb_kernel_page(va);
1609
1610 for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
1611 pte_t *pte;
1612 struct map_desc map;
1613
1614 map.virtual = fix_to_virt(i);
1615 pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
1616
1617 /* Only i/o device mappings are supported ATM */
1618 if (pte_none(*pte) ||
1619 (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
1620 continue;
1621
1622 map.pfn = pte_pfn(*pte);
1623 map.type = MT_DEVICE;
1624 map.length = PAGE_SIZE;
1625
1626 create_mapping(&map);
1627 }
1628}
1629
1630/*
1631 * paging_init() sets up the page tables, initialises the zone memory
1632 * maps, and sets up the zero page, bad page and bad page tables.
1633 */
1634void __init paging_init(const struct machine_desc *mdesc)
1635{
1636 void *zero_page;
1637
1638 prepare_page_table();
1639 map_lowmem();
1640 memblock_set_current_limit(arm_lowmem_limit);
1641 dma_contiguous_remap();
1642 early_fixmap_shutdown();
1643 devicemaps_init(mdesc);
1644 kmap_init();
1645 tcm_init();
1646
1647 top_pmd = pmd_off_k(0xffff0000);
1648
1649 /* allocate the zero page. */
1650 zero_page = early_alloc(PAGE_SIZE);
1651
1652 bootmem_init();
1653
1654 empty_zero_page = virt_to_page(zero_page);
1655 __flush_dcache_page(NULL, empty_zero_page);
1656}
1657
1658void __init early_mm_init(const struct machine_desc *mdesc)
1659{
1660 build_mem_type_table();
1661 early_paging_init(mdesc);
1662}
1663
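/*
 * Illustrative note (not part of the original file): user mappings are
 * installed with the non-global bit so the TLB entry is tagged with the
 * current ASID, and the I/D caches are synchronised first for non-special
 * pages so instruction fetches from the page see up-to-date data.
 */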
1664void set_pte_at(struct mm_struct *mm, unsigned long addr,
1665 pte_t *ptep, pte_t pteval)
1666{
1667 unsigned long ext = 0;
1668
1669 if (addr < TASK_SIZE && pte_valid_user(pteval)) {
1670 if (!pte_special(pteval))
1671 __sync_icache_dcache(pteval);
1672 ext |= PTE_EXT_NG;
1673 }
1674
1675 set_pte_ext(ptep, pteval, ext);
1676}