/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_MMU
#include <linux/list.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>

/* the upper-most page table pointer */
extern pmd_t *top_pmd;

/*
 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
 * specific hacks for copying pages efficiently, while 0xffff4000
 * is reserved for VIPT aliasing flushing by generic code.
 *
 * Note that we don't allow VIPT aliasing caches with SMP.
 */
#define COPYPAGE_MINICACHE	0xffff8000
#define COPYPAGE_V6_FROM	0xffff8000
#define COPYPAGE_V6_TO		0xffffc000
/* PFN alias flushing, for VIPT caches */
#define FLUSH_ALIAS_START	0xffff4000
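
/*
 * Illustrative note (not part of the original header): the FROM and TO
 * windows above are 0x4000 apart, i.e. four 4K pages, so an
 * aliasing-cache copypage implementation can offset into a window by
 * the cache colour of the user mapping, roughly along the lines of:
 *
 *	unsigned long kfrom = COPYPAGE_V6_FROM +
 *			      (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
 *
 * CACHE_COLOUR() here is assumed to be the usual ARM helper deriving
 * the colour from the user virtual address.
 */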

static inline void set_top_pte(unsigned long va, pte_t pte)
{
	pte_t *ptep = pte_offset_kernel(top_pmd, va);
	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(va);
}

static inline pte_t get_top_pte(unsigned long va)
{
	pte_t *ptep = pte_offset_kernel(top_pmd, va);
	return *ptep;
}
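
/*
 * Illustrative sketch (assumption, not part of the original header):
 * a copy_user_highpage() style helper would typically map the source
 * page into one of the COPYPAGE_* windows before touching it, e.g.:
 *
 *	set_top_pte(COPYPAGE_V6_FROM, mk_pte(from, PAGE_KERNEL));
 *
 * and then copy PAGE_SIZE bytes from COPYPAGE_V6_FROM.  set_top_pte()
 * installs the PTE and flushes the local kernel TLB entry for that
 * address, so the window can be re-pointed at another page right away.
 */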

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
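
/*
 * Illustrative sketch (assumption): code that needs to inspect or
 * modify the kernel page tables directly walks to the PMD entry
 * covering a kernel virtual address like this:
 *
 *	pmd_t *pmd = pmd_off_k(addr);
 *
 *	if (pmd_none(*pmd))
 *		... nothing mapped at this address ...
 *
 * This walks only the init_mm (kernel) tables via pgd_offset_k();
 * user addresses need a walk starting from the task's own pgd.
 */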

struct mem_type {
	pteval_t prot_pte;
	pteval_t prot_pte_s2;
	pmdval_t prot_l1;
	pmdval_t prot_sect;
	unsigned int domain;
};

const struct mem_type *get_mem_type(unsigned int type);
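
/*
 * Illustrative sketch (assumption, mirroring how mapping code is
 * commonly structured): a caller building page-sized mappings picks a
 * memory type and uses its prot_pte value as the pgprot, e.g.:
 *
 *	const struct mem_type *type = get_mem_type(MT_DEVICE);
 *
 *	if (type)
 *		err = ioremap_page_range(addr, addr + size, paddr,
 *					 __pgprot(type->prot_pte));
 *
 * prot_sect and prot_l1 carry the corresponding section and
 * first-level descriptor bits for section-sized mappings.
 */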

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

/*
 * ARM specific vm_struct->flags bits.
 */

/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
#define VM_ARM_SECTION_MAPPING	0x80000000

/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING	0x40000000

/* empty mapping */
#define VM_ARM_EMPTY_MAPPING	0x20000000

/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1f << 20)

/* consistent regions used by dma_alloc_attrs() */
#define VM_ARM_DMA_CONSISTENT	0x20000000
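
/*
 * Illustrative sketch (assumption): iotable_init() style code marks a
 * static mapping and records its memory type in the vm_struct flags,
 * and the ioremap() path can later match on those bits:
 *
 *	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING | VM_ARM_MTYPE(mt);
 *
 *	...
 *
 *	if ((vm->flags & VM_ARM_MTYPE_MASK) == VM_ARM_MTYPE(mtype))
 *		... reuse the existing static mapping ...
 */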

struct static_vm {
	struct vm_struct vm;
	struct list_head list;
};

extern struct list_head static_vmlist;
extern struct static_vm *find_static_vm_vaddr(void *vaddr);
extern __init void add_static_vm_early(struct static_vm *svm);
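
/*
 * Illustrative sketch (assumption): early boot code wraps each fixed
 * mapping in a struct static_vm and registers it so that later
 * ioremap() calls can find and reuse it by virtual address:
 *
 *	svm->vm.addr = (void *)addr;
 *	svm->vm.size = size;
 *	svm->vm.flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING | VM_ARM_MTYPE(mt);
 *	add_static_vm_early(svm);
 *
 *	... and later ...
 *
 *	struct static_vm *svm = find_static_vm_vaddr(vaddr);
 */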

#endif

#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
extern unsigned long arm_dma_pfn_limit;
#else
#define arm_dma_limit ((phys_addr_t)~0)
#define arm_dma_pfn_limit (~0ul >> PAGE_SHIFT)
#endif

extern phys_addr_t arm_lowmem_limit;

void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);
void dma_contiguous_remap(void);

unsigned long __clear_cr(unsigned long mask);