/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * modification for nommu, Hyok S. Choi, 2004
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
#include <linux/sizes.h>

#ifdef CONFIG_NEED_MACH_MEMORY_H
#include <mach/memory.h>
#endif
#include <asm/kasan_def.h>

/*
 * PAGE_OFFSET: the virtual address of the start of lowmem, the memory
 * above the virtual address range for userspace.
 * KERNEL_OFFSET: the virtual address of the start of the kernel image;
 * we may further offset this by TEXT_OFFSET in practice.
 */
#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define KERNEL_OFFSET (PAGE_OFFSET)
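
/*
 * Editor's note (illustrative, not from the original header): with the
 * common 3G/1G VMSPLIT, CONFIG_PAGE_OFFSET is 0xC0000000, so lowmem and
 * the kernel image both start at 0xC0000000, and the kernel text then
 * typically sits at KERNEL_OFFSET + TEXT_OFFSET, e.g. 0xC0008000 with the
 * traditional TEXT_OFFSET of 0x8000.
 */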

#ifdef CONFIG_MMU

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
 */
#ifndef CONFIG_KASAN
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#else
#define TASK_SIZE (KASAN_SHADOW_START)
#endif
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
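
/*
 * Editor's worked example (assuming CONFIG_PAGE_OFFSET = 0xC0000000 and
 * no KASAN):
 *
 *	TASK_SIZE          = 0xC0000000 - SZ_16M = 0xBF000000
 *	TASK_UNMAPPED_BASE = ALIGN(0xBF000000 / 3, SZ_16M)
 *	                   = ALIGN(0x3FAAAAAA, 0x01000000)
 *	                   = 0x40000000
 *
 * so with the legacy bottom-up layout, mmap() allocations start at the
 * 1GiB mark.
 */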

/*
 * The maximum size of a 26-bit user space task.
 */
#define TASK_SIZE_26 (UL(1) << 26)

/*
 * The module space lives between the addresses given by TASK_SIZE
 * and PAGE_OFFSET - it must be within 32MB of the kernel text.
 */
#ifndef CONFIG_THUMB2_KERNEL
#define MODULES_VADDR (PAGE_OFFSET - SZ_16M)
#else
/* smaller range for Thumb-2 symbol relocation (2^24) */
#define MODULES_VADDR (PAGE_OFFSET - SZ_8M)
#endif

#if TASK_SIZE > MODULES_VADDR
#error Top of user space clashes with start of module space
#endif

/*
 * The highmem pkmap virtual space shares the end of the module area.
 */
#ifdef CONFIG_HIGHMEM
#define MODULES_END (PAGE_OFFSET - PMD_SIZE)
#else
#define MODULES_END (PAGE_OFFSET)
#endif

/*
 * The XIP kernel gets mapped at the bottom of the module vm area.
 * Since we use sections to map it, this macro replaces the physical address
 * with its virtual address while keeping the offset from the base section.
 */
#define XIP_VIRT_ADDR(physaddr) (MODULES_VADDR + ((physaddr) & 0x000fffff))
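
/*
 * Editor's example: the 0x000fffff mask keeps the offset within a 1MiB
 * section, so an XIP kernel stored at physical 0x08180000 would map to
 * XIP_VIRT_ADDR(0x08180000) = MODULES_VADDR + 0x00080000.
 */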

#define FDT_FIXED_BASE UL(0xff800000)
#define FDT_FIXED_SIZE (2 * SECTION_SIZE)
#define FDT_VIRT_BASE(physbase) ((void *)(FDT_FIXED_BASE | (physbase) % SECTION_SIZE))
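
/*
 * Editor's example (SECTION_SIZE is 1MiB with the classic 2-level tables):
 * a DTB at physical 0x40200040 gives
 * FDT_VIRT_BASE(0x40200040) = (void *)(0xff800000 | 0x00000040)
 *                           = (void *)0xff800040.
 */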

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Allow 16MB-aligned ioremap pages
 */
#define IOREMAP_MAX_ORDER 24
#endif

#define VECTORS_BASE UL(0xffff0000)

#else /* CONFIG_MMU */

#ifndef __ASSEMBLY__
extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE vectors_base
#endif

/*
 * On noMMU, the user task size limit can grow up to the end of the free
 * RAM region. It is difficult to define precisely and may never match the
 * original meaning of this define. Fortunately, there is no reference to
 * it in noMMU mode, for now.
 */
#define TASK_SIZE UL(0xffffffff)

#ifndef TASK_UNMAPPED_BASE
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif

#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif

/*
 * Modules can be placed anywhere in RAM in noMMU mode.
 */
#define MODULES_END (END_MEM)
#define MODULES_VADDR PAGE_OFFSET

#define XIP_VIRT_ADDR(physaddr) (physaddr)
#define FDT_VIRT_BASE(physbase) ((void *)(physbase))

#endif /* !CONFIG_MMU */

#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START _sdata
#else
#define KERNEL_START _stext
#endif
#define KERNEL_END _end

/*
 * We fix the TCM memories (max 32 KiB ITCM and 32 KiB DTCM, respectively)
 * at these locations.
 */
#ifdef CONFIG_HAVE_TCM
#define ITCM_OFFSET UL(0xfffe0000)
#define DTCM_OFFSET UL(0xfffe8000)
#endif

/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
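
/*
 * Editor's sketch: these simply compose the PFN helpers. For a
 * hypothetical lowmem buffer "buf":
 *
 *	struct page *pg = virt_to_page(buf);
 *	phys_addr_t pa  = page_to_phys(pg);	... __pa(buf) rounded down
 *						    to a page boundary
 *	pg == phys_to_page(pa);			... round trip holds
 */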

/*
 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
 */
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)

#ifndef __ASSEMBLY__

/*
 * Physical start and end address of the kernel sections. These addresses
 * are 2MB-aligned to match the section mappings placed over the kernel. We
 * use u64 so that LPAE mappings beyond the 32-bit limit work out as well.
 */
extern u64 kernel_sec_start;
extern u64 kernel_sec_end;

/*
 * Physical vs virtual RAM address space conversion. These are
 * private definitions which should NOT be used outside memory.h
 * files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
 *
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 */

#if defined(CONFIG_ARM_PATCH_PHYS_VIRT)

/*
 * Constants used to force the right instruction encodings and shifts
 * so that all we need to do is modify the 8-bit constant field.
 */
#define __PV_BITS_31_24 0x81000000
#define __PV_BITS_23_16 0x810000
#define __PV_BITS_7_0 0x81
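
/*
 * Editor's note: each constant is a valid ARM modified-immediate encoding
 * (0x81 rotated right by 8, 16 and 0 bits respectively), so the stubs
 * below assemble with an 8-bit constant field that the boot-time fixup
 * code can rewrite with the real physical/virtual offset; 0x81 itself is
 * only a recognizable placeholder.
 */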

extern unsigned long __pv_phys_pfn_offset;
extern u64 __pv_offset;
extern void fixup_pv_table(const void *, unsigned long);
extern const void *__pv_table_begin, *__pv_table_end;

#define PHYS_OFFSET ((phys_addr_t)__pv_phys_pfn_offset << PAGE_SHIFT)
#define PHYS_PFN_OFFSET (__pv_phys_pfn_offset)

#ifndef CONFIG_THUMB2_KERNEL
#define __pv_stub(from,to,instr) \
	__asm__("@ __pv_stub\n" \
	"1: " instr " %0, %1, %2\n" \
	"2: " instr " %0, %0, %3\n" \
	" .pushsection .pv_table,\"a\"\n" \
	" .long 1b - ., 2b - .\n" \
	" .popsection\n" \
	: "=r" (to) \
	: "r" (from), "I" (__PV_BITS_31_24), \
	  "I"(__PV_BITS_23_16))

#define __pv_add_carry_stub(x, y) \
	__asm__("@ __pv_add_carry_stub\n" \
	"0: movw %R0, #0\n" \
	" adds %Q0, %1, %R0, lsl #20\n" \
	"1: mov %R0, %2\n" \
	" adc %R0, %R0, #0\n" \
	" .pushsection .pv_table,\"a\"\n" \
	" .long 0b - ., 1b - .\n" \
	" .popsection\n" \
	: "=&r" (y) \
	: "r" (x), "I" (__PV_BITS_7_0) \
	: "cc")

#else
#define __pv_stub(from,to,instr) \
	__asm__("@ __pv_stub\n" \
	"0: movw %0, #0\n" \
	" lsl %0, #21\n" \
	" " instr " %0, %1, %0\n" \
	" .pushsection .pv_table,\"a\"\n" \
	" .long 0b - .\n" \
	" .popsection\n" \
	: "=&r" (to) \
	: "r" (from))

#define __pv_add_carry_stub(x, y) \
	__asm__("@ __pv_add_carry_stub\n" \
	"0: movw %R0, #0\n" \
	" lsls %R0, #21\n" \
	" adds %Q0, %1, %R0\n" \
	"1: mvn %R0, #0\n" \
	" adc %R0, %R0, #0\n" \
	" .pushsection .pv_table,\"a\"\n" \
	" .long 0b - ., 1b - .\n" \
	" .popsection\n" \
	: "=&r" (y) \
	: "r" (x) \
	: "cc")
#endif

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	phys_addr_t t;

	if (sizeof(phys_addr_t) == 4) {
		__pv_stub(x, t, "add");
	} else {
		__pv_add_carry_stub(x, t);
	}
	return t;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	unsigned long t;

	/*
	 * The 'unsigned long' cast discards the upper word when
	 * phys_addr_t is 64-bit, and makes sure that the inline
	 * assembler expression receives a 32-bit argument in the
	 * place where a 32-bit 'r' operand is expected.
	 */
	__pv_stub((unsigned long) x, t, "sub");
	return t;
}

#else

#define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))

static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static inline unsigned long __phys_to_virt(phys_addr_t x)
{
	return x - PHYS_OFFSET + PAGE_OFFSET;
}

#endif

#define virt_to_pfn(kaddr) \
	((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
	 PHYS_PFN_OFFSET)
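
/*
 * Editor's worked example (assuming PAGE_OFFSET = 0xC0000000,
 * PHYS_OFFSET = 0x80000000 and 4KiB pages):
 *
 *	virt_to_pfn(0xC0000000) = (0 >> PAGE_SHIFT) + 0x80000 = 0x80000
 *
 * i.e. the first lowmem virtual page maps to PFN PHYS_PFN_OFFSET.
 */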

#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
#endif

/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 * Note: drivers should NOT use these. They are the wrong
 * translation to use for DMA addresses. Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)__phys_to_virt(x);
}
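
/*
 * Editor's usage sketch: only lowmem, e.g. a kmalloc() buffer, qualifies;
 * never pass vmalloc()/ioremap() addresses here:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);	... direct-mapped
 *	phys_addr_t pa = virt_to_phys(buf);
 *	buf == phys_to_virt(pa);		... round trip holds
 *
 * and even then the result is a CPU physical address, not necessarily a
 * DMA address usable by a device.
 */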

/*
 * Drivers should NOT use these either.
 */
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)

extern long long arch_phys_to_idmap_offset;

/*
 * These are for systems that have a hardware-interconnect-supported alias
 * of physical memory for idmap purposes. Most cases should leave these
 * untouched. Note: this can only return addresses less than 4GiB.
 */
static inline bool arm_has_idmap_alias(void)
{
	return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
}

#define IDMAP_INVALID_ADDR ((u32)~0)

static inline unsigned long phys_to_idmap(phys_addr_t addr)
{
	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
		addr += arch_phys_to_idmap_offset;
		if (addr > (u32)~0)
			addr = IDMAP_INVALID_ADDR;
	}
	return addr;
}

static inline phys_addr_t idmap_to_phys(unsigned long idmap)
{
	phys_addr_t addr = idmap;

	if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
		addr -= arch_phys_to_idmap_offset;

	return addr;
}

static inline unsigned long __virt_to_idmap(unsigned long x)
{
	return phys_to_idmap(__virt_to_phys(x));
}

#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
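
/*
 * Editor's example (modelled on Keystone 2-style systems, where LPAE RAM
 * above 4GiB has an alias below it): with
 *
 *	arch_phys_to_idmap_offset = 0x80000000LL - 0x800000000LL
 *
 * a page at physical 0x800001000 idmaps to 0x80001000, an address the CPU
 * can still reach once the MMU is turned off.
 */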

/*
 * Conversion between a struct page and a physical address.
 *
 * page_to_pfn(page)	convert a struct page * to a PFN number
 * pfn_to_page(pfn)	convert a _valid_ PFN number to struct page *
 *
 * virt_to_page(k)	convert a _valid_ virtual address to struct page *
 * virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET

#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
				&& pfn_valid(virt_to_pfn(kaddr)))
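
/*
 * Editor's sketch: virt_addr_valid() gates __pa()/virt_to_page() style
 * conversions:
 *
 *	if (virt_addr_valid(p))
 *		pa = __pa(p);	... p is lowmem, conversion is safe
 *	else
 *		...		... p is vmalloc/module/user space
 */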

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* __ASM_ARM_MEMORY_H */