arch/powerpc/include/asm/page.h (Linux v6.9.4)

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>

/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages, however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT		CONFIG_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_E500)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long.  So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
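
/*
 * A worked example of the subtlety above (illustration only, not part of
 * the original header): with 4K pages, (1 << 12) - 1 is the int 0xfff, so
 * PAGE_MASK is the int 0xfffff000.  Assigned to a u64 it sign-extends to
 * 0xfffffffffffff000, so the mask still clears only the low 12 bits.
 */
#if 0	/* example only */
static inline u64 example_page_base(u64 addr)
{
	return addr & PAGE_MASK;	/* high bits preserved by sign extension */
}
#endif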

/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 * KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START.
 *
 * There are two ways to determine a physical address from a virtual one:
 * va = pa + PAGE_OFFSET - MEMORY_START
 * va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */
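
/*
 * Worked example of the equations above (illustrative values, not from any
 * particular config): with KERNELBASE = PAGE_OFFSET = 0xc0000000 and
 * PHYSICAL_START = MEMORY_START = 0, the linear-map invariant
 * KERNELBASE - PAGE_OFFSET == PHYSICAL_START - MEMORY_START holds (0 == 0),
 * and both translations agree:
 * va = 0x01000000 + 0xc0000000 - 0 = 0xc1000000.
 */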

#define KERNELBASE      ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))

#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif

/* See the description below for VIRT_PHYS_OFFSET */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#endif

/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 *   With RELOCATABLE && PPC32, we support loading the kernel at any physical
 *   address without any restriction on the page alignment.
 *
 *   We find the runtime address of _stext and relocate ourselves based on
 *   the following calculation:
 *
 *   	  virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *   				MODULO(_stext.run, 256M)
 *
 *   and create the following mapping:
 *
 * 	  ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 *   When we process relocations, we cannot depend on the
 *   existing equation for the __va()/__pa() translations:
 *
 * 	   __va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 *   Where:
 *   	 PHYSICAL_START = kernstart_addr = physical address of _stext
 *   	 KERNELBASE = compiled virtual address of _stext.
 *
 *   This formula holds true iff the kernel load address is TLB page aligned.
 *
 *   In our case, we need to also account for the shift in the kernel virtual
 *   address.
 *
 *   E.g.,
 *
 *   Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as PAGE_OFFSET).
 *   In this case, we would be mapping 0 to 0xc0000000, and kernstart_addr = 64M.
 *
 *   Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *                 = 0xbc100000, which is wrong.
 *
 *   Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 *   according to our mapping.
 *
 *   Hence we use the following formula to get the translations right:
 *
 * 	  __va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 *   Where:
 * 	 PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 * 	 Effective KERNELBASE = virtual_base
 * 			      = ALIGN_DOWN(KERNELBASE, 256M) +
 * 					MODULO(PHYSICAL_START, 256M)
 *
 *   To make the cost of __va()/__pa() more lightweight, we introduce
 *   a new variable virt_phys_offset, which will hold:
 *
 * 	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 * 			 = ALIGN_DOWN(KERNELBASE, 256M) -
 * 				ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 *   Hence:
 *
 * 	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 * 		= x + virt_phys_offset
 *
 * 	and
 *
 * 	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 * 		= x - virt_phys_offset
 *
 * On non-Book-E PPC64, PAGE_OFFSET and MEMORY_START are constants, so we use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64

#define VIRTUAL_WARN_ON(x)	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))

/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
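
/*
 * Why | and & are safe here (a sketch, assuming the usual 64-bit layout
 * where PAGE_OFFSET is 0xc000000000000000): linear-map physical addresses
 * fit in the low 60 bits, so they never overlap PAGE_OFFSET's set bits.
 * With no carries possible, (x | PAGE_OFFSET) == (x + PAGE_OFFSET), and
 * masking with 0x0fffffffffffffffUL undoes it exactly.
 */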

#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
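
/*
 * A minimal usage sketch (example only, not part of the original header):
 * whichever pair of definitions was selected above, __va() and __pa() are
 * inverses of each other for addresses inside the linear mapping.
 */
#if 0	/* example only */
static inline bool example_va_pa_roundtrip(phys_addr_t pa)
{
	void *va = __va(pa);		/* physical -> linear-map virtual */

	return __pa(va) == pa;		/* and back again */
}
#endif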

#ifndef __ASSEMBLY__
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

static inline const void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}
#endif

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(vaddr)	({					\
	unsigned long _addr = (unsigned long)vaddr;			\
	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
	pfn_valid(virt_to_pfn((void *)_addr));				\
})
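
/*
 * Usage sketch (example only; the helper name is made up for illustration):
 * validate a lowmem pointer before turning it into its struct page.
 */
#if 0	/* example only */
static inline struct page *example_virt_to_page_checked(void *addr)
{
	if (!virt_addr_valid(addr))
		return NULL;
	return virt_to_page(addr);
}
#endif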

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr(), which should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)	((x) >= TASK_SIZE)
#endif
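
/*
 * Usage sketch (example only): the intended replacement for open-coded
 * comparisons against PAGE_OFFSET or KERNELBASE.
 */
#if 0	/* example only */
static inline bool example_is_user_addr(unsigned long addr)
{
	return !is_kernel_addr(addr);
}
#endif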

#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif

#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK     0xfff
#else
#define HUGEPD_SHIFT_MASK     0x3f
#endif
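
/*
 * Illustrative sketch (not the kernel's real accessor, names made up):
 * per the PD_HUGE comment above, on non-Book3S-64 configs the top bit
 * marks a directory entry that points to a hugepte.
 */
#if 0	/* example only */
static inline bool example_pd_points_to_hugepte(unsigned long pd_val)
{
	return (pd_val & PD_HUGE) != 0;
}
#endif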

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif

struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}
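
/*
 * Usage sketch (example only): kaslr_offset() is how far the kernel was
 * relocated from its compile-time base, useful e.g. in crash reports.
 */
#if 0	/* example only */
static inline void example_report_kaslr(void)
{
	pr_info("kernel offset: 0x%lx\n", kaslr_offset());
}
#endif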

#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */