v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#ifdef CONFIG_PAGE_SIZE_4KB
#define PAGE_SHIFT	12
#endif
#ifdef CONFIG_PAGE_SIZE_8KB
#define PAGE_SHIFT	13
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
#define PAGE_SHIFT	14
#endif
#ifdef CONFIG_PAGE_SIZE_32KB
#define PAGE_SHIFT	15
#endif
#ifdef CONFIG_PAGE_SIZE_64KB
#define PAGE_SHIFT	16
#endif
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
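
/*
 * Editorial sketch, not part of the original header: how PAGE_MASK is
 * typically used. Assuming CONFIG_PAGE_SIZE_4KB (PAGE_SHIFT == 12),
 * PAGE_SIZE is 0x1000 and PAGE_MASK is effectively ~0xfffUL; the helper
 * name below is hypothetical.
 */
static inline unsigned long example_page_base(unsigned long addr)
{
	return addr & PAGE_MASK;	/* e.g. 0x12345678 -> 0x12345000 */
}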

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
			/* fall through */
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}
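
/*
 * Editorial note, not part of the original header: in the VTLB case the
 * encoding is (PAGE_SHIFT - 10) / 2, so each step corresponds to a 4x
 * larger page: 4KB (shift 12) -> 1, 16KB (shift 14) -> 2, 64KB (shift
 * 16) -> 3.
 */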

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * It's normally defined only for the FLATMEM config, but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#define ARCH_PFN_OFFSET		PFN_UP(PHYS_OFFSET)

extern void clear_page(void * page);
extern void copy_page(void * to, void * from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}
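
/*
 * Editorial example, not part of the original header: with a 16KB
 * shm_align_mask (0x3fff), pages_do_alias(0x2000, 0x6000) == 0 because
 * the addresses differ only above the aliasing bits, while
 * pages_do_alias(0x2000, 0x5000) != 0 and the pages may alias in a
 * virtually indexed cache.
 */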

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
  #ifdef CONFIG_CPU_MIPS32
    typedef struct { unsigned long pte_low, pte_high; } pte_t;
    #define pte_val(x)	  ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x)	  ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
  #else
     typedef struct { unsigned long long pte; } pte_t;
     #define pte_val(x) ((x).pte)
     #define __pte(x)	((pte_t) { (x) } )
  #endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;

/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally, the top of the hierarchy: the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * On R4000-style MMUs where a TLB entry maps an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages.  When
 * writing to the TLB make sure we always have the bit set for both pages
 * or none.  This macro is used to access the `buddy' of the pte we're just
 * working on.
 */
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
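
/*
 * Editorial example, not part of the original header: with 8-byte ptes,
 * ptep_buddy() XORs the pointer with sizeof(pte_t) and thus flips bit 3,
 * so the ptes at ...0x10 and ...0x18 are each other's buddy; applying
 * the macro twice returns the original pointer.
 */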

/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
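
/*
 * Editorial example, not part of the original header: on a classic
 * MIPS32 map (no EVA) with PHYS_OFFSET == 0, kseg0 starts at 0x80000000,
 * so __pa((void *)0x80001000) == 0x1000 via CPHYSADDR, and __va(0x1000)
 * yields (void *)0x80001000 again via PAGE_OFFSET.
 */
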
#include <asm/io.h>

/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
 * discussion can be found in lkml posting
 * <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
 * archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
 *
 * It is unclear if the miscompilations mentioned in
 * http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
 * until GCC 3.x has been retired before we can apply
 * https://patchwork.linux-mips.org/patch/1541/
 */

#ifndef __pa_symbol
#define __pa_symbol(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#ifdef CONFIG_FLATMEM

static inline int pfn_valid(unsigned long pfn)
{
	/* avoid <linux/mm.h> include hell */
	extern unsigned long max_mapnr;
	unsigned long pfn_offset = ARCH_PFN_OFFSET;

	return pfn >= pfn_offset && pfn < max_mapnr;
}
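
/*
 * Editorial note, not part of the original header: under FLATMEM a pfn
 * is valid iff it falls in the single contiguous range
 * [ARCH_PFN_OFFSET, max_mapnr); max_mapnr is declared locally to avoid
 * dragging <linux/mm.h> into this low-level header.
 */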

#elif defined(CONFIG_SPARSEMEM)

/* pfn_valid is defined in linux/mmzone.h */

#elif defined(CONFIG_NEED_MULTIPLE_NODES)

#define pfn_valid(pfn)							\
({									\
	unsigned long __pfn = (pfn);					\
	int __n = pfn_to_nid(__pfn);					\
	((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn +		\
			       NODE_DATA(__n)->node_spanned_pages)	\
		    : 0);						\
})

#endif

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)						\
	__virt_addr_valid((const volatile void *) (kaddr))

#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	 ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr)		((addr) - UNCAC_BASE + PAGE_OFFSET)
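
/*
 * Editorial example, not part of the original header: on MIPS32,
 * UNCAC_BASE is typically kseg1 (0xa0000000), so UNCAC_ADDR(0x80001000)
 * yields 0xa0001000, the uncached alias of the cached kseg0 address,
 * and CAC_ADDR() reverses the translation.
 */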

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */
v6.9.4
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

/*
 * PAGE_SHIFT determines the page size
 */
#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
#define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		fallthrough;
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * It's normally defined only for the FLATMEM config, but it's
 * used in our early mem init code for all memory models.
 * So always define it.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET	ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)
#endif
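
/*
 * Editorial note, not part of the original header: defining
 * ARCH_PFN_OFFSET as itself is a common kernel idiom; the macro merely
 * has to be defined so that generic headers skip their zero fallback,
 * while the value remains the runtime variable declared above.
 */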

extern void clear_page(void * page);
extern void copy_page(void * to, void * from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
  #ifdef CONFIG_CPU_MIPS32
    typedef struct { unsigned long pte_low, pte_high; } pte_t;
    #define pte_val(x)	  ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x)	  ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
  #else
     typedef struct { unsigned long long pte; } pte_t;
     #define pte_val(x) ((x).pte)
     #define __pte(x)	((pte_t) { (x) } )
  #endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;

/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally, the top of the hierarchy: the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)
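
/*
 * Editorial note, not part of the original header: pte_pgprot() masks
 * off the physical frame number and keeps only the protection bits, so
 * for a pte assembled from a pfn plus a pgprot_t it recovers the
 * pgprot_t part.
 */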

/*
 * On R4000-style MMUs where a TLB entry maps an adjacent even / odd
 * pair of pages we only have a single global bit per pair of pages.  When
 * writing to the TLB make sure we always have the bit set for both pages
 * or none.  This macro is used to access the `buddy' of the pte we're just
 * working on.
 */
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>

/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org).  The
 * discussion can be found in
 * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
 *
 * It is unclear if the miscompilations mentioned in
 * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
 * also affect MIPS so we keep this one until GCC 3.x has been retired
 * before we can apply https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol_nodebug(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)		__phys_addr_symbol((unsigned long)(x))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)						\
	__virt_addr_valid((const volatile void *) (kaddr))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}
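
/*
 * Editorial note, not part of the original header: kaslr_offset()
 * reports how far a relocatable kernel was moved from its link-time
 * address, e.g. for exporting the randomization offset to crash/debug
 * tooling.
 */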

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */