/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 03 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_PAGE_H
#define _ASM_PAGE_H

#include <spaces.h>
#include <linux/const.h>
#include <linux/kernel.h>
#include <asm/mipsregs.h>

#include <vdso/page.h>

/*
 * This is used for calculating the real page sizes
 * for FTLB or VTLB + FTLB configurations.
 */
static inline unsigned int page_size_ftlb(unsigned int mmuextdef)
{
	switch (mmuextdef) {
	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
		if (PAGE_SIZE == (1 << 30))
			return 5;
		if (PAGE_SIZE == (1llu << 32))
			return 6;
		if (PAGE_SIZE > (256 << 10))
			return 7; /* reserved */
		fallthrough;
	case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
		return (PAGE_SHIFT - 10) / 2;
	default:
		panic("Invalid FTLB configuration with Conf4_mmuextdef=%d value\n",
		      mmuextdef >> 14);
	}
}

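/*
 * Worked example (illustrative): with 4K base pages (PAGE_SHIFT == 12)
 * the VTLBSIZEEXT case returns (12 - 10) / 2 == 1, with 16K pages
 * (PAGE_SHIFT == 14) it returns 2, and with 64K pages (PAGE_SHIFT == 16)
 * it returns 3; the encoding steps up once per factor-of-four increase
 * in page size.
 */
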
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE	(_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT	({BUILD_BUG(); 0; })
#define HPAGE_SIZE	({BUILD_BUG(); 0; })
#define HPAGE_MASK	({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER	({BUILD_BUG(); 0; })
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

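/*
 * Where the formula comes from (a sketch, assuming the 8-byte PTEs used
 * in configurations that enable huge TLB support): a huge page spans one
 * full last-level page table, i.e. 2^(PAGE_SHIFT - 3) PTEs each mapping
 * 2^PAGE_SHIFT bytes, for 2^(2 * PAGE_SHIFT - 3) bytes total.  With 4K
 * base pages that gives HPAGE_SHIFT == 21 (2MB huge pages); with 16K
 * base pages, HPAGE_SHIFT == 25 (32MB).
 */
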
#include <linux/pfn.h>

extern void build_clear_page(void);
extern void build_copy_page(void);

/*
 * ARCH_PFN_OFFSET is normally defined only for the FLATMEM
 * configuration, but our early memory init code uses it for all
 * memory models, so define it unconditionally.
 */
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
extern unsigned long ARCH_PFN_OFFSET;
# define ARCH_PFN_OFFSET	ARCH_PFN_OFFSET
#else
# define ARCH_PFN_OFFSET	PFN_UP(PHYS_OFFSET)
#endif

extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

extern unsigned long shm_align_mask;

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

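/*
 * Illustrative example (the mask value is assumed, not defined here):
 * on a CPU whose virtually indexed data cache spans 16K per way,
 * shm_align_mask would be 0x3fff.  pages_do_alias(0x1000, 0x3000)
 * then returns 0x2000, i.e. the two addresses may index different
 * cache lines for the same physical page and so can alias, while
 * pages_do_alias(0x1000, 0x5000) returns 0 because the addresses
 * agree in every bit below the alias boundary.
 */
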
struct page;

static inline void clear_user_page(void *addr, unsigned long vaddr,
	struct page *page)
{
	extern void (*flush_data_cache_page)(unsigned long addr);

	clear_page(addr);
	if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)addr);
}

struct vm_area_struct;
extern void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma);

#define __HAVE_ARCH_COPY_USER_HIGHPAGE

/*
 * These are used to make use of C type-checking.
 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
  #ifdef CONFIG_CPU_MIPS32
    typedef struct { unsigned long pte_low, pte_high; } pte_t;
    #define pte_val(x)	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
    #define __pte(x)	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
  #else
    typedef struct { unsigned long long pte; } pte_t;
    #define pte_val(x)	((x).pte)
    #define __pte(x)	((pte_t) { (x) } )
  #endif
#else
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) } )
#endif
typedef struct page *pgtable_t;

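/*
 * A minimal sketch of what the struct wrappers buy us: because pte_t
 * is a distinct type rather than a bare integer, mixing up page table
 * levels becomes a compile-time error instead of a silent bug:
 *
 *	pte_t pte = __pte(0x1000UL);
 *	unsigned long raw = pte_val(pte);	// fine: explicit unwrap
 *	pgd_t pgd = pte;			// compile error: incompatible types
 */
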
/*
 * Right now we don't support 4-level pagetables, so all pud-related
 * definitions come from <asm-generic/pgtable-nopud.h>.
 */

/*
 * Finally, the top of the hierarchy: the pgd
 */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) } )

/*
 * Manipulate page protection bits
 */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) } )
#define pte_pgprot(x)	__pgprot(pte_val(x) & ~_PFN_MASK)

/*
 * On R4000-style MMUs, where a TLB entry maps an adjacent even / odd
 * pair of pages, we only have a single global bit per pair of pages.
 * When writing to the TLB make sure we always have the bit set for
 * both pages or for neither. This macro is used to access the `buddy'
 * of the pte we're currently working on.
 */
#define ptep_buddy(x)	((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))

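/*
 * A minimal usage sketch (the helper name is for exposition only; the
 * real pte setters in the MIPS code follow this pattern): when a pte
 * gains the global bit, its buddy must gain it too, so the single
 * global bit shared by the even/odd pair stays consistent:
 *
 *	static inline void example_sync_global(pte_t *ptep)
 *	{
 *		if (pte_val(*ptep) & _PAGE_GLOBAL)
 *			pte_val(*ptep_buddy(ptep)) |= _PAGE_GLOBAL;
 *	}
 */
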
/*
 * __pa()/__va() should be used only during mem init.
 */
static inline unsigned long ___pa(unsigned long x)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/*
		 * For MIPS64 the virtual address may either be in one of
		 * the compatibility segments ckseg0 or ckseg1, or it may
		 * be in xkphys.
		 */
		return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
	}

	if (!IS_ENABLED(CONFIG_EVA)) {
		/*
		 * We're using the standard MIPS32 legacy memory map, i.e.
		 * the address x is going to be in kseg0 or kseg1. We can
		 * handle either case by masking out the desired bits using
		 * CPHYSADDR.
		 */
		return CPHYSADDR(x);
	}

	/*
	 * EVA is in use so the memory map could be anything, making it not
	 * safe to just mask out bits.
	 */
	return x - PAGE_OFFSET + PHYS_OFFSET;
}
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>

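/*
 * Worked example (addresses illustrative, PHYS_OFFSET assumed 0): on
 * MIPS32 without EVA, CPHYSADDR() masks the address down to its low
 * 29 bits, so __pa(0x80004000) (kseg0, cached) and __pa(0xa0004000)
 * (the same page through uncached kseg1) both yield physical address
 * 0x4000, and __va(0x4000) maps it back to 0x80004000 via PAGE_OFFSET.
 */
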
/*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
 * (lmo) resp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
 * discussion can be found in
 * https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
 *
 * It is unclear whether the miscompilations mentioned in
 * https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
 * also affect MIPS, so we keep this workaround until GCC 3.x has been
 * retired; only then can we apply
 * https://patchwork.linux-mips.org/patch/1541/
 */
#define __pa_symbol_nodebug(x)	__pa(RELOC_HIDE((unsigned long)(x), 0))

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif

#ifndef __pa_symbol
#define __pa_symbol(x)		__phys_addr_symbol((unsigned long)(x))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_to_pfn(kaddr)	PFN_DOWN(virt_to_phys((void *)(kaddr)))
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))

extern bool __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr)						\
	__virt_addr_valid((const volatile void *) (kaddr))

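/*
 * Illustrative chain (assuming the kseg0 example above and 4K pages):
 * for kaddr 0x80004000, virt_to_phys() yields 0x4000, virt_to_pfn()
 * shifts that down by PAGE_SHIFT to pfn 4, and virt_to_page() converts
 * the pfn into its struct page.
 */
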
#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
	return __kaslr_offset;
}

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_PAGE_H */