// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/page.h>
#include <asm/set_memory.h>

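/*
 * Set the storage key of a block with SSKE (SET STORAGE KEY EXTENDED,
 * opcode 0xb22b). The multiple-block control in the m3 field allows the
 * instruction to process several 4K blocks of a 1MB frame at once and to
 * return the address at which processing should continue, so the caller
 * simply loops until the frame boundary is reached.
 */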
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}

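/*
 * Initialize the storage keys of the range [start, end) to PAGE_DEFAULT_KEY.
 * With EDAT1 whole 1MB frames are keyed via the SSKE loop; otherwise each
 * 4K page is keyed individually.
 */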
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
		start += PAGE_SIZE;
	}
}

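/*
 * Counters for the number of 4K, 1M and 2G mappings in the kernel direct
 * map, reported through /proc/meminfo as DirectMap4k/1M/2G.
 */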
#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */

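/*
 * Exchange a page table entry and flush the affected TLB entries in one
 * operation: CRDTE (compare and replace DAT table entry) is used on
 * machines with EDAT2, CSPG when IDTE is available, and CSP as fallback.
 */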
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long *table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long *)((unsigned long)old & mask);
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}

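/*
 * Apply the SET_MEMORY_* flags to every PTE from addr to end. SET_MEMORY_4K
 * is a pure split request, so there is nothing to do at the PTE level.
 */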
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	pte_t *ptep, new;

	if (flags == SET_MEMORY_4K)
		return 0;
	ptep = pte_offset_kernel(pmdp, addr);
	do {
		new = *ptep;
		if (pte_none(new))
			return -EINVAL;
		if (flags & SET_MEMORY_RO)
			new = pte_wrprotect(new);
		else if (flags & SET_MEMORY_RW)
			new = pte_mkwrite(pte_mkdirty(new));
		if (flags & SET_MEMORY_NX)
			new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
		else if (flags & SET_MEMORY_X)
			new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}

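/*
 * Split a 1MB segment mapping into a page table of 4K PTEs. The new PTEs
 * inherit the protection and no-exec state of the old segment entry, and
 * the DirectMap counters are adjusted accordingly.
 */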
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro, nx;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	if (!nx)
		prot &= ~_PAGE_NOEXEC;
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(ptep, __pte(pte_addr | prot));
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	new = __pmd(__pa(pt_dir) | _SEGMENT_ENTRY);
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}

static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
			    unsigned long flags)
{
	pmd_t new = *pmdp;

	if (flags & SET_MEMORY_RO)
		new = pmd_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pmd_mkwrite(pmd_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	else if (flags & SET_MEMORY_X)
		new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

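/*
 * Walk the segment table for the range. Large (1MB) entries are modified in
 * place when the whole segment is covered, and split into 4K page tables
 * when a 4K split was requested or only part of the segment is affected.
 */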
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int need_split;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			need_split = !!(flags & SET_MEMORY_4K);
			need_split |= !!(addr & ~PMD_MASK);
			need_split |= !!(addr + PMD_SIZE > next);
			if (need_split) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, flags);
		} else {
			rc = walk_pte_level(pmdp, addr, next, flags);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return rc;
}

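/*
 * Split a 2GB region-third mapping into a segment table of 1MB entries,
 * preserving the protection and no-exec state of the original entry.
 */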
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro, nx;

	pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	if (!nx)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		set_pmd(pmdp, __pmd(pmd_addr | prot));
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	new = __pud(__pa(pm_dir) | _REGION3_ENTRY);
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr,
			    unsigned long flags)
{
	pud_t new = *pudp;

	if (flags & SET_MEMORY_RO)
		new = pud_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pud_mkwrite(pud_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
	else if (flags & SET_MEMORY_X)
		new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

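/*
 * Walk the region-third table for the range: 2GB entries are split or
 * modified in place, everything else is handed down to walk_pmd_level().
 */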
static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int need_split;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(p4d, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			need_split = !!(flags & SET_MEMORY_4K);
			need_split |= !!(addr & ~PUD_MASK);
			need_split |= !!(addr + PUD_SIZE > next);
			if (need_split) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, flags);
		} else {
			rc = walk_pmd_level(pudp, addr, next, flags);
		}
		pudp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	p4d_t *p4dp;
	int rc = 0;

	p4dp = p4d_offset(pgd, addr);
	do {
		if (p4d_none(*p4dp))
			return -EINVAL;
		next = p4d_addr_end(addr, end);
		rc = walk_pud_level(p4dp, addr, next, flags);
		p4dp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

DEFINE_MUTEX(cpa_mutex);

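/*
 * cpa_mutex serializes all page attribute changes. change_page_attr() walks
 * the kernel page tables for [addr, end) and applies the requested flags;
 * ranges reaching MODULES_END or beyond are rejected.
 */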
static int change_page_attr(unsigned long addr, unsigned long end,
			    unsigned long flags)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_p4d_level(pgdp, addr, next, flags);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}

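/*
 * Common entry point for the set_memory_*() helpers. NX/X requests are
 * dropped when the machine does not provide the no-execute facility.
 */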
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
	if (!MACHINE_HAS_NX)
		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
	if (!flags)
		return 0;
	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)

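/*
 * Invalidate nr PTEs starting at address and flush the corresponding TLB
 * entries. When test_facility(13) indicates that the range form of IPTE is
 * available, the whole range is invalidated with a single operation.
 */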
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}

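/*
 * For CONFIG_DEBUG_PAGEALLOC and KFENCE: map pages by clearing the invalid
 * bit in their PTEs, or unmap them by invalidating the PTEs with IPTE. Each
 * iteration handles at most the PTEs left in the current page table.
 */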
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	pte_t *ptep, pte;
	int nr, i, j;

	for (i = 0; i < numpages;) {
		address = (unsigned long)page_to_virt(page + i);
		ptep = virt_to_kpte(address);
		nr = (unsigned long)ptep >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID));
				set_pte(ptep, pte);
				address += PAGE_SIZE;
				ptep++;
			}
		} else {
			ipte_range(ptep, address, nr);
		}
		i += nr;
	}
}

#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */