// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
}

void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;
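/*
 * PAGE_UNUSED is a poison byte for memmap ranges that are covered by a
 * large (PMD) mapping but do not (yet) back initialized struct pages.
 * unused_sub_pmd_start remembers the still unpoisoned tail of the last
 * populated PMD so that consecutive sections can skip redundant memsets;
 * vmemmap_flush_unused_sub_pmd() below writes that pending poison out.
 */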

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
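
/*
 * Illustrative scenario (hypothetical layout, not an s390 requirement):
 * two memory sections whose memmaps share one PMD-mapped page. Populating
 * the first section poisons the remainder of that page with PAGE_UNUSED;
 * populating the second clears the poison again. On removal each section
 * re-poisons its own part, and only once memchr_inv() sees the whole PMD
 * filled with PAGE_UNUSED does the caller free the memmap page.
 */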

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}
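
/*
 * Note on the "direct" flag used throughout the modify_*_table() helpers:
 * direct == true operates on the 1:1 (identity) mapping, where the target
 * physical address is simply __pa(addr) and no backing pages are ever
 * allocated or freed; direct == false operates on the virtual memmap,
 * where populating a range allocates fresh pages and removal frees them.
 * Only the direct case is accounted in the PG_DIRECT_MAP_* counters.
 */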

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for the vmemmap if available.
				 * We always use large frames, even if they are
				 * only partially used; otherwise we would also
				 * need page tables, since vmemmap_populate()
				 * gets called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}
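
/*
 * Background: EDAT1 ("enhanced DAT") provides 1 MB segment mappings and
 * EDAT2 provides 2 GB region mappings on s390. The helpers here only use
 * them for suitably aligned ranges, and never for the direct map when
 * debug_pagealloc is enabled, because that feature needs to change
 * protections at 4 KB granularity.
 */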

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}
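
/*
 * Unlike the PMD-level vmemmap case, removing a large PUD entry never
 * frees backing pages: 2 GB mappings are only ever created for the
 * identity mapping (direct == true), and identity-mapped memory is not
 * owned by these page tables.
 */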

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}
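
/*
 * The modify_*_table() helpers do not take any lock themselves. The entry
 * points below (vmemmap_populate(), vmem_add_mapping(), vmem_map_4k_page()
 * and friends) serialize page table changes by holding vmem_mutex, while
 * vmem_map_init() runs only once during early boot and therefore doesn't.
 */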

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to
 * be called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}
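
/*
 * Usage sketch (hypothetical addresses, for illustration only): to map a
 * single 4 KB page of, say, firmware-owned memory at a fixed kernel
 * virtual address, one would do something like
 *
 *	rc = vmem_map_4k_page(vaddr, paddr, PAGE_KERNEL);
 *	...
 *	vmem_unmap_4k_page(vaddr);
 *
 * where vaddr must be page aligned and lie in a region reserved for 4 KB
 * mappings, as described above vmem_get_alloc_pte().
 */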

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged later.
 */
void __init vmem_map_init(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end)
		vmem_add_range(base, end - base);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* lowcore requires 4k mapping for real addresses / prefixing */
	set_memory_4k(0, LC_PAGES);

	/* lowcore must be executable for LPSWE */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}

void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}
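
/*
 * __arch_set_page_dat() (declared in <asm/page-states.h>) adjusts the
 * page state of the pages backing this DAT table. Roughly, pages used
 * for address translation tables must be in the proper CMMA page state
 * when running as a guest; the precise semantics live in the page-states
 * code, so treat this note as a pointer rather than a specification.
 */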

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	__arch_set_page_dat(pte, 1);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for the vmemmap if available.
				 * We always use large frames, even if they are
				 * only partially used; otherwise we would also
				 * need page tables, since vmemmap_populate()
				 * gets called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > VMALLOC_START))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true);
}
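
/*
 * vmem_add_range() and vmem_remove_range() take physical addresses and
 * convert them with __va() before walking the kernel page tables, so the
 * code keeps working even when the identity mapping does not start at
 * virtual address 0.
 */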

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

#endif

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}
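
/*
 * max_mappable is determined during early boot, when the address space
 * layout is set up, and bounds the highest physical address that can
 * still be covered by the identity mapping and the vmemmap; earlier
 * versions of this function returned the compile-time VMEM_MAX_PHYS here.
 */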

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along the way. Meeting a large segment- or region-table
 * entry while traversing is an error, since the function is expected to
 * be called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}

int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}

int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}

void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}

void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(_sinittext, _einittext);
	__set_memory_rox(__stext_amode31, __etext_amode31);
	/*
	 * If the BEAR-enhancement facility is not installed the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled()) {
		/*
		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
		 * since performing pointer arithmetic on a NULL pointer
		 * has undefined behavior and generates compiler warnings.
		 */
		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
	}
	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}