v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *    Copyright IBM Corp. 2006
  4 */
  5
  6#include <linux/memory_hotplug.h>
  7#include <linux/memblock.h>
  8#include <linux/pfn.h>
  9#include <linux/mm.h>
 10#include <linux/init.h>
 11#include <linux/list.h>
 12#include <linux/hugetlb.h>
 13#include <linux/slab.h>
 14#include <linux/sort.h>
 15#include <asm/page-states.h>
 16#include <asm/cacheflush.h>
 17#include <asm/nospec-branch.h>
 18#include <asm/ctlreg.h>
 19#include <asm/pgalloc.h>
 20#include <asm/setup.h>
 21#include <asm/tlbflush.h>
 22#include <asm/sections.h>
 23#include <asm/set_memory.h>
 24
 25static DEFINE_MUTEX(vmem_mutex);
 26
 27static void __ref *vmem_alloc_pages(unsigned int order)
 28{
 29	unsigned long size = PAGE_SIZE << order;
 30
 31	if (slab_is_available())
 32		return (void *)__get_free_pages(GFP_KERNEL, order);
 33	return memblock_alloc(size, size);
 34}
 35
 36static void vmem_free_pages(unsigned long addr, int order)
 37{
 38	/* We don't expect boot memory to be removed ever. */
 39	if (!slab_is_available() ||
 40	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
 41		return;
 42	free_pages(addr, order);
 43}
 44
 45void *vmem_crst_alloc(unsigned long val)
 46{
 47	unsigned long *table;
 48
 49	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
 50	if (!table)
 51		return NULL;
 52	crst_table_init(table, val);
 53	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
 54	return table;
 55}
 56
 57pte_t __ref *vmem_pte_alloc(void)
 58{
 59	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
 60	pte_t *pte;
 61
 62	if (slab_is_available())
 63		pte = (pte_t *) page_table_alloc(&init_mm);
 64	else
 65		pte = (pte_t *) memblock_alloc(size, size);
 66	if (!pte)
 67		return NULL;
 68	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
 69	__arch_set_page_dat(pte, 1);
 70	return pte;
 71}
 72
 73static void vmem_pte_free(unsigned long *table)
 74{
 75	/* We don't expect boot memory to be removed ever. */
 76	if (!slab_is_available() ||
 77	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
 78		return;
 79	page_table_free(&init_mm, table);
 80}
 81
 82#define PAGE_UNUSED 0xFD
 83
 84/*
 85 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED) ranges
 86 * from unused_sub_pmd_start to next PMD_SIZE boundary.
 87 */
 88static unsigned long unused_sub_pmd_start;
 89
 90static void vmemmap_flush_unused_sub_pmd(void)
 91{
 92	if (!unused_sub_pmd_start)
 93		return;
 94	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
 95	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
 96	unused_sub_pmd_start = 0;
 97}
 98
 99static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
100{
101	/*
102	 * As we expect to add in the same granularity as we remove, it's
103	 * sufficient to mark only some piece used to block the memmap page from
104	 * getting removed (just in case the memmap never gets initialized,
105	 * e.g., because the memory block never gets onlined).
106	 */
107	memset((void *)start, 0, sizeof(struct page));
108}
109
110static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
111{
112	/*
113	 * We only optimize if the new used range directly follows the
114	 * previously unused range (esp., when populating consecutive sections).
115	 */
116	if (unused_sub_pmd_start == start) {
117		unused_sub_pmd_start = end;
118		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
119			unused_sub_pmd_start = 0;
120		return;
121	}
122	vmemmap_flush_unused_sub_pmd();
123	vmemmap_mark_sub_pmd_used(start, end);
124}
125
126static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
127{
128	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
129
130	vmemmap_flush_unused_sub_pmd();
131
132	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
133	vmemmap_mark_sub_pmd_used(start, end);
134
135	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
136	if (!IS_ALIGNED(start, PMD_SIZE))
137		memset((void *)page, PAGE_UNUSED, start - page);
138	/*
139	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
140	 * consecutive sections. Remember for the last added PMD the last
141	 * unused range in the populated PMD.
142	 */
143	if (!IS_ALIGNED(end, PMD_SIZE))
144		unused_sub_pmd_start = end;
145}
146
147/* Returns true if the PMD is completely unused and can be freed. */
148static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
149{
150	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
151
152	vmemmap_flush_unused_sub_pmd();
153	memset((void *)start, PAGE_UNUSED, end - start);
154	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
155}
156
157/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
158static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
159				  unsigned long end, bool add, bool direct)
160{
161	unsigned long prot, pages = 0;
162	int ret = -ENOMEM;
163	pte_t *pte;
164
165	prot = pgprot_val(PAGE_KERNEL);
166	if (!MACHINE_HAS_NX)
167		prot &= ~_PAGE_NOEXEC;
168
169	pte = pte_offset_kernel(pmd, addr);
170	for (; addr < end; addr += PAGE_SIZE, pte++) {
171		if (!add) {
172			if (pte_none(*pte))
173				continue;
174			if (!direct)
175				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
176			pte_clear(&init_mm, addr, pte);
177		} else if (pte_none(*pte)) {
178			if (!direct) {
179				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
180
181				if (!new_page)
182					goto out;
183				set_pte(pte, __pte(__pa(new_page) | prot));
184			} else {
185				set_pte(pte, __pte(__pa(addr) | prot));
186			}
187		} else {
188			continue;
189		}
190		pages++;
191	}
192	ret = 0;
193out:
194	if (direct)
195		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
196	return ret;
197}
198
199static void try_free_pte_table(pmd_t *pmd, unsigned long start)
200{
201	pte_t *pte;
202	int i;
203
204	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
205	pte = pte_offset_kernel(pmd, start);
206	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
207		if (!pte_none(*pte))
208			return;
209	}
210	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
211	pmd_clear(pmd);
212}
213
214/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
215static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
216				  unsigned long end, bool add, bool direct)
217{
218	unsigned long next, prot, pages = 0;
219	int ret = -ENOMEM;
220	pmd_t *pmd;
221	pte_t *pte;
222
223	prot = pgprot_val(SEGMENT_KERNEL);
224	if (!MACHINE_HAS_NX)
225		prot &= ~_SEGMENT_ENTRY_NOEXEC;
226
227	pmd = pmd_offset(pud, addr);
228	for (; addr < end; addr = next, pmd++) {
229		next = pmd_addr_end(addr, end);
230		if (!add) {
231			if (pmd_none(*pmd))
232				continue;
233			if (pmd_large(*pmd)) {
234				if (IS_ALIGNED(addr, PMD_SIZE) &&
235				    IS_ALIGNED(next, PMD_SIZE)) {
236					if (!direct)
237						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
238					pmd_clear(pmd);
239					pages++;
240				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
241					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
242					pmd_clear(pmd);
243				}
244				continue;
245			}
246		} else if (pmd_none(*pmd)) {
247			if (IS_ALIGNED(addr, PMD_SIZE) &&
248			    IS_ALIGNED(next, PMD_SIZE) &&
249			    MACHINE_HAS_EDAT1 && direct &&
250			    !debug_pagealloc_enabled()) {
251				set_pmd(pmd, __pmd(__pa(addr) | prot));
252				pages++;
253				continue;
254			} else if (!direct && MACHINE_HAS_EDAT1) {
255				void *new_page;
256
257				/*
258				 * Use 1MB frames for vmemmap if available. We
259				 * always use large frames even if they are only
260				 * partially used. Otherwise we would have also
261				 * page tables since vmemmap_populate gets
262				 * called for each section separately.
263				 */
264				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
265				if (new_page) {
266					set_pmd(pmd, __pmd(__pa(new_page) | prot));
267					if (!IS_ALIGNED(addr, PMD_SIZE) ||
268					    !IS_ALIGNED(next, PMD_SIZE)) {
269						vmemmap_use_new_sub_pmd(addr, next);
270					}
271					continue;
272				}
273			}
274			pte = vmem_pte_alloc();
275			if (!pte)
276				goto out;
277			pmd_populate(&init_mm, pmd, pte);
278		} else if (pmd_large(*pmd)) {
279			if (!direct)
280				vmemmap_use_sub_pmd(addr, next);
281			continue;
282		}
283		ret = modify_pte_table(pmd, addr, next, add, direct);
284		if (ret)
285			goto out;
286		if (!add)
287			try_free_pte_table(pmd, addr & PMD_MASK);
288	}
289	ret = 0;
290out:
291	if (direct)
292		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
293	return ret;
294}
295
296static void try_free_pmd_table(pud_t *pud, unsigned long start)
297{
298	pmd_t *pmd;
299	int i;
300
301	pmd = pmd_offset(pud, start);
302	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
303		if (!pmd_none(*pmd))
304			return;
305	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
306	pud_clear(pud);
307}
308
309static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
310			    bool add, bool direct)
311{
312	unsigned long next, prot, pages = 0;
313	int ret = -ENOMEM;
314	pud_t *pud;
315	pmd_t *pmd;
316
317	prot = pgprot_val(REGION3_KERNEL);
318	if (!MACHINE_HAS_NX)
319		prot &= ~_REGION_ENTRY_NOEXEC;
320	pud = pud_offset(p4d, addr);
321	for (; addr < end; addr = next, pud++) {
322		next = pud_addr_end(addr, end);
323		if (!add) {
324			if (pud_none(*pud))
325				continue;
326			if (pud_large(*pud)) {
327				if (IS_ALIGNED(addr, PUD_SIZE) &&
328				    IS_ALIGNED(next, PUD_SIZE)) {
329					pud_clear(pud);
330					pages++;
331				}
332				continue;
333			}
334		} else if (pud_none(*pud)) {
335			if (IS_ALIGNED(addr, PUD_SIZE) &&
336			    IS_ALIGNED(next, PUD_SIZE) &&
337			    MACHINE_HAS_EDAT2 && direct &&
338			    !debug_pagealloc_enabled()) {
339				set_pud(pud, __pud(__pa(addr) | prot));
340				pages++;
341				continue;
342			}
343			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
344			if (!pmd)
345				goto out;
346			pud_populate(&init_mm, pud, pmd);
347		} else if (pud_large(*pud)) {
348			continue;
349		}
350		ret = modify_pmd_table(pud, addr, next, add, direct);
351		if (ret)
352			goto out;
353		if (!add)
354			try_free_pmd_table(pud, addr & PUD_MASK);
355	}
356	ret = 0;
357out:
358	if (direct)
359		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
360	return ret;
361}
362
363static void try_free_pud_table(p4d_t *p4d, unsigned long start)
364{
365	pud_t *pud;
366	int i;
367
368	pud = pud_offset(p4d, start);
369	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
370		if (!pud_none(*pud))
371			return;
372	}
373	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
374	p4d_clear(p4d);
375}
376
377static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
378			    bool add, bool direct)
379{
380	unsigned long next;
381	int ret = -ENOMEM;
382	p4d_t *p4d;
383	pud_t *pud;
384
385	p4d = p4d_offset(pgd, addr);
386	for (; addr < end; addr = next, p4d++) {
387		next = p4d_addr_end(addr, end);
388		if (!add) {
389			if (p4d_none(*p4d))
390				continue;
391		} else if (p4d_none(*p4d)) {
392			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
393			if (!pud)
394				goto out;
395			p4d_populate(&init_mm, p4d, pud);
396		}
397		ret = modify_pud_table(p4d, addr, next, add, direct);
398		if (ret)
399			goto out;
400		if (!add)
401			try_free_pud_table(p4d, addr & P4D_MASK);
402	}
403	ret = 0;
404out:
405	return ret;
406}
407
408static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
409{
410	p4d_t *p4d;
411	int i;
412
413	p4d = p4d_offset(pgd, start);
414	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
415		if (!p4d_none(*p4d))
416			return;
417	}
418	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
419	pgd_clear(pgd);
420}
421
422static int modify_pagetable(unsigned long start, unsigned long end, bool add,
423			    bool direct)
424{
425	unsigned long addr, next;
426	int ret = -ENOMEM;
427	pgd_t *pgd;
428	p4d_t *p4d;
429
430	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
431		return -EINVAL;
432	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
433	if (WARN_ON_ONCE(end > VMALLOC_START))
434		return -EINVAL;
435	for (addr = start; addr < end; addr = next) {
436		next = pgd_addr_end(addr, end);
437		pgd = pgd_offset_k(addr);
438
439		if (!add) {
440			if (pgd_none(*pgd))
441				continue;
442		} else if (pgd_none(*pgd)) {
443			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
444			if (!p4d)
445				goto out;
446			pgd_populate(&init_mm, pgd, p4d);
447		}
448		ret = modify_p4d_table(pgd, addr, next, add, direct);
449		if (ret)
450			goto out;
451		if (!add)
452			try_free_p4d_table(pgd, addr & PGDIR_MASK);
453	}
454	ret = 0;
455out:
456	if (!add)
457		flush_tlb_kernel_range(start, end);
458	return ret;
459}
460
461static int add_pagetable(unsigned long start, unsigned long end, bool direct)
462{
463	return modify_pagetable(start, end, true, direct);
464}
465
466static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
467{
468	return modify_pagetable(start, end, false, direct);
469}
470
471/*
472 * Add a physical memory range to the 1:1 mapping.
473 */
474static int vmem_add_range(unsigned long start, unsigned long size)
475{
476	start = (unsigned long)__va(start);
477	return add_pagetable(start, start + size, true);
478}
479
480/*
481 * Remove a physical memory range from the 1:1 mapping.
482 */
483static void vmem_remove_range(unsigned long start, unsigned long size)
484{
485	start = (unsigned long)__va(start);
486	remove_pagetable(start, start + size, true);
487}
488
489/*
490 * Add a backed mem_map array to the virtual mem_map array.
491 */
492int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
493			       struct vmem_altmap *altmap)
494{
495	int ret;
496
497	mutex_lock(&vmem_mutex);
498	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
499	ret = add_pagetable(start, end, false);
500	if (ret)
501		remove_pagetable(start, end, false);
502	mutex_unlock(&vmem_mutex);
503	return ret;
504}
505
506#ifdef CONFIG_MEMORY_HOTPLUG
507
508void vmemmap_free(unsigned long start, unsigned long end,
509		  struct vmem_altmap *altmap)
510{
511	mutex_lock(&vmem_mutex);
512	remove_pagetable(start, end, false);
513	mutex_unlock(&vmem_mutex);
514}
515
516#endif
517
518void vmem_remove_mapping(unsigned long start, unsigned long size)
519{
520	mutex_lock(&vmem_mutex);
521	vmem_remove_range(start, size);
522	mutex_unlock(&vmem_mutex);
523}
524
525struct range arch_get_mappable_range(void)
526{
527	struct range mhp_range;
528
529	mhp_range.start = 0;
530	mhp_range.end = max_mappable - 1;
531	return mhp_range;
532}
533
534int vmem_add_mapping(unsigned long start, unsigned long size)
535{
536	struct range range = arch_get_mappable_range();
537	int ret;
538
539	if (start < range.start ||
540	    start + size > range.end + 1 ||
541	    start + size < start)
542		return -ERANGE;
543
544	mutex_lock(&vmem_mutex);
545	ret = vmem_add_range(start, size);
546	if (ret)
547		vmem_remove_range(start, size);
548	mutex_unlock(&vmem_mutex);
549	return ret;
550}
551
552/*
553 * Allocate new or return existing page-table entry, but do not map it
554 * to any physical address. If missing, allocate segment- and region-
555 * table entries along. Meeting a large segment- or region-table entry
556 * while traversing is an error, since the function is expected to be
557 * called against virtual regions reserved for 4KB mappings only.
558 */
559pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
560{
561	pte_t *ptep = NULL;
562	pgd_t *pgd;
563	p4d_t *p4d;
564	pud_t *pud;
565	pmd_t *pmd;
566	pte_t *pte;
567
568	pgd = pgd_offset_k(addr);
569	if (pgd_none(*pgd)) {
570		if (!alloc)
571			goto out;
572		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
573		if (!p4d)
574			goto out;
575		pgd_populate(&init_mm, pgd, p4d);
576	}
577	p4d = p4d_offset(pgd, addr);
578	if (p4d_none(*p4d)) {
579		if (!alloc)
580			goto out;
581		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
582		if (!pud)
583			goto out;
584		p4d_populate(&init_mm, p4d, pud);
585	}
586	pud = pud_offset(p4d, addr);
587	if (pud_none(*pud)) {
588		if (!alloc)
589			goto out;
590		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
591		if (!pmd)
592			goto out;
593		pud_populate(&init_mm, pud, pmd);
594	} else if (WARN_ON_ONCE(pud_large(*pud))) {
595		goto out;
596	}
597	pmd = pmd_offset(pud, addr);
598	if (pmd_none(*pmd)) {
599		if (!alloc)
600			goto out;
601		pte = vmem_pte_alloc();
602		if (!pte)
603			goto out;
604		pmd_populate(&init_mm, pmd, pte);
605	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
606		goto out;
607	}
608	ptep = pte_offset_kernel(pmd, addr);
609out:
610	return ptep;
611}
612
613int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
614{
615	pte_t *ptep, pte;
616
617	if (!IS_ALIGNED(addr, PAGE_SIZE))
618		return -EINVAL;
619	ptep = vmem_get_alloc_pte(addr, alloc);
620	if (!ptep)
621		return -ENOMEM;
622	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
623	pte = mk_pte_phys(phys, prot);
624	set_pte(ptep, pte);
625	return 0;
626}
627
628int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
629{
630	int rc;
631
632	mutex_lock(&vmem_mutex);
633	rc = __vmem_map_4k_page(addr, phys, prot, true);
634	mutex_unlock(&vmem_mutex);
635	return rc;
636}
637
638void vmem_unmap_4k_page(unsigned long addr)
639{
640	pte_t *ptep;
641
642	mutex_lock(&vmem_mutex);
643	ptep = virt_to_kpte(addr);
644	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
645	pte_clear(&init_mm, addr, ptep);
646	mutex_unlock(&vmem_mutex);
647}
648
649void __init vmem_map_init(void)
650{
651	__set_memory_rox(_stext, _etext);
652	__set_memory_ro(_etext, __end_rodata);
653	__set_memory_rox(_sinittext, _einittext);
654	__set_memory_rox(__stext_amode31, __etext_amode31);
655	/*
656	 * If the BEAR-enhancement facility is not installed the first
657	 * prefix page is used to return to the previous context with
658	 * an LPSWE instruction and therefore must be executable.
659	 */
660	if (!static_key_enabled(&cpu_has_bear))
661		set_memory_x(0, 1);
662	if (debug_pagealloc_enabled()) {
663		/*
664		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
665		 * since performing pointer arithmetic on a NULL pointer
666		 * has undefined behavior and generates compiler warnings.
667		 */
668		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
669	}
670	if (MACHINE_HAS_NX)
671		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
672	pr_info("Write protected kernel read-only data: %luk\n",
673		(unsigned long)(__end_rodata - _stext) >> 10);
674}
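
A usage sketch, not part of the file above: arch_get_mappable_range(), vmem_add_mapping() and vmem_remove_mapping() are the interface through which the rest of the s390 code grows and shrinks the 1:1 mapping, with vmem.c serializing the actual page-table edits behind vmem_mutex. The hypothetical caller below only illustrates that calling convention; its name and control flow are assumptions, only the calls into vmem.c are real.

/* Hypothetical caller - for illustration only, not part of arch/s390/mm/vmem.c. */
static int demo_add_physmem(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int rc;

	/* vmem_add_mapping() performs the same bounds check internally. */
	if (start < range.start || start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;
	rc = vmem_add_mapping(start, size);	/* build the 1:1 mapping under vmem_mutex */
	if (rc)
		return rc;
	/* The range is now reachable through the identity mapping, __va(start). */
	vmem_remove_mapping(start, size);	/* and can be torn down again */
	return 0;
}
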
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *    Copyright IBM Corp. 2006
  4 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  5 */
  6
  7#include <linux/memblock.h>
  8#include <linux/pfn.h>
  9#include <linux/mm.h>
 10#include <linux/init.h>
 11#include <linux/list.h>
 12#include <linux/hugetlb.h>
 13#include <linux/slab.h>
 14#include <asm/cacheflush.h>
 15#include <asm/pgalloc.h>
 16#include <asm/setup.h>
 17#include <asm/tlbflush.h>
 18#include <asm/sections.h>
 19#include <asm/set_memory.h>
 20
 21static DEFINE_MUTEX(vmem_mutex);
 22
 23static void __ref *vmem_alloc_pages(unsigned int order)
 24{
 25	unsigned long size = PAGE_SIZE << order;
 26
 27	if (slab_is_available())
 28		return (void *)__get_free_pages(GFP_KERNEL, order);
 29	return (void *) memblock_phys_alloc(size, size);
 30}
 31
 32static void vmem_free_pages(unsigned long addr, int order)
 33{
 34	/* We don't expect boot memory to be removed ever. */
 35	if (!slab_is_available() ||
 36	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
 37		return;
 38	free_pages(addr, order);
 39}
 40
 41void *vmem_crst_alloc(unsigned long val)
 42{
 43	unsigned long *table;
 44
 45	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
 46	if (table)
 47		crst_table_init(table, val);
 48	return table;
 49}
 50
 51pte_t __ref *vmem_pte_alloc(void)
 52{
 53	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
 54	pte_t *pte;
 55
 56	if (slab_is_available())
 57		pte = (pte_t *) page_table_alloc(&init_mm);
 58	else
 59		pte = (pte_t *) memblock_phys_alloc(size, size);
 60	if (!pte)
 61		return NULL;
 62	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
 63	return pte;
 64}
 65
 66static void vmem_pte_free(unsigned long *table)
 67{
 68	/* We don't expect boot memory to be removed ever. */
 69	if (!slab_is_available() ||
 70	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
 71		return;
 72	page_table_free(&init_mm, table);
 73}
 74
 75#define PAGE_UNUSED 0xFD
 76
 77/*
 78 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED) ranges
 79 * from unused_pmd_start to next PMD_SIZE boundary.
 80 */
 81static unsigned long unused_pmd_start;
 82
 83static void vmemmap_flush_unused_pmd(void)
 84{
 85	if (!unused_pmd_start)
 86		return;
 87	memset(__va(unused_pmd_start), PAGE_UNUSED,
 88	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
 89	unused_pmd_start = 0;
 90}
 91
 92static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 93{
 94	/*
 95	 * As we expect to add in the same granularity as we remove, it's
 96	 * sufficient to mark only some piece used to block the memmap page from
 97	 * getting removed (just in case the memmap never gets initialized,
 98	 * e.g., because the memory block never gets onlined).
 99	 */
100	memset(__va(start), 0, sizeof(struct page));
101}
102
103static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
104{
105	/*
106	 * We only optimize if the new used range directly follows the
107	 * previously unused range (esp., when populating consecutive sections).
108	 */
109	if (unused_pmd_start == start) {
110		unused_pmd_start = end;
111		if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
112			unused_pmd_start = 0;
113		return;
114	}
115	vmemmap_flush_unused_pmd();
116	__vmemmap_use_sub_pmd(start, end);
117}
118
119static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
120{
121	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
122
123	vmemmap_flush_unused_pmd();
124
125	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
126	__vmemmap_use_sub_pmd(start, end);
127
128	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
129	if (!IS_ALIGNED(start, PMD_SIZE))
130		memset(page, PAGE_UNUSED, start - __pa(page));
131	/*
132	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
133	 * consecutive sections. Remember for the last added PMD the last
134	 * unused range in the populated PMD.
135	 */
136	if (!IS_ALIGNED(end, PMD_SIZE))
137		unused_pmd_start = end;
138}
139
140/* Returns true if the PMD is completely unused and can be freed. */
141static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
142{
143	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
144
145	vmemmap_flush_unused_pmd();
146	memset(__va(start), PAGE_UNUSED, end - start);
147	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
148}
149
150/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
151static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
152				  unsigned long end, bool add, bool direct)
153{
154	unsigned long prot, pages = 0;
155	int ret = -ENOMEM;
156	pte_t *pte;
157
158	prot = pgprot_val(PAGE_KERNEL);
159	if (!MACHINE_HAS_NX)
160		prot &= ~_PAGE_NOEXEC;
161
162	pte = pte_offset_kernel(pmd, addr);
163	for (; addr < end; addr += PAGE_SIZE, pte++) {
164		if (!add) {
165			if (pte_none(*pte))
166				continue;
167			if (!direct)
168				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
169			pte_clear(&init_mm, addr, pte);
170		} else if (pte_none(*pte)) {
171			if (!direct) {
172				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
173
174				if (!new_page)
175					goto out;
176				pte_val(*pte) = __pa(new_page) | prot;
177			} else {
178				pte_val(*pte) = addr | prot;
179			}
180		} else {
181			continue;
182		}
183		pages++;
184	}
185	ret = 0;
186out:
187	if (direct)
188		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
189	return ret;
190}
191
192static void try_free_pte_table(pmd_t *pmd, unsigned long start)
193{
194	pte_t *pte;
195	int i;
196
197	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
198	pte = pte_offset_kernel(pmd, start);
199	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
200		if (!pte_none(*pte))
201			return;
202	}
203	vmem_pte_free(__va(pmd_deref(*pmd)));
204	pmd_clear(pmd);
205}
206
207/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
208static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
209				  unsigned long end, bool add, bool direct)
210{
211	unsigned long next, prot, pages = 0;
212	int ret = -ENOMEM;
213	pmd_t *pmd;
214	pte_t *pte;
215
216	prot = pgprot_val(SEGMENT_KERNEL);
217	if (!MACHINE_HAS_NX)
218		prot &= ~_SEGMENT_ENTRY_NOEXEC;
219
220	pmd = pmd_offset(pud, addr);
221	for (; addr < end; addr = next, pmd++) {
222		next = pmd_addr_end(addr, end);
223		if (!add) {
224			if (pmd_none(*pmd))
225				continue;
226			if (pmd_large(*pmd) && !add) {
227				if (IS_ALIGNED(addr, PMD_SIZE) &&
228				    IS_ALIGNED(next, PMD_SIZE)) {
229					if (!direct)
230						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
231					pmd_clear(pmd);
232					pages++;
233				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
234					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
235					pmd_clear(pmd);
236				}
237				continue;
238			}
239		} else if (pmd_none(*pmd)) {
240			if (IS_ALIGNED(addr, PMD_SIZE) &&
241			    IS_ALIGNED(next, PMD_SIZE) &&
242			    MACHINE_HAS_EDAT1 && addr && direct &&
243			    !debug_pagealloc_enabled()) {
244				pmd_val(*pmd) = addr | prot;
245				pages++;
246				continue;
247			} else if (!direct && MACHINE_HAS_EDAT1) {
248				void *new_page;
249
250				/*
251				 * Use 1MB frames for vmemmap if available. We
252				 * always use large frames even if they are only
253				 * partially used. Otherwise we would have also
254				 * page tables since vmemmap_populate gets
255				 * called for each section separately.
256				 */
257				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
258				if (new_page) {
259					pmd_val(*pmd) = __pa(new_page) | prot;
260					if (!IS_ALIGNED(addr, PMD_SIZE) ||
261					    !IS_ALIGNED(next, PMD_SIZE)) {
262						vmemmap_use_new_sub_pmd(addr, next);
263					}
264					continue;
265				}
266			}
267			pte = vmem_pte_alloc();
268			if (!pte)
269				goto out;
270			pmd_populate(&init_mm, pmd, pte);
271		} else if (pmd_large(*pmd)) {
272			if (!direct)
273				vmemmap_use_sub_pmd(addr, next);
274			continue;
275		}
276		ret = modify_pte_table(pmd, addr, next, add, direct);
277		if (ret)
278			goto out;
279		if (!add)
280			try_free_pte_table(pmd, addr & PMD_MASK);
281	}
282	ret = 0;
283out:
284	if (direct)
285		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
286	return ret;
287}
288
289static void try_free_pmd_table(pud_t *pud, unsigned long start)
290{
291	const unsigned long end = start + PUD_SIZE;
292	pmd_t *pmd;
293	int i;
294
295	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
296	if (end > VMALLOC_START)
297		return;
298#ifdef CONFIG_KASAN
299	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
300		return;
301#endif
302	pmd = pmd_offset(pud, start);
303	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
304		if (!pmd_none(*pmd))
305			return;
306	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
307	pud_clear(pud);
308}
309
310static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
311			    bool add, bool direct)
312{
313	unsigned long next, prot, pages = 0;
314	int ret = -ENOMEM;
315	pud_t *pud;
316	pmd_t *pmd;
317
318	prot = pgprot_val(REGION3_KERNEL);
319	if (!MACHINE_HAS_NX)
320		prot &= ~_REGION_ENTRY_NOEXEC;
321	pud = pud_offset(p4d, addr);
322	for (; addr < end; addr = next, pud++) {
323		next = pud_addr_end(addr, end);
324		if (!add) {
325			if (pud_none(*pud))
326				continue;
327			if (pud_large(*pud)) {
328				if (IS_ALIGNED(addr, PUD_SIZE) &&
329				    IS_ALIGNED(next, PUD_SIZE)) {
330					pud_clear(pud);
331					pages++;
332				}
333				continue;
334			}
335		} else if (pud_none(*pud)) {
336			if (IS_ALIGNED(addr, PUD_SIZE) &&
337			    IS_ALIGNED(next, PUD_SIZE) &&
338			    MACHINE_HAS_EDAT2 && addr && direct &&
339			    !debug_pagealloc_enabled()) {
340				pud_val(*pud) = addr | prot;
341				pages++;
342				continue;
343			}
344			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
345			if (!pmd)
346				goto out;
347			pud_populate(&init_mm, pud, pmd);
348		} else if (pud_large(*pud)) {
349			continue;
350		}
351		ret = modify_pmd_table(pud, addr, next, add, direct);
352		if (ret)
353			goto out;
354		if (!add)
355			try_free_pmd_table(pud, addr & PUD_MASK);
356	}
357	ret = 0;
358out:
359	if (direct)
360		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
361	return ret;
362}
363
364static void try_free_pud_table(p4d_t *p4d, unsigned long start)
365{
366	const unsigned long end = start + P4D_SIZE;
367	pud_t *pud;
368	int i;
369
370	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
371	if (end > VMALLOC_START)
372		return;
373#ifdef CONFIG_KASAN
374	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
375		return;
376#endif
377
378	pud = pud_offset(p4d, start);
379	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
380		if (!pud_none(*pud))
381			return;
382	}
383	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
384	p4d_clear(p4d);
385}
386
387static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
388			    bool add, bool direct)
389{
390	unsigned long next;
391	int ret = -ENOMEM;
392	p4d_t *p4d;
393	pud_t *pud;
394
395	p4d = p4d_offset(pgd, addr);
396	for (; addr < end; addr = next, p4d++) {
397		next = p4d_addr_end(addr, end);
398		if (!add) {
399			if (p4d_none(*p4d))
400				continue;
401		} else if (p4d_none(*p4d)) {
402			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
403			if (!pud)
404				goto out;
405			p4d_populate(&init_mm, p4d, pud);
406		}
407		ret = modify_pud_table(p4d, addr, next, add, direct);
408		if (ret)
409			goto out;
410		if (!add)
411			try_free_pud_table(p4d, addr & P4D_MASK);
412	}
413	ret = 0;
414out:
415	return ret;
416}
417
418static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
419{
420	const unsigned long end = start + PGDIR_SIZE;
421	p4d_t *p4d;
422	int i;
423
424	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
425	if (end > VMALLOC_START)
426		return;
427#ifdef CONFIG_KASAN
428	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
429		return;
430#endif
431
432	p4d = p4d_offset(pgd, start);
433	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
434		if (!p4d_none(*p4d))
435			return;
436	}
437	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
438	pgd_clear(pgd);
439}
440
441static int modify_pagetable(unsigned long start, unsigned long end, bool add,
442			    bool direct)
443{
444	unsigned long addr, next;
445	int ret = -ENOMEM;
446	pgd_t *pgd;
447	p4d_t *p4d;
448
449	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
450		return -EINVAL;
451	for (addr = start; addr < end; addr = next) {
452		next = pgd_addr_end(addr, end);
453		pgd = pgd_offset_k(addr);
454
455		if (!add) {
456			if (pgd_none(*pgd))
457				continue;
458		} else if (pgd_none(*pgd)) {
459			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
460			if (!p4d)
461				goto out;
462			pgd_populate(&init_mm, pgd, p4d);
463		}
464		ret = modify_p4d_table(pgd, addr, next, add, direct);
465		if (ret)
466			goto out;
467		if (!add)
468			try_free_p4d_table(pgd, addr & PGDIR_MASK);
469	}
470	ret = 0;
471out:
472	if (!add)
473		flush_tlb_kernel_range(start, end);
474	return ret;
475}
476
477static int add_pagetable(unsigned long start, unsigned long end, bool direct)
478{
479	return modify_pagetable(start, end, true, direct);
480}
481
482static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
483{
484	return modify_pagetable(start, end, false, direct);
485}
486
487/*
488 * Add a physical memory range to the 1:1 mapping.
489 */
490static int vmem_add_range(unsigned long start, unsigned long size)
491{
492	return add_pagetable(start, start + size, true);
493}
494
495/*
496 * Remove a physical memory range from the 1:1 mapping.
497 */
498static void vmem_remove_range(unsigned long start, unsigned long size)
499{
500	remove_pagetable(start, start + size, true);
501}
502
503/*
504 * Add a backed mem_map array to the virtual mem_map array.
505 */
506int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
507			       struct vmem_altmap *altmap)
508{
509	int ret;
510
511	mutex_lock(&vmem_mutex);
512	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
513	ret = add_pagetable(start, end, false);
514	if (ret)
515		remove_pagetable(start, end, false);
516	mutex_unlock(&vmem_mutex);
517	return ret;
518}
519
520void vmemmap_free(unsigned long start, unsigned long end,
521		  struct vmem_altmap *altmap)
522{
523	mutex_lock(&vmem_mutex);
524	remove_pagetable(start, end, false);
525	mutex_unlock(&vmem_mutex);
526}
527
528void vmem_remove_mapping(unsigned long start, unsigned long size)
529{
530	mutex_lock(&vmem_mutex);
531	vmem_remove_range(start, size);
532	mutex_unlock(&vmem_mutex);
533}
534
535int vmem_add_mapping(unsigned long start, unsigned long size)
536{
537	int ret;
538
539	if (start + size > VMEM_MAX_PHYS ||
540	    start + size < start)
541		return -ERANGE;
542
543	mutex_lock(&vmem_mutex);
544	ret = vmem_add_range(start, size);
545	if (ret)
546		vmem_remove_range(start, size);
547	mutex_unlock(&vmem_mutex);
548	return ret;
549}
550
551/*
552 * map whole physical memory to virtual memory (identity mapping)
553 * we reserve enough space in the vmalloc area for vmemmap to hotplug
554 * additional memory segments.
555 */
556void __init vmem_map_init(void)
557{
558	struct memblock_region *reg;
559
560	for_each_memblock(memory, reg)
561		vmem_add_range(reg->base, reg->size);
562	__set_memory((unsigned long)_stext,
563		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
564		     SET_MEMORY_RO | SET_MEMORY_X);
565	__set_memory((unsigned long)_etext,
566		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
567		     SET_MEMORY_RO);
568	__set_memory((unsigned long)_sinittext,
569		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
570		     SET_MEMORY_RO | SET_MEMORY_X);
571	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
572		     SET_MEMORY_RO | SET_MEMORY_X);
573
574	/* we need lowcore executable for our LPSWE instructions */
575	set_memory_x(0, 1);
576
577	pr_info("Write protected kernel read-only data: %luk\n",
578		(unsigned long)(__end_rodata - _stext) >> 10);
579}
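
Both versions above rely on the same PAGE_UNUSED (0xFD) bookkeeping: a 1 MB vmemmap frame shared by several memory sections may only be freed once every byte in it has been marked unused again. The standalone C sketch below mimics vmemmap_use_new_sub_pmd()/vmemmap_unuse_sub_pmd() on an ordinary buffer; every name in it (demo_use(), demo_unuse(), DEMO_PMD_SIZE) is made up for illustration and none of it is kernel API.

/* Standalone sketch of the PAGE_UNUSED bookkeeping - not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_PMD_SIZE	(1UL * 1024 * 1024)	/* one 1 MB vmemmap frame */
#define DEMO_UNUSED	0xFD			/* stands in for PAGE_UNUSED */

/* A section's memmap is written: the range no longer reads as unused. */
static void demo_use(unsigned char *pmd, size_t start, size_t end)
{
	memset(pmd + start, 0, end - start);
}

/* Mark [start, end) unused again; true means the whole frame may be freed. */
static bool demo_unuse(unsigned char *pmd, size_t start, size_t end)
{
	size_t i;

	memset(pmd + start, DEMO_UNUSED, end - start);
	for (i = 0; i < DEMO_PMD_SIZE; i++)	/* poor man's memchr_inv() */
		if (pmd[i] != DEMO_UNUSED)
			return false;
	return true;
}

int main(void)
{
	unsigned char *pmd = malloc(DEMO_PMD_SIZE);
	size_t half = DEMO_PMD_SIZE / 2;

	if (!pmd)
		return 1;
	memset(pmd, DEMO_UNUSED, DEMO_PMD_SIZE);	/* freshly populated frame */
	demo_use(pmd, 0, half);				/* section A's memmap */
	demo_use(pmd, half, DEMO_PMD_SIZE);		/* section B's memmap */
	/* Removing only section A must not free the shared frame ... */
	printf("free after removing A: %d\n", demo_unuse(pmd, 0, half));
	/* ... removing section B as well leaves every byte unused. */
	printf("free after removing B: %d\n", demo_unuse(pmd, half, DEMO_PMD_SIZE));
	free(pmd);
	return 0;
}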