arch/s390/mm/vmem.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *    Copyright IBM Corp. 2006
  4 */
  5
  6#include <linux/memory_hotplug.h>
  7#include <linux/memblock.h>
  8#include <linux/pfn.h>
  9#include <linux/mm.h>
 10#include <linux/init.h>
 11#include <linux/list.h>
 12#include <linux/hugetlb.h>
 13#include <linux/slab.h>
 14#include <asm/cacheflush.h>
 15#include <asm/nospec-branch.h>
 16#include <asm/pgalloc.h>
 17#include <asm/setup.h>
 18#include <asm/tlbflush.h>
 19#include <asm/sections.h>
 20#include <asm/set_memory.h>
 21
 22static DEFINE_MUTEX(vmem_mutex);
 23
 24static void __ref *vmem_alloc_pages(unsigned int order)
 25{
 26	unsigned long size = PAGE_SIZE << order;
 27
 28	if (slab_is_available())
 29		return (void *)__get_free_pages(GFP_KERNEL, order);
 30	return memblock_alloc(size, size);
 31}
 32
 33static void vmem_free_pages(unsigned long addr, int order)
 34{
 35	/* We don't expect boot memory to be removed ever. */
 36	if (!slab_is_available() ||
 37	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
 38		return;
 39	free_pages(addr, order);
 40}
 41
 42void *vmem_crst_alloc(unsigned long val)
 43{
 44	unsigned long *table;
 45
 46	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
 47	if (table)
 48		crst_table_init(table, val);
 49	return table;
 50}
 51
 52pte_t __ref *vmem_pte_alloc(void)
 53{
 54	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
 55	pte_t *pte;
 56
 57	if (slab_is_available())
 58		pte = (pte_t *) page_table_alloc(&init_mm);
 59	else
 60		pte = (pte_t *) memblock_alloc(size, size);
 61	if (!pte)
 62		return NULL;
 63	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
 64	return pte;
 65}
 66
 67static void vmem_pte_free(unsigned long *table)
 68{
 69	/* We don't expect boot memory to be removed ever. */
 70	if (!slab_is_available() ||
 71	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
 72		return;
 73	page_table_free(&init_mm, table);
 74}
 75
 76#define PAGE_UNUSED 0xFD
 77
 78/*
 79 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 80 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 81 */
 82static unsigned long unused_sub_pmd_start;
 83
 84static void vmemmap_flush_unused_sub_pmd(void)
 85{
 86	if (!unused_sub_pmd_start)
 87		return;
 88	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
 89	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
 90	unused_sub_pmd_start = 0;
 91}
 92
 93static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
 94{
 95	/*
 96	 * As we expect to add in the same granularity as we remove, it's
 97	 * sufficient to mark only some piece used to block the memmap page from
 98	 * getting removed (just in case the memmap never gets initialized,
 99	 * e.g., because the memory block never gets onlined).
100	 */
101	memset((void *)start, 0, sizeof(struct page));
102}
103
104static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
105{
106	/*
107	 * We only optimize if the new used range directly follows the
108	 * previously unused range (esp., when populating consecutive sections).
109	 */
110	if (unused_sub_pmd_start == start) {
111		unused_sub_pmd_start = end;
112		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
113			unused_sub_pmd_start = 0;
114		return;
115	}
116	vmemmap_flush_unused_sub_pmd();
117	vmemmap_mark_sub_pmd_used(start, end);
118}
119
120static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
121{
122	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
123
124	vmemmap_flush_unused_sub_pmd();
125
126	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
127	vmemmap_mark_sub_pmd_used(start, end);
128
129	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
130	if (!IS_ALIGNED(start, PMD_SIZE))
131		memset((void *)page, PAGE_UNUSED, start - page);
132	/*
133	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
134	 * consecutive sections. Remember for the last added PMD the last
135	 * unused range in the populated PMD.
136	 */
137	if (!IS_ALIGNED(end, PMD_SIZE))
138		unused_sub_pmd_start = end;
139}
140
141/* Returns true if the PMD is completely unused and can be freed. */
142static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
143{
144	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);
145
146	vmemmap_flush_unused_sub_pmd();
147	memset((void *)start, PAGE_UNUSED, end - start);
148	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
149}
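
/*
 * Illustrative aside, not part of the original file: the helper above frees
 * a vmemmap PMD only when every byte of the PMD-sized memmap block carries
 * the PAGE_UNUSED marker.  memchr_inv() returns a pointer to the first byte
 * that differs from the given pattern, so a NULL result means "fully unused".
 * A minimal sketch of the same check without the kernel helper (the function
 * name is made up):
 */
static bool block_fully_unused(const unsigned char *blk, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++) {
		if (blk[i] != PAGE_UNUSED)	/* found a byte still in use */
			return false;
	}
	return true;	/* every byte carries the PAGE_UNUSED marker */
}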
150
151/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
152static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
153				  unsigned long end, bool add, bool direct)
154{
155	unsigned long prot, pages = 0;
156	int ret = -ENOMEM;
157	pte_t *pte;
158
159	prot = pgprot_val(PAGE_KERNEL);
160	if (!MACHINE_HAS_NX)
161		prot &= ~_PAGE_NOEXEC;
162
163	pte = pte_offset_kernel(pmd, addr);
164	for (; addr < end; addr += PAGE_SIZE, pte++) {
165		if (!add) {
166			if (pte_none(*pte))
167				continue;
168			if (!direct)
169				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
170			pte_clear(&init_mm, addr, pte);
171		} else if (pte_none(*pte)) {
172			if (!direct) {
173				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
174
175				if (!new_page)
176					goto out;
177				set_pte(pte, __pte(__pa(new_page) | prot));
178			} else {
179				set_pte(pte, __pte(__pa(addr) | prot));
180			}
181		} else {
182			continue;
183		}
184		pages++;
185	}
186	ret = 0;
187out:
188	if (direct)
189		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
190	return ret;
191}
192
193static void try_free_pte_table(pmd_t *pmd, unsigned long start)
194{
195	pte_t *pte;
196	int i;
197
198	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
199	pte = pte_offset_kernel(pmd, start);
200	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
201		if (!pte_none(*pte))
202			return;
203	}
204	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
205	pmd_clear(pmd);
206}
207
208/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
209static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
210				  unsigned long end, bool add, bool direct)
211{
212	unsigned long next, prot, pages = 0;
213	int ret = -ENOMEM;
214	pmd_t *pmd;
215	pte_t *pte;
216
217	prot = pgprot_val(SEGMENT_KERNEL);
218	if (!MACHINE_HAS_NX)
219		prot &= ~_SEGMENT_ENTRY_NOEXEC;
220
221	pmd = pmd_offset(pud, addr);
222	for (; addr < end; addr = next, pmd++) {
223		next = pmd_addr_end(addr, end);
224		if (!add) {
225			if (pmd_none(*pmd))
226				continue;
227			if (pmd_large(*pmd)) {
228				if (IS_ALIGNED(addr, PMD_SIZE) &&
229				    IS_ALIGNED(next, PMD_SIZE)) {
230					if (!direct)
231						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
232					pmd_clear(pmd);
233					pages++;
234				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
235					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
236					pmd_clear(pmd);
237				}
238				continue;
239			}
240		} else if (pmd_none(*pmd)) {
241			if (IS_ALIGNED(addr, PMD_SIZE) &&
242			    IS_ALIGNED(next, PMD_SIZE) &&
243			    MACHINE_HAS_EDAT1 && direct &&
244			    !debug_pagealloc_enabled()) {
245				set_pmd(pmd, __pmd(__pa(addr) | prot));
246				pages++;
247				continue;
248			} else if (!direct && MACHINE_HAS_EDAT1) {
249				void *new_page;
250
251				/*
252				 * Use 1MB frames for vmemmap if available. We
253				 * always use large frames even if they are only
 254				 * partially used. Otherwise we would also need
 255				 * page tables, since vmemmap_populate gets
256				 * called for each section separately.
257				 */
258				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
259				if (new_page) {
260					set_pmd(pmd, __pmd(__pa(new_page) | prot));
261					if (!IS_ALIGNED(addr, PMD_SIZE) ||
262					    !IS_ALIGNED(next, PMD_SIZE)) {
263						vmemmap_use_new_sub_pmd(addr, next);
264					}
265					continue;
266				}
267			}
268			pte = vmem_pte_alloc();
269			if (!pte)
270				goto out;
271			pmd_populate(&init_mm, pmd, pte);
272		} else if (pmd_large(*pmd)) {
273			if (!direct)
274				vmemmap_use_sub_pmd(addr, next);
275			continue;
276		}
277		ret = modify_pte_table(pmd, addr, next, add, direct);
278		if (ret)
279			goto out;
280		if (!add)
281			try_free_pte_table(pmd, addr & PMD_MASK);
282	}
283	ret = 0;
284out:
285	if (direct)
286		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
287	return ret;
288}
289
290static void try_free_pmd_table(pud_t *pud, unsigned long start)
291{
292	const unsigned long end = start + PUD_SIZE;
293	pmd_t *pmd;
294	int i;
295
296	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
297	if (end > VMALLOC_START)
298		return;
299#ifdef CONFIG_KASAN
300	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
301		return;
302#endif
303	pmd = pmd_offset(pud, start);
304	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
305		if (!pmd_none(*pmd))
306			return;
307	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
308	pud_clear(pud);
309}
310
311static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
312			    bool add, bool direct)
313{
314	unsigned long next, prot, pages = 0;
315	int ret = -ENOMEM;
316	pud_t *pud;
317	pmd_t *pmd;
318
319	prot = pgprot_val(REGION3_KERNEL);
320	if (!MACHINE_HAS_NX)
321		prot &= ~_REGION_ENTRY_NOEXEC;
322	pud = pud_offset(p4d, addr);
323	for (; addr < end; addr = next, pud++) {
324		next = pud_addr_end(addr, end);
325		if (!add) {
326			if (pud_none(*pud))
327				continue;
328			if (pud_large(*pud)) {
329				if (IS_ALIGNED(addr, PUD_SIZE) &&
330				    IS_ALIGNED(next, PUD_SIZE)) {
331					pud_clear(pud);
332					pages++;
333				}
334				continue;
335			}
336		} else if (pud_none(*pud)) {
337			if (IS_ALIGNED(addr, PUD_SIZE) &&
338			    IS_ALIGNED(next, PUD_SIZE) &&
339			    MACHINE_HAS_EDAT2 && direct &&
340			    !debug_pagealloc_enabled()) {
341				set_pud(pud, __pud(__pa(addr) | prot));
342				pages++;
343				continue;
344			}
345			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
346			if (!pmd)
347				goto out;
348			pud_populate(&init_mm, pud, pmd);
349		} else if (pud_large(*pud)) {
350			continue;
351		}
352		ret = modify_pmd_table(pud, addr, next, add, direct);
353		if (ret)
354			goto out;
355		if (!add)
356			try_free_pmd_table(pud, addr & PUD_MASK);
357	}
358	ret = 0;
359out:
360	if (direct)
361		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
362	return ret;
363}
364
365static void try_free_pud_table(p4d_t *p4d, unsigned long start)
366{
367	const unsigned long end = start + P4D_SIZE;
368	pud_t *pud;
369	int i;
370
371	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
372	if (end > VMALLOC_START)
373		return;
374#ifdef CONFIG_KASAN
375	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
376		return;
377#endif
378
379	pud = pud_offset(p4d, start);
380	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
381		if (!pud_none(*pud))
382			return;
383	}
384	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
385	p4d_clear(p4d);
386}
387
388static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
389			    bool add, bool direct)
390{
391	unsigned long next;
392	int ret = -ENOMEM;
393	p4d_t *p4d;
394	pud_t *pud;
395
396	p4d = p4d_offset(pgd, addr);
397	for (; addr < end; addr = next, p4d++) {
398		next = p4d_addr_end(addr, end);
399		if (!add) {
400			if (p4d_none(*p4d))
401				continue;
402		} else if (p4d_none(*p4d)) {
403			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
404			if (!pud)
405				goto out;
406			p4d_populate(&init_mm, p4d, pud);
407		}
408		ret = modify_pud_table(p4d, addr, next, add, direct);
409		if (ret)
410			goto out;
411		if (!add)
412			try_free_pud_table(p4d, addr & P4D_MASK);
413	}
414	ret = 0;
415out:
416	return ret;
417}
418
419static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
420{
421	const unsigned long end = start + PGDIR_SIZE;
422	p4d_t *p4d;
423	int i;
424
425	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
426	if (end > VMALLOC_START)
427		return;
428#ifdef CONFIG_KASAN
429	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
430		return;
431#endif
432
433	p4d = p4d_offset(pgd, start);
434	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
435		if (!p4d_none(*p4d))
436			return;
437	}
438	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
439	pgd_clear(pgd);
440}
441
442static int modify_pagetable(unsigned long start, unsigned long end, bool add,
443			    bool direct)
444{
445	unsigned long addr, next;
446	int ret = -ENOMEM;
447	pgd_t *pgd;
448	p4d_t *p4d;
449
450	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
451		return -EINVAL;
452	for (addr = start; addr < end; addr = next) {
453		next = pgd_addr_end(addr, end);
454		pgd = pgd_offset_k(addr);
455
456		if (!add) {
457			if (pgd_none(*pgd))
458				continue;
459		} else if (pgd_none(*pgd)) {
460			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
461			if (!p4d)
462				goto out;
463			pgd_populate(&init_mm, pgd, p4d);
464		}
465		ret = modify_p4d_table(pgd, addr, next, add, direct);
466		if (ret)
467			goto out;
468		if (!add)
469			try_free_p4d_table(pgd, addr & PGDIR_MASK);
470	}
471	ret = 0;
472out:
473	if (!add)
474		flush_tlb_kernel_range(start, end);
475	return ret;
476}
477
478static int add_pagetable(unsigned long start, unsigned long end, bool direct)
479{
480	return modify_pagetable(start, end, true, direct);
481}
482
483static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
484{
485	return modify_pagetable(start, end, false, direct);
486}
487
488/*
489 * Add a physical memory range to the 1:1 mapping.
490 */
491static int vmem_add_range(unsigned long start, unsigned long size)
492{
493	return add_pagetable(start, start + size, true);
494}
495
496/*
497 * Remove a physical memory range from the 1:1 mapping.
498 */
499static void vmem_remove_range(unsigned long start, unsigned long size)
500{
501	remove_pagetable(start, start + size, true);
502}
503
504/*
505 * Add a backed mem_map array to the virtual mem_map array.
506 */
507int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
508			       struct vmem_altmap *altmap)
509{
510	int ret;
511
512	mutex_lock(&vmem_mutex);
513	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
514	ret = add_pagetable(start, end, false);
515	if (ret)
516		remove_pagetable(start, end, false);
517	mutex_unlock(&vmem_mutex);
518	return ret;
519}
520
521void vmemmap_free(unsigned long start, unsigned long end,
522		  struct vmem_altmap *altmap)
523{
524	mutex_lock(&vmem_mutex);
525	remove_pagetable(start, end, false);
526	mutex_unlock(&vmem_mutex);
527}
528
529void vmem_remove_mapping(unsigned long start, unsigned long size)
530{
531	mutex_lock(&vmem_mutex);
532	vmem_remove_range(start, size);
533	mutex_unlock(&vmem_mutex);
534}
535
536struct range arch_get_mappable_range(void)
537{
538	struct range mhp_range;
539
540	mhp_range.start = 0;
 541	mhp_range.end = VMEM_MAX_PHYS - 1;
542	return mhp_range;
543}
544
545int vmem_add_mapping(unsigned long start, unsigned long size)
546{
547	struct range range = arch_get_mappable_range();
548	int ret;
549
550	if (start < range.start ||
551	    start + size > range.end + 1 ||
552	    start + size < start)
553		return -ERANGE;
554
555	mutex_lock(&vmem_mutex);
556	ret = vmem_add_range(start, size);
557	if (ret)
558		vmem_remove_range(start, size);
559	mutex_unlock(&vmem_mutex);
560	return ret;
561}
562
563/*
564 * Allocate new or return existing page-table entry, but do not map it
565 * to any physical address. If missing, allocate segment- and region-
 566 * table entries along the way. Meeting a large segment- or region-table
 567 * entry while traversing is an error, since the function is expected to
 568 * be called against virtual regions reserved for 4KB mappings only.
569 */
570pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
571{
572	pte_t *ptep = NULL;
573	pgd_t *pgd;
574	p4d_t *p4d;
575	pud_t *pud;
576	pmd_t *pmd;
577	pte_t *pte;
578
579	pgd = pgd_offset_k(addr);
580	if (pgd_none(*pgd)) {
581		if (!alloc)
582			goto out;
583		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
584		if (!p4d)
585			goto out;
586		pgd_populate(&init_mm, pgd, p4d);
587	}
588	p4d = p4d_offset(pgd, addr);
589	if (p4d_none(*p4d)) {
590		if (!alloc)
591			goto out;
592		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
593		if (!pud)
594			goto out;
595		p4d_populate(&init_mm, p4d, pud);
596	}
597	pud = pud_offset(p4d, addr);
598	if (pud_none(*pud)) {
599		if (!alloc)
600			goto out;
601		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
602		if (!pmd)
603			goto out;
604		pud_populate(&init_mm, pud, pmd);
605	} else if (WARN_ON_ONCE(pud_large(*pud))) {
606		goto out;
607	}
608	pmd = pmd_offset(pud, addr);
609	if (pmd_none(*pmd)) {
610		if (!alloc)
611			goto out;
612		pte = vmem_pte_alloc();
613		if (!pte)
614			goto out;
615		pmd_populate(&init_mm, pmd, pte);
616	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
617		goto out;
618	}
619	ptep = pte_offset_kernel(pmd, addr);
620out:
621	return ptep;
622}
623
624int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
625{
626	pte_t *ptep, pte;
627
628	if (!IS_ALIGNED(addr, PAGE_SIZE))
629		return -EINVAL;
630	ptep = vmem_get_alloc_pte(addr, alloc);
631	if (!ptep)
632		return -ENOMEM;
633	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
634	pte = mk_pte_phys(phys, prot);
635	set_pte(ptep, pte);
636	return 0;
637}
638
639int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
640{
641	int rc;
642
643	mutex_lock(&vmem_mutex);
644	rc = __vmem_map_4k_page(addr, phys, prot, true);
645	mutex_unlock(&vmem_mutex);
646	return rc;
647}
648
649void vmem_unmap_4k_page(unsigned long addr)
650{
651	pte_t *ptep;
652
653	mutex_lock(&vmem_mutex);
654	ptep = virt_to_kpte(addr);
655	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
656	pte_clear(&init_mm, addr, ptep);
657	mutex_unlock(&vmem_mutex);
658}
659
660/*
 661 * Map the whole physical memory to virtual memory (identity mapping).
 662 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 663 * additional memory segments.
664 */
665void __init vmem_map_init(void)
666{
667	phys_addr_t base, end;
668	u64 i;
669
670	for_each_mem_range(i, &base, &end)
671		vmem_add_range(base, end - base);
672	__set_memory((unsigned long)_stext,
673		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
674		     SET_MEMORY_RO | SET_MEMORY_X);
675	__set_memory((unsigned long)_etext,
676		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
677		     SET_MEMORY_RO);
678	__set_memory((unsigned long)_sinittext,
679		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
680		     SET_MEMORY_RO | SET_MEMORY_X);
681	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
682		     SET_MEMORY_RO | SET_MEMORY_X);
683
684	/* lowcore requires 4k mapping for real addresses / prefixing */
685	set_memory_4k(0, LC_PAGES);
686
687	/* lowcore must be executable for LPSWE */
688	if (!static_key_enabled(&cpu_has_bear))
689		set_memory_x(0, 1);
690
691	pr_info("Write protected kernel read-only data: %luk\n",
692		(unsigned long)(__end_rodata - _stext) >> 10);
693}
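
For orientation, the exported entry points of the v6.2 version above are
vmem_add_mapping()/vmem_remove_mapping() for the kernel 1:1 mapping and
vmem_map_4k_page()/vmem_unmap_4k_page() for individual 4K pages. A minimal,
hypothetical caller sketch follows; the function name is made up, and it
assumes the range was already validated against arch_get_mappable_range():

static int example_add_and_remove(unsigned long start, unsigned long size)
{
	int rc;

	/* Extend the identity (1:1) mapping by one block. */
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	/* ... the added range is now accessible through its 1:1 address ... */

	/* Tear the mapping down again when done. */
	vmem_remove_mapping(start, size);
	return 0;
}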
arch/s390/mm/vmem.c (v4.17)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *    Copyright IBM Corp. 2006
  4 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  5 */
  6
  7#include <linux/bootmem.h>
  8#include <linux/pfn.h>
  9#include <linux/mm.h>
 10#include <linux/init.h>
 11#include <linux/list.h>
 12#include <linux/hugetlb.h>
 13#include <linux/slab.h>
 14#include <linux/memblock.h>
 15#include <asm/cacheflush.h>
 16#include <asm/pgalloc.h>
 17#include <asm/pgtable.h>
 18#include <asm/setup.h>
 19#include <asm/tlbflush.h>
 20#include <asm/sections.h>
 21#include <asm/set_memory.h>
 22
 23static DEFINE_MUTEX(vmem_mutex);
 24
 25struct memory_segment {
 26	struct list_head list;
 27	unsigned long start;
 28	unsigned long size;
 29};
 30
 31static LIST_HEAD(mem_segs);
 32
 33static void __ref *vmem_alloc_pages(unsigned int order)
 34{
 35	unsigned long size = PAGE_SIZE << order;
 36
 37	if (slab_is_available())
 38		return (void *)__get_free_pages(GFP_KERNEL, order);
 39	return (void *) memblock_alloc(size, size);
 40}
 41
 42void *vmem_crst_alloc(unsigned long val)
 43{
 44	unsigned long *table;
 45
 46	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
 47	if (table)
 48		crst_table_init(table, val);
 49	return table;
 50}
 51
 52pte_t __ref *vmem_pte_alloc(void)
 53{
 54	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
 55	pte_t *pte;
 56
 57	if (slab_is_available())
 58		pte = (pte_t *) page_table_alloc(&init_mm);
 59	else
 60		pte = (pte_t *) memblock_alloc(size, size);
 61	if (!pte)
 62		return NULL;
 63	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
 64	return pte;
 65}
 66
 67/*
 68 * Add a physical memory range to the 1:1 mapping.
 69 */
 70static int vmem_add_mem(unsigned long start, unsigned long size)
 71{
 72	unsigned long pgt_prot, sgt_prot, r3_prot;
 73	unsigned long pages4k, pages1m, pages2g;
 74	unsigned long end = start + size;
 75	unsigned long address = start;
 76	pgd_t *pg_dir;
 77	p4d_t *p4_dir;
 78	pud_t *pu_dir;
 79	pmd_t *pm_dir;
 80	pte_t *pt_dir;
 81	int ret = -ENOMEM;
 82
 83	pgt_prot = pgprot_val(PAGE_KERNEL);
 84	sgt_prot = pgprot_val(SEGMENT_KERNEL);
 85	r3_prot = pgprot_val(REGION3_KERNEL);
 86	if (!MACHINE_HAS_NX) {
 87		pgt_prot &= ~_PAGE_NOEXEC;
 88		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
 89		r3_prot &= ~_REGION_ENTRY_NOEXEC;
 90	}
 91	pages4k = pages1m = pages2g = 0;
 92	while (address < end) {
 93		pg_dir = pgd_offset_k(address);
 94		if (pgd_none(*pg_dir)) {
 95			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
 96			if (!p4_dir)
 97				goto out;
 98			pgd_populate(&init_mm, pg_dir, p4_dir);
 99		}
100		p4_dir = p4d_offset(pg_dir, address);
101		if (p4d_none(*p4_dir)) {
102			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
103			if (!pu_dir)
104				goto out;
105			p4d_populate(&init_mm, p4_dir, pu_dir);
106		}
107		pu_dir = pud_offset(p4_dir, address);
108		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
109		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
110		     !debug_pagealloc_enabled()) {
111			pud_val(*pu_dir) = address | r3_prot;
112			address += PUD_SIZE;
113			pages2g++;
114			continue;
115		}
116		if (pud_none(*pu_dir)) {
117			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
118			if (!pm_dir)
119				goto out;
120			pud_populate(&init_mm, pu_dir, pm_dir);
121		}
122		pm_dir = pmd_offset(pu_dir, address);
123		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
124		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
125		    !debug_pagealloc_enabled()) {
126			pmd_val(*pm_dir) = address | sgt_prot;
127			address += PMD_SIZE;
128			pages1m++;
129			continue;
130		}
131		if (pmd_none(*pm_dir)) {
132			pt_dir = vmem_pte_alloc();
133			if (!pt_dir)
134				goto out;
135			pmd_populate(&init_mm, pm_dir, pt_dir);
136		}
137
138		pt_dir = pte_offset_kernel(pm_dir, address);
139		pte_val(*pt_dir) = address | pgt_prot;
140		address += PAGE_SIZE;
141		pages4k++;
142	}
143	ret = 0;
144out:
145	update_page_count(PG_DIRECT_MAP_4K, pages4k);
146	update_page_count(PG_DIRECT_MAP_1M, pages1m);
147	update_page_count(PG_DIRECT_MAP_2G, pages2g);
148	return ret;
149}
150
151/*
152 * Remove a physical memory range from the 1:1 mapping.
153 * Currently only invalidates page table entries.
154 */
155static void vmem_remove_range(unsigned long start, unsigned long size)
156{
157	unsigned long pages4k, pages1m, pages2g;
158	unsigned long end = start + size;
159	unsigned long address = start;
160	pgd_t *pg_dir;
161	p4d_t *p4_dir;
162	pud_t *pu_dir;
163	pmd_t *pm_dir;
164	pte_t *pt_dir;
165
166	pages4k = pages1m = pages2g = 0;
167	while (address < end) {
168		pg_dir = pgd_offset_k(address);
169		if (pgd_none(*pg_dir)) {
170			address += PGDIR_SIZE;
171			continue;
172		}
173		p4_dir = p4d_offset(pg_dir, address);
174		if (p4d_none(*p4_dir)) {
175			address += P4D_SIZE;
176			continue;
177		}
178		pu_dir = pud_offset(p4_dir, address);
179		if (pud_none(*pu_dir)) {
180			address += PUD_SIZE;
181			continue;
182		}
183		if (pud_large(*pu_dir)) {
184			pud_clear(pu_dir);
185			address += PUD_SIZE;
186			pages2g++;
187			continue;
188		}
189		pm_dir = pmd_offset(pu_dir, address);
190		if (pmd_none(*pm_dir)) {
191			address += PMD_SIZE;
192			continue;
193		}
194		if (pmd_large(*pm_dir)) {
195			pmd_clear(pm_dir);
196			address += PMD_SIZE;
197			pages1m++;
198			continue;
199		}
200		pt_dir = pte_offset_kernel(pm_dir, address);
201		pte_clear(&init_mm, address, pt_dir);
202		address += PAGE_SIZE;
203		pages4k++;
204	}
205	flush_tlb_kernel_range(start, end);
206	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
207	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
208	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
209}
210
211/*
212 * Add a backed mem_map array to the virtual mem_map array.
213 */
214int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
215		struct vmem_altmap *altmap)
216{
217	unsigned long pgt_prot, sgt_prot;
218	unsigned long address = start;
219	pgd_t *pg_dir;
220	p4d_t *p4_dir;
221	pud_t *pu_dir;
222	pmd_t *pm_dir;
223	pte_t *pt_dir;
224	int ret = -ENOMEM;
225
226	pgt_prot = pgprot_val(PAGE_KERNEL);
227	sgt_prot = pgprot_val(SEGMENT_KERNEL);
228	if (!MACHINE_HAS_NX) {
229		pgt_prot &= ~_PAGE_NOEXEC;
230		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
231	}
232	for (address = start; address < end;) {
233		pg_dir = pgd_offset_k(address);
234		if (pgd_none(*pg_dir)) {
235			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
236			if (!p4_dir)
237				goto out;
238			pgd_populate(&init_mm, pg_dir, p4_dir);
239		}
240
241		p4_dir = p4d_offset(pg_dir, address);
242		if (p4d_none(*p4_dir)) {
243			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
244			if (!pu_dir)
245				goto out;
246			p4d_populate(&init_mm, p4_dir, pu_dir);
247		}
248
249		pu_dir = pud_offset(p4_dir, address);
250		if (pud_none(*pu_dir)) {
251			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
252			if (!pm_dir)
253				goto out;
254			pud_populate(&init_mm, pu_dir, pm_dir);
255		}
256
257		pm_dir = pmd_offset(pu_dir, address);
258		if (pmd_none(*pm_dir)) {
259			/* Use 1MB frames for vmemmap if available. We always
260			 * use large frames even if they are only partially
261			 * used.
 262			 * Otherwise we would also need page tables, since
263			 * vmemmap_populate gets called for each section
264			 * separately. */
265			if (MACHINE_HAS_EDAT1) {
266				void *new_page;
267
268				new_page = vmemmap_alloc_block(PMD_SIZE, node);
269				if (!new_page)
270					goto out;
271				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
272				address = (address + PMD_SIZE) & PMD_MASK;
273				continue;
274			}
275			pt_dir = vmem_pte_alloc();
276			if (!pt_dir)
277				goto out;
278			pmd_populate(&init_mm, pm_dir, pt_dir);
279		} else if (pmd_large(*pm_dir)) {
280			address = (address + PMD_SIZE) & PMD_MASK;
281			continue;
282		}
283
284		pt_dir = pte_offset_kernel(pm_dir, address);
285		if (pte_none(*pt_dir)) {
286			void *new_page;
287
288			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
289			if (!new_page)
290				goto out;
291			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
292		}
293		address += PAGE_SIZE;
294	}
295	ret = 0;
296out:
297	return ret;
298}
299
300void vmemmap_free(unsigned long start, unsigned long end,
301		struct vmem_altmap *altmap)
302{
303}
304
305/*
306 * Add memory segment to the segment list if it doesn't overlap with
307 * an already present segment.
308 */
309static int insert_memory_segment(struct memory_segment *seg)
310{
311	struct memory_segment *tmp;
312
313	if (seg->start + seg->size > VMEM_MAX_PHYS ||
314	    seg->start + seg->size < seg->start)
315		return -ERANGE;
316
317	list_for_each_entry(tmp, &mem_segs, list) {
318		if (seg->start >= tmp->start + tmp->size)
319			continue;
320		if (seg->start + seg->size <= tmp->start)
321			continue;
322		return -ENOSPC;
323	}
324	list_add(&seg->list, &mem_segs);
325	return 0;
326}
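
/*
 * Illustrative aside, not part of the original file: the loop above rejects
 * a new segment that intersects an already registered one.  Two half-open
 * ranges [a, a + a_len) and [b, b + b_len) overlap exactly when each starts
 * before the other ends; a compact sketch of that predicate (the function
 * name is made up, and the overflow check done separately above is omitted):
 */
static bool ranges_overlap(unsigned long a, unsigned long a_len,
			   unsigned long b, unsigned long b_len)
{
	return a < b + b_len && b < a + a_len;
}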
327
328/*
329 * Remove memory segment from the segment list.
330 */
331static void remove_memory_segment(struct memory_segment *seg)
332{
333	list_del(&seg->list);
334}
335
336static void __remove_shared_memory(struct memory_segment *seg)
337{
338	remove_memory_segment(seg);
339	vmem_remove_range(seg->start, seg->size);
340}
341
342int vmem_remove_mapping(unsigned long start, unsigned long size)
343{
344	struct memory_segment *seg;
345	int ret;
346
347	mutex_lock(&vmem_mutex);
348
349	ret = -ENOENT;
350	list_for_each_entry(seg, &mem_segs, list) {
351		if (seg->start == start && seg->size == size)
352			break;
353	}
354
355	if (seg->start != start || seg->size != size)
356		goto out;
357
358	ret = 0;
359	__remove_shared_memory(seg);
360	kfree(seg);
361out:
362	mutex_unlock(&vmem_mutex);
363	return ret;
364}
365
366int vmem_add_mapping(unsigned long start, unsigned long size)
367{
368	struct memory_segment *seg;
369	int ret;
370
371	mutex_lock(&vmem_mutex);
372	ret = -ENOMEM;
373	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
374	if (!seg)
375		goto out;
376	seg->start = start;
377	seg->size = size;
378
379	ret = insert_memory_segment(seg);
380	if (ret)
381		goto out_free;
382
383	ret = vmem_add_mem(start, size);
384	if (ret)
385		goto out_remove;
386	goto out;
387
388out_remove:
389	__remove_shared_memory(seg);
390out_free:
391	kfree(seg);
392out:
393	mutex_unlock(&vmem_mutex);
394	return ret;
395}
396
397/*
 398 * Map the whole physical memory to virtual memory (identity mapping).
 399 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 400 * additional memory segments.
401 */
402void __init vmem_map_init(void)
403{
404	struct memblock_region *reg;
405
406	for_each_memblock(memory, reg)
407		vmem_add_mem(reg->base, reg->size);
408	__set_memory((unsigned long)_stext,
409		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
410		     SET_MEMORY_RO | SET_MEMORY_X);
411	__set_memory((unsigned long)_etext,
412		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
413		     SET_MEMORY_RO);
414	__set_memory((unsigned long)_sinittext,
415		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
416		     SET_MEMORY_RO | SET_MEMORY_X);
417	pr_info("Write protected kernel read-only data: %luk\n",
418		(unsigned long)(__end_rodata - _stext) >> 10);
419}
420
421/*
 422 * Convert memblock.memory to a memory segment list so there is a single
423 * list that contains all memory segments.
424 */
425static int __init vmem_convert_memory_chunk(void)
426{
427	struct memblock_region *reg;
428	struct memory_segment *seg;
429
430	mutex_lock(&vmem_mutex);
431	for_each_memblock(memory, reg) {
432		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
433		if (!seg)
434			panic("Out of memory...\n");
435		seg->start = reg->base;
436		seg->size = reg->size;
437		insert_memory_segment(seg);
438	}
439	mutex_unlock(&vmem_mutex);
440	return 0;
441}
442
443core_initcall(vmem_convert_memory_chunk);