v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

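/*
 * List of all memory segments in the 1:1 mapping; changes to the list and
 * to the mapping itself are serialized by vmem_mutex.
 */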
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

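/*
 * Allocate pages for page tables: from the page allocator once the slab
 * allocator is available, otherwise from memblock during early boot.
 */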
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

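/* Allocate a region/segment (crst) table and initialize all entries to val. */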
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

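/* Allocate a page table and mark all entries invalid. */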
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
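	/* Clear the no-execute protection bits if the machine lacks the NX facility. */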
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
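		/*
		 * With EDAT2, map a whole 2 GB region with a single large
		 * entry when the address is 2 GB aligned and the remaining
		 * range covers it entirely (and debug_pagealloc is off).
		 */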
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		     !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
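		/*
		 * With EDAT1, map a whole 1 MB segment with a single large
		 * entry under the same alignment and coverage conditions.
		 */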
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
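		/* Large (2 GB / 1 MB) mappings are removed by clearing the whole entry. */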
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/*
			 * Use 1 MB frames for the vmemmap if EDAT1 is
			 * available. Large frames are used even if they are
			 * only partially filled; otherwise extra page tables
			 * would be needed, since vmemmap_populate() is called
			 * for each section separately.
			 */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

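/* Freeing of the vmemmap backing is not implemented here; this is a no-op. */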
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

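	/* Reject ranges above the identity-mapping limit or ranges that wrap around. */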
	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

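	/*
	 * If no matching segment was found, seg does not point at a real
	 * entry; the recheck below catches that case.
	 */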
	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to allow
 * hotplug of additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
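	/* Write-protect kernel text and read-only data; executable sections keep SET_MEMORY_X. */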
407	__set_memory((unsigned long)_stext,
408		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
409		     SET_MEMORY_RO | SET_MEMORY_X);
410	__set_memory((unsigned long)_etext,
411		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
412		     SET_MEMORY_RO);
413	__set_memory((unsigned long)_sinittext,
414		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
415		     SET_MEMORY_RO | SET_MEMORY_X);
416	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
417		     SET_MEMORY_RO | SET_MEMORY_X);
418	pr_info("Write protected kernel read-only data: %luk\n",
419		(unsigned long)(__end_rodata - _stext) >> 10);
420}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);
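
/*
 * Minimal usage sketch (illustrative only, not part of the kernel source
 * above): a caller such as a memory hotplug or DCSS path is expected to
 * create the 1:1 mapping with vmem_add_mapping() before using the range,
 * and to tear it down again with vmem_remove_mapping() on failure or
 * removal. The function name below is hypothetical.
 */
static int example_map_range(unsigned long start, unsigned long size)
{
	int rc;

	rc = vmem_add_mapping(start, size);	/* identity-map start..start+size */
	if (rc)
		return rc;
	/* ... make the range available to its user here ... */
	return 0;
}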