v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

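/*
 * Allocate a 2^order block of pages: from the buddy allocator once the
 * slab allocator is up, from memblock during early boot.
 */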
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
		return;
	free_pages(addr, order);
}

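/*
 * Allocate a region or segment table (CRST) and preset all of its
 * entries to the given value.
 */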
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

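/*
 * Allocate a page table and mark all of its entries invalid. Falls
 * back to memblock before the slab allocator is available.
 */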
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

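/*
 * vmemmap pages are populated and freed in sub-PMD granularity. A
 * PMD-mapped vmemmap page can only be freed once every byte in it has
 * been marked PAGE_UNUSED.
 */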
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED),
 * ranges from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start;

static void vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	memset(__va(unused_pmd_start), PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}

 91
 92static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
 93{
 94	/*
 95	 * As we expect to add in the same granularity as we remove, it's
 96	 * sufficient to mark only some piece used to block the memmap page from
 97	 * getting removed (just in case the memmap never gets initialized,
 98	 * e.g., because the memory block never gets onlined).
 99	 */
100	memset(__va(start), 0, sizeof(struct page));
101}
102
103static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
104{
105	/*
106	 * We only optimize if the new used range directly follows the
107	 * previously unused range (esp., when populating consecutive sections).
108	 */
109	if (unused_pmd_start == start) {
110		unused_pmd_start = end;
111		if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
112			unused_pmd_start = 0;
113		return;
114	}
115	vmemmap_flush_unused_pmd();
116	__vmemmap_use_sub_pmd(start, end);
117}
118
119static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
120{
121	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
122
123	vmemmap_flush_unused_pmd();
124
125	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
126	__vmemmap_use_sub_pmd(start, end);
127
128	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
129	if (!IS_ALIGNED(start, PMD_SIZE))
130		memset(page, PAGE_UNUSED, start - __pa(page));
131	/*
132	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
133	 * consecutive sections. Remember for the last added PMD the last
134	 * unused range in the populated PMD.
135	 */
136	if (!IS_ALIGNED(end, PMD_SIZE))
137		unused_pmd_start = end;
138}
139
140/* Returns true if the PMD is completely unused and can be freed. */
141static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
142{
143	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));
144
145	vmemmap_flush_unused_pmd();
146	memset(__va(start), PAGE_UNUSED, end - start);
147	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
148}
149
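/*
 * Walk the PTEs covered by one PMD. With add set, populate missing
 * entries; otherwise clear present entries. When direct is set the 1:1
 * mapping is modified and the direct-map counters are updated; when it
 * is clear the vmemmap is modified and its backing pages are allocated
 * and freed here.
 */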
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else {
				pte_val(*pte) = addr | prot;
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free(__va(pmd_deref(*pmd)));
	pmd_clear(pmd);
}

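/*
 * As modify_pte_table(), but at PMD level. Properly aligned, fully
 * covered ranges are mapped with 1 MB segment entries when EDAT1 is
 * available; partially used vmemmap PMDs are handled with the
 * PAGE_UNUSED tracking above.
 */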
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pmd_val(*pmd) = addr | prot;
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also have
				 * to allocate page tables, since
				 * vmemmap_populate gets called for each section
				 * separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					pmd_val(*pmd) = __pa(new_page) | prot;
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

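/*
 * As modify_pmd_table(), but at PUD level: 2 GB region-third entries
 * are used for the 1:1 mapping when EDAT2 is available.
 */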
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pud_val(*pud) = addr | prot;
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

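/*
 * There are no large mappings at the p4d level: this only descends to
 * the pud tables, allocating them on demand.
 */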
static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

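/*
 * Top-level entry for the page table walk: populate (add) or unmap
 * (!add) the page-aligned range, either in the 1:1 mapping (direct) or
 * in the vmemmap (!direct). Unmapped ranges are flushed from the TLB
 * once the walk is complete.
 */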
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	int ret;

	if (start + size > VMEM_MAX_PHYS ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to allow
 * hotplugging additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_range(reg->base, reg->size);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}

v3.1
 
/*
 *  arch/s390/mm/vmem.c
 *
 *    Copyright IBM Corp. 2006
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

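/* Book-keeping for memory ranges added via vmem_add_mapping(). */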
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

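/*
 * Region and segment tables are four pages each (2048 entries of 8
 * bytes). On 31-bit kernels the pud and pmd levels are folded, so
 * nothing is allocated here.
 */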
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
			pmd_clear(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to allow
 * hotplugging additional memory segments. The read-only kernel section
 * between _stext and _eshared is mapped write protected.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);