/*
 * NOTE(review): web-scrape chrome ("Linux Audio", "Check our new training
 * course", "Loading...") removed.  The code below appears to be an arch
 * mm/init.c from the Linux kernel, revision labelled v5.9 — confirm origin.
 */
  1/*
  2 * Copyright (C) 2013 Altera Corporation
  3 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
  4 * Copyright (C) 2009 Wind River Systems Inc
  5 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
  6 * Copyright (C) 2004 Microtronix Datacom Ltd
  7 *
  8 * based on arch/m68k/mm/init.c
  9 *
 10 * This file is subject to the terms and conditions of the GNU General Public
 11 * License. See the file "COPYING" in the main directory of this archive
 12 * for more details.
 13 */
 14
 15#include <linux/signal.h>
 16#include <linux/sched.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/ptrace.h>
 22#include <linux/mman.h>
 23#include <linux/mm.h>
 24#include <linux/init.h>
 25#include <linux/pagemap.h>
 26#include <linux/memblock.h>
 27#include <linux/slab.h>
 28#include <linux/binfmts.h>
 29
 30#include <asm/setup.h>
 31#include <asm/page.h>
 
 32#include <asm/sections.h>
 33#include <asm/tlb.h>
 34#include <asm/mmu_context.h>
 35#include <asm/cpuinfo.h>
 36#include <asm/processor.h>
 37
/* Active page global directory; set to swapper_pg_dir by paging_init(). */
pgd_t *pgd_current;
 39
 40/*
 41 * paging_init() continues the virtual memory environment setup which
 42 * was begun by the code in arch/head.S.
 43 * The parameters are pointers to where to stick the starting and ending
 44 * addresses of available kernel virtual memory.
 45 */
 46void __init paging_init(void)
 47{
 48	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
 
 
 49
 50	pagetable_init();
 51	pgd_current = swapper_pg_dir;
 52
 53	max_zone_pfn[ZONE_NORMAL] = max_mapnr;
 54
 55	/* pass the memory from the bootmem allocator to the main allocator */
 56	free_area_init(max_zone_pfn);
 57
 58	flush_dcache_range((unsigned long)empty_zero_page,
 59			(unsigned long)empty_zero_page + PAGE_SIZE);
 60}
 61
 62void __init mem_init(void)
 63{
 64	unsigned long end_mem   = memory_end; /* this must not include
 65						kernel stack at top */
 66
 67	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
 68
 69	end_mem &= PAGE_MASK;
 70	high_memory = __va(end_mem);
 71
 72	/* this will put all memory onto the freelists */
 73	memblock_free_all();
 74	mem_init_print_info(NULL);
 75}
 76
/*
 * Early MMU setup: flush the entire TLB so no stale translations
 * (e.g. from the bootloader) survive into the kernel.
 */
void __init mmu_init(void)
{
	flush_tlb_all();
}
 81
 
 
 
 
 
 
 
 
 
 
 
 
/* Align a static object to a (PAGE_SIZE << order) boundary. */
#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
/* Kernel master page directory; installed into pgd_current by paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
/* NOTE(review): presumably backs not-present PTE lookups — confirm usage. */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
/* Single page holding the kuser helpers, mapped into each user process. */
static struct page *kuser_page[1];
 86
 87static int alloc_kuser_page(void)
 88{
 89	extern char __kuser_helper_start[], __kuser_helper_end[];
 90	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
 91	unsigned long vpage;
 92
 93	vpage = get_zeroed_page(GFP_ATOMIC);
 94	if (!vpage)
 95		return -ENOMEM;
 96
 97	/* Copy kuser helpers */
 98	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
 99
100	flush_icache_range(vpage, vpage + KUSER_SIZE);
101	kuser_page[0] = virt_to_page(vpage);
102
103	return 0;
104}
105arch_initcall(alloc_kuser_page);
106
107int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
108{
109	struct mm_struct *mm = current->mm;
110	int ret;
111
112	mmap_write_lock(mm);
113
114	/* Map kuser helpers to user space address */
115	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
116				      VM_READ | VM_EXEC | VM_MAYREAD |
117				      VM_MAYEXEC, kuser_page);
118
119	mmap_write_unlock(mm);
120
121	return ret;
122}
123
124const char *arch_vma_name(struct vm_area_struct *vma)
125{
126	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
127}
/*
 * NOTE(review): what follows is an OLDER revision of the same file
 * (scrape label "v4.6").  It redefines every symbol above, so this
 * file as a whole is a side-by-side comparison, not a compilable unit.
 */
  1/*
  2 * Copyright (C) 2013 Altera Corporation
  3 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
  4 * Copyright (C) 2009 Wind River Systems Inc
  5 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
  6 * Copyright (C) 2004 Microtronix Datacom Ltd
  7 *
  8 * based on arch/m68k/mm/init.c
  9 *
 10 * This file is subject to the terms and conditions of the GNU General Public
 11 * License. See the file "COPYING" in the main directory of this archive
 12 * for more details.
 13 */
 14
 15#include <linux/signal.h>
 16#include <linux/sched.h>
 17#include <linux/kernel.h>
 18#include <linux/errno.h>
 19#include <linux/string.h>
 20#include <linux/types.h>
 21#include <linux/ptrace.h>
 22#include <linux/mman.h>
 23#include <linux/mm.h>
 24#include <linux/init.h>
 25#include <linux/pagemap.h>
 26#include <linux/bootmem.h>
 27#include <linux/slab.h>
 28#include <linux/binfmts.h>
 29
 30#include <asm/setup.h>
 31#include <asm/page.h>
 32#include <asm/pgtable.h>
 33#include <asm/sections.h>
 34#include <asm/tlb.h>
 35#include <asm/mmu_context.h>
 36#include <asm/cpuinfo.h>
 37#include <asm/processor.h>
 38
/* Active page global directory; set to swapper_pg_dir by paging_init(). */
pgd_t *pgd_current;
 40
 41/*
 42 * paging_init() continues the virtual memory environment setup which
 43 * was begun by the code in arch/head.S.
 44 * The parameters are pointers to where to stick the starting and ending
 45 * addresses of available kernel virtual memory.
 46 */
 47void __init paging_init(void)
 48{
 49	unsigned long zones_size[MAX_NR_ZONES];
 50
 51	memset(zones_size, 0, sizeof(zones_size));
 52
 53	pagetable_init();
 54	pgd_current = swapper_pg_dir;
 55
 56	zones_size[ZONE_NORMAL] = max_mapnr;
 57
 58	/* pass the memory from the bootmem allocator to the main allocator */
 59	free_area_init(zones_size);
 60
 61	flush_dcache_range((unsigned long)empty_zero_page,
 62			(unsigned long)empty_zero_page + PAGE_SIZE);
 63}
 64
 65void __init mem_init(void)
 66{
 67	unsigned long end_mem   = memory_end; /* this must not include
 68						kernel stack at top */
 69
 70	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
 71
 72	end_mem &= PAGE_MASK;
 73	high_memory = __va(end_mem);
 74
 75	/* this will put all memory onto the freelists */
 76	free_all_bootmem();
 77	mem_init_print_info(NULL);
 78}
 79
/*
 * Early MMU setup: flush the entire TLB so no stale translations
 * (e.g. from the bootloader) survive into the kernel.
 */
void __init mmu_init(void)
{
	flush_tlb_all();
}
 84
#ifdef CONFIG_BLK_DEV_INITRD
/* Return the initrd's pages in [start, end) to the page allocator. */
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
 91
/* Free the memory holding __init code/data once boot has completed. */
void __init_refok free_initmem(void)
{
	free_initmem_default(-1);
}
 96
/* Align a static object to a (PAGE_SIZE << order) boundary. */
#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
/* Kernel master page directory; installed into pgd_current by paging_init(). */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
/* NOTE(review): presumably backs not-present PTE lookups — confirm usage. */
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
/* Single page holding the kuser helpers, mapped into each user process. */
static struct page *kuser_page[1];
101
102static int alloc_kuser_page(void)
103{
104	extern char __kuser_helper_start[], __kuser_helper_end[];
105	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
106	unsigned long vpage;
107
108	vpage = get_zeroed_page(GFP_ATOMIC);
109	if (!vpage)
110		return -ENOMEM;
111
112	/* Copy kuser helpers */
113	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
114
115	flush_icache_range(vpage, vpage + KUSER_SIZE);
116	kuser_page[0] = virt_to_page(vpage);
117
118	return 0;
119}
120arch_initcall(alloc_kuser_page);
121
122int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
123{
124	struct mm_struct *mm = current->mm;
125	int ret;
126
127	down_write(&mm->mmap_sem);
128
129	/* Map kuser helpers to user space address */
130	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
131				      VM_READ | VM_EXEC | VM_MAYREAD |
132				      VM_MAYEXEC, kuser_page);
133
134	up_write(&mm->mmap_sem);
135
136	return ret;
137}
138
139const char *arch_vma_name(struct vm_area_struct *vma)
140{
141	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
142}