v4.17
/*
 *  This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from arch/ppc/mm/pgtable.c:
 *    -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file COPYING in the main directory of this
 *  archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

#ifndef CONFIG_SMP
struct pgtable_cache_struct quicklists;
#endif

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)klimit))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
					(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);
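
A minimal sketch of how a driver might consume these exports; the names MYDEV_PHYS, MYDEV_REG, and mydev_probe are hypothetical, not part of this file. ioremap() here returns an uncached mapping of the physical range (the _PAGE_NO_CACHE path above), and iounmap() releases it:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>

#define MYDEV_PHYS	0x40000000UL	/* hypothetical device base address */
#define MYDEV_REG	0x04		/* hypothetical register offset */

static int mydev_probe(void)
{
	void __iomem *regs = ioremap(MYDEV_PHYS, PAGE_SIZE);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + MYDEV_REG);	/* poke a device register */
	iounmap(regs);
	return 0;
}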


int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;
	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pgd_offset_k(va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}
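
The "upper 10 bits / middle 10 bits" comments describe the classic two-level 32-bit layout with 4 KiB pages: a 10-bit directory index, a 10-bit table index, and a 12-bit page offset. A stand-alone sketch of that split (ordinary user-space C, just to illustrate the arithmetic; the example address is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t va  = 0xc0123456;		/* arbitrary example kernel VA */
	uint32_t dir = va >> 22;		/* upper 10 bits: first-level index */
	uint32_t tbl = (va >> 12) & 0x3ff;	/* middle 10 bits: second-level index */
	uint32_t off = va & 0xfff;		/* low 12 bits: offset within page */

	printf("dir %u, table %u, offset 0x%03x\n", dir, tbl, off);
	return 0;
}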

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))
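
The trick behind the macro: x & (x - 1) clears the lowest set bit of x, so the result is zero exactly when x has a single bit set. For example, 64 & 63 == 0 (a power of two), while 24 & 23 == 16 (not). A stand-alone check of the macro:

#include <assert.h>

#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

int main(void)
{
	assert(is_power_of_2(1) && is_power_of_2(64) && is_power_of_2(4096));
	assert(!is_power_of_2(0) && !is_power_of_2(24) && !is_power_of_2(4095));
	return 0;
}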

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t	*pgd;
	pmd_t	*pmd;
	pte_t	*pte;
	int     retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pmd = pmd_offset(pgd, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}
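
A sketch of iopa() in use: it walks the page tables via get_pteptr() and returns 0 when no PTE maps the address, so callers must treat 0 as failure. The function and buffer names below (mydrv_show_phys, buf) are hypothetical:

static void mydrv_show_phys(void *buf)
{
	unsigned long pa = iopa((unsigned long)buf);

	if (!pa)
		pr_info("no mapping for %p\n", buf);
	else
		pr_info("%p maps to phys 0x%lx\n", buf, pa);
}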

__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
		unsigned long address)
{
	pte_t *pte;
	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}

void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses)
		BUG();

	map_page(address, phys, pgprot_val(flags));
}
v5.9
/*
 *  This file contains the routines setting up the linux page tables.
 *
 * Copyright (C) 2008 Michal Simek
 * Copyright (C) 2008 PetaLogix
 *
 *    Copyright (C) 2007 Xilinx, Inc.  All rights reserved.
 *
 *  Derived from arch/ppc/mm/pgtable.c:
 *    -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This file is subject to the terms and conditions of the GNU General
 *  Public License.  See the file COPYING in the main directory of this
 *  archive for more details.
 *
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <asm/mmu.h>
#include <asm/sections.h>
#include <asm/fixmap.h>

unsigned long ioremap_base;
unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);

static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
		unsigned long flags)
{
	unsigned long v, i;
	phys_addr_t p;
	int err;

	/*
	 * Choose an address to map it to.
	 * Once the vmalloc system is running, we use it.
	 * Before then, we use space going down from ioremap_base
	 * (ioremap_bot records where we're up to).
	 */
	p = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - p;

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 * mem_init() sets high_memory so only do the check after that.
	 *
	 * However, allow remap of rootfs: TBD
	 */

	if (mem_init_done &&
		p >= memory_start && p < virt_to_phys(high_memory) &&
		!(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
		p < __virt_to_phys((phys_addr_t)klimit))) {
		pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %ps\n",
			(unsigned long)p, __builtin_return_address(0));
		return NULL;
	}

	if (size == 0)
		return NULL;

	/*
	 * Is it already mapped? If the whole area is mapped then we're
	 * done, otherwise remap it since we want to keep the virt addrs for
	 * each request contiguous.
	 *
	 * We make the assumption here that if the bottom and top
	 * of the range we want are mapped then it's mapped to the
	 * same virt address (and this is contiguous).
	 *  -- Cort
	 */

	if (mem_init_done) {
		struct vm_struct *area;
		area = get_vm_area(size, VM_IOREMAP);
		if (area == NULL)
			return NULL;
		v = (unsigned long) area->addr;
	} else {
		v = (ioremap_bot -= size);
	}

	if ((flags & _PAGE_PRESENT) == 0)
		flags |= _PAGE_KERNEL;
	if (flags & _PAGE_NO_CACHE)
		flags |= _PAGE_GUARDED;

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(v + i, p + i, flags);
	if (err) {
		if (mem_init_done)
			vfree((void *)v);
		return NULL;
	}

	return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
EXPORT_SYMBOL(ioremap);

void iounmap(volatile void __iomem *addr)
{
	if ((__force void *)addr > high_memory &&
					(unsigned long) addr < ioremap_bot)
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
EXPORT_SYMBOL(iounmap);


int map_page(unsigned long va, phys_addr_t pa, int flags)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	p4d = p4d_offset(pgd_offset_k(va), va);
	pud = pud_offset(p4d, va);
	pd = pmd_offset(pud, va);
	/* Use middle 10 bits of VA to index the second-level map */
	pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */
	/* pg = pte_alloc_kernel(&init_mm, pd, va); */

	if (pg != NULL) {
		err = 0;
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
				__pgprot(flags)));
		if (unlikely(mem_init_done))
			_tlbie(va);
	}
	return err;
}
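
The functional change from the v4.17 version above is the explicit walk through the p4d and pud levels. MicroBlaze still uses a two-level layout; those extra levels are folded away by the generic nop4d/nopud headers, so p4d_offset() and pud_offset() are pass-throughs that compile to nothing. A minimal sketch of the resulting generic kernel-address walk (the helper name walk_kernel_pte is hypothetical):

static pte_t *walk_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* top level, in &init_mm */
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded on two-level archs */
	pud_t *pud = pud_offset(p4d, addr);	/* folded on two-level archs */
	pmd_t *pmd = pmd_offset(pud, addr);

	if (!pmd_present(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* the real second level */
}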

/*
 * Map in all of physical memory starting at CONFIG_KERNEL_START.
 */
void __init mapin_ram(void)
{
	unsigned long v, p, s, f;

	v = CONFIG_KERNEL_START;
	p = memory_start;
	for (s = 0; s < lowmem_size; s += PAGE_SIZE) {
		f = _PAGE_PRESENT | _PAGE_ACCESSED |
				_PAGE_SHARED | _PAGE_HWEXEC;
		if ((char *) v < _stext || (char *) v >= _etext)
			f |= _PAGE_WRENABLE;
		else
			/* On the MicroBlaze, no user access
			   forces R/W kernel access */
			f |= _PAGE_USER;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

/* is x a power of 2? */
#define is_power_of_2(x)	((x) != 0 && (((x) & ((x) - 1)) == 0))

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep)
{
	pgd_t	*pgd;
	p4d_t	*p4d;
	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int     retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		p4d = p4d_offset(pgd, addr & PAGE_MASK);
		pud = pud_offset(p4d, addr & PAGE_MASK);
		pmd = pmd_offset(pud, addr & PAGE_MASK);
		if (pmd_present(*pmd)) {
			pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
			if (pte) {
				retval = 1;
				*ptep = pte;
			}
		}
	}
	return retval;
}

/* Find physical address for this virtual address.  Normally used by
 * I/O functions, but anyone can call it.
 */
unsigned long iopa(unsigned long addr)
{
	unsigned long pa;

	pte_t *pte;
	struct mm_struct *mm;

	/* Allow mapping of user addresses (within the thread)
	 * for DMA if necessary.
	 */
	if (addr < TASK_SIZE)
		mm = current->mm;
	else
		mm = &init_mm;

	pa = 0;
	if (get_pteptr(mm, addr, &pte))
		pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);

	return pa;
}

__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	pte_t *pte;
	if (mem_init_done) {
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	} else {
		pte = (pte_t *)early_get_page();
		if (pte)
			clear_page(pte);
	}
	return pte;
}
255
256void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
257{
258	unsigned long address = __fix_to_virt(idx);
259
260	if (idx >= __end_of_fixed_addresses)
261		BUG();
262
263	map_page(address, phys, pgprot_val(flags));
264}