/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

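/*
 * Fill in PTEs for the given physical range, clipping the range so it
 * never crosses the current PMD. Hitting a PTE that is already populated
 * is a fatal error, since it means something else mapped this region.
 */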
static inline void remap_area_pte(pte_t * pte, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
				   | __WRITEABLE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	BUG_ON(address >= end);
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

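/*
 * Walk the PMD entries covering the range within one PGD entry,
 * allocating page tables as needed and delegating to remap_area_pte()
 * for each PMD's worth of pages.
 */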
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
	phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
{
	phys_addr_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	BUG_ON(address >= end);
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

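/*
 * Top-level walk: iterate over the PGD entries covering
 * [address, address + size), allocating intermediate tables and filling
 * in the leaf PTEs. Returns 0 on success or -ENOMEM if an allocation
 * fails; the TLB is flushed either way, since some entries may already
 * have been written before a failure.
 */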
static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
	phys_addr_t size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	BUG_ON(address >= end);
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

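/*
 * Callback for walk_system_ram_range(): report whether any page in the
 * range is ordinary (non-reserved) RAM, which must not be ioremapped.
 */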
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++) {
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	}

	return 0;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
	unsigned long offset, pfn, last_pfn;
	struct vm_struct * area;
	phys_addr_t last_addr;
	void * addr;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap RAM that may be allocated by the page
	 * allocator, since that could lead to races & data clobbering.
	 */
	pfn = PFN_DOWN(phys_addr);
	last_pfn = PFN_DOWN(last_addr);
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}

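/*
 * Illustrative sketch only (not part of this file): how a driver would
 * typically use the mapping set up here, via the ioremap()/iounmap()
 * wrappers from <asm/io.h>. The physical base address, window size, and
 * register offset below are all hypothetical.
 */
#if 0	/* example usage, compiled out */
static int example_probe(void)
{
	void __iomem *regs;

	/* Map a hypothetical 4 KiB device register window. */
	regs = ioremap(0x1f000000, 0x1000);
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x04);	/* hypothetical enable register */

	iounmap(regs);
	return 0;
}
#endif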
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

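/*
 * Undo an __ioremap(). KSEG1 addresses were never mapped through the
 * page tables (see the IS_LOW512 shortcut above), so there is nothing
 * to unmap for them.
 */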
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);