v3.15 (lib/ioremap.c)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
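
Note that ioremap_page_range() only populates kernel page tables for a virtual range the caller has already reserved; it does not allocate address space or clean up on failure. The sketch below shows how an architecture's ioremap() implementation typically drives it. It is illustrative only and not part of lib/ioremap.c: example_ioremap is a hypothetical name, and size and phys_addr are assumed to be page-aligned. get_vm_area(), free_vm_area() and VM_IOREMAP are the standard vmalloc-area helpers from <linux/vmalloc.h>.

static void __iomem *example_ioremap(phys_addr_t phys_addr, size_t size,
				     pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;

	/* Reserve a chunk of kernel virtual address space for the mapping. */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

	/* Point the reserved range at the physical range, page by page. */
	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)addr;
}
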
v4.6 (lib/ioremap.c)
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pte_t *pte;
	u64 pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);

		if (ioremap_pmd_enabled() &&
		    ((next - addr) == PMD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
			if (pmd_set_huge(pmd, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);

		if (ioremap_pud_enabled() &&
		    ((next - addr) == PUD_SIZE) &&
		    IS_ALIGNED(phys_addr + addr, PUD_SIZE)) {
			if (pud_set_huge(pud, phys_addr + addr, prot))
				continue;
		}

		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
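
The v4.6 version differs from v3.15 mainly in the CONFIG_HAVE_ARCH_HUGE_VMAP path: when the architecture reports support through arch_ioremap_pmd_supported()/arch_ioremap_pud_supported(), and both the virtual span of an entry and phys_addr + addr are PMD_SIZE- or PUD_SIZE-aligned, a single pmd_set_huge() or pud_set_huge() entry is installed instead of descending to individual PTEs; booting with nohugeiomap disables this and falls back to PTE mappings. The sketch below illustrates the per-architecture half of that contract; the cpu_has_*() feature-test helpers are hypothetical placeholders, not real kernel APIs.

/*
 * Illustrative only: an architecture that selects CONFIG_HAVE_ARCH_HUGE_VMAP
 * tells ioremap_huge_init() which levels it can map with one large entry.
 * cpu_has_1g_io_mappings()/cpu_has_2m_io_mappings() stand in for whatever
 * hardware feature checks a real architecture would perform.
 */
int __init arch_ioremap_pud_supported(void)
{
	return cpu_has_1g_io_mappings();	/* hypothetical feature check */
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_2m_io_mappings();	/* hypothetical feature check */
}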