Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Feb 10-13, 2025
Register
Loading...
v3.15
 
 1/*
 2 * Re-map IO memory to kernel address space so that we can access it.
 3 * This is needed for high PCI addresses that aren't mapped in the
 4 * 640k-1MB IO memory area on PC's
 5 *
 6 * (C) Copyright 1995 1996 Linus Torvalds
 7 */
 8#include <linux/vmalloc.h>
 9#include <linux/mm.h>
10#include <linux/sched.h>
11#include <linux/io.h>
12#include <linux/export.h>
13#include <asm/cacheflush.h>
14#include <asm/pgtable.h>
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
17		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
18{
19	pte_t *pte;
20	u64 pfn;
21
22	pfn = phys_addr >> PAGE_SHIFT;
23	pte = pte_alloc_kernel(pmd, addr);
24	if (!pte)
25		return -ENOMEM;
26	do {
27		BUG_ON(!pte_none(*pte));
28		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
29		pfn++;
30	} while (pte++, addr += PAGE_SIZE, addr != end);
31	return 0;
32}
33
34static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
35		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
36{
37	pmd_t *pmd;
38	unsigned long next;
39
40	phys_addr -= addr;
41	pmd = pmd_alloc(&init_mm, pud, addr);
42	if (!pmd)
43		return -ENOMEM;
44	do {
45		next = pmd_addr_end(addr, end);
 
 
 
 
 
 
 
 
 
46		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
47			return -ENOMEM;
48	} while (pmd++, addr = next, addr != end);
49	return 0;
50}
51
52static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
53		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
54{
55	pud_t *pud;
56	unsigned long next;
57
58	phys_addr -= addr;
59	pud = pud_alloc(&init_mm, pgd, addr);
60	if (!pud)
61		return -ENOMEM;
62	do {
63		next = pud_addr_end(addr, end);
 
 
 
 
 
 
 
 
 
64		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
65			return -ENOMEM;
66	} while (pud++, addr = next, addr != end);
67	return 0;
68}
69
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * ioremap_page_range - map a physical range into the kernel page tables
 * @addr:	start of the virtual range (must be < @end)
 * @end:	end of the virtual range (exclusive)
 * @phys_addr:	physical address to map @addr to, linearly
 * @prot:	page protection bits for the new mappings
 *
 * Walks the kernel (init_mm) page tables one PGD entry at a time and
 * delegates to ioremap_pud_range() for each segment.  Returns 0 on
 * success or -ENOMEM if a page-table allocation failed (in which case
 * already-established entries are left in place for the caller to
 * tear down).
 */
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);

	start = addr;
	/*
	 * Bias phys_addr by -addr once, so each level can compute the
	 * physical address for a segment as phys_addr + addr.
	 */
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	/* The BUG_ON above guarantees at least one iteration sets err. */
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	/* Arch hook: flush caches over the newly mapped virtual range. */
	flush_cache_vmap(start, end);

	return err;
}
EXPORT_SYMBOL_GPL(ioremap_page_range);
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Re-map IO memory to kernel address space so that we can access it.
  4 * This is needed for high PCI addresses that aren't mapped in the
  5 * 640k-1MB IO memory area on PC's
  6 *
  7 * (C) Copyright 1995 1996 Linus Torvalds
  8 */
  9#include <linux/vmalloc.h>
 10#include <linux/mm.h>
 11#include <linux/sched.h>
 12#include <linux/io.h>
 13#include <linux/export.h>
 14#include <asm/cacheflush.h>
 15#include <asm/pgtable.h>
 16
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/*
 * Per-level capability flags for huge ioremap mappings.  Set once at
 * boot by ioremap_huge_init() and only read afterwards, hence
 * __read_mostly.  Note that ioremap_p4d_capable is never set anywhere
 * in this file, so ioremap_p4d_enabled() always returns 0 here.
 */
static int __read_mostly ioremap_p4d_capable;
static int __read_mostly ioremap_pud_capable;
static int __read_mostly ioremap_pmd_capable;
static int __read_mostly ioremap_huge_disabled;

/* Handler for the "nohugeiomap" kernel command-line parameter. */
static int __init set_nohugeiomap(char *str)
{
	ioremap_huge_disabled = 1;
	return 0;
}
early_param("nohugeiomap", set_nohugeiomap);

/*
 * Probe the architecture for PUD/PMD huge-mapping support, unless the
 * user disabled huge ioremaps on the command line.
 */
void __init ioremap_huge_init(void)
{
	if (!ioremap_huge_disabled) {
		if (arch_ioremap_pud_supported())
			ioremap_pud_capable = 1;
		if (arch_ioremap_pmd_supported())
			ioremap_pmd_capable = 1;
	}
}

static inline int ioremap_p4d_enabled(void)
{
	return ioremap_p4d_capable;
}

static inline int ioremap_pud_enabled(void)
{
	return ioremap_pud_capable;
}

static inline int ioremap_pmd_enabled(void)
{
	return ioremap_pmd_capable;
}

#else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
/* Without arch support, huge ioremap mappings are never attempted. */
static inline int ioremap_p4d_enabled(void) { return 0; }
static inline int ioremap_pud_enabled(void) { return 0; }
static inline int ioremap_pmd_enabled(void) { return 0; }
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */
 60
 61static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
 62		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 63{
 64	pte_t *pte;
 65	u64 pfn;
 66
 67	pfn = phys_addr >> PAGE_SHIFT;
 68	pte = pte_alloc_kernel(pmd, addr);
 69	if (!pte)
 70		return -ENOMEM;
 71	do {
 72		BUG_ON(!pte_none(*pte));
 73		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 74		pfn++;
 75	} while (pte++, addr += PAGE_SIZE, addr != end);
 76	return 0;
 77}
 78
 79static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
 80		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 81{
 82	pmd_t *pmd;
 83	unsigned long next;
 84
 85	phys_addr -= addr;
 86	pmd = pmd_alloc(&init_mm, pud, addr);
 87	if (!pmd)
 88		return -ENOMEM;
 89	do {
 90		next = pmd_addr_end(addr, end);
 91
 92		if (ioremap_pmd_enabled() &&
 93		    ((next - addr) == PMD_SIZE) &&
 94		    IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
 95		    pmd_free_pte_page(pmd)) {
 96			if (pmd_set_huge(pmd, phys_addr + addr, prot))
 97				continue;
 98		}
 99
100		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
101			return -ENOMEM;
102	} while (pmd++, addr = next, addr != end);
103	return 0;
104}
105
106static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
107		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
108{
109	pud_t *pud;
110	unsigned long next;
111
112	phys_addr -= addr;
113	pud = pud_alloc(&init_mm, p4d, addr);
114	if (!pud)
115		return -ENOMEM;
116	do {
117		next = pud_addr_end(addr, end);
118
119		if (ioremap_pud_enabled() &&
120		    ((next - addr) == PUD_SIZE) &&
121		    IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
122		    pud_free_pmd_page(pud)) {
123			if (pud_set_huge(pud, phys_addr + addr, prot))
124				continue;
125		}
126
127		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot))
128			return -ENOMEM;
129	} while (pud++, addr = next, addr != end);
130	return 0;
131}
132
133static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
134		unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
135{
136	p4d_t *p4d;
137	unsigned long next;
138
139	phys_addr -= addr;
140	p4d = p4d_alloc(&init_mm, pgd, addr);
141	if (!p4d)
142		return -ENOMEM;
143	do {
144		next = p4d_addr_end(addr, end);
145
146		if (ioremap_p4d_enabled() &&
147		    ((next - addr) == P4D_SIZE) &&
148		    IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
149			if (p4d_set_huge(p4d, phys_addr + addr, prot))
150				continue;
151		}
152
153		if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
154			return -ENOMEM;
155	} while (p4d++, addr = next, addr != end);
156	return 0;
157}
158
/*
 * ioremap_page_range - map a physical range into the kernel page tables
 * @addr:	start of the virtual range (must be < @end)
 * @end:	end of the virtual range (exclusive)
 * @phys_addr:	physical address to map @addr to, linearly
 * @prot:	page protection bits for the new mappings
 *
 * Walks the kernel (init_mm) page tables one PGD entry at a time and
 * delegates to ioremap_p4d_range() for each segment.  May sleep (page
 * table allocation).  Returns 0 on success or -ENOMEM if a page-table
 * allocation failed; partially established entries are left for the
 * caller to tear down.
 */
int ioremap_page_range(unsigned long addr,
		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long start;
	unsigned long next;
	int err;

	might_sleep();
	BUG_ON(addr >= end);

	start = addr;
	/*
	 * Bias phys_addr by -addr once, so each level can compute the
	 * physical address for a segment as phys_addr + addr.
	 */
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	/* The BUG_ON above guarantees at least one iteration sets err. */
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	/* Arch hook: flush caches over the newly mapped virtual range. */
	flush_cache_vmap(start, end);

	return err;
}