// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cache.h>

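/*
 * Is @paddr in a region the hardware treats as uncached anyway?
 * On ARCompact that is the fixed window from ARC_UNCACHED_ADDR_SPACE
 * upwards; on ARCv2 it is the peripheral aperture (perip_base..perip_end)
 * set up during boot. Such addresses need no MMU mapping to be accessed
 * uncached.
 */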
static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
        if (is_isa_arcompact()) {
                if (paddr >= ARC_UNCACHED_ADDR_SPACE)
                        return true;
        } else if (paddr >= perip_base && paddr <= perip_end) {
                return true;
        }

        return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
        phys_addr_t end;

        /* Don't allow wraparound or zero size */
        end = paddr + size - 1;
        if (!size || (end < paddr))
                return NULL;

        /*
         * If the region is h/w uncached, the MMU mapping can be elided as an
         * optimization. The cast to u32 is fine as this region can only be
         * inside 4GB.
         */
        if (arc_uncached_addr_space(paddr))
                return (void __iomem *)(u32)paddr;

        return ioremap_prot(paddr, size,
                            pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);

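/*
 * Usage sketch (hypothetical driver code, not part of this file): map a
 * device register window and access it via the MMIO accessors. The base
 * address and register offset below are made-up examples.
 *
 *      void __iomem *regs = ioremap(0xf0001000, SZ_4K);
 *
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(BIT(0), regs + 0x10);    // hypothetical enable register
 *      iounmap(regs);
 */
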
/*
 * ioremap with access flags
 * Cache semantics-wise it is the same as ioremap - "forced" uncached.
 * However, unlike vanilla ioremap which bypasses the ARC MMU for addresses
 * in the ARC hardware-uncached region, this one still goes through the MMU,
 * as the caller might need finer-grained access control (R/W/X).
 */
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                           unsigned long flags)
{
        unsigned int off;
        unsigned long vaddr;
        struct vm_struct *area;
        phys_addr_t end;
        pgprot_t prot = __pgprot(flags);

        /* Don't allow wraparound or zero size */
        end = paddr + size - 1;
        if (!size || (end < paddr))
                return NULL;

        /* An early platform driver might end up here */
        if (!slab_is_available())
                return NULL;

        /* force uncached */
        prot = pgprot_noncached(prot);

        /* Mappings have to be page-aligned */
        off = paddr & ~PAGE_MASK;
        paddr &= PAGE_MASK_PHYS;
        size = PAGE_ALIGN(end + 1) - paddr;

        /* Ok, go for it: reserve a virtual range and wire up the mapping */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = paddr;
        vaddr = (unsigned long)area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, paddr, prot)) {
                vunmap((void __force *)vaddr);
                return NULL;
        }
        return (void __iomem *)(off + (char __iomem *)vaddr);
}
EXPORT_SYMBOL(ioremap_prot);

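/*
 * Usage sketch (hypothetical, not part of this file): a caller wanting an
 * uncached mapping with explicit protection bits could do
 *
 *      void __iomem *p = ioremap_prot(paddr, size,
 *                      pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 *
 * Note that even a cacheable pgprot passed in comes out uncached, since
 * pgprot_noncached() is applied unconditionally above.
 */
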
void iounmap(const volatile void __iomem *addr)
{
        /* weird double cast to handle phys_addr_t > 32 bits */
        if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
                return;

        vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
}
EXPORT_SYMBOL(iounmap);
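
/*
 * End-to-end sketch (hypothetical): for an address inside the h/w uncached
 * region, ioremap() above simply returns the physical address cast to a
 * pointer, with no vmalloc mapping, so the matching iounmap() is a no-op:
 *
 *      void __iomem *p = ioremap(perip_base, SZ_4K);
 *      u32 val = readl(p);     // uncached access, no MMU mapping set up
 *      iounmap(p);             // detected as uncached space, returns early
 */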