// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 * Author: John Garry <john.garry@huawei.com>
 */

#define pr_fmt(fmt) "LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* The unique hardware address list */
static LIST_HEAD(io_range_list);
/* Serializes writers of io_range_list; lockless readers rely on RCU. */
static DEFINE_MUTEX(io_range_mutex);

/*
 * Consider a kernel general helper for this.
 * True iff @b lies in the half-open interval [first, first + len).
 */
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
25
26/**
27 * logic_pio_register_range - register logical PIO range for a host
28 * @new_range: pointer to the IO range to be registered.
29 *
30 * Returns 0 on success, the error code in case of failure.
31 * If the range already exists, -EEXIST will be returned, which should be
32 * considered a success.
33 *
34 * Register a new IO range node in the IO range list.
35 */
36int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
37{
38 struct logic_pio_hwaddr *range;
39 resource_size_t start;
40 resource_size_t end;
41 resource_size_t mmio_end = 0;
42 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
43 int ret = 0;
44
45 if (!new_range || !new_range->fwnode || !new_range->size ||
46 (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
47 return -EINVAL;
48
49 start = new_range->hw_start;
50 end = new_range->hw_start + new_range->size;
51
52 mutex_lock(&io_range_mutex);
53 list_for_each_entry(range, &io_range_list, list) {
54 if (range->fwnode == new_range->fwnode) {
55 /* range already there */
56 ret = -EEXIST;
57 goto end_register;
58 }
59 if (range->flags == LOGIC_PIO_CPU_MMIO &&
60 new_range->flags == LOGIC_PIO_CPU_MMIO) {
61 /* for MMIO ranges we need to check for overlap */
62 if (start >= range->hw_start + range->size ||
63 end < range->hw_start) {
64 mmio_end = range->io_start + range->size;
65 } else {
66 ret = -EFAULT;
67 goto end_register;
68 }
69 } else if (range->flags == LOGIC_PIO_INDIRECT &&
70 new_range->flags == LOGIC_PIO_INDIRECT) {
71 iio_sz += range->size;
72 }
73 }
74
75 /* range not registered yet, check for available space */
76 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
77 if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
78 /* if it's too big check if 64K space can be reserved */
79 if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
80 ret = -E2BIG;
81 goto end_register;
82 }
83 new_range->size = SZ_64K;
84 pr_warn("Requested IO range too big, new size set to 64K\n");
85 }
86 new_range->io_start = mmio_end;
87 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
88 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
89 ret = -E2BIG;
90 goto end_register;
91 }
92 new_range->io_start = iio_sz;
93 } else {
94 /* invalid flag */
95 ret = -EINVAL;
96 goto end_register;
97 }
98
99 list_add_tail_rcu(&new_range->list, &io_range_list);
100
101end_register:
102 mutex_unlock(&io_range_mutex);
103 return ret;
104}
105
/**
 * logic_pio_unregister_range - unregister a logical PIO range for a host
 * @range: pointer to the IO range which has been already registered.
 *
 * Unregister a previously-registered IO range node.  On return, no RCU
 * reader can still be traversing @range, so the caller may free it.
 */
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
	mutex_lock(&io_range_mutex);
	list_del_rcu(&range->list);
	mutex_unlock(&io_range_mutex);
	/* Wait out any readers still walking io_range_list under RCU. */
	synchronize_rcu();
}
119
120/**
121 * find_io_range_by_fwnode - find logical PIO range for given FW node
122 * @fwnode: FW node handle associated with logical PIO range
123 *
124 * Returns pointer to node on success, NULL otherwise.
125 *
126 * Traverse the io_range_list to find the registered node for @fwnode.
127 */
128struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
129{
130 struct logic_pio_hwaddr *range, *found_range = NULL;
131
132 rcu_read_lock();
133 list_for_each_entry_rcu(range, &io_range_list, list) {
134 if (range->fwnode == fwnode) {
135 found_range = range;
136 break;
137 }
138 }
139 rcu_read_unlock();
140
141 return found_range;
142}
143
144/* Return a registered range given an input PIO token */
145static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
146{
147 struct logic_pio_hwaddr *range, *found_range = NULL;
148
149 rcu_read_lock();
150 list_for_each_entry_rcu(range, &io_range_list, list) {
151 if (in_range(pio, range->io_start, range->size)) {
152 found_range = range;
153 break;
154 }
155 }
156 rcu_read_unlock();
157
158 if (!found_range)
159 pr_err("PIO entry token 0x%lx invalid\n", pio);
160
161 return found_range;
162}
163
164/**
165 * logic_pio_to_hwaddr - translate logical PIO to HW address
166 * @pio: logical PIO value
167 *
168 * Returns HW address if valid, ~0 otherwise.
169 *
170 * Translate the input logical PIO to the corresponding hardware address.
171 * The input PIO should be unique in the whole logical PIO space.
172 */
173resource_size_t logic_pio_to_hwaddr(unsigned long pio)
174{
175 struct logic_pio_hwaddr *range;
176
177 range = find_io_range(pio);
178 if (range)
179 return range->hw_start + pio - range->io_start;
180
181 return (resource_size_t)~0;
182}
183
184/**
185 * logic_pio_trans_hwaddr - translate HW address to logical PIO
186 * @fwnode: FW node reference for the host
187 * @addr: Host-relative HW address
188 * @size: size to translate
189 *
190 * Returns Logical PIO value if successful, ~0UL otherwise
191 */
192unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
193 resource_size_t addr, resource_size_t size)
194{
195 struct logic_pio_hwaddr *range;
196
197 range = find_io_range_by_fwnode(fwnode);
198 if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
199 pr_err("IO range not found or invalid\n");
200 return ~0UL;
201 }
202 if (range->size < size) {
203 pr_err("resource size %pa cannot fit in IO range size %pa\n",
204 &size, &range->size);
205 return ~0UL;
206 }
207 return addr - range->hw_start + range->io_start;
208}
209
210unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
211{
212 struct logic_pio_hwaddr *range;
213
214 rcu_read_lock();
215 list_for_each_entry_rcu(range, &io_range_list, list) {
216 if (range->flags != LOGIC_PIO_CPU_MMIO)
217 continue;
218 if (in_range(addr, range->hw_start, range->size)) {
219 unsigned long cpuaddr;
220
221 cpuaddr = addr - range->hw_start + range->io_start;
222
223 rcu_read_unlock();
224 return cpuaddr;
225 }
226 }
227 rcu_read_unlock();
228
229 pr_err("addr %pa not registered in io_range_list\n", &addr);
230
231 return ~0UL;
232}
233
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
/*
 * BUILD_LOGIC_IO - generate logic_{in,out,ins,outs}##bwl() for one width.
 * @bwl:  width suffix (b/w/l)
 * @type: matching data type (u8/u16/u32)
 *
 * Tokens below MMIO_UPPER_LIMIT are serviced as CPU MMIO via the arch
 * _in/_out helpers or reads/writes on PCI_IOBASE; tokens in
 * [MMIO_UPPER_LIMIT, IO_SPACE_LIMIT) are resolved through io_range_list
 * and dispatched to the owning host's indirect ops (entry->ops is
 * guaranteed non-NULL for INDIRECT ranges by registration-time checks).
 * An unregistered token in the indirect window warns once; reads then
 * return all-ones.  Tokens at or above IO_SPACE_LIMIT are silently
 * ignored (reads return all-ones).
 */
#define BUILD_LOGIC_IO(bwl, type) \
type logic_in##bwl(unsigned long addr) \
{ \
	type ret = (type)~0; \
 \
	if (addr < MMIO_UPPER_LIMIT) { \
		ret = _in##bwl(addr); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry) \
			ret = entry->ops->in(entry->hostdata, \
					addr, sizeof(type)); \
		else \
			WARN_ON_ONCE(1); \
	} \
	return ret; \
} \
 \
void logic_out##bwl(type value, unsigned long addr) \
{ \
	if (addr < MMIO_UPPER_LIMIT) { \
		_out##bwl(value, addr); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry) \
			entry->ops->out(entry->hostdata, \
					addr, value, sizeof(type)); \
		else \
			WARN_ON_ONCE(1); \
	} \
} \
 \
void logic_ins##bwl(unsigned long addr, void *buffer, \
		    unsigned int count) \
{ \
	if (addr < MMIO_UPPER_LIMIT) { \
		reads##bwl(PCI_IOBASE + addr, buffer, count); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry) \
			entry->ops->ins(entry->hostdata, \
					addr, buffer, sizeof(type), count); \
		else \
			WARN_ON_ONCE(1); \
	} \
 \
} \
 \
void logic_outs##bwl(unsigned long addr, const void *buffer, \
		     unsigned int count) \
{ \
	if (addr < MMIO_UPPER_LIMIT) { \
		writes##bwl(PCI_IOBASE + addr, buffer, count); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry) \
			entry->ops->outs(entry->hostdata, \
					addr, buffer, sizeof(type), count); \
		else \
			WARN_ON_ONCE(1); \
	} \
}

BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
 */

#define pr_fmt(fmt) "LOGIC PIO: " fmt

#include <linux/of.h>
#include <linux/io.h>
#include <linux/logic_pio.h>
#include <linux/mm.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
#include <linux/slab.h>
17
/* The unique hardware address list */
static LIST_HEAD(io_range_list);
/* Serializes writers of io_range_list; lockless readers rely on RCU. */
static DEFINE_MUTEX(io_range_mutex);

/*
 * Consider a kernel general helper for this.
 * True iff @b lies in the half-open interval [first, first + len).
 */
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
24
/**
 * logic_pio_register_range - register logical PIO range for a host
 * @new_range: pointer to the IO range to be registered.
 *
 * Returns 0 on success, the error code in case of failure.
 * A range whose fwnode is already registered is treated as success (0).
 *
 * Register a new IO range node in the IO range list.  On success,
 * @new_range->io_start holds the allocated logical PIO base.
 */
int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
{
	struct logic_pio_hwaddr *range;
	resource_size_t start;
	resource_size_t end;
	resource_size_t mmio_end = 0;	/* logical base candidate for MMIO */
	resource_size_t iio_sz = MMIO_UPPER_LIMIT; /* next free indirect base */
	int ret = 0;

	/*
	 * NOTE(review): an INDIRECT range with NULL ops is accepted here but
	 * the indirect accessors dereference entry->ops — consider validating
	 * ops for LOGIC_PIO_INDIRECT as well.
	 */
	if (!new_range || !new_range->fwnode || !new_range->size)
		return -EINVAL;

	start = new_range->hw_start;
	end = new_range->hw_start + new_range->size;	/* exclusive end */

	mutex_lock(&io_range_mutex);
	list_for_each_entry(range, &io_range_list, list) {
		if (range->fwnode == new_range->fwnode) {
			/* range already there; report success (ret stays 0) */
			goto end_register;
		}
		if (range->flags == LOGIC_PIO_CPU_MMIO &&
		    new_range->flags == LOGIC_PIO_CPU_MMIO) {
			/*
			 * for MMIO ranges we need to check for overlap
			 * NOTE(review): with half-open ranges the exact
			 * non-overlap test is "end <= range->hw_start";
			 * "end <" rejects a range that merely abuts an
			 * existing one — confirm intended.
			 */
			if (start >= range->hw_start + range->size ||
			    end < range->hw_start) {
				/*
				 * NOTE(review): this keeps the LAST
				 * non-overlapping entry's end, not the
				 * maximum over all entries — relies on list
				 * being ordered by io_start.
				 */
				mmio_end = range->io_start + range->size;
			} else {
				ret = -EFAULT;
				goto end_register;
			}
		} else if (range->flags == LOGIC_PIO_INDIRECT &&
			   new_range->flags == LOGIC_PIO_INDIRECT) {
			/* indirect ranges are packed back to back */
			iio_sz += range->size;
		}
	}

	/* range not registered yet, check for available space */
	if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
		if (mmio_end + new_range->size - 1 > MMIO_UPPER_LIMIT) {
			/* if it's too big check if 64K space can be reserved */
			if (mmio_end + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
				ret = -E2BIG;
				goto end_register;
			}
			new_range->size = SZ_64K;
			pr_warn("Requested IO range too big, new size set to 64K\n");
		}
		new_range->io_start = mmio_end;
	} else if (new_range->flags == LOGIC_PIO_INDIRECT) {
		if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
			ret = -E2BIG;
			goto end_register;
		}
		new_range->io_start = iio_sz;
	} else {
		/* invalid flag */
		ret = -EINVAL;
		goto end_register;
	}

	list_add_tail_rcu(&new_range->list, &io_range_list);

end_register:
	mutex_unlock(&io_range_mutex);
	return ret;
}
100
/**
 * logic_pio_unregister_range - unregister a logical PIO range for a host
 * @range: pointer to the IO range which has been already registered.
 *
 * Unregister a previously-registered IO range node.  On return, no RCU
 * reader can still be traversing @range, so the caller may free it.
 */
void logic_pio_unregister_range(struct logic_pio_hwaddr *range)
{
	mutex_lock(&io_range_mutex);
	list_del_rcu(&range->list);
	mutex_unlock(&io_range_mutex);
	/* Wait out any readers still walking io_range_list under RCU. */
	synchronize_rcu();
}
114
115/**
116 * find_io_range_by_fwnode - find logical PIO range for given FW node
117 * @fwnode: FW node handle associated with logical PIO range
118 *
119 * Returns pointer to node on success, NULL otherwise.
120 *
121 * Traverse the io_range_list to find the registered node for @fwnode.
122 */
123struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
124{
125 struct logic_pio_hwaddr *range, *found_range = NULL;
126
127 rcu_read_lock();
128 list_for_each_entry_rcu(range, &io_range_list, list) {
129 if (range->fwnode == fwnode) {
130 found_range = range;
131 break;
132 }
133 }
134 rcu_read_unlock();
135
136 return found_range;
137}
138
139/* Return a registered range given an input PIO token */
140static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
141{
142 struct logic_pio_hwaddr *range, *found_range = NULL;
143
144 rcu_read_lock();
145 list_for_each_entry_rcu(range, &io_range_list, list) {
146 if (in_range(pio, range->io_start, range->size)) {
147 found_range = range;
148 break;
149 }
150 }
151 rcu_read_unlock();
152
153 if (!found_range)
154 pr_err("PIO entry token 0x%lx invalid\n", pio);
155
156 return found_range;
157}
158
159/**
160 * logic_pio_to_hwaddr - translate logical PIO to HW address
161 * @pio: logical PIO value
162 *
163 * Returns HW address if valid, ~0 otherwise.
164 *
165 * Translate the input logical PIO to the corresponding hardware address.
166 * The input PIO should be unique in the whole logical PIO space.
167 */
168resource_size_t logic_pio_to_hwaddr(unsigned long pio)
169{
170 struct logic_pio_hwaddr *range;
171
172 range = find_io_range(pio);
173 if (range)
174 return range->hw_start + pio - range->io_start;
175
176 return (resource_size_t)~0;
177}
178
179/**
180 * logic_pio_trans_hwaddr - translate HW address to logical PIO
181 * @fwnode: FW node reference for the host
182 * @addr: Host-relative HW address
183 * @size: size to translate
184 *
185 * Returns Logical PIO value if successful, ~0UL otherwise
186 */
187unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
188 resource_size_t addr, resource_size_t size)
189{
190 struct logic_pio_hwaddr *range;
191
192 range = find_io_range_by_fwnode(fwnode);
193 if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
194 pr_err("IO range not found or invalid\n");
195 return ~0UL;
196 }
197 if (range->size < size) {
198 pr_err("resource size %pa cannot fit in IO range size %pa\n",
199 &size, &range->size);
200 return ~0UL;
201 }
202 return addr - range->hw_start + range->io_start;
203}
204
205unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
206{
207 struct logic_pio_hwaddr *range;
208
209 rcu_read_lock();
210 list_for_each_entry_rcu(range, &io_range_list, list) {
211 if (range->flags != LOGIC_PIO_CPU_MMIO)
212 continue;
213 if (in_range(addr, range->hw_start, range->size)) {
214 unsigned long cpuaddr;
215
216 cpuaddr = addr - range->hw_start + range->io_start;
217
218 rcu_read_unlock();
219 return cpuaddr;
220 }
221 }
222 rcu_read_unlock();
223
224 pr_err("addr %pa not registered in io_range_list\n", &addr);
225
226 return ~0UL;
227}
228
#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
/*
 * BUILD_LOGIC_IO - generate logic_{in,out,ins,outs}##bw() for one width.
 * @bw:   width suffix (b/w/l)
 * @type: matching data type (u8/u16/u32)
 *
 * Tokens below MMIO_UPPER_LIMIT are serviced as CPU MMIO via
 * read/write/reads/writes on PCI_IOBASE; tokens in
 * [MMIO_UPPER_LIMIT, IO_SPACE_LIMIT) are resolved through io_range_list
 * and dispatched to the owning host's indirect ops.  A token with no
 * range or no ops warns once; reads then return all-ones.  Tokens at or
 * above IO_SPACE_LIMIT are silently ignored (reads return all-ones).
 */
#define BUILD_LOGIC_IO(bw, type) \
type logic_in##bw(unsigned long addr) \
{ \
	type ret = (type)~0; \
 \
	if (addr < MMIO_UPPER_LIMIT) { \
		ret = read##bw(PCI_IOBASE + addr); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry && entry->ops) \
			ret = entry->ops->in(entry->hostdata, \
					addr, sizeof(type)); \
		else \
			WARN_ON_ONCE(1); \
	} \
	return ret; \
} \
 \
void logic_out##bw(type value, unsigned long addr) \
{ \
	if (addr < MMIO_UPPER_LIMIT) { \
		write##bw(value, PCI_IOBASE + addr); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry && entry->ops) \
			entry->ops->out(entry->hostdata, \
					addr, value, sizeof(type)); \
		else \
			WARN_ON_ONCE(1); \
	} \
} \
 \
void logic_ins##bw(unsigned long addr, void *buffer, \
		   unsigned int count) \
{ \
	if (addr < MMIO_UPPER_LIMIT) { \
		reads##bw(PCI_IOBASE + addr, buffer, count); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry && entry->ops) \
			entry->ops->ins(entry->hostdata, \
					addr, buffer, sizeof(type), count); \
		else \
			WARN_ON_ONCE(1); \
	} \
 \
} \
 \
void logic_outs##bw(unsigned long addr, const void *buffer, \
		    unsigned int count) \
{ \
	if (addr < MMIO_UPPER_LIMIT) { \
		writes##bw(PCI_IOBASE + addr, buffer, count); \
	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 \
		if (entry && entry->ops) \
			entry->ops->outs(entry->hostdata, \
					addr, buffer, sizeof(type), count); \
		else \
			WARN_ON_ONCE(1); \
	} \
}

BUILD_LOGIC_IO(b, u8)
EXPORT_SYMBOL(logic_inb);
EXPORT_SYMBOL(logic_insb);
EXPORT_SYMBOL(logic_outb);
EXPORT_SYMBOL(logic_outsb);

BUILD_LOGIC_IO(w, u16)
EXPORT_SYMBOL(logic_inw);
EXPORT_SYMBOL(logic_insw);
EXPORT_SYMBOL(logic_outw);
EXPORT_SYMBOL(logic_outsw);

BUILD_LOGIC_IO(l, u32)
EXPORT_SYMBOL(logic_inl);
EXPORT_SYMBOL(logic_insl);
EXPORT_SYMBOL(logic_outl);
EXPORT_SYMBOL(logic_outsl);

#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */