1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Access to PCI I/O memory from user space programs.
4 *
5 * Copyright IBM Corp. 2014
6 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
7 */
8#include <linux/kernel.h>
9#include <linux/syscalls.h>
10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/errno.h>
13#include <linux/pci.h>
14#include <asm/asm-extable.h>
15#include <asm/pci_io.h>
16#include <asm/pci_debug.h>
17#include <asm/asm.h>
18
19static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
20{
21 struct {
22 u64 offset;
23 u8 cc;
24 u8 status;
25 } data = {offset, cc, status};
26
27 zpci_err_hex(&data, sizeof(data));
28}
29
/*
 * Store a block of @len bytes from user memory at @src to PCI MMIO at
 * @ioaddr using the PCISTB instruction.
 *
 * sacf 256 switches to secondary address space mode for the duration of
 * the instruction so that @src is resolved through the user mappings;
 * sacf 768 switches back.
 *
 * @exc starts at 1 and is cleared by the lhi at label 1 only if pcistb
 * completed without faulting; any fault branches via the exception
 * table straight to label 2, leaving @exc set.
 *
 * *@status is set from the returned length register (meaningful only
 * when the instruction completed).  Returns -ENXIO when an exception
 * occurred, otherwise the transformed condition code of pcistb.
 */
static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc, exception;

	exception = 1;
	asm volatile (
		" sacf 256\n"
		"0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1: lhi %[exc],0\n"
		"2: sacf 768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: CC_CLOBBER_LIST("memory"));
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}
50
/*
 * Write 0 < @ulen <= 8 bytes from user memory at @src to PCI MMIO at
 * @ioaddr using the PCISTG instruction.
 *
 * The bytes are first gathered one at a time (llgc/sllg/ogr loop) into
 * the rightmost bytes of @val while in secondary address space mode, so
 * the loads use the user mappings; pcistg then stores @val.  @exc is
 * cleared only if both pcistg and the following lhi executed; a fault
 * anywhere branches to label 3 with @exc still set, yielding -ENXIO.
 *
 * *@status is set from the odd half of the register pair on return.
 * Returns -ENXIO on exception, -EFAULT if the gather loop did not
 * consume all @ulen bytes (cnt != 0), otherwise the transformed
 * condition code of pcistg.
 */
static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	/* pcistg takes its address/length operand as an even/odd register pair */
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc, exception;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	exception = 1;
	asm volatile (
		" sacf 256\n"
		"0: llgc %[tmp],0(%[src])\n"
		"4: sllg %[val],%[val],8\n"
		" aghi %[src],1\n"
		" ogr %[val],%[tmp]\n"
		" brctg %[cnt],0b\n"
		"1: .insn rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2: lhi %[exc],0\n"
		"3: sacf 768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		: [src] "+a" (src), [cnt] "+d" (cnt),
		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:
		: CC_CLOBBER_LIST("memory"));
	*status = ioaddr_len.odd >> 24 & 0xff;

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}
93
94static inline int __memcpy_toio_inuser(void __iomem *dst,
95 const void __user *src, size_t n)
96{
97 int size, rc = 0;
98 u8 status = 0;
99
100 if (!src)
101 return -EINVAL;
102
103 while (n > 0) {
104 size = zpci_get_max_io_size((u64 __force) dst,
105 (u64 __force) src, n,
106 ZPCI_MAX_WRITE_SIZE);
107 if (size > 8) /* main path */
108 rc = __pcistb_mio_inuser(dst, src, size, &status);
109 else
110 rc = __pcistg_mio_inuser(dst, src, size, &status);
111 if (rc)
112 break;
113 src += size;
114 dst += size;
115 n -= size;
116 }
117 if (rc)
118 zpci_err_mmio(rc, status, (__force u64) dst);
119 return rc;
120}
121
122SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
123 const void __user *, user_buffer, size_t, length)
124{
125 struct follow_pfnmap_args args = { };
126 u8 local_buf[64];
127 void __iomem *io_addr;
128 void *buf;
129 struct vm_area_struct *vma;
130 long ret;
131
132 if (!zpci_is_enabled())
133 return -ENODEV;
134
135 if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
136 return -EINVAL;
137
138 /*
139 * We only support write access to MIO capable devices if we are on
140 * a MIO enabled system. Otherwise we would have to check for every
141 * address if it is a special ZPCI_ADDR and would have to do
142 * a pfn lookup which we don't need for MIO capable devices. Currently
143 * ISM devices are the only devices without MIO support and there is no
144 * known need for accessing these from userspace.
145 */
146 if (static_branch_likely(&have_mio)) {
147 ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
148 user_buffer,
149 length);
150 return ret;
151 }
152
153 if (length > 64) {
154 buf = kmalloc(length, GFP_KERNEL);
155 if (!buf)
156 return -ENOMEM;
157 } else
158 buf = local_buf;
159
160 ret = -EFAULT;
161 if (copy_from_user(buf, user_buffer, length))
162 goto out_free;
163
164 mmap_read_lock(current->mm);
165 ret = -EINVAL;
166 vma = vma_lookup(current->mm, mmio_addr);
167 if (!vma)
168 goto out_unlock_mmap;
169 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
170 goto out_unlock_mmap;
171 ret = -EACCES;
172 if (!(vma->vm_flags & VM_WRITE))
173 goto out_unlock_mmap;
174
175 args.address = mmio_addr;
176 args.vma = vma;
177 ret = follow_pfnmap_start(&args);
178 if (ret)
179 goto out_unlock_mmap;
180
181 io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
182 (mmio_addr & ~PAGE_MASK));
183
184 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
185 goto out_unlock_pt;
186
187 ret = zpci_memcpy_toio(io_addr, buf, length);
188out_unlock_pt:
189 follow_pfnmap_end(&args);
190out_unlock_mmap:
191 mmap_read_unlock(current->mm);
192out_free:
193 if (buf != local_buf)
194 kfree(buf);
195 return ret;
196}
197
/*
 * Read 0 < @ulen <= 8 bytes from PCI MMIO at @ioaddr using the PCILG
 * instruction, then scatter them byte by byte (srlg/stc loop) to the
 * user buffer @dst while in secondary address space mode, so the stores
 * use the user mappings.
 *
 * On a non-zero condition code from pcilg the store loop is skipped
 * (jne 4f) with @exc already cleared by the preceding lhi.  A fault in
 * the store loop branches to label 4 with @exc still set from the xr
 * not having executed... no: @exc was cleared at label 1; faults in the
 * loop are detected via @cnt below instead.
 *
 * *@status is set from the odd half of the register pair.  Returns
 * -ENXIO if pcilg itself faulted, -EFAULT if not all bytes reached user
 * space (cnt != 0), otherwise the transformed condition code.
 *
 * NOTE(review): after a fault at label 3/5, @exc is 0 (cleared at
 * label 1), so the -EFAULT path via @cnt is what reports the error —
 * confirm this matches the intended semantics.
 */
static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	/* pcilg takes its address/length operand as an even/odd register pair */
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	/* number of bits still to extract from val, consumed 8 at a time */
	int shift = ulen * 8;
	int cc, exception;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	exception = 1;
	asm volatile (
		" sacf 256\n"
		"0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1: lhi %[exc],0\n"
		" jne 4f\n"
		"2: ahi %[shift],-8\n"
		" srlg %[tmp],%[val],0(%[shift])\n"
		"3: stc %[tmp],0(%[dst])\n"
		"5: aghi %[dst],1\n"
		" brctg %[cnt],2b\n"
		/*
		 * Use xr to clear exc and set condition code to zero
		 * to ensure flag output is correct for this branch.
		 */
		" xr %[exc],%[exc]\n"
		"4: sacf 768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [val] "=d" (val),
		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		  [shift] "+d" (shift)
		:
		: CC_CLOBBER_LIST("memory"));

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}
247
248static inline int __memcpy_fromio_inuser(void __user *dst,
249 const void __iomem *src,
250 unsigned long n)
251{
252 int size, rc = 0;
253 u8 status;
254
255 while (n > 0) {
256 size = zpci_get_max_io_size((u64 __force) src,
257 (u64 __force) dst, n,
258 ZPCI_MAX_READ_SIZE);
259 rc = __pcilg_mio_inuser(dst, src, size, &status);
260 if (rc)
261 break;
262 src += size;
263 dst += size;
264 n -= size;
265 }
266 if (rc)
267 zpci_err_mmio(rc, status, (__force u64) dst);
268 return rc;
269}
270
/*
 * Read @length bytes from the PCI MMIO address @mmio_addr (a user
 * mapping of a PCI BAR, not crossing a page boundary) into
 * @user_buffer.
 *
 * On MIO-enabled systems the data is loaded directly into user memory
 * with PCI load instructions.  Otherwise the mapping is resolved to its
 * pfn under the mmap lock, read into a kernel bounce buffer via
 * zpci_memcpy_fromio(), and copied out after the locks are dropped.
 */
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	/* reject zero length and accesses crossing a page boundary */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	/* small reads use the on-stack buffer, larger ones allocate */
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	/*
	 * NOTE(review): a writable mapping is required even on the read
	 * path, mirroring s390_pci_mmio_write() — presumably intentional
	 * to restrict access; confirm against upstream history.
	 */
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.vma = vma;
	args.address = mmio_addr;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	/* only addresses inside the zPCI iomap window are legitimate */
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	/* copy out only after all locks are dropped */
	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Access to PCI I/O memory from user space programs.
4 *
5 * Copyright IBM Corp. 2014
6 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
7 */
8#include <linux/kernel.h>
9#include <linux/syscalls.h>
10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/errno.h>
13#include <linux/pci.h>
14#include <asm/pci_io.h>
15#include <asm/pci_debug.h>
16
/* Record a failed user-space MMIO access in the zPCI debug trace. */
static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}
27
/*
 * Store a block of @len bytes from user memory at @src to PCI MMIO at
 * @ioaddr using pcistb, in secondary address space mode (sacf 256) so
 * @src resolves through the user mappings.
 *
 * @cc starts as -ENXIO and is overwritten by the ipm/srl sequence only
 * if pcistb did not fault; a fault branches via the exception table to
 * label 2, so -ENXIO is returned.  *@status is taken from the returned
 * length register.
 */
static inline int __pcistb_mio_inuser(
	void __iomem *ioaddr, const void __user *src,
	u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		" sacf 256\n"
		"0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1: ipm %[cc]\n"
		" srl %[cc],28\n"
		"2: sacf 768\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: "cc", "memory");
	*status = len >> 24 & 0xff;
	return cc;
}
47
/*
 * Write 0 < @ulen <= 8 bytes from user memory at @src to PCI MMIO at
 * @ioaddr using pcistg.  The bytes are first gathered into the
 * rightmost bytes of @val while in secondary address space mode.
 *
 * @addr/@len are pinned to registers 2 and 3 — presumably to form the
 * even/odd register pair the instruction encoding expects (confirm
 * against the Principles of Operation).
 *
 * Returns -ENXIO if a fault skipped the ipm/srl (cc never overwritten),
 * -EFAULT if the gather loop did not consume all bytes (cnt != 0),
 * otherwise the pcistg condition code.  *@status comes from @len.
 */
static inline int __pcistg_mio_inuser(
	void __iomem *ioaddr, const void __user *src,
	u64 ulen, u8 *status)
{
	register u64 addr asm("2") = (u64 __force) ioaddr;
	register u64 len asm("3") = ulen;
	int cc = -ENXIO;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	asm volatile (
		" sacf 256\n"
		"0: llgc %[tmp],0(%[src])\n"
		" sllg %[val],%[val],8\n"
		" aghi %[src],1\n"
		" ogr %[val],%[tmp]\n"
		" brctg %[cnt],0b\n"
		"1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n"
		"2: ipm %[cc]\n"
		" srl %[cc],28\n"
		"3: sacf 768\n"
		EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		:
		[src] "+a" (src), [cnt] "+d" (cnt),
		[val] "+d" (val), [tmp] "=d" (tmp),
		[len] "+d" (len), [cc] "+d" (cc),
		[ioaddr] "+a" (addr)
		:: "cc", "memory");
	*status = len >> 24 & 0xff;

	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}
90
/*
 * Copy @n bytes from the user buffer @src to PCI MMIO at @dst, chunk by
 * chunk, with secondary-space uaccess enabled for the whole loop via
 * enable_sacf_uaccess()/disable_sacf_uaccess().  Chunks larger than
 * 8 bytes go through pcistb, smaller ones through pcistg.  Errors are
 * recorded in the zPCI debug trace.
 */
static inline int __memcpy_toio_inuser(void __iomem *dst,
				       const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;
	mm_segment_t old_fs;

	if (!src)
		return -EINVAL;

	old_fs = enable_sacf_uaccess();
	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) dst,
					       (u64 __force) src, n,
					       ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	disable_sacf_uaccess(old_fs);
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}
121
/*
 * Resolve the pfn backing @user_addr in the current process, after
 * checking that the containing VMA grants @access (VM_READ/VM_WRITE).
 * Returns 0 with the pfn stored in *@pfn, or a negative error code.
 *
 * NOTE(review): the mmap lock is released before the caller uses *pfn,
 * so nothing prevents the mapping from being torn down and the pfn
 * reused in the meantime — verify callers tolerate this (follow_pfn()
 * is problematic for exactly this reason).
 */
static long get_pfn(unsigned long user_addr, unsigned long access,
		    unsigned long *pfn)
{
	struct vm_area_struct *vma;
	long ret;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = find_vma(current->mm, user_addr);
	if (!vma)
		goto out;
	ret = -EACCES;
	if (!(vma->vm_flags & access))
		goto out;
	ret = follow_pfn(vma, user_addr, pfn);
out:
	mmap_read_unlock(current->mm);
	return ret;
}
141
/*
 * Write @length bytes from @user_buffer to the PCI MMIO address
 * @mmio_addr (a user mapping of a PCI BAR, not crossing a page
 * boundary).  On MIO-enabled systems the store goes directly from user
 * memory; otherwise the mapping is resolved with get_pfn() and the data
 * is bounced through a kernel buffer into the ZPCI_ADDR window.
 */
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	unsigned long pfn;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	/* reject zero length and accesses crossing a page boundary */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a get_pfn() which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	/* small writes use the on-stack buffer, larger ones allocate */
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	/* io_addr is built from the pfn after get_pfn() dropped the mmap lock */
	ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
	if (ret)
		goto out;
	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	ret = -EFAULT;
	/* only addresses inside the zPCI iomap window are legitimate */
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out;

	if (copy_from_user(buf, user_buffer, length))
		goto out;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}
198
/*
 * Read 0 < @ulen <= 8 bytes from PCI MMIO at @ioaddr using pcilg, then
 * scatter them byte by byte (srlg/stc loop) to the user buffer @dst
 * while in secondary address space mode.
 *
 * @addr/@len are pinned to registers 2 and 3 — presumably the even/odd
 * register pair expected by the instruction encoding (confirm against
 * the Principles of Operation).  The ltr/jne skips the store loop when
 * pcilg returned a non-zero condition code.
 *
 * Returns -ENXIO if a fault skipped the ipm/srl (cc never overwritten),
 * -EFAULT if not all bytes reached user space (cnt != 0), otherwise the
 * pcilg condition code.  *@status comes from @len.
 */
static inline int __pcilg_mio_inuser(
	void __user *dst, const void __iomem *ioaddr,
	u64 ulen, u8 *status)
{
	register u64 addr asm("2") = (u64 __force) ioaddr;
	register u64 len asm("3") = ulen;
	u64 cnt = ulen;
	/* number of bits still to extract from val, consumed 8 at a time */
	int shift = ulen * 8;
	int cc = -ENXIO;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	asm volatile (
		" sacf 256\n"
		"0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n"
		"1: ipm %[cc]\n"
		" srl %[cc],28\n"
		" ltr %[cc],%[cc]\n"
		" jne 4f\n"
		"2: ahi %[shift],-8\n"
		" srlg %[tmp],%[val],0(%[shift])\n"
		"3: stc %[tmp],0(%[dst])\n"
		" aghi %[dst],1\n"
		" brctg %[cnt],2b\n"
		"4: sacf 768\n"
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
		:
		[cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		:
		[ioaddr] "a" (addr)
		: "cc", "memory");

	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = len >> 24 & 0xff;
	return cc;
}
244
/*
 * Copy @n bytes from PCI MMIO at @src to the user buffer @dst in chunks
 * of at most ZPCI_MAX_READ_SIZE using pcilg, with secondary-space
 * uaccess enabled for the whole loop.  Errors are recorded in the zPCI
 * debug trace.
 *
 * NOTE(review): the chunking uses zpci_get_max_write_size() on the read
 * path — presumably the size computation is direction-agnostic; confirm.
 */
static inline int __memcpy_fromio_inuser(void __user *dst,
					 const void __iomem *src,
					 unsigned long n)
{
	int size, rc = 0;
	u8 status;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) src,
					       (u64 __force) dst, n,
					       ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	disable_sacf_uaccess(old_fs);
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}
270
/*
 * Read @length bytes from the PCI MMIO address @mmio_addr (a user
 * mapping of a PCI BAR, not crossing a page boundary) into
 * @user_buffer.  On MIO-enabled systems the load goes directly into
 * user memory; otherwise the mapping is resolved with get_pfn() and the
 * data is bounced through a kernel buffer.
 */
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	unsigned long pfn;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	/* reject zero length and accesses crossing a page boundary */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a get_pfn() which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
			user_buffer, (const void __iomem *)mmio_addr,
			length);
		return ret;
	}

	/* small reads use the on-stack buffer, larger ones allocate */
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	/* io_addr is built from the pfn after get_pfn() dropped the mmap lock */
	ret = get_pfn(mmio_addr, VM_READ, &pfn);
	if (ret)
		goto out;
	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));

	/* only addresses inside the zPCI iomap window are legitimate */
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);
	if (ret)
		goto out;
	if (copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

out:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}