v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>
#include <asm/asm.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc, exception;

	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:	lhi	%[exc],0\n"
		"2:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: CC_CLOBBER_LIST("memory"));
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc, exception;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	llgc	%[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"	aghi	%[src],1\n"
		"	ogr	%[val],%[tmp]\n"
		"	brctg	%[cnt],0b\n"
		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:	lhi	%[exc],0\n"
		"3:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		: [src] "+a" (src), [cnt] "+d" (cnt),
		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:
		: CC_CLOBBER_LIST("memory"));
	*status = ioaddr_len.odd >> 24 & 0xff;

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}
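The block comment in __pcistg_mio_inuser() above describes copying 0 < len <= 8 bytes from @src into the right-most bytes of a register before PCISTG stores them to the device. As a minimal plain-C sketch of just that byte-assembly step (not kernel code: it ignores the sacf secondary-space switch, the exception table and PCISTG itself, and the helper name is made up for illustration):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: models only the llgc/sllg/ogr/brctg loop above. */
static uint64_t assemble_rightmost_bytes(const uint8_t *src, size_t len)
{
	uint64_t val = 0;

	/* 0 < len <= 8: shift earlier bytes up, OR the next byte into the bottom */
	for (size_t i = 0; i < len; i++)
		val = (val << 8) | src[i];
	return val;	/* bytes end up right-aligned, first source byte most significant */
}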

static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) dst,
					    (u64 __force) src, n,
					    ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.address = mmio_addr;
	args.vma = vma;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc, exception;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:	lhi	%[exc],0\n"
		"	jne	4f\n"
		"2:	ahi	%[shift],-8\n"
		"	srlg	%[tmp],%[val],0(%[shift])\n"
		"3:	stc	%[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"	brctg	%[cnt],2b\n"
		/*
		 * Use xr to clear exc and set condition code to zero
		 * to ensure flag output is correct for this branch.
		 */
		"	xr	%[exc],%[exc]\n"
		"4:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [val] "=d" (val),
		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		  [shift] "+d" (shift)
		:
		: CC_CLOBBER_LIST("memory"));

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}
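__pcilg_mio_inuser() performs the inverse operation: PCILG leaves up to 8 bytes right-aligned in a register, and the ahi/srlg/stc/brctg loop stores them to the user buffer starting from the most significant of those bytes. A matching plain-C sketch of just that store-back loop (again illustrative only, with a made-up helper name and no fault handling):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical helper: models only the ahi/srlg/stc/brctg loop above. */
static void store_rightmost_bytes(uint8_t *dst, uint64_t val, size_t len)
{
	int shift = (int)(len * 8);

	/* 0 < len <= 8: emit the highest remaining byte first */
	for (size_t i = 0; i < len; i++) {
		shift -= 8;
		dst[i] = (uint8_t)(val >> shift);
	}
}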

static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) src,
					    (u64 __force) dst, n,
					    ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.vma = vma;
	args.address = mmio_addr;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       sacf 256\n"
		"0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"2:     sacf 768\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: "cc", "memory");
	*status = len >> 24 & 0xff;
	return cc;
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc = -ENXIO;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	asm volatile (
		"       sacf    256\n"
		"0:     llgc    %[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"       aghi    %[src],1\n"
		"       ogr     %[val],%[tmp]\n"
		"       brctg   %[cnt],0b\n"
		"1:     .insn   rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"3:     sacf    768\n"
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		:
		[src] "+a" (src), [cnt] "+d" (cnt),
		[val] "+d" (val), [tmp] "=d" (tmp),
		[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;

	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}

static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) dst,
					       (u64 __force) src, n,
					       ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc = -ENXIO;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	asm volatile (
		"       sacf    256\n"
		"0:     .insn   rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"       ltr     %[cc],%[cc]\n"
		"       jne     4f\n"
		"2:     ahi     %[shift],-8\n"
		"       srlg    %[tmp],%[val],0(%[shift])\n"
		"3:     stc     %[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"       brctg   %[cnt],2b\n"
		"4:     sacf    768\n"
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		:
		[ioaddr_len] "+&d" (ioaddr_len.pair),
		[cc] "+d" (cc), [val] "=d" (val),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		:: "cc", "memory");

	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) src,
					       (u64 __force) dst, n,
					       ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
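Both versions of the file expose the same pair of system calls. A minimal user-space sketch of how they might be invoked (illustrative only: the wrapper names are made up, the syscall numbers are assumed to match arch/s390/kernel/syscalls/syscall.tbl and should be verified against your tree, and mmio_addr is assumed to point into an existing mmap() of a PCI BAR resource):

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Assumed numbers from the s390 syscall table; verify for your kernel. */
#ifndef __NR_s390_pci_mmio_write
#define __NR_s390_pci_mmio_write	352
#endif
#ifndef __NR_s390_pci_mmio_read
#define __NR_s390_pci_mmio_read		353
#endif

/* Hypothetical wrappers around the syscalls defined in this file. */
static long pci_mmio_write(void *mmio_addr, const void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_write, (unsigned long)mmio_addr,
		       buf, len);
}

static long pci_mmio_read(void *mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, (unsigned long)mmio_addr,
		       buf, len);
}

Note that both entry points reject a transfer that would cross a page boundary, and on non-MIO systems they require mmio_addr to lie within a writable VM_IO/VM_PFNMAP mapping, so the address must come from a prior mapping of the device resource rather than an arbitrary pointer.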