arch/s390/pci/pci_mmio.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/asm-extable.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>
#include <asm/asm.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc, exception;

	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:	lhi	%[exc],0\n"
		"2:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: CC_CLOBBER_LIST("memory"));
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc, exception;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	llgc	%[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"	aghi	%[src],1\n"
		"	ogr	%[val],%[tmp]\n"
		"	brctg	%[cnt],0b\n"
		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:	lhi	%[exc],0\n"
		"3:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		: [src] "+a" (src), [cnt] "+d" (cnt),
		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:
		: CC_CLOBBER_LIST("memory"));
	*status = ioaddr_len.odd >> 24 & 0xff;

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}

static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) dst,
					    (u64 __force) src, n,
					    ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.address = mmio_addr;
	args.vma = vma;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc, exception;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:	lhi	%[exc],0\n"
		"	jne	4f\n"
		"2:	ahi	%[shift],-8\n"
		"	srlg	%[tmp],%[val],0(%[shift])\n"
		"3:	stc	%[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"	brctg	%[cnt],2b\n"
		/*
		 * Use xr to clear exc and set condition code to zero
		 * to ensure flag output is correct for this branch.
		 */
		"	xr	%[exc],%[exc]\n"
		"4:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [val] "=d" (val),
		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		  [shift] "+d" (shift)
		:
		: CC_CLOBBER_LIST("memory"));

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_io_size((u64 __force) src,
					    (u64 __force) dst, n,
					    ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.vma = vma;
	args.address = mmio_addr;
	ret = follow_pfnmap_start(&args);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
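For readers who do not follow s390 assembler, here is a plain C sketch (not part of the kernel file above; the helper names pack_be() and unpack_be() are invented for the illustration) of the data handling done by __pcistg_mio_inuser() and __pcilg_mio_inuser(): the store path packs 1 to 8 user bytes, most significant first, into the rightmost bytes of a 64-bit value before PCISTG, and the load path unpacks the PCILG result the same way. The SACF address-space switching, uaccess and exception-table machinery are deliberately left out.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Equivalent of the llgc/sllg/ogr/brctg loop: big-endian pack of 1..8 bytes. */
static uint64_t pack_be(const uint8_t *src, size_t len)
{
	uint64_t val = 0;
	size_t i;

	for (i = 0; i < len; i++)
		val = (val << 8) | src[i];
	return val;
}

/* Equivalent of the ahi/srlg/stc/brctg loop: big-endian unpack of 1..8 bytes. */
static void unpack_be(uint8_t *dst, uint64_t val, size_t len)
{
	int shift = len * 8;
	size_t i;

	for (i = 0; i < len; i++) {
		shift -= 8;
		dst[i] = (val >> shift) & 0xff;
	}
}

int main(void)
{
	uint8_t in[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t out[4];
	uint64_t val = pack_be(in, sizeof(in));

	unpack_be(out, val, sizeof(out));
	printf("packed 0x%llx, round trip %s\n", (unsigned long long)val,
	       memcmp(in, out, sizeof(in)) == 0 ? "ok" : "mismatch");
	return 0;
}

The older version of the same file follows; it predates the CC_IPM/CC_OUT flag-output helpers and still resolves the user mapping with follow_pte() instead of follow_pfnmap_start()/follow_pfnmap_end().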
arch/s390/pci/pci_mmio.c (v5.14.15)
// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       sacf 256\n"
		"0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"2:     sacf 768\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: "cc", "memory");
	*status = len >> 24 & 0xff;
	return cc;
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc = -ENXIO;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	asm volatile (
		"       sacf    256\n"
		"0:     llgc    %[tmp],0(%[src])\n"
		"       sllg    %[val],%[val],8\n"
		"       aghi    %[src],1\n"
		"       ogr     %[val],%[tmp]\n"
		"       brctg   %[cnt],0b\n"
		"1:     .insn   rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"3:     sacf    768\n"
		EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		:
		[src] "+a" (src), [cnt] "+d" (cnt),
		[val] "+d" (val), [tmp] "=d" (tmp),
		[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;

	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}

static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) dst,
					       (u64 __force) src, n,
					       ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc = -ENXIO;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	asm volatile (
		"       sacf    256\n"
		"0:     .insn   rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"       ltr     %[cc],%[cc]\n"
		"       jne     4f\n"
		"2:     ahi     %[shift],-8\n"
		"       srlg    %[tmp],%[val],0(%[shift])\n"
		"3:     stc     %[tmp],0(%[dst])\n"
		"       aghi    %[dst],1\n"
		"       brctg   %[cnt],2b\n"
		"4:     sacf    768\n"
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
		:
		[ioaddr_len] "+&d" (ioaddr_len.pair),
		[cc] "+d" (cc), [val] "=d" (val),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		:: "cc", "memory");

	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) src,
					       (u64 __force) dst, n,
					       ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
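Finally, a hedged user-space sketch of how these two syscalls are typically driven: the caller mmap()s a PCI BAR (for example via the device's sysfs resource file or a VFIO mapping) and passes the resulting user virtual address as mmio_addr. Everything below is illustrative rather than taken from the kernel sources: the sysfs path is a placeholder, the fallback syscall numbers 352 and 353 are the ones listed in arch/s390/kernel/syscalls/syscall.tbl, and error handling is minimal; running it requires a real zPCI device, a mappable BAR and sufficient privileges.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fallback definitions in case the toolchain headers lack them (assumption). */
#ifndef __NR_s390_pci_mmio_write
#define __NR_s390_pci_mmio_write 352
#endif
#ifndef __NR_s390_pci_mmio_read
#define __NR_s390_pci_mmio_read 353
#endif

int main(void)
{
	/* Placeholder device; substitute a real zPCI function address. */
	int fd = open("/sys/bus/pci/devices/0000:00:00.0/resource0", O_RDWR);
	uint32_t out = 0x12345678, in = 0;
	void *bar;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The user virtual address of this mapping is what mmio_addr expects. */
	bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Write four bytes at BAR offset 0x10, then read them back. */
	if (syscall(__NR_s390_pci_mmio_write, (unsigned long)bar + 0x10,
		    &out, sizeof(out)))
		perror("s390_pci_mmio_write");
	if (syscall(__NR_s390_pci_mmio_read, (unsigned long)bar + 0x10,
		    &in, sizeof(in)))
		perror("s390_pci_mmio_read");

	printf("read back 0x%x\n", in);
	munmap(bar, 4096);
	close(fd);
	return 0;
}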