v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write)
			copied = copy_page_from_iter(page, offset, copy, iter);
		else
			copied = copy_page_to_iter(page, offset, copy, iter);

		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

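/*
 * Assuming 4 KiB pages and 8-byte pointers (a common configuration),
 * this allows 1024 struct page pointers per batch, i.e. up to 4 MiB of
 * the remote address space is processed per pinning loop iteration.
 */
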
/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
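	/*
	 * Example: addr = 0x1003, len = 0x2000 with 4 KiB pages touches
	 * pages 1, 2 and 3: (0x3002 / 0x1000) - (0x1003 / 0x1000) + 1 = 3.
	 */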

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pinned_pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		mmap_read_lock(mm);
		pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
						     flags, process_pages,
						     NULL, &locked);
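		/*
		 * pin_user_pages_remote() may drop the mmap lock while
		 * faulting pages in; if it did, it cleared 'locked' and
		 * the lock must not be released a second time here.
		 */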
		if (locked)
			mmap_read_unlock(mm);
		if (pinned_pages <= 0)
			return -EFAULT;

		bytes = pinned_pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pinned_pages;
		pa += pinned_pages * PAGE_SIZE;

		/* If vm_write is set, the pages need to be made dirty: */
		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
					    vm_write);
	}

	return rc;
}

/* Maximum number of entries for process pages array which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling pin_user_pages_remote()
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/*
		 * For reliability don't try to kmalloc more than
		 * 2 pages worth
		 */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

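	/*
	 * mm_access() applies the same permission check as attaching with
	 * ptrace: the caller must be allowed PTRACE_MODE_ATTACH against the
	 * target task, evaluated with real credentials (_REALCREDS).
	 */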
	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/*
	 * If we have managed to copy any data at all then we return the
	 * number of bytes copied. Otherwise we return the error code.
	 */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? WRITE : READ;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iov_l;
	iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r,
				in_compat_syscall());
	if (IS_ERR(iov_r)) {
		rc = PTR_ERR(iov_r);
		goto free_iov_l;
	}
	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
	if (iov_r != iovstack_r)
		kfree(iov_r);
free_iov_l:
	kfree(iov_l);
	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
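
For reference, here is a minimal userspace sketch (not part of the kernel
source) of driving the process_vm_readv() syscall defined above, assuming
glibc's wrapper from <sys/uio.h>. The pid and remote address are
placeholders that would normally be communicated by the target process,
and the caller needs ptrace-attach permission on the target, as enforced
by mm_access() above.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
	char buf[128];
	struct iovec local = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct iovec remote = {
		.iov_base = (void *)0x7f0000000000UL, /* placeholder address */
		.iov_len = sizeof(buf),
	};
	/* flags must be 0; returns bytes copied, or -1 with errno set */
	ssize_t n = process_vm_readv(1234 /* placeholder pid */,
				     &local, 1, &remote, 1, 0);

	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("read %zd bytes from the target\n", n);
	return 0;
}
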
v3.5.6
 
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @task: task to read/write from
 * @mm: mm for task
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @pa: address of page in task to start copying from/to
 * @start_offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @lvec: iovec array specifying where to copy to/from
 * @lvec_cnt: number of elements in iovec array
 * @lvec_current: index in iovec array we are up to
 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
 * @vm_write: 0 means copy from, 1 means copy to
 * @nr_pages_to_copy: number of pages to copy
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct task_struct *task,
			       struct mm_struct *mm,
			       struct page **process_pages,
			       unsigned long pa,
			       unsigned long start_offset,
			       unsigned long len,
			       const struct iovec *lvec,
			       unsigned long lvec_cnt,
			       unsigned long *lvec_current,
			       size_t *lvec_offset,
			       int vm_write,
			       unsigned int nr_pages_to_copy,
			       ssize_t *bytes_copied)
{
	int pages_pinned;
	void *target_kaddr;
	int pgs_copied = 0;
	int j;
	int ret;
	ssize_t bytes_to_copy;
	ssize_t rc = 0;

	*bytes_copied = 0;

	/* Get the pages we're interested in */
	down_read(&mm->mmap_sem);
	pages_pinned = get_user_pages(task, mm, pa,
				      nr_pages_to_copy,
				      vm_write, 0, process_pages, NULL);
	up_read(&mm->mmap_sem);

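	/*
	 * Every requested page must have been pinned in one call; a
	 * partial pin is treated as failure, and whatever was pinned
	 * is released at the 'end:' label below.
	 */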
	if (pages_pinned != nr_pages_to_copy) {
		rc = -EFAULT;
		goto end;
	}

	/* Do the copy for each page */
	for (pgs_copied = 0;
	     (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
	     pgs_copied++) {
		/* Make sure we have a non zero length iovec */
		while (*lvec_current < lvec_cnt
		       && lvec[*lvec_current].iov_len == 0)
			(*lvec_current)++;
		if (*lvec_current == lvec_cnt)
			break;

		/*
		 * Will copy smallest of:
		 * - bytes remaining in page
		 * - bytes remaining in destination iovec
		 */
		bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
				      len - *bytes_copied);
		bytes_to_copy = min_t(ssize_t, bytes_to_copy,
				      lvec[*lvec_current].iov_len
				      - *lvec_offset);

		target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;

		if (vm_write)
			ret = copy_from_user(target_kaddr,
					     lvec[*lvec_current].iov_base
					     + *lvec_offset,
					     bytes_to_copy);
		else
			ret = copy_to_user(lvec[*lvec_current].iov_base
					   + *lvec_offset,
					   target_kaddr, bytes_to_copy);
		kunmap(process_pages[pgs_copied]);
		if (ret) {
			*bytes_copied += bytes_to_copy - ret;
			pgs_copied++;
			rc = -EFAULT;
			goto end;
		}
		*bytes_copied += bytes_to_copy;
		*lvec_offset += bytes_to_copy;
		if (*lvec_offset == lvec[*lvec_current].iov_len) {
			/*
			 * Need to copy remaining part of page into the
			 * next iovec if there are any bytes left in page
			 */
			(*lvec_current)++;
			*lvec_offset = 0;
			start_offset = (start_offset + bytes_to_copy)
				% PAGE_SIZE;
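			/*
			 * Decrementing pgs_copied keeps us on the same
			 * pinned page next iteration, so its remaining
			 * bytes go into the next iovec.
			 */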
			if (start_offset)
				pgs_copied--;
		} else {
			start_offset = 0;
		}
	}

end:
	if (vm_write) {
		for (j = 0; j < pages_pinned; j++) {
			if (j < pgs_copied)
				set_page_dirty_lock(process_pages[j]);
			put_page(process_pages[j]);
		}
	} else {
		for (j = 0; j < pages_pinned; j++)
			put_page(process_pages[j]);
	}

	return rc;
}

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @lvec: iovec array specifying where to copy to/from locally
 * @lvec_cnt: number of elements in iovec array
 * @lvec_current: index in iovec array we are up to
 * @lvec_offset: offset in bytes from current iovec iov_base we are up to
 * @process_pages: struct pages area that can store at least
 *  nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * @bytes_copied: returns number of bytes successfully copied
 * Returns 0 on success, error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    const struct iovec *lvec,
				    unsigned long lvec_cnt,
				    unsigned long *lvec_current,
				    size_t *lvec_offset,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write,
				    ssize_t *bytes_copied)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t bytes_copied_loop;
	ssize_t rc = 0;
	unsigned long nr_pages_copied = 0;
	unsigned long nr_pages_to_copy;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);

	*bytes_copied = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;

	while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
		nr_pages_to_copy = min(nr_pages - nr_pages_copied,
				       max_pages_per_loop);

		rc = process_vm_rw_pages(task, mm, process_pages, pa,
					 start_offset, len,
					 lvec, lvec_cnt,
					 lvec_current, lvec_offset,
					 vm_write, nr_pages_to_copy,
					 &bytes_copied_loop);
		start_offset = 0;
		*bytes_copied += bytes_copied_loop;

		if (rc < 0) {
			return rc;
		} else {
			len -= bytes_copied_loop;
			nr_pages_copied += nr_pages_to_copy;
			pa += nr_pages_to_copy * PAGE_SIZE;
		}
	}

	return rc;
}

/* Maximum number of entries for process pages array which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
				  unsigned long liovcnt,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	ssize_t bytes_copied_loop;
	ssize_t bytes_copied = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	unsigned long iov_l_curr_idx = 0;
	size_t iov_l_curr_offset = 0;
	ssize_t iov_len;

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling get_user_pages
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/*
		 * For reliability don't try to kmalloc more than
		 * 2 pages worth
		 */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	rcu_read_lock();
	task = find_task_by_vpid(pid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
			process_pages, mm, task, vm_write, &bytes_copied_loop);
		bytes_copied += bytes_copied_loop;
		if (rc != 0) {
			/*
			 * If we have managed to copy any data at all then
			 * we return the number of bytes copied. Otherwise
			 * we return the error code.
			 */
			if (bytes_copied)
				rc = bytes_copied;
			goto put_mm;
		}
	}

	rc = bytes_copied;
put_mm:
	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 * Returns the number of bytes read/written or error code. May
 *  return fewer bytes than expected if an error occurs during the copying
 *  process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	ssize_t rc;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	if (vm_write)
		rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	else
		rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
					   iovstack_l, &iov_l);
	if (rc <= 0)
		goto free_iovecs;

	rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
				   iovstack_r, &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
				vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}

#ifdef CONFIG_COMPAT

asmlinkage ssize_t
compat_process_vm_rw(compat_pid_t pid,
		     const struct compat_iovec __user *lvec,
		     unsigned long liovcnt,
		     const struct compat_iovec __user *rvec,
		     unsigned long riovcnt,
		     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r = iovstack_r;
	ssize_t rc = -EFAULT;

	if (flags != 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, lvec, liovcnt * sizeof(*lvec)))
		goto out;

	if (!access_ok(VERIFY_READ, rvec, riovcnt * sizeof(*rvec)))
		goto out;

	if (vm_write)
		rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	else
		rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
						  UIO_FASTIOV, iovstack_l,
						  &iov_l);
	if (rc <= 0)
		goto free_iovecs;
	rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
					  UIO_FASTIOV, iovstack_r,
					  &iov_r);
	if (rc <= 0)
		goto free_iovecs;

	rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
				vm_write);

free_iovecs:
	if (iov_r != iovstack_r)
		kfree(iov_r);
	if (iov_l != iovstack_l)
		kfree(iov_l);

out:
	return rc;
}

asmlinkage ssize_t
compat_sys_process_vm_readv(compat_pid_t pid,
			    const struct compat_iovec __user *lvec,
			    unsigned long liovcnt,
			    const struct compat_iovec __user *rvec,
			    unsigned long riovcnt,
			    unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 0);
}

asmlinkage ssize_t
compat_sys_process_vm_writev(compat_pid_t pid,
			     const struct compat_iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct compat_iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags)
{
	return compat_process_vm_rw(pid, lvec, liovcnt, rvec,
				    riovcnt, flags, 1);
}

#endif