1// SPDX-License-Identifier: GPL-2.0-only
2#include <crypto/hash.h>
3#include <linux/export.h>
4#include <linux/bvec.h>
5#include <linux/fault-inject-usercopy.h>
6#include <linux/uio.h>
7#include <linux/pagemap.h>
8#include <linux/highmem.h>
9#include <linux/slab.h>
10#include <linux/vmalloc.h>
11#include <linux/splice.h>
12#include <linux/compat.h>
13#include <net/checksum.h>
14#include <linux/scatterlist.h>
15#include <linux/instrumented.h>
16
17#define PIPE_PARANOIA /* for now */
18
19/* covers ubuf and kbuf alike */
20#define iterate_buf(i, n, base, len, off, __p, STEP) { \
21 size_t __maybe_unused off = 0; \
22 len = n; \
23 base = __p + i->iov_offset; \
24 len -= (STEP); \
25 i->iov_offset += len; \
26 n = len; \
27}
28
29/* covers iovec and kvec alike */
30#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
31 size_t off = 0; \
32 size_t skip = i->iov_offset; \
33 do { \
34 len = min(n, __p->iov_len - skip); \
35 if (likely(len)) { \
36 base = __p->iov_base + skip; \
37 len -= (STEP); \
38 off += len; \
39 skip += len; \
40 n -= len; \
41 if (skip < __p->iov_len) \
42 break; \
43 } \
44 __p++; \
45 skip = 0; \
46 } while (n); \
47 i->iov_offset = skip; \
48 n = off; \
49}
50
51#define iterate_bvec(i, n, base, len, off, p, STEP) { \
52 size_t off = 0; \
53 unsigned skip = i->iov_offset; \
54 while (n) { \
55 unsigned offset = p->bv_offset + skip; \
56 unsigned left; \
57 void *kaddr = kmap_local_page(p->bv_page + \
58 offset / PAGE_SIZE); \
59 base = kaddr + offset % PAGE_SIZE; \
60 len = min(min(n, (size_t)(p->bv_len - skip)), \
61 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
62 left = (STEP); \
63 kunmap_local(kaddr); \
64 len -= left; \
65 off += len; \
66 skip += len; \
67 if (skip == p->bv_len) { \
68 skip = 0; \
69 p++; \
70 } \
71 n -= len; \
72 if (left) \
73 break; \
74 } \
75 i->iov_offset = skip; \
76 n = off; \
77}
78
79#define iterate_xarray(i, n, base, len, __off, STEP) { \
80 __label__ __out; \
81 size_t __off = 0; \
82 struct folio *folio; \
83 loff_t start = i->xarray_start + i->iov_offset; \
84 pgoff_t index = start / PAGE_SIZE; \
85 XA_STATE(xas, i->xarray, index); \
86 \
87 len = PAGE_SIZE - offset_in_page(start); \
88 rcu_read_lock(); \
89 xas_for_each(&xas, folio, ULONG_MAX) { \
90 unsigned left; \
91 size_t offset; \
92 if (xas_retry(&xas, folio)) \
93 continue; \
94 if (WARN_ON(xa_is_value(folio))) \
95 break; \
96 if (WARN_ON(folio_test_hugetlb(folio))) \
97 break; \
98 offset = offset_in_folio(folio, start + __off); \
99 while (offset < folio_size(folio)) { \
100 base = kmap_local_folio(folio, offset); \
101 len = min(n, len); \
102 left = (STEP); \
103 kunmap_local(base); \
104 len -= left; \
105 __off += len; \
106 n -= len; \
107 if (left || n == 0) \
108 goto __out; \
109 offset += len; \
110 len = PAGE_SIZE; \
111 } \
112 } \
113__out: \
114 rcu_read_unlock(); \
115 i->iov_offset += __off; \
116 n = __off; \
117}
118
119#define __iterate_and_advance(i, n, base, len, off, I, K) { \
120 if (unlikely(i->count < n)) \
121 n = i->count; \
122 if (likely(n)) { \
123 if (likely(iter_is_ubuf(i))) { \
124 void __user *base; \
125 size_t len; \
126 iterate_buf(i, n, base, len, off, \
127 i->ubuf, (I)) \
128 } else if (likely(iter_is_iovec(i))) { \
129 const struct iovec *iov = i->iov; \
130 void __user *base; \
131 size_t len; \
132 iterate_iovec(i, n, base, len, off, \
133 iov, (I)) \
134 i->nr_segs -= iov - i->iov; \
135 i->iov = iov; \
136 } else if (iov_iter_is_bvec(i)) { \
137 const struct bio_vec *bvec = i->bvec; \
138 void *base; \
139 size_t len; \
140 iterate_bvec(i, n, base, len, off, \
141 bvec, (K)) \
142 i->nr_segs -= bvec - i->bvec; \
143 i->bvec = bvec; \
144 } else if (iov_iter_is_kvec(i)) { \
145 const struct kvec *kvec = i->kvec; \
146 void *base; \
147 size_t len; \
148 iterate_iovec(i, n, base, len, off, \
149 kvec, (K)) \
150 i->nr_segs -= kvec - i->kvec; \
151 i->kvec = kvec; \
152 } else if (iov_iter_is_xarray(i)) { \
153 void *base; \
154 size_t len; \
155 iterate_xarray(i, n, base, len, off, \
156 (K)) \
157 } \
158 i->count -= n; \
159 } \
160}
161#define iterate_and_advance(i, n, base, len, off, I, K) \
162 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
163
164static int copyout(void __user *to, const void *from, size_t n)
165{
166 if (should_fail_usercopy())
167 return n;
168 if (access_ok(to, n)) {
169 instrument_copy_to_user(to, from, n);
170 n = raw_copy_to_user(to, from, n);
171 }
172 return n;
173}
174
175static int copyin(void *to, const void __user *from, size_t n)
176{
177 size_t res = n;
178
179 if (should_fail_usercopy())
180 return n;
181 if (access_ok(from, n)) {
182 instrument_copy_from_user_before(to, from, n);
183 res = raw_copy_from_user(to, from, n);
184 instrument_copy_from_user_after(to, from, n, res);
185 }
186 return res;
187}
188
189static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
190 unsigned int slot)
191{
192 return &pipe->bufs[slot & (pipe->ring_size - 1)];
193}
194
195#ifdef PIPE_PARANOIA
196static bool sanity(const struct iov_iter *i)
197{
198 struct pipe_inode_info *pipe = i->pipe;
199 unsigned int p_head = pipe->head;
200 unsigned int p_tail = pipe->tail;
201 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
202 unsigned int i_head = i->head;
203 unsigned int idx;
204
205 if (i->last_offset) {
206 struct pipe_buffer *p;
207 if (unlikely(p_occupancy == 0))
208 goto Bad; // pipe must be non-empty
209 if (unlikely(i_head != p_head - 1))
210 goto Bad; // must be at the last buffer...
211
212 p = pipe_buf(pipe, i_head);
213 if (unlikely(p->offset + p->len != abs(i->last_offset)))
214 goto Bad; // ... at the end of segment
215 } else {
216 if (i_head != p_head)
217 goto Bad; // must be right after the last buffer
218 }
219 return true;
220Bad:
221 printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
222 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
223 p_head, p_tail, pipe->ring_size);
224 for (idx = 0; idx < pipe->ring_size; idx++)
225 printk(KERN_ERR "[%p %p %d %d]\n",
226 pipe->bufs[idx].ops,
227 pipe->bufs[idx].page,
228 pipe->bufs[idx].offset,
229 pipe->bufs[idx].len);
230 WARN_ON(1);
231 return false;
232}
233#else
234#define sanity(i) true
235#endif
236
237static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
238{
239 struct page *page = alloc_page(GFP_USER);
240 if (page) {
241 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
242 *buf = (struct pipe_buffer) {
243 .ops = &default_pipe_buf_ops,
244 .page = page,
245 .offset = 0,
246 .len = size
247 };
248 }
249 return page;
250}
251
252static void push_page(struct pipe_inode_info *pipe, struct page *page,
253 unsigned int offset, unsigned int size)
254{
255 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
256 *buf = (struct pipe_buffer) {
257 .ops = &page_cache_pipe_buf_ops,
258 .page = page,
259 .offset = offset,
260 .len = size
261 };
262 get_page(page);
263}
264
265static inline int last_offset(const struct pipe_buffer *buf)
266{
267 if (buf->ops == &default_pipe_buf_ops)
268 return buf->len; // buf->offset is 0 for those
269 else
270 return -(buf->offset + buf->len);
271}
272
273static struct page *append_pipe(struct iov_iter *i, size_t size,
274 unsigned int *off)
275{
276 struct pipe_inode_info *pipe = i->pipe;
277 int offset = i->last_offset;
278 struct pipe_buffer *buf;
279 struct page *page;
280
281 if (offset > 0 && offset < PAGE_SIZE) {
282 // some space in the last buffer; add to it
283 buf = pipe_buf(pipe, pipe->head - 1);
284 size = min_t(size_t, size, PAGE_SIZE - offset);
285 buf->len += size;
286 i->last_offset += size;
287 i->count -= size;
288 *off = offset;
289 return buf->page;
290 }
291 // OK, we need a new buffer
292 *off = 0;
293 size = min_t(size_t, size, PAGE_SIZE);
294 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
295 return NULL;
296 page = push_anon(pipe, size);
297 if (!page)
298 return NULL;
299 i->head = pipe->head - 1;
300 i->last_offset = size;
301 i->count -= size;
302 return page;
303}
304
305static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
306 struct iov_iter *i)
307{
308 struct pipe_inode_info *pipe = i->pipe;
309 unsigned int head = pipe->head;
310
311 if (unlikely(bytes > i->count))
312 bytes = i->count;
313
314 if (unlikely(!bytes))
315 return 0;
316
317 if (!sanity(i))
318 return 0;
319
320 if (offset && i->last_offset == -offset) { // could we merge it?
321 struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
322 if (buf->page == page) {
323 buf->len += bytes;
324 i->last_offset -= bytes;
325 i->count -= bytes;
326 return bytes;
327 }
328 }
329 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
330 return 0;
331
332 push_page(pipe, page, offset, bytes);
333 i->last_offset = -(offset + bytes);
334 i->head = head;
335 i->count -= bytes;
336 return bytes;
337}
338
339/*
340 * fault_in_iov_iter_readable - fault in iov iterator for reading
341 * @i: iterator
342 * @size: maximum length
343 *
344 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
345 * @size. For each iovec, fault in each page that constitutes the iovec.
346 *
347 * Returns the number of bytes not faulted in (like copy_to_user() and
348 * copy_from_user()).
349 *
350 * Always returns 0 for non-userspace iterators.
351 */
352size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
353{
354 if (iter_is_ubuf(i)) {
355 size_t n = min(size, iov_iter_count(i));
356 n -= fault_in_readable(i->ubuf + i->iov_offset, n);
357 return size - n;
358 } else if (iter_is_iovec(i)) {
359 size_t count = min(size, iov_iter_count(i));
360 const struct iovec *p;
361 size_t skip;
362
363 size -= count;
364 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
365 size_t len = min(count, p->iov_len - skip);
366 size_t ret;
367
368 if (unlikely(!len))
369 continue;
370 ret = fault_in_readable(p->iov_base + skip, len);
371 count -= len - ret;
372 if (ret)
373 break;
374 }
375 return count + size;
376 }
377 return 0;
378}
379EXPORT_SYMBOL(fault_in_iov_iter_readable);
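/*
 * Illustrative sketch, not part of the upstream file: the usual caller
 * pattern is to pre-fault the source pages and then retry the copy until it
 * makes progress (real callers typically do the copy itself with page faults
 * disabled, e.g. via copy_page_from_iter_atomic()).  All names below are
 * hypothetical.
 */
static ssize_t __maybe_unused example_fill_from_user(void *dst, size_t len,
						     struct iov_iter *from)
{
	size_t copied = 0;

	while (copied < len) {
		size_t chunk;

		/* nothing could be faulted in - give up */
		if (fault_in_iov_iter_readable(from, len - copied) == len - copied) {
			if (!copied)
				return -EFAULT;
			break;
		}
		chunk = copy_from_iter(dst + copied, len - copied, from);
		if (!chunk)
			continue;	/* lost the pages again; fault them back in */
		copied += chunk;
	}
	return copied;
}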
380
381/*
382 * fault_in_iov_iter_writeable - fault in iov iterator for writing
383 * @i: iterator
384 * @size: maximum length
385 *
386 * Faults in the iterator using get_user_pages(), i.e., without triggering
387 * hardware page faults. This is primarily useful when we already know that
388 * some or all of the pages in @i aren't in memory.
389 *
390 * Returns the number of bytes not faulted in, like copy_to_user() and
391 * copy_from_user().
392 *
393 * Always returns 0 for non-user-space iterators.
394 */
395size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
396{
397 if (iter_is_ubuf(i)) {
398 size_t n = min(size, iov_iter_count(i));
399 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
400 return size - n;
401 } else if (iter_is_iovec(i)) {
402 size_t count = min(size, iov_iter_count(i));
403 const struct iovec *p;
404 size_t skip;
405
406 size -= count;
407 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
408 size_t len = min(count, p->iov_len - skip);
409 size_t ret;
410
411 if (unlikely(!len))
412 continue;
413 ret = fault_in_safe_writeable(p->iov_base + skip, len);
414 count -= len - ret;
415 if (ret)
416 break;
417 }
418 return count + size;
419 }
420 return 0;
421}
422EXPORT_SYMBOL(fault_in_iov_iter_writeable);
423
424void iov_iter_init(struct iov_iter *i, unsigned int direction,
425 const struct iovec *iov, unsigned long nr_segs,
426 size_t count)
427{
428 WARN_ON(direction & ~(READ | WRITE));
429 *i = (struct iov_iter) {
430 .iter_type = ITER_IOVEC,
431 .nofault = false,
432 .user_backed = true,
433 .data_source = direction,
434 .iov = iov,
435 .nr_segs = nr_segs,
436 .iov_offset = 0,
437 .count = count
438 };
439}
440EXPORT_SYMBOL(iov_iter_init);
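/*
 * Illustrative sketch, not part of the upstream file: wrapping a single user
 * buffer in an ITER_IOVEC destination for a read-style operation.  In
 * practice import_single_range()/import_iovec() below are the usual entry
 * points; the names here are hypothetical.
 */
static void __maybe_unused example_iter_from_user_buf(struct iov_iter *iter,
						      struct iovec *iov,
						      void __user *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	/* READ: data will be copied *to* the user buffer */
	iov_iter_init(iter, READ, iov, 1, len);
}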
441
442// returns the offset in partial buffer (if any)
443static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
444{
445 struct pipe_inode_info *pipe = i->pipe;
446 int used = pipe->head - pipe->tail;
447 int off = i->last_offset;
448
449 *npages = max((int)pipe->max_usage - used, 0);
450
451 if (off > 0 && off < PAGE_SIZE) { // anon and not full
452 (*npages)++;
453 return off;
454 }
455 return 0;
456}
457
458static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
459 struct iov_iter *i)
460{
461 unsigned int off, chunk;
462
463 if (unlikely(bytes > i->count))
464 bytes = i->count;
465 if (unlikely(!bytes))
466 return 0;
467
468 if (!sanity(i))
469 return 0;
470
471 for (size_t n = bytes; n; n -= chunk) {
472 struct page *page = append_pipe(i, n, &off);
473 chunk = min_t(size_t, n, PAGE_SIZE - off);
474 if (!page)
475 return bytes - n;
476 memcpy_to_page(page, off, addr, chunk);
477 addr += chunk;
478 }
479 return bytes;
480}
481
482static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
483 __wsum sum, size_t off)
484{
485 __wsum next = csum_partial_copy_nocheck(from, to, len);
486 return csum_block_add(sum, next, off);
487}
488
489static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
490 struct iov_iter *i, __wsum *sump)
491{
492 __wsum sum = *sump;
493 size_t off = 0;
494 unsigned int chunk, r;
495
496 if (unlikely(bytes > i->count))
497 bytes = i->count;
498 if (unlikely(!bytes))
499 return 0;
500
501 if (!sanity(i))
502 return 0;
503
504 while (bytes) {
505 struct page *page = append_pipe(i, bytes, &r);
506 char *p;
507
508 if (!page)
509 break;
510 chunk = min_t(size_t, bytes, PAGE_SIZE - r);
511 p = kmap_local_page(page);
512 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
513 kunmap_local(p);
514 off += chunk;
515 bytes -= chunk;
516 }
517 *sump = sum;
518 return off;
519}
520
521size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
522{
523 if (WARN_ON_ONCE(i->data_source))
524 return 0;
525 if (unlikely(iov_iter_is_pipe(i)))
526 return copy_pipe_to_iter(addr, bytes, i);
527 if (user_backed_iter(i))
528 might_fault();
529 iterate_and_advance(i, bytes, base, len, off,
530 copyout(base, addr + off, len),
531 memcpy(base, addr + off, len)
532 )
533
534 return bytes;
535}
536EXPORT_SYMBOL(_copy_to_iter);
537
538#ifdef CONFIG_ARCH_HAS_COPY_MC
539static int copyout_mc(void __user *to, const void *from, size_t n)
540{
541 if (access_ok(to, n)) {
542 instrument_copy_to_user(to, from, n);
543 n = copy_mc_to_user((__force void *) to, from, n);
544 }
545 return n;
546}
547
548static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
549 struct iov_iter *i)
550{
551 size_t xfer = 0;
552 unsigned int off, chunk;
553
554 if (unlikely(bytes > i->count))
555 bytes = i->count;
556 if (unlikely(!bytes))
557 return 0;
558
559 if (!sanity(i))
560 return 0;
561
562 while (bytes) {
563 struct page *page = append_pipe(i, bytes, &off);
564 unsigned long rem;
565 char *p;
566
567 if (!page)
568 break;
569 chunk = min_t(size_t, bytes, PAGE_SIZE - off);
570 p = kmap_local_page(page);
571 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
572 chunk -= rem;
573 kunmap_local(p);
574 xfer += chunk;
575 bytes -= chunk;
576 if (rem) {
577 iov_iter_revert(i, rem);
578 break;
579 }
580 }
581 return xfer;
582}
583
584/**
585 * _copy_mc_to_iter - copy to iter with source memory error exception handling
586 * @addr: source kernel address
587 * @bytes: total transfer length
588 * @i: destination iterator
589 *
590 * The pmem driver deploys this for the dax operation
591 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
592 * block-layer). Upon a machine check (#MC), read(2) aborts and returns
593 * -EIO or the number of bytes successfully copied.
594 *
595 * The main differences between this and typical _copy_to_iter() are:
596 *
597 * * Typical tail/residue handling after a fault retries the copy
598 * byte-by-byte until the fault happens again. Re-triggering machine
599 * checks is potentially fatal so the implementation uses source
600 * alignment and poison alignment assumptions to avoid re-triggering
601 * hardware exceptions.
602 *
603 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
604 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
605 * a short copy.
606 *
607 * Return: number of bytes copied (may be %0)
608 */
609size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
610{
611 if (WARN_ON_ONCE(i->data_source))
612 return 0;
613 if (unlikely(iov_iter_is_pipe(i)))
614 return copy_mc_pipe_to_iter(addr, bytes, i);
615 if (user_backed_iter(i))
616 might_fault();
617 __iterate_and_advance(i, bytes, base, len, off,
618 copyout_mc(base, addr + off, len),
619 copy_mc_to_kernel(base, addr + off, len)
620 )
621
622 return bytes;
623}
624EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
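/*
 * Illustrative sketch, not part of the upstream file: a pmem/dax style read
 * path consuming _copy_mc_to_iter() and turning a fully-poisoned copy into
 * -EIO while passing short copies back to the caller.  Hypothetical names.
 */
static ssize_t __maybe_unused example_mc_read(const void *kaddr, size_t len,
					      struct iov_iter *to)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, to);

	if (!copied && len)
		return -EIO;	/* poison was hit before any bytes were copied */
	return copied;
}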
625#endif /* CONFIG_ARCH_HAS_COPY_MC */
626
627size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
628{
629 if (WARN_ON_ONCE(!i->data_source))
630 return 0;
631
632 if (user_backed_iter(i))
633 might_fault();
634 iterate_and_advance(i, bytes, base, len, off,
635 copyin(addr + off, base, len),
636 memcpy(addr + off, base, len)
637 )
638
639 return bytes;
640}
641EXPORT_SYMBOL(_copy_from_iter);
642
643size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
644{
645 if (WARN_ON_ONCE(!i->data_source))
646 return 0;
647
648 iterate_and_advance(i, bytes, base, len, off,
649 __copy_from_user_inatomic_nocache(addr + off, base, len),
650 memcpy(addr + off, base, len)
651 )
652
653 return bytes;
654}
655EXPORT_SYMBOL(_copy_from_iter_nocache);
656
657#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
658/**
659 * _copy_from_iter_flushcache - write destination through cpu cache
660 * @addr: destination kernel address
661 * @bytes: total transfer length
662 * @i: source iterator
663 *
664 * The pmem driver arranges for filesystem-dax to use this facility via
665 * dax_copy_from_iter() for ensuring that writes to persistent memory
666 * are flushed through the CPU cache. It is differentiated from
667 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
668 * all iterator types. The _copy_from_iter_nocache() only attempts to
669 * bypass the cache for the ITER_IOVEC case, and on some archs may use
670 * instructions that strand dirty-data in the cache.
671 *
672 * Return: number of bytes copied (may be %0)
673 */
674size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
675{
676 if (WARN_ON_ONCE(!i->data_source))
677 return 0;
678
679 iterate_and_advance(i, bytes, base, len, off,
680 __copy_from_user_flushcache(addr + off, base, len),
681 memcpy_flushcache(addr + off, base, len)
682 )
683
684 return bytes;
685}
686EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
687#endif
688
689static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
690{
691 struct page *head;
692 size_t v = n + offset;
693
694 /*
695 * The general case needs to access the page order in order
696 * to compute the page size.
697 * However, we mostly deal with order-0 pages and thus can
698 * avoid a possible cache line miss for requests that fit all
699 * page orders.
700 */
701 if (n <= v && v <= PAGE_SIZE)
702 return true;
703
704 head = compound_head(page);
705 v += (page - head) << PAGE_SHIFT;
706
707 if (WARN_ON(n > v || v > page_size(head)))
708 return false;
709 return true;
710}
711
712size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
713 struct iov_iter *i)
714{
715 size_t res = 0;
716 if (!page_copy_sane(page, offset, bytes))
717 return 0;
718 if (WARN_ON_ONCE(i->data_source))
719 return 0;
720 if (unlikely(iov_iter_is_pipe(i)))
721 return copy_page_to_iter_pipe(page, offset, bytes, i);
722 page += offset / PAGE_SIZE; // first subpage
723 offset %= PAGE_SIZE;
724 while (1) {
725 void *kaddr = kmap_local_page(page);
726 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
727 n = _copy_to_iter(kaddr + offset, n, i);
728 kunmap_local(kaddr);
729 res += n;
730 bytes -= n;
731 if (!bytes || !n)
732 break;
733 offset += n;
734 if (offset == PAGE_SIZE) {
735 page++;
736 offset = 0;
737 }
738 }
739 return res;
740}
741EXPORT_SYMBOL(copy_page_to_iter);
742
743size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
744 struct iov_iter *i)
745{
746 size_t res = 0;
747 if (!page_copy_sane(page, offset, bytes))
748 return 0;
749 page += offset / PAGE_SIZE; // first subpage
750 offset %= PAGE_SIZE;
751 while (1) {
752 void *kaddr = kmap_local_page(page);
753 size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
754 n = _copy_from_iter(kaddr + offset, n, i);
755 kunmap_local(kaddr);
756 res += n;
757 bytes -= n;
758 if (!bytes || !n)
759 break;
760 offset += n;
761 if (offset == PAGE_SIZE) {
762 page++;
763 offset = 0;
764 }
765 }
766 return res;
767}
768EXPORT_SYMBOL(copy_page_from_iter);
769
770static size_t pipe_zero(size_t bytes, struct iov_iter *i)
771{
772 unsigned int chunk, off;
773
774 if (unlikely(bytes > i->count))
775 bytes = i->count;
776 if (unlikely(!bytes))
777 return 0;
778
779 if (!sanity(i))
780 return 0;
781
782 for (size_t n = bytes; n; n -= chunk) {
783 struct page *page = append_pipe(i, n, &off);
784 char *p;
785
786 if (!page)
787 return bytes - n;
788 chunk = min_t(size_t, n, PAGE_SIZE - off);
789 p = kmap_local_page(page);
790 memset(p + off, 0, chunk);
791 kunmap_local(p);
792 }
793 return bytes;
794}
795
796size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
797{
798 if (unlikely(iov_iter_is_pipe(i)))
799 return pipe_zero(bytes, i);
800 iterate_and_advance(i, bytes, base, len, count,
801 clear_user(base, len),
802 memset(base, 0, len)
803 )
804
805 return bytes;
806}
807EXPORT_SYMBOL(iov_iter_zero);
808
809size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
810 struct iov_iter *i)
811{
812 char *kaddr = kmap_atomic(page), *p = kaddr + offset;
813 if (!page_copy_sane(page, offset, bytes)) {
814 kunmap_atomic(kaddr);
815 return 0;
816 }
817 if (WARN_ON_ONCE(!i->data_source)) {
818 kunmap_atomic(kaddr);
819 return 0;
820 }
821 iterate_and_advance(i, bytes, base, len, off,
822 copyin(p + off, base, len),
823 memcpy(p + off, base, len)
824 )
825 kunmap_atomic(kaddr);
826 return bytes;
827}
828EXPORT_SYMBOL(copy_page_from_iter_atomic);
829
830static void pipe_advance(struct iov_iter *i, size_t size)
831{
832 struct pipe_inode_info *pipe = i->pipe;
833 int off = i->last_offset;
834
835 if (!off && !size) {
836 pipe_discard_from(pipe, i->start_head); // discard everything
837 return;
838 }
839 i->count -= size;
840 while (1) {
841 struct pipe_buffer *buf = pipe_buf(pipe, i->head);
842 if (off) /* make it relative to the beginning of buffer */
843 size += abs(off) - buf->offset;
844 if (size <= buf->len) {
845 buf->len = size;
846 i->last_offset = last_offset(buf);
847 break;
848 }
849 size -= buf->len;
850 i->head++;
851 off = 0;
852 }
853 pipe_discard_from(pipe, i->head + 1); // discard everything past this one
854}
855
856static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
857{
858 const struct bio_vec *bvec, *end;
859
860 if (!i->count)
861 return;
862 i->count -= size;
863
864 size += i->iov_offset;
865
866 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
867 if (likely(size < bvec->bv_len))
868 break;
869 size -= bvec->bv_len;
870 }
871 i->iov_offset = size;
872 i->nr_segs -= bvec - i->bvec;
873 i->bvec = bvec;
874}
875
876static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
877{
878 const struct iovec *iov, *end;
879
880 if (!i->count)
881 return;
882 i->count -= size;
883
884 size += i->iov_offset; // from beginning of current segment
885 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
886 if (likely(size < iov->iov_len))
887 break;
888 size -= iov->iov_len;
889 }
890 i->iov_offset = size;
891 i->nr_segs -= iov - i->iov;
892 i->iov = iov;
893}
894
895void iov_iter_advance(struct iov_iter *i, size_t size)
896{
897 if (unlikely(i->count < size))
898 size = i->count;
899 if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
900 i->iov_offset += size;
901 i->count -= size;
902 } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
903 /* iovec and kvec have identical layouts */
904 iov_iter_iovec_advance(i, size);
905 } else if (iov_iter_is_bvec(i)) {
906 iov_iter_bvec_advance(i, size);
907 } else if (iov_iter_is_pipe(i)) {
908 pipe_advance(i, size);
909 } else if (iov_iter_is_discard(i)) {
910 i->count -= size;
911 }
912}
913EXPORT_SYMBOL(iov_iter_advance);
914
915void iov_iter_revert(struct iov_iter *i, size_t unroll)
916{
917 if (!unroll)
918 return;
919 if (WARN_ON(unroll > MAX_RW_COUNT))
920 return;
921 i->count += unroll;
922 if (unlikely(iov_iter_is_pipe(i))) {
923 struct pipe_inode_info *pipe = i->pipe;
924 unsigned int head = pipe->head;
925
926 while (head > i->start_head) {
927 struct pipe_buffer *b = pipe_buf(pipe, --head);
928 if (unroll < b->len) {
929 b->len -= unroll;
930 i->last_offset = last_offset(b);
931 i->head = head;
932 return;
933 }
934 unroll -= b->len;
935 pipe_buf_release(pipe, b);
936 pipe->head--;
937 }
938 i->last_offset = 0;
939 i->head = head;
940 return;
941 }
942 if (unlikely(iov_iter_is_discard(i)))
943 return;
944 if (unroll <= i->iov_offset) {
945 i->iov_offset -= unroll;
946 return;
947 }
948 unroll -= i->iov_offset;
949 if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
950 BUG(); /* We should never go beyond the start of the specified
951 * range since we might then be straying into pages that
952 * aren't pinned.
953 */
954 } else if (iov_iter_is_bvec(i)) {
955 const struct bio_vec *bvec = i->bvec;
956 while (1) {
957 size_t n = (--bvec)->bv_len;
958 i->nr_segs++;
959 if (unroll <= n) {
960 i->bvec = bvec;
961 i->iov_offset = n - unroll;
962 return;
963 }
964 unroll -= n;
965 }
966 } else { /* same logics for iovec and kvec */
967 const struct iovec *iov = i->iov;
968 while (1) {
969 size_t n = (--iov)->iov_len;
970 i->nr_segs++;
971 if (unroll <= n) {
972 i->iov = iov;
973 i->iov_offset = n - unroll;
974 return;
975 }
976 unroll -= n;
977 }
978 }
979}
980EXPORT_SYMBOL(iov_iter_revert);
981
982/*
983 * Return the count of just the current iov_iter segment.
984 */
985size_t iov_iter_single_seg_count(const struct iov_iter *i)
986{
987 if (i->nr_segs > 1) {
988 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
989 return min(i->count, i->iov->iov_len - i->iov_offset);
990 if (iov_iter_is_bvec(i))
991 return min(i->count, i->bvec->bv_len - i->iov_offset);
992 }
993 return i->count;
994}
995EXPORT_SYMBOL(iov_iter_single_seg_count);
996
997void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
998 const struct kvec *kvec, unsigned long nr_segs,
999 size_t count)
1000{
1001 WARN_ON(direction & ~(READ | WRITE));
1002 *i = (struct iov_iter){
1003 .iter_type = ITER_KVEC,
1004 .data_source = direction,
1005 .kvec = kvec,
1006 .nr_segs = nr_segs,
1007 .iov_offset = 0,
1008 .count = count
1009 };
1010}
1011EXPORT_SYMBOL(iov_iter_kvec);
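/*
 * Illustrative sketch, not part of the upstream file: an ITER_KVEC source
 * wrapping a kernel buffer, drained with copy_from_iter().  All names are
 * hypothetical.
 */
static size_t __maybe_unused example_copy_out_of_kvec(void *dst, void *src,
						      size_t len,
						      struct iov_iter *iter,
						      struct kvec *kv)
{
	kv->iov_base = src;
	kv->iov_len = len;
	/* WRITE: the iterator acts as the data source */
	iov_iter_kvec(iter, WRITE, kv, 1, len);
	return copy_from_iter(dst, len, iter);
}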
1012
1013void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1014 const struct bio_vec *bvec, unsigned long nr_segs,
1015 size_t count)
1016{
1017 WARN_ON(direction & ~(READ | WRITE));
1018 *i = (struct iov_iter){
1019 .iter_type = ITER_BVEC,
1020 .data_source = direction,
1021 .bvec = bvec,
1022 .nr_segs = nr_segs,
1023 .iov_offset = 0,
1024 .count = count
1025 };
1026}
1027EXPORT_SYMBOL(iov_iter_bvec);
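/*
 * Illustrative sketch, not part of the upstream file: an ITER_BVEC
 * destination covering (at most) one page, filled with copy_to_iter().
 * Assumes len <= PAGE_SIZE; all names are hypothetical.
 */
static size_t __maybe_unused example_fill_page(struct page *page, const void *src,
					       size_t len, struct iov_iter *iter,
					       struct bio_vec *bv)
{
	bv->bv_page = page;
	bv->bv_offset = 0;
	bv->bv_len = len;
	/* READ: the iterator is the destination of the copy */
	iov_iter_bvec(iter, READ, bv, 1, len);
	return copy_to_iter(src, len, iter);
}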
1028
1029void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1030 struct pipe_inode_info *pipe,
1031 size_t count)
1032{
1033 BUG_ON(direction != READ);
1034 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1035 *i = (struct iov_iter){
1036 .iter_type = ITER_PIPE,
1037 .data_source = false,
1038 .pipe = pipe,
1039 .head = pipe->head,
1040 .start_head = pipe->head,
1041 .last_offset = 0,
1042 .count = count
1043 };
1044}
1045EXPORT_SYMBOL(iov_iter_pipe);
1046
1047/**
1048 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1049 * @i: The iterator to initialise.
1050 * @direction: The direction of the transfer.
1051 * @xarray: The xarray to access.
1052 * @start: The start file position.
1053 * @count: The size of the I/O buffer in bytes.
1054 *
1055 * Set up an I/O iterator to either draw data out of the pages attached to an
1056 * inode or to inject data into those pages. The pages *must* be prevented
1057 * from evaporation, either by taking a ref on them or locking them by the
1058 * caller.
1059 */
1060void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1061 struct xarray *xarray, loff_t start, size_t count)
1062{
1063 BUG_ON(direction & ~1);
1064 *i = (struct iov_iter) {
1065 .iter_type = ITER_XARRAY,
1066 .data_source = direction,
1067 .xarray = xarray,
1068 .xarray_start = start,
1069 .count = count,
1070 .iov_offset = 0
1071 };
1072}
1073EXPORT_SYMBOL(iov_iter_xarray);
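/*
 * Illustrative sketch, not part of the upstream file: an ITER_XARRAY source
 * over an inode's page cache, as a netfs-style caller might build one.  The
 * caller is assumed to already hold references on (or locks for) the pages
 * in the range.  Hypothetical names.
 */
static void __maybe_unused example_xarray_source(struct iov_iter *iter,
						 struct address_space *mapping,
						 loff_t pos, size_t len)
{
	/* WRITE: the cached pages act as the data source */
	iov_iter_xarray(iter, WRITE, &mapping->i_pages, pos, len);
}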
1074
1075/**
1076 * iov_iter_discard - Initialise an I/O iterator that discards data
1077 * @i: The iterator to initialise.
1078 * @direction: The direction of the transfer.
1079 * @count: The size of the I/O buffer in bytes.
1080 *
1081 * Set up an I/O iterator that just discards everything that's written to it.
1082 * It's only available as a READ iterator.
1083 */
1084void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1085{
1086 BUG_ON(direction != READ);
1087 *i = (struct iov_iter){
1088 .iter_type = ITER_DISCARD,
1089 .data_source = false,
1090 .count = count,
1091 .iov_offset = 0
1092 };
1093}
1094EXPORT_SYMBOL(iov_iter_discard);
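/*
 * Illustrative sketch, not part of the upstream file: a discard iterator
 * accepts data and throws it away, which is handy for skipping over part of
 * a source while keeping the length accounting.  Hypothetical names.
 */
static size_t __maybe_unused example_drop_bytes(const void *src, size_t len)
{
	struct iov_iter iter;

	iov_iter_discard(&iter, READ, len);
	return copy_to_iter(src, len, &iter);	/* returns len; nothing is stored */
}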
1095
1096static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
1097 unsigned len_mask)
1098{
1099 size_t size = i->count;
1100 size_t skip = i->iov_offset;
1101 unsigned k;
1102
1103 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1104 size_t len = i->iov[k].iov_len - skip;
1105
1106 if (len > size)
1107 len = size;
1108 if (len & len_mask)
1109 return false;
1110 if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
1111 return false;
1112
1113 size -= len;
1114 if (!size)
1115 break;
1116 }
1117 return true;
1118}
1119
1120static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
1121 unsigned len_mask)
1122{
1123 size_t size = i->count;
1124 unsigned skip = i->iov_offset;
1125 unsigned k;
1126
1127 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1128 size_t len = i->bvec[k].bv_len - skip;
1129
1130 if (len > size)
1131 len = size;
1132 if (len & len_mask)
1133 return false;
1134 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
1135 return false;
1136
1137 size -= len;
1138 if (!size)
1139 break;
1140 }
1141 return true;
1142}
1143
1144/**
1145 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
1146 * are aligned to the parameters.
1147 *
1148 * @i: &struct iov_iter to check
1149 * @addr_mask: bit mask to check against the iov element's addresses
1150 * @len_mask: bit mask to check against the iov element's lengths
1151 *
1152 * Return: false if any addresses or lengths intersect with the provided masks
1153 */
1154bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
1155 unsigned len_mask)
1156{
1157 if (likely(iter_is_ubuf(i))) {
1158 if (i->count & len_mask)
1159 return false;
1160 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
1161 return false;
1162 return true;
1163 }
1164
1165 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1166 return iov_iter_aligned_iovec(i, addr_mask, len_mask);
1167
1168 if (iov_iter_is_bvec(i))
1169 return iov_iter_aligned_bvec(i, addr_mask, len_mask);
1170
1171 if (iov_iter_is_pipe(i)) {
1172 size_t size = i->count;
1173
1174 if (size & len_mask)
1175 return false;
1176 if (size && i->last_offset > 0) {
1177 if (i->last_offset & addr_mask)
1178 return false;
1179 }
1180
1181 return true;
1182 }
1183
1184 if (iov_iter_is_xarray(i)) {
1185 if (i->count & len_mask)
1186 return false;
1187 if ((i->xarray_start + i->iov_offset) & addr_mask)
1188 return false;
1189 }
1190
1191 return true;
1192}
1193EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
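/*
 * Illustrative sketch, not part of the upstream file: a direct-I/O style
 * check that every segment is aligned to a (power-of-two) logical block
 * size in both address and length.  The lbs parameter is hypothetical.
 */
static bool __maybe_unused example_dio_aligned(const struct iov_iter *iter,
					       unsigned int lbs)
{
	return iov_iter_is_aligned(iter, lbs - 1, lbs - 1);
}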
1194
1195static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1196{
1197 unsigned long res = 0;
1198 size_t size = i->count;
1199 size_t skip = i->iov_offset;
1200 unsigned k;
1201
1202 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1203 size_t len = i->iov[k].iov_len - skip;
1204 if (len) {
1205 res |= (unsigned long)i->iov[k].iov_base + skip;
1206 if (len > size)
1207 len = size;
1208 res |= len;
1209 size -= len;
1210 if (!size)
1211 break;
1212 }
1213 }
1214 return res;
1215}
1216
1217static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1218{
1219 unsigned res = 0;
1220 size_t size = i->count;
1221 unsigned skip = i->iov_offset;
1222 unsigned k;
1223
1224 for (k = 0; k < i->nr_segs; k++, skip = 0) {
1225 size_t len = i->bvec[k].bv_len - skip;
1226 res |= (unsigned long)i->bvec[k].bv_offset + skip;
1227 if (len > size)
1228 len = size;
1229 res |= len;
1230 size -= len;
1231 if (!size)
1232 break;
1233 }
1234 return res;
1235}
1236
1237unsigned long iov_iter_alignment(const struct iov_iter *i)
1238{
1239 if (likely(iter_is_ubuf(i))) {
1240 size_t size = i->count;
1241 if (size)
1242 return ((unsigned long)i->ubuf + i->iov_offset) | size;
1243 return 0;
1244 }
1245
1246 /* iovec and kvec have identical layouts */
1247 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1248 return iov_iter_alignment_iovec(i);
1249
1250 if (iov_iter_is_bvec(i))
1251 return iov_iter_alignment_bvec(i);
1252
1253 if (iov_iter_is_pipe(i)) {
1254 size_t size = i->count;
1255
1256 if (size && i->last_offset > 0)
1257 return size | i->last_offset;
1258 return size;
1259 }
1260
1261 if (iov_iter_is_xarray(i))
1262 return (i->xarray_start + i->iov_offset) | i->count;
1263
1264 return 0;
1265}
1266EXPORT_SYMBOL(iov_iter_alignment);
1267
1268unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1269{
1270 unsigned long res = 0;
1271 unsigned long v = 0;
1272 size_t size = i->count;
1273 unsigned k;
1274
1275 if (iter_is_ubuf(i))
1276 return 0;
1277
1278 if (WARN_ON(!iter_is_iovec(i)))
1279 return ~0U;
1280
1281 for (k = 0; k < i->nr_segs; k++) {
1282 if (i->iov[k].iov_len) {
1283 unsigned long base = (unsigned long)i->iov[k].iov_base;
1284 if (v) // if not the first one
1285 res |= base | v; // this start | previous end
1286 v = base + i->iov[k].iov_len;
1287 if (size <= i->iov[k].iov_len)
1288 break;
1289 size -= i->iov[k].iov_len;
1290 }
1291 }
1292 return res;
1293}
1294EXPORT_SYMBOL(iov_iter_gap_alignment);
1295
1296static int want_pages_array(struct page ***res, size_t size,
1297 size_t start, unsigned int maxpages)
1298{
1299 unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
1300
1301 if (count > maxpages)
1302 count = maxpages;
1303 WARN_ON(!count); // caller should've prevented that
1304 if (!*res) {
1305 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
1306 if (!*res)
1307 return 0;
1308 }
1309 return count;
1310}
1311
1312static ssize_t pipe_get_pages(struct iov_iter *i,
1313 struct page ***pages, size_t maxsize, unsigned maxpages,
1314 size_t *start)
1315{
1316 unsigned int npages, count, off, chunk;
1317 struct page **p;
1318 size_t left;
1319
1320 if (!sanity(i))
1321 return -EFAULT;
1322
1323 *start = off = pipe_npages(i, &npages);
1324 if (!npages)
1325 return -EFAULT;
1326 count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
1327 if (!count)
1328 return -ENOMEM;
1329 p = *pages;
1330 for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
1331 struct page *page = append_pipe(i, left, &off);
1332 if (!page)
1333 break;
1334 chunk = min_t(size_t, left, PAGE_SIZE - off);
1335 get_page(*p++ = page);
1336 }
1337 if (!npages)
1338 return -EFAULT;
1339 return maxsize - left;
1340}
1341
1342static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1343 pgoff_t index, unsigned int nr_pages)
1344{
1345 XA_STATE(xas, xa, index);
1346 struct page *page;
1347 unsigned int ret = 0;
1348
1349 rcu_read_lock();
1350 for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1351 if (xas_retry(&xas, page))
1352 continue;
1353
1354 /* Has the page moved or been split? */
1355 if (unlikely(page != xas_reload(&xas))) {
1356 xas_reset(&xas);
1357 continue;
1358 }
1359
1360 pages[ret] = find_subpage(page, xas.xa_index);
1361 get_page(pages[ret]);
1362 if (++ret == nr_pages)
1363 break;
1364 }
1365 rcu_read_unlock();
1366 return ret;
1367}
1368
1369static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1370 struct page ***pages, size_t maxsize,
1371 unsigned maxpages, size_t *_start_offset)
1372{
1373 unsigned nr, offset, count;
1374 pgoff_t index;
1375 loff_t pos;
1376
1377 pos = i->xarray_start + i->iov_offset;
1378 index = pos >> PAGE_SHIFT;
1379 offset = pos & ~PAGE_MASK;
1380 *_start_offset = offset;
1381
1382 count = want_pages_array(pages, maxsize, offset, maxpages);
1383 if (!count)
1384 return -ENOMEM;
1385 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
1386 if (nr == 0)
1387 return 0;
1388
1389 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1390 i->iov_offset += maxsize;
1391 i->count -= maxsize;
1392 return maxsize;
1393}
1394
1395/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
1396static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1397{
1398 size_t skip;
1399 long k;
1400
1401 if (iter_is_ubuf(i))
1402 return (unsigned long)i->ubuf + i->iov_offset;
1403
1404 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1405 size_t len = i->iov[k].iov_len - skip;
1406
1407 if (unlikely(!len))
1408 continue;
1409 if (*size > len)
1410 *size = len;
1411 return (unsigned long)i->iov[k].iov_base + skip;
1412 }
1413 BUG(); // if it had been empty, we wouldn't get called
1414}
1415
1416/* must be done on non-empty ITER_BVEC one */
1417static struct page *first_bvec_segment(const struct iov_iter *i,
1418 size_t *size, size_t *start)
1419{
1420 struct page *page;
1421 size_t skip = i->iov_offset, len;
1422
1423 len = i->bvec->bv_len - skip;
1424 if (*size > len)
1425 *size = len;
1426 skip += i->bvec->bv_offset;
1427 page = i->bvec->bv_page + skip / PAGE_SIZE;
1428 *start = skip % PAGE_SIZE;
1429 return page;
1430}
1431
1432static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1433 struct page ***pages, size_t maxsize,
1434 unsigned int maxpages, size_t *start,
1435 unsigned int gup_flags)
1436{
1437 unsigned int n;
1438
1439 if (maxsize > i->count)
1440 maxsize = i->count;
1441 if (!maxsize)
1442 return 0;
1443 if (maxsize > MAX_RW_COUNT)
1444 maxsize = MAX_RW_COUNT;
1445
1446 if (likely(user_backed_iter(i))) {
1447 unsigned long addr;
1448 int res;
1449
1450 if (iov_iter_rw(i) != WRITE)
1451 gup_flags |= FOLL_WRITE;
1452 if (i->nofault)
1453 gup_flags |= FOLL_NOFAULT;
1454
1455 addr = first_iovec_segment(i, &maxsize);
1456 *start = addr % PAGE_SIZE;
1457 addr &= PAGE_MASK;
1458 n = want_pages_array(pages, maxsize, *start, maxpages);
1459 if (!n)
1460 return -ENOMEM;
1461 res = get_user_pages_fast(addr, n, gup_flags, *pages);
1462 if (unlikely(res <= 0))
1463 return res;
1464 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1465 iov_iter_advance(i, maxsize);
1466 return maxsize;
1467 }
1468 if (iov_iter_is_bvec(i)) {
1469 struct page **p;
1470 struct page *page;
1471
1472 page = first_bvec_segment(i, &maxsize, start);
1473 n = want_pages_array(pages, maxsize, *start, maxpages);
1474 if (!n)
1475 return -ENOMEM;
1476 p = *pages;
1477 for (int k = 0; k < n; k++)
1478 get_page(p[k] = page + k);
1479 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1480 i->count -= maxsize;
1481 i->iov_offset += maxsize;
1482 if (i->iov_offset == i->bvec->bv_len) {
1483 i->iov_offset = 0;
1484 i->bvec++;
1485 i->nr_segs--;
1486 }
1487 return maxsize;
1488 }
1489 if (iov_iter_is_pipe(i))
1490 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1491 if (iov_iter_is_xarray(i))
1492 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1493 return -EFAULT;
1494}
1495
1496ssize_t iov_iter_get_pages(struct iov_iter *i,
1497 struct page **pages, size_t maxsize, unsigned maxpages,
1498 size_t *start, unsigned gup_flags)
1499{
1500 if (!maxpages)
1501 return 0;
1502 BUG_ON(!pages);
1503
1504 return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
1505 start, gup_flags);
1506}
1507EXPORT_SYMBOL_GPL(iov_iter_get_pages);
1508
1509ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
1510 size_t maxsize, unsigned maxpages, size_t *start)
1511{
1512 return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
1513}
1514EXPORT_SYMBOL(iov_iter_get_pages2);
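/*
 * Illustrative sketch, not part of the upstream file: grabbing page
 * references from an iterator with iov_iter_get_pages2() and dropping them
 * again once the caller is done with the data.  Hypothetical names.
 */
static ssize_t __maybe_unused example_grab_pages(struct iov_iter *iter,
						 struct page **pages,
						 unsigned int maxpages)
{
	size_t offset;
	ssize_t bytes;

	bytes = iov_iter_get_pages2(iter, pages, maxpages * PAGE_SIZE,
				    maxpages, &offset);
	if (bytes <= 0)
		return bytes;
	/* ... the data starts at offset bytes into pages[0] ... */
	for (int n = DIV_ROUND_UP(offset + bytes, PAGE_SIZE); n > 0; n--)
		put_page(pages[n - 1]);
	return bytes;
}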
1515
1516ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1517 struct page ***pages, size_t maxsize,
1518 size_t *start, unsigned gup_flags)
1519{
1520 ssize_t len;
1521
1522 *pages = NULL;
1523
1524 len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
1525 gup_flags);
1526 if (len <= 0) {
1527 kvfree(*pages);
1528 *pages = NULL;
1529 }
1530 return len;
1531}
1532EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
1533
1534ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
1535 struct page ***pages, size_t maxsize, size_t *start)
1536{
1537 return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
1538}
1539EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
1540
1541size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1542 struct iov_iter *i)
1543{
1544 __wsum sum, next;
1545 sum = *csum;
1546 if (WARN_ON_ONCE(!i->data_source))
1547 return 0;
1548
1549 iterate_and_advance(i, bytes, base, len, off, ({
1550 next = csum_and_copy_from_user(base, addr + off, len);
1551 sum = csum_block_add(sum, next, off);
1552 next ? 0 : len;
1553 }), ({
1554 sum = csum_and_memcpy(addr + off, base, len, sum, off);
1555 })
1556 )
1557 *csum = sum;
1558 return bytes;
1559}
1560EXPORT_SYMBOL(csum_and_copy_from_iter);
1561
1562size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1563 struct iov_iter *i)
1564{
1565 struct csum_state *csstate = _csstate;
1566 __wsum sum, next;
1567
1568 if (WARN_ON_ONCE(i->data_source))
1569 return 0;
1570 if (unlikely(iov_iter_is_discard(i))) {
1571 // can't use csum_and_memcpy() for that one - data is not copied
1572 csstate->csum = csum_block_add(csstate->csum,
1573 csum_partial(addr, bytes, 0),
1574 csstate->off);
1575 csstate->off += bytes;
1576 return bytes;
1577 }
1578
1579 sum = csum_shift(csstate->csum, csstate->off);
1580 if (unlikely(iov_iter_is_pipe(i)))
1581 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1582 else iterate_and_advance(i, bytes, base, len, off, ({
1583 next = csum_and_copy_to_user(addr + off, base, len);
1584 sum = csum_block_add(sum, next, off);
1585 next ? 0 : len;
1586 }), ({
1587 sum = csum_and_memcpy(base, addr + off, len, sum, off);
1588 })
1589 )
1590 csstate->csum = csum_shift(sum, csstate->off);
1591 csstate->off += bytes;
1592 return bytes;
1593}
1594EXPORT_SYMBOL(csum_and_copy_to_iter);
1595
1596size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1597 struct iov_iter *i)
1598{
1599#ifdef CONFIG_CRYPTO_HASH
1600 struct ahash_request *hash = hashp;
1601 struct scatterlist sg;
1602 size_t copied;
1603
1604 copied = copy_to_iter(addr, bytes, i);
1605 sg_init_one(&sg, addr, copied);
1606 ahash_request_set_crypt(hash, &sg, NULL, copied);
1607 crypto_ahash_update(hash);
1608 return copied;
1609#else
1610 return 0;
1611#endif
1612}
1613EXPORT_SYMBOL(hash_and_copy_to_iter);
1614
1615static int iov_npages(const struct iov_iter *i, int maxpages)
1616{
1617 size_t skip = i->iov_offset, size = i->count;
1618 const struct iovec *p;
1619 int npages = 0;
1620
1621 for (p = i->iov; size; skip = 0, p++) {
1622 unsigned offs = offset_in_page(p->iov_base + skip);
1623 size_t len = min(p->iov_len - skip, size);
1624
1625 if (len) {
1626 size -= len;
1627 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1628 if (unlikely(npages > maxpages))
1629 return maxpages;
1630 }
1631 }
1632 return npages;
1633}
1634
1635static int bvec_npages(const struct iov_iter *i, int maxpages)
1636{
1637 size_t skip = i->iov_offset, size = i->count;
1638 const struct bio_vec *p;
1639 int npages = 0;
1640
1641 for (p = i->bvec; size; skip = 0, p++) {
1642 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1643 size_t len = min(p->bv_len - skip, size);
1644
1645 size -= len;
1646 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1647 if (unlikely(npages > maxpages))
1648 return maxpages;
1649 }
1650 return npages;
1651}
1652
1653int iov_iter_npages(const struct iov_iter *i, int maxpages)
1654{
1655 if (unlikely(!i->count))
1656 return 0;
1657 if (likely(iter_is_ubuf(i))) {
1658 unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1659 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1660 return min(npages, maxpages);
1661 }
1662 /* iovec and kvec have identical layouts */
1663 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1664 return iov_npages(i, maxpages);
1665 if (iov_iter_is_bvec(i))
1666 return bvec_npages(i, maxpages);
1667 if (iov_iter_is_pipe(i)) {
1668 int npages;
1669
1670 if (!sanity(i))
1671 return 0;
1672
1673 pipe_npages(i, &npages);
1674 return min(npages, maxpages);
1675 }
1676 if (iov_iter_is_xarray(i)) {
1677 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1678 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1679 return min(npages, maxpages);
1680 }
1681 return 0;
1682}
1683EXPORT_SYMBOL(iov_iter_npages);
1684
1685const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1686{
1687 *new = *old;
1688 if (unlikely(iov_iter_is_pipe(new))) {
1689 WARN_ON(1);
1690 return NULL;
1691 }
1692 if (iov_iter_is_bvec(new))
1693 return new->bvec = kmemdup(new->bvec,
1694 new->nr_segs * sizeof(struct bio_vec),
1695 flags);
1696 else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
1697 /* iovec and kvec have identical layout */
1698 return new->iov = kmemdup(new->iov,
1699 new->nr_segs * sizeof(struct iovec),
1700 flags);
1701 return NULL;
1702}
1703EXPORT_SYMBOL(dup_iter);
1704
1705static int copy_compat_iovec_from_user(struct iovec *iov,
1706 const struct iovec __user *uvec, unsigned long nr_segs)
1707{
1708 const struct compat_iovec __user *uiov =
1709 (const struct compat_iovec __user *)uvec;
1710 int ret = -EFAULT, i;
1711
1712 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1713 return -EFAULT;
1714
1715 for (i = 0; i < nr_segs; i++) {
1716 compat_uptr_t buf;
1717 compat_ssize_t len;
1718
1719 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1720 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1721
1722 /* check for compat_size_t not fitting in compat_ssize_t .. */
1723 if (len < 0) {
1724 ret = -EINVAL;
1725 goto uaccess_end;
1726 }
1727 iov[i].iov_base = compat_ptr(buf);
1728 iov[i].iov_len = len;
1729 }
1730
1731 ret = 0;
1732uaccess_end:
1733 user_access_end();
1734 return ret;
1735}
1736
1737static int copy_iovec_from_user(struct iovec *iov,
1738 const struct iovec __user *uvec, unsigned long nr_segs)
1739{
1740 unsigned long seg;
1741
1742 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1743 return -EFAULT;
1744 for (seg = 0; seg < nr_segs; seg++) {
1745 if ((ssize_t)iov[seg].iov_len < 0)
1746 return -EINVAL;
1747 }
1748
1749 return 0;
1750}
1751
1752struct iovec *iovec_from_user(const struct iovec __user *uvec,
1753 unsigned long nr_segs, unsigned long fast_segs,
1754 struct iovec *fast_iov, bool compat)
1755{
1756 struct iovec *iov = fast_iov;
1757 int ret;
1758
1759 /*
1760 * SuS says "The readv() function *may* fail if the iovcnt argument was
1761 * less than or equal to 0, or greater than {IOV_MAX}." Linux has
1762 * traditionally returned zero for zero segments, so...
1763 */
1764 if (nr_segs == 0)
1765 return iov;
1766 if (nr_segs > UIO_MAXIOV)
1767 return ERR_PTR(-EINVAL);
1768 if (nr_segs > fast_segs) {
1769 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1770 if (!iov)
1771 return ERR_PTR(-ENOMEM);
1772 }
1773
1774 if (compat)
1775 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1776 else
1777 ret = copy_iovec_from_user(iov, uvec, nr_segs);
1778 if (ret) {
1779 if (iov != fast_iov)
1780 kfree(iov);
1781 return ERR_PTR(ret);
1782 }
1783
1784 return iov;
1785}
1786
1787ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1788 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1789 struct iov_iter *i, bool compat)
1790{
1791 ssize_t total_len = 0;
1792 unsigned long seg;
1793 struct iovec *iov;
1794
1795 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1796 if (IS_ERR(iov)) {
1797 *iovp = NULL;
1798 return PTR_ERR(iov);
1799 }
1800
1801 /*
1802 * According to the Single Unix Specification we should return EINVAL if
1803 * an element length is < 0 when cast to ssize_t or if the total length
1804 * would overflow the ssize_t return value of the system call.
1805 *
1806 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1807 * overflow case.
1808 */
1809 for (seg = 0; seg < nr_segs; seg++) {
1810 ssize_t len = (ssize_t)iov[seg].iov_len;
1811
1812 if (!access_ok(iov[seg].iov_base, len)) {
1813 if (iov != *iovp)
1814 kfree(iov);
1815 *iovp = NULL;
1816 return -EFAULT;
1817 }
1818
1819 if (len > MAX_RW_COUNT - total_len) {
1820 len = MAX_RW_COUNT - total_len;
1821 iov[seg].iov_len = len;
1822 }
1823 total_len += len;
1824 }
1825
1826 iov_iter_init(i, type, iov, nr_segs, total_len);
1827 if (iov == *iovp)
1828 *iovp = NULL;
1829 else
1830 *iovp = iov;
1831 return total_len;
1832}
1833
1834/**
1835 * import_iovec() - Copy an array of &struct iovec from userspace
1836 * into the kernel, check that it is valid, and initialize a new
1837 * &struct iov_iter iterator to access it.
1838 *
1839 * @type: One of %READ or %WRITE.
1840 * @uvec: Pointer to the userspace array.
1841 * @nr_segs: Number of elements in userspace array.
1842 * @fast_segs: Number of elements in *@iovp.
1843 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1844 * on-stack) kernel array.
1845 * @i: Pointer to iterator that will be initialized on success.
1846 *
1847 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
1848 * then this function places %NULL in *@iovp on return. Otherwise, a new
1849 * array will be allocated and the result placed in *@iovp. This means that
1850 * the caller may call kfree() on *@iovp regardless of whether the small
1851 * on-stack array was used or not (and regardless of whether this function
1852 * returns an error or not).
1853 *
1854 * Return: Negative error code on error, bytes imported on success
1855 */
1856ssize_t import_iovec(int type, const struct iovec __user *uvec,
1857 unsigned nr_segs, unsigned fast_segs,
1858 struct iovec **iovp, struct iov_iter *i)
1859{
1860 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1861 in_compat_syscall());
1862}
1863EXPORT_SYMBOL(import_iovec);
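/*
 * Illustrative sketch, not part of the upstream file: the canonical
 * import_iovec() calling convention - a small on-stack array, a
 * possibly-allocated replacement, and an unconditional kfree().  All names
 * are hypothetical.
 */
static ssize_t __maybe_unused example_readv(const struct iovec __user *uvec,
					    unsigned long nr_segs,
					    const void *src, size_t src_len)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;
	ret = copy_to_iter(src, min_t(size_t, src_len, iov_iter_count(&iter)),
			   &iter);
	kfree(iov);	/* NULL when the on-stack array was kept, so always safe */
	return ret;
}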
1864
1865int import_single_range(int rw, void __user *buf, size_t len,
1866 struct iovec *iov, struct iov_iter *i)
1867{
1868 if (len > MAX_RW_COUNT)
1869 len = MAX_RW_COUNT;
1870 if (unlikely(!access_ok(buf, len)))
1871 return -EFAULT;
1872
1873 iov->iov_base = buf;
1874 iov->iov_len = len;
1875 iov_iter_init(i, rw, iov, 1, len);
1876 return 0;
1877}
1878EXPORT_SYMBOL(import_single_range);
1879
1880/**
1881 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1882 * iov_iter_save_state() was called.
1883 *
1884 * @i: &struct iov_iter to restore
1885 * @state: state to restore from
1886 *
1887 * Used after iov_iter_save_state() to restore @i, if operations may
1888 * have advanced it.
1889 *
1890 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
1891 */
1892void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1893{
1894 if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
1895 !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
1896 return;
1897 i->iov_offset = state->iov_offset;
1898 i->count = state->count;
1899 if (iter_is_ubuf(i))
1900 return;
1901 /*
1902 * For the *vec iters, nr_segs + iov is constant - if we increment
1903 * the vec, then we also decrement the nr_segs count. Hence we don't
1904 * need to track both of these, just one is enough and we can deduct
1905 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
1906 * size, so we can just increment the iov pointer as they are unionized.
1907 * ITER_BVEC _may_ be the same size on some archs, but on others it is
1908 * not. Be safe and handle it separately.
1909 */
1910 BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
1911 if (iov_iter_is_bvec(i))
1912 i->bvec -= state->nr_segs - i->nr_segs;
1913 else
1914 i->iov -= state->nr_segs - i->nr_segs;
1915 i->nr_segs = state->nr_segs;
1916}
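/*
 * Illustrative sketch, not part of the upstream file: pairing
 * iov_iter_save_state() with iov_iter_restore() around an operation that may
 * need to be retried from scratch.  Hypothetical names.
 */
static ssize_t __maybe_unused example_retryable_copy(void *dst, size_t len,
						     struct iov_iter *from)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(from, &state);
	copied = copy_from_iter(dst, len, from);
	if (copied < len) {
		/* roll the iterator back so the caller can retry later */
		iov_iter_restore(from, &state);
		return -EAGAIN;
	}
	return copied;
}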