#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
	struct bvec_iter __start; \
	__start.bi_size = n; \
	__start.bi_bvec_done = skip; \
	__start.bi_idx = 0; \
	for_each_bvec(__v, i->bvec, __bi, __start) { \
		if (!__v.bv_len) \
			continue; \
		(void)(STEP); \
	} \
}

#define iterate_all_kinds(i, n, v, I, B, K) { \
	if (likely(n)) { \
		size_t skip = i->iov_offset; \
		if (unlikely(i->type & ITER_BVEC)) { \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
		} else if (unlikely(i->type & ITER_KVEC)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
		} else { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
		} \
	} \
}

#define iterate_and_advance(i, n, v, I, B, K) { \
	if (unlikely(i->count < n)) \
		n = i->count; \
	if (i->count) { \
		size_t skip = i->iov_offset; \
		if (unlikely(i->type & ITER_BVEC)) { \
			const struct bio_vec *bvec = i->bvec; \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec; \
			skip = __bi.bi_bvec_done; \
		} else if (unlikely(i->type & ITER_KVEC)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
			if (skip == kvec->iov_len) { \
				kvec++; \
				skip = 0; \
			} \
			i->nr_segs -= kvec - i->kvec; \
			i->kvec = kvec; \
		} else { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
			if (skip == iov->iov_len) { \
				iov++; \
				skip = 0; \
			} \
			i->nr_segs -= iov - i->iov; \
			i->iov = iov; \
		} \
		i->count -= n; \
		i->iov_offset = skip; \
	} \
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
			0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
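
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * the usual buffered-write pattern around this primitive, in the style of
 * generic_perform_write() - prefault the source iovecs, do the actual copy
 * with page faults disabled (kmap_atomic() inside the atomic copy takes
 * care of that here), and retry on a short copy.
 */
static ssize_t __maybe_unused fault_in_then_copy_sketch(struct page *page,
		size_t offset, size_t bytes, struct iov_iter *i)
{
	size_t copied;

	do {
		/* make the user pages resident so the atomic copy can work */
		if (unlikely(iov_iter_fault_in_readable(i, bytes)))
			return -EFAULT;
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	} while (unlikely(!copied));
	iov_iter_advance(i, copied);
	return copied;
}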

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
			struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			(from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
			v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
			v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
			v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
			v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
				v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
			v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
			v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
			v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					(to += v.iov_len) - v.iov_len,
					v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
					(to += v.bv_len) - v.bv_len,
					v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
					(to += v.iov_len) - v.iov_len,
					v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
			struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					(to += v.iov_len) - v.iov_len,
					v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
					(to += v.bv_len) - v.bv_len,
					v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
					(to += v.iov_len) - v.iov_len,
					v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					v.iov_base,
					v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
					p + v.bv_offset,
					v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
					v.iov_base,
					v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				new->nr_segs * sizeof(struct bio_vec),
				flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				new->nr_segs * sizeof(struct iovec),
				flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);
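
/*
 * Illustrative sketch only (hypothetical helper, not part of this file):
 * a typical readv(2)-style caller.  Per the contract documented above, the
 * final kfree(iov) is safe whether the fast on-stack array or a freshly
 * allocated one ended up being used.
 */
static ssize_t __maybe_unused import_iovec_sketch(const struct iovec __user *uvec,
		unsigned long nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... hand &iter to the actual I/O path here ... */
	ret = iov_iter_count(&iter);
	kfree(iov);	/* NULL if the on-stack array was used */
	return ret;
}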

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>
#include <linux/iov_iter.h>

static __always_inline
size_t copy_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = raw_copy_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t copy_to_user_iter_nofault(void __user *iter_to, size_t progress,
				 size_t len, void *from, void *priv2)
{
	ssize_t res;

	if (should_fail_usercopy())
		return len;

	from += progress;
	res = copy_to_user_nofault(iter_to, from, len);
	return res < 0 ? len : res;
}

static __always_inline
size_t copy_from_user_iter(void __user *iter_from, size_t progress,
			   size_t len, void *to, void *priv2)
{
	size_t res = len;

	if (should_fail_usercopy())
		return len;
	if (access_ok(iter_from, len)) {
		to += progress;
		instrument_copy_from_user_before(to, iter_from, len);
		res = raw_copy_from_user(to, iter_from, len);
		instrument_copy_from_user_after(to, iter_from, len, res);
	}
	return res;
}

static __always_inline
size_t memcpy_to_iter(void *iter_to, size_t progress,
		      size_t len, void *from, void *priv2)
{
	memcpy(iter_to, from + progress, len);
	return 0;
}

static __always_inline
size_t memcpy_from_iter(void *iter_from, size_t progress,
			size_t len, void *to, void *priv2)
{
	memcpy(to + progress, iter_from, len);
	return 0;
}
77/*
78 * fault_in_iov_iter_readable - fault in iov iterator for reading
79 * @i: iterator
80 * @size: maximum length
81 *
82 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
83 * @size. For each iovec, fault in each page that constitutes the iovec.
84 *
85 * Returns the number of bytes not faulted in (like copy_to_user() and
86 * copy_from_user()).
87 *
88 * Always returns 0 for non-userspace iterators.
89 */
90size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
91{
92 if (iter_is_ubuf(i)) {
93 size_t n = min(size, iov_iter_count(i));
94 n -= fault_in_readable(i->ubuf + i->iov_offset, n);
95 return size - n;
96 } else if (iter_is_iovec(i)) {
97 size_t count = min(size, iov_iter_count(i));
98 const struct iovec *p;
99 size_t skip;
100
101 size -= count;
102 for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
103 size_t len = min(count, p->iov_len - skip);
104 size_t ret;
105
106 if (unlikely(!len))
107 continue;
108 ret = fault_in_readable(p->iov_base + skip, len);
109 count -= len - ret;
110 if (ret)
111 break;
112 }
113 return count + size;
114 }
115 return 0;
116}
117EXPORT_SYMBOL(fault_in_iov_iter_readable);
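
/*
 * Illustrative sketch only (hypothetical helper): the usual caller
 * pattern, as in generic_perform_write().  A return value equal to the
 * requested size means nothing at all could be faulted in, so the copy
 * that follows would make no progress and the caller should fail with
 * -EFAULT rather than loop forever.
 */
static ssize_t __maybe_unused prefault_sketch(struct iov_iter *i, size_t bytes)
{
	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
		return -EFAULT;
	/* ... proceed to copy from the iterator with page faults disabled ... */
	return 0;
}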

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults. This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
		   const struct iovec *iov, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.__iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter, memcpy_to_iter);
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static __always_inline
size_t copy_to_user_iter_mc(void __user *iter_to, size_t progress,
			    size_t len, void *from, void *priv2)
{
	if (access_ok(iter_to, len)) {
		from += progress;
		instrument_copy_to_user(iter_to, from, len);
		len = copy_mc_to_user(iter_to, from, len);
	}
	return len;
}

static __always_inline
size_t memcpy_to_iter_mc(void *iter_to, size_t progress,
			 size_t len, void *from, void *priv2)
{
	return copy_mc_to_kernel(iter_to, from + progress, len);
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter().
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC and ITER_BVEC can return short copies. Compare to
 *   copy_to_iter() where only ITER_IOVEC attempts might return a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	if (user_backed_iter(i))
		might_fault();
	return iterate_and_advance(i, bytes, (void *)addr,
				   copy_to_user_iter_mc, memcpy_to_iter_mc);
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
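
/*
 * Illustrative sketch only (hypothetical helper): how a dax-style read
 * path might consume _copy_mc_to_iter(), turning a short copy caused by
 * poisoned source memory into -EIO as the comment above describes.  A
 * short copy with the iterator already exhausted is not an error.
 */
static ssize_t __maybe_unused copy_mc_read_sketch(const void *kaddr, size_t len,
		struct iov_iter *i)
{
	size_t copied = _copy_mc_to_iter(kaddr, len, i);

	if (copied < len && iov_iter_count(i))
		return copied ? copied : -EIO;	/* hit poisoned memory */
	return copied;
}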
#endif /* CONFIG_ARCH_HAS_COPY_MC */

static __always_inline
size_t __copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter, memcpy_from_iter);
}

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	if (user_backed_iter(i))
		might_fault();
	return __copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL(_copy_from_iter);

static __always_inline
size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{
	return __copy_from_user_inatomic_nocache(to + progress, iter_from, len);
}

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter_nocache,
				   memcpy_from_iter);
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
static __always_inline
size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
				      size_t len, void *to, void *priv2)
{
	return __copy_from_user_flushcache(to + progress, iter_from, len);
}

static __always_inline
size_t memcpy_from_iter_flushcache(void *iter_from, size_t progress,
				   size_t len, void *to, void *priv2)
{
	memcpy_flushcache(to + progress, iter_from, len);
	return 0;
}

/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	return iterate_and_advance(i, bytes, addr,
				   copy_from_user_iter_flushcache,
				   memcpy_from_iter_flushcache);
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (WARN_ON(n > v || v > page_size(head)))
		return false;
	return true;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes,
				 struct iov_iter *i)
{
	size_t res = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(i->data_source))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);

		n = iterate_and_advance(i, n, kaddr + offset,
					copy_to_user_iter_nofault,
					memcpy_to_iter);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter_nofault);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			   struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static __always_inline
size_t zero_to_user_iter(void __user *iter_to, size_t progress,
			 size_t len, void *priv, void *priv2)
{
	return clear_user(iter_to, len);
}

static __always_inline
size_t zero_to_iter(void *iter_to, size_t progress,
		    size_t len, void *priv, void *priv2)
{
	memset(iter_to, 0, len);
	return 0;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	return iterate_and_advance(i, bytes, NULL,
				   zero_to_user_iter, zero_to_iter);
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, size_t offset,
				  size_t bytes, struct iov_iter *i)
{
	size_t n, copied = 0;

	if (!page_copy_sane(page, offset, bytes))
		return 0;
	if (WARN_ON_ONCE(!i->data_source))
		return 0;

	do {
		char *p;

		n = bytes - copied;
		if (PageHighMem(page)) {
			page += offset / PAGE_SIZE;
			offset %= PAGE_SIZE;
			n = min_t(size_t, n, PAGE_SIZE - offset);
		}

		p = kmap_atomic(page) + offset;
		n = __copy_from_iter(p, n, i);
		kunmap_atomic(p);
		copied += n;
		offset += n;
	} while (PageHighMem(page) && copied != bytes && n > 0);

	return copied;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - iter_iov(i);
	i->__iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = iter_iov(i);
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->__iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
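
/*
 * Illustrative sketch only (hypothetical helper): the classic use of
 * iov_iter_revert() - undo whatever a lower layer consumed before it
 * failed, so the caller can retry the same data later.
 */
static ssize_t __maybe_unused revert_on_error_sketch(struct iov_iter *i,
		ssize_t (*xmit)(struct iov_iter *))
{
	size_t before = iov_iter_count(i);
	ssize_t ret = xmit(i);

	if (ret < 0)	/* put back whatever xmit() advanced past */
		iov_iter_revert(i, before - iov_iter_count(i));
	return ret;
}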

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, iter_iov(i)->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
		   const struct kvec *kvec, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);
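
/*
 * Illustrative sketch only (hypothetical helper): wrapping a kernel
 * buffer in a single-segment kvec iterator, e.g. to feed a
 * ->read_iter()-style interface from kernel space.
 */
static ssize_t __maybe_unused kvec_iter_sketch(void *buf, size_t len)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;

	/* ITER_DEST: data will be copied *into* the kvec (a read) */
	iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);
	/* ... pass &iter to e.g. a ->read_iter() implementation here ... */
	return iov_iter_count(&iter);
}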

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
		   const struct bio_vec *bvec, unsigned long nr_segs,
		   size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages. The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
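
/*
 * Illustrative sketch only (hypothetical helper): set up an iterator that
 * draws data out of an inode's page cache, in the style of netfs/cifs
 * callers.  As documented above, the caller must already hold references
 * on (or locks over) the pages in the range.
 */
static void __maybe_unused xarray_iter_sketch(struct address_space *mapping,
		loff_t pos, size_t len, struct iov_iter *iter)
{
	/* ITER_SOURCE: the page cache is the source of the transfer */
	iov_iter_xarray(iter, ITER_SOURCE, &mapping->i_pages, pos, len);
}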

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);
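
/*
 * Illustrative sketch only (hypothetical helper): drain bytes from a
 * producer without storing them, by "reading" into a discard iterator.
 */
static void __maybe_unused discard_iter_sketch(size_t bytes, struct iov_iter *iter)
{
	iov_iter_discard(iter, READ, bytes);
	/* ... hand iter to the producer; everything written to it is dropped ... */
}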

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(iov->iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 * are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
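
/*
 * Illustrative sketch only (hypothetical helper): a direct-I/O style
 * alignment gate.  With a 512-byte logical block size, both the memory
 * addresses and the segment lengths must be 512-byte aligned.
 */
static bool __maybe_unused dio_aligned_sketch(const struct iov_iter *i)
{
	const unsigned int bs_mask = 512 - 1;

	return iov_iter_is_aligned(i, bs_mask, bs_mask);
}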

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;
		if (len) {
			res |= (unsigned long)iov->iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);
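/*
 * Worked example of the OR-folding above (illustrative): for segments
 * { base 0x...1200, len 0x1000 } and { base 0x...3000, len 0x200 } the
 * result is 0x1200 | 0x1000 | 0x3000 | 0x200 = 0x...3200.  The lowest set
 * bit (0x200) tells the caller the iterator is at best 512-byte aligned:
 *
 *	if (iov_iter_alignment(iter) & (SECTOR_SIZE - 1))
 *		return -EINVAL;	// some segment breaks sector alignment
 */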

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		const struct iovec *iov = iter_iov(i) + k;
		if (iov->iov_len) {
			unsigned long base = (unsigned long)iov->iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + iov->iov_len;
			if (size <= iov->iov_len)
				break;
			size -= iov->iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
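/*
 * Worked example (illustrative): with segments { base 0x10000, len 0x200 }
 * and { base 0x10200, len 0x600 }, the only internal boundary contributes
 * 0x10200 | 0x10200 = 0x10200, so the gaps are 512-byte aligned even though
 * the segments happen to be virtually contiguous.  Callers that must not
 * split a request mid-sector can test the result the same way as the
 * iov_iter_alignment() value above.
 */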

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}
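/*
 * Sizing arithmetic above, by example (illustrative, assuming 4K pages):
 * a 5000-byte span that begins 300 bytes into its first page covers
 * DIV_ROUND_UP(5000 + 300, 4096) = 2 pages, and the result is then capped
 * at @maxpages before any array is allocated.
 */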

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must only be called on a non-empty ITER_UBUF or ITER_IOVEC iterator */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		const struct iovec *iov = iter_iov(i) + k;
		size_t len = iov->iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)iov->iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must only be called on a non-empty ITER_BVEC iterator */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
					  struct page ***pages, size_t maxsize,
					  unsigned int maxpages, size_t *start)
{
	unsigned int n, gup_flags = 0;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
			    size_t maxsize, unsigned maxpages, size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
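/*
 * Illustrative sketch of a caller with a fixed on-stack array: the returned
 * byte count plus the start offset determine how many slots were filled,
 * and each filled slot holds a page reference the caller must drop with
 * put_page() when done.
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t bytes;
 *	int k, npages;
 *
 *	bytes = iov_iter_get_pages2(iter, pages, SZ_64K,
 *				    ARRAY_SIZE(pages), &off);
 *	if (bytes <= 0)
 *		return bytes;
 *	npages = DIV_ROUND_UP(off + bytes, PAGE_SIZE);
 *	// ... use the data starting at offset "off" in pages[0] ...
 *	for (k = 0; k < npages; k++)
 *		put_page(pages[k]);
 */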

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
				  struct page ***pages, size_t maxsize, size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
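/*
 * Illustrative sketch: same contract as iov_iter_get_pages2(), but the page
 * array is allocated for the caller, who must free it with kvfree() after
 * dropping the page references:
 *
 *	struct page **pages;
 *	size_t off;
 *	ssize_t bytes;
 *
 *	bytes = iov_iter_get_pages_alloc2(iter, &pages, maxsize, &off);
 *	if (bytes <= 0)
 *		return bytes;	// on failure the array is already freed
 *	// ... use, then put_page() each page ...
 *	kvfree(pages);
 */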

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = iter_iov(i); size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
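/*
 * Illustrative sketch: a block-layer caller sizing a bio to fit the
 * iterator's worth of pages, capped at the per-bio limit:
 *
 *	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
 *	struct bio *bio = bio_alloc(bdev, nr_vecs, opf, GFP_KERNEL);
 */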

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->__iov = kmemdup(new->__iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);
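/*
 * Illustrative sketch: keeping a private copy of a *vec-backed iterator
 * alive past the caller's stack frame, e.g. for async completion.  Note
 * that NULL means allocation failure only for vec-backed iterators; for
 * the other types there is nothing to duplicate and NULL is the normal
 * return.
 *
 *	struct iov_iter copy;
 *	const void *vecs;
 *
 *	vecs = dup_iter(&copy, iter, GFP_KERNEL);	// iter is ITER_IOVEC here
 *	if (!vecs)
 *		return -ENOMEM;
 *	// ... async work consumes &copy ...
 *	kfree(vecs);
 */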

static __noclone int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static __noclone int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uiov, unsigned long nr_segs)
{
	int ret = -EFAULT;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	do {
		void __user *buf;
		ssize_t len;

		unsafe_get_user(len, &uiov->iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov->iov_base, uaccess_end);

		/* check for size_t not fitting in ssize_t .. */
		if (unlikely(len < 0)) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov->iov_base = buf;
		iov->iov_len = len;

		uiov++; iov++;
	} while (--nr_segs);

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (unlikely(compat))
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

/*
 * Single segment iovec supplied by the user, import it as ITER_UBUF.
 */
static ssize_t __import_iovec_ubuf(int type, const struct iovec __user *uvec,
				   struct iovec **iovp, struct iov_iter *i,
				   bool compat)
{
	struct iovec *iov = *iovp;
	ssize_t ret;

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, 1);
	else
		ret = copy_iovec_from_user(iov, uvec, 1);
	if (unlikely(ret))
		return ret;

	ret = import_ubuf(type, iov->iov_base, iov->iov_len, i);
	if (unlikely(ret))
		return ret;
	*iovp = NULL;
	return i->count;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	if (nr_segs == 1)
		return __import_iovec_ubuf(type, uvec, iovp, i, compat);

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in the array pointed to by *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
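/*
 * Illustrative sketch of the usual syscall-side pattern: a small on-stack
 * array covers the common case, and kfree() of the returned pointer is
 * safe either way.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... perform the I/O against &iter ...
 *	kfree(iov);	// NULL (and thus a no-op) if iovstack was used
 */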

int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov_iter_ubuf(i, rw, buf, len);
	return 0;
}
EXPORT_SYMBOL_GPL(import_ubuf);
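/*
 * Illustrative sketch: a read(2)-style path importing a single user buffer
 * as the destination of a transfer:
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_ubuf(READ, buf, len, &iter);
 *	if (ret)
 *		return ret;
 *	// &iter now describes at most MAX_RW_COUNT bytes at buf
 */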

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *	iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may
 * have advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iter_is_ubuf(i) && !iov_iter_is_kvec(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduce
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->__iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
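/*
 * Illustrative sketch: save/restore around an operation that may need to be
 * retried from the same starting point after it has partially advanced the
 * iterator:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_transfer(iter);	// hypothetical consumer
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		// iter is back where it was; safe to retry
 *	}
 */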

/*
 * Extract a list of contiguous pages from an ITER_XARRAY iterator. This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_xarray_pages(struct iov_iter *i,
					     struct page ***pages, size_t maxsize,
					     unsigned int maxpages,
					     iov_iter_extraction_t extraction_flags,
					     size_t *offset0)
{
	struct page *page, **p;
	unsigned int nr = 0, offset;
	loff_t pos = i->xarray_start + i->iov_offset;
	pgoff_t index = pos >> PAGE_SHIFT;
	XA_STATE(xas, i->xarray, index);

	offset = pos & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		p[nr++] = find_subpage(page, xas.xa_index);
		if (nr == maxpages)
			break;
	}
	rcu_read_unlock();

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/*
 * Extract a list of contiguous pages from an ITER_BVEC iterator. This does
 * not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_bvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	size_t skip = i->iov_offset, offset, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->bvec->bv_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->bvec++;
		skip = 0;
	}

	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	offset = skip % PAGE_SIZE;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;
	for (k = 0; k < maxpages; k++)
		p[k] = page + k;

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of virtually contiguous pages from an ITER_KVEC iterator.
 * This does not get references on the pages, nor does it get a pin on them.
 */
static ssize_t iov_iter_extract_kvec_pages(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	struct page **p, *page;
	const void *kaddr;
	size_t skip = i->iov_offset, offset, len, size;
	int k;

	for (;;) {
		if (i->nr_segs == 0)
			return 0;
		size = min(maxsize, i->kvec->iov_len - skip);
		if (size)
			break;
		i->iov_offset = 0;
		i->nr_segs--;
		i->kvec++;
		skip = 0;
	}

	kaddr = i->kvec->iov_base + skip;
	offset = (unsigned long)kaddr & ~PAGE_MASK;
	*offset0 = offset;

	maxpages = want_pages_array(pages, size, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	p = *pages;

	kaddr -= offset;
	len = offset + size;
	for (k = 0; k < maxpages; k++) {
		size_t seg = min_t(size_t, len, PAGE_SIZE);

		if (is_vmalloc_or_module_addr(kaddr))
			page = vmalloc_to_page(kaddr);
		else
			page = virt_to_page(kaddr);

		p[k] = page;
		len -= seg;
		kaddr += PAGE_SIZE;
	}

	size = min_t(size_t, size, maxpages * PAGE_SIZE - offset);
	iov_iter_advance(i, size);
	return size;
}

/*
 * Extract a list of contiguous pages from a user iterator and get a pin on
 * each of them. This should only be used if the iterator is user-backed
 * (ITER_IOVEC/ITER_UBUF).
 *
 * It does not get refs on the pages, but the pages must be unpinned by the
 * caller once the transfer is complete.
 *
 * This is safe to be used where background IO/DMA *is* going to be modifying
 * the buffer; using a pin rather than a ref forces fork() to give the
 * child a copy of the page.
 */
static ssize_t iov_iter_extract_user_pages(struct iov_iter *i,
					   struct page ***pages,
					   size_t maxsize,
					   unsigned int maxpages,
					   iov_iter_extraction_t extraction_flags,
					   size_t *offset0)
{
	unsigned long addr;
	unsigned int gup_flags = 0;
	size_t offset;
	int res;

	if (i->data_source == ITER_DEST)
		gup_flags |= FOLL_WRITE;
	if (extraction_flags & ITER_ALLOW_P2PDMA)
		gup_flags |= FOLL_PCI_P2PDMA;
	if (i->nofault)
		gup_flags |= FOLL_NOFAULT;

	addr = first_iovec_segment(i, &maxsize);
	*offset0 = offset = addr % PAGE_SIZE;
	addr &= PAGE_MASK;
	maxpages = want_pages_array(pages, maxsize, offset, maxpages);
	if (!maxpages)
		return -ENOMEM;
	res = pin_user_pages_fast(addr, maxpages, gup_flags, *pages);
	if (unlikely(res <= 0))
		return res;
	maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - offset);
	iov_iter_advance(i, maxsize);
	return maxsize;
}

/**
 * iov_iter_extract_pages - Extract a list of contiguous pages from an iterator
 * @i: The iterator to extract from
 * @pages: Where to return the list of pages
 * @maxsize: The maximum amount of iterator to extract
 * @maxpages: The maximum size of the list of pages
 * @extraction_flags: Flags to qualify request
 * @offset0: Where to return the starting offset into (*@pages)[0]
 *
 * Extract a list of contiguous pages from the current point of the iterator,
 * advancing the iterator. The maximum number of pages and the maximum amount
 * of page contents can be set.
 *
 * If *@pages is NULL, a page list will be allocated to the required size and
 * *@pages will be set to its base. If *@pages is not NULL, it will be assumed
 * that the caller allocated a page list at least @maxpages in size and this
 * will be filled in.
 *
 * @extraction_flags can have ITER_ALLOW_P2PDMA set to request peer-to-peer DMA
 * be allowed on the pages extracted.
 *
 * The iov_iter_extract_will_pin() function can be used to query how cleanup
 * should be performed.
 *
 * Extra refs or pins on the pages may be obtained as follows:
 *
 *  (*) If the iterator is user-backed (ITER_IOVEC/ITER_UBUF), pins will be
 *      added to the pages, but refs will not be taken.
 *      iov_iter_extract_will_pin() will return true.
 *
 *  (*) If the iterator is ITER_KVEC, ITER_BVEC or ITER_XARRAY, the pages are
 *      merely listed; no extra refs or pins are obtained.
 *      iov_iter_extract_will_pin() will return false.
 *
 * Note also:
 *
 *  (*) Use with ITER_DISCARD is not supported as that has no content.
 *
 * On success, the function sets *@pages to the new pagelist, if allocated, and
 * sets *offset0 to the offset into the first page.
 *
 * It may also return -ENOMEM and -EFAULT.
 */
ssize_t iov_iter_extract_pages(struct iov_iter *i,
			       struct page ***pages,
			       size_t maxsize,
			       unsigned int maxpages,
			       iov_iter_extraction_t extraction_flags,
			       size_t *offset0)
{
	maxsize = min_t(size_t, min_t(size_t, maxsize, i->count), MAX_RW_COUNT);
	if (!maxsize)
		return 0;

	if (likely(user_backed_iter(i)))
		return iov_iter_extract_user_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_kvec(i))
		return iov_iter_extract_kvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_bvec(i))
		return iov_iter_extract_bvec_pages(i, pages, maxsize,
						   maxpages, extraction_flags,
						   offset0);
	if (iov_iter_is_xarray(i))
		return iov_iter_extract_xarray_pages(i, pages, maxsize,
						     maxpages, extraction_flags,
						     offset0);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(iov_iter_extract_pages);
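/*
 * Illustrative sketch of the extract/cleanup pairing described above:
 * whether the pages need unpinning depends on the iterator type, which
 * iov_iter_extract_will_pin() reports.
 *
 *	struct page **pages = NULL;	// let the callee allocate the list
 *	size_t off;
 *	ssize_t len;
 *	int k, npages;
 *
 *	len = iov_iter_extract_pages(iter, &pages, maxsize, INT_MAX, 0, &off);
 *	if (len <= 0)
 *		return len;
 *	npages = DIV_ROUND_UP(off + len, PAGE_SIZE);
 *	// ... set up DMA from offset "off" in pages[0] ...
 *	if (iov_iter_extract_will_pin(iter))
 *		for (k = 0; k < npages; k++)
 *			unpin_user_page(pages[k]);
 *	kvfree(pages);
 */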