v4.10.11
   1#include <linux/export.h>
   2#include <linux/bvec.h>
   3#include <linux/uio.h>
   4#include <linux/pagemap.h>
   5#include <linux/slab.h>
   6#include <linux/vmalloc.h>
   7#include <linux/splice.h>
   8#include <net/checksum.h>
   9
  10#define PIPE_PARANOIA /* for now */
  11
  12#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  13	size_t left;					\
  14	size_t wanted = n;				\
  15	__p = i->iov;					\
  16	__v.iov_len = min(n, __p->iov_len - skip);	\
  17	if (likely(__v.iov_len)) {			\
  18		__v.iov_base = __p->iov_base + skip;	\
  19		left = (STEP);				\
  20		__v.iov_len -= left;			\
  21		skip += __v.iov_len;			\
  22		n -= __v.iov_len;			\
  23	} else {					\
  24		left = 0;				\
  25	}						\
  26	while (unlikely(!left && n)) {			\
  27		__p++;					\
  28		__v.iov_len = min(n, __p->iov_len);	\
  29		if (unlikely(!__v.iov_len))		\
  30			continue;			\
  31		__v.iov_base = __p->iov_base;		\
  32		left = (STEP);				\
  33		__v.iov_len -= left;			\
  34		skip = __v.iov_len;			\
  35		n -= __v.iov_len;			\
  36	}						\
  37	n = wanted - n;					\
  38}
  39
  40#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
  41	size_t wanted = n;				\
  42	__p = i->kvec;					\
  43	__v.iov_len = min(n, __p->iov_len - skip);	\
  44	if (likely(__v.iov_len)) {			\
  45		__v.iov_base = __p->iov_base + skip;	\
  46		(void)(STEP);				\
  47		skip += __v.iov_len;			\
  48		n -= __v.iov_len;			\
  49	}						\
  50	while (unlikely(n)) {				\
  51		__p++;					\
  52		__v.iov_len = min(n, __p->iov_len);	\
  53		if (unlikely(!__v.iov_len))		\
  54			continue;			\
  55		__v.iov_base = __p->iov_base;		\
  56		(void)(STEP);				\
  57		skip = __v.iov_len;			\
  58		n -= __v.iov_len;			\
  59	}						\
  60	n = wanted;					\
  61}
  62
  63#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
  64	struct bvec_iter __start;			\
  65	__start.bi_size = n;				\
  66	__start.bi_bvec_done = skip;			\
  67	__start.bi_idx = 0;				\
  68	for_each_bvec(__v, i->bvec, __bi, __start) {	\
  69		if (!__v.bv_len)			\
  70			continue;			\
  71		(void)(STEP);				\
  72	}						\
  73}
  74
  75#define iterate_all_kinds(i, n, v, I, B, K) {			\
  76	if (likely(n)) {					\
  77		size_t skip = i->iov_offset;			\
  78		if (unlikely(i->type & ITER_BVEC)) {		\
  79			struct bio_vec v;			\
  80			struct bvec_iter __bi;			\
  81			iterate_bvec(i, n, v, __bi, skip, (B))	\
  82		} else if (unlikely(i->type & ITER_KVEC)) {	\
  83			const struct kvec *kvec;		\
  84			struct kvec v;				\
  85			iterate_kvec(i, n, v, kvec, skip, (K))	\
  86		} else {					\
  87			const struct iovec *iov;		\
  88			struct iovec v;				\
  89			iterate_iovec(i, n, v, iov, skip, (I))	\
  90		}						\
  91	}							\
  92}
  93
  94#define iterate_and_advance(i, n, v, I, B, K) {			\
  95	if (unlikely(i->count < n))				\
  96		n = i->count;					\
  97	if (i->count) {						\
  98		size_t skip = i->iov_offset;			\
  99		if (unlikely(i->type & ITER_BVEC)) {		\
 100			const struct bio_vec *bvec = i->bvec;	\
 101			struct bio_vec v;			\
 102			struct bvec_iter __bi;			\
 103			iterate_bvec(i, n, v, __bi, skip, (B))	\
 104			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
 105			i->nr_segs -= i->bvec - bvec;		\
 106			skip = __bi.bi_bvec_done;		\
 107		} else if (unlikely(i->type & ITER_KVEC)) {	\
 108			const struct kvec *kvec;		\
 109			struct kvec v;				\
 110			iterate_kvec(i, n, v, kvec, skip, (K))	\
 111			if (skip == kvec->iov_len) {		\
 112				kvec++;				\
 113				skip = 0;			\
 114			}					\
 115			i->nr_segs -= kvec - i->kvec;		\
 116			i->kvec = kvec;				\
 117		} else {					\
 118			const struct iovec *iov;		\
 119			struct iovec v;				\
 120			iterate_iovec(i, n, v, iov, skip, (I))	\
 121			if (skip == iov->iov_len) {		\
 122				iov++;				\
 123				skip = 0;			\
 124			}					\
 125			i->nr_segs -= iov - i->iov;		\
 126			i->iov = iov;				\
 127		}						\
 128		i->count -= n;					\
 129		i->iov_offset = skip;				\
 130	}							\
 131}
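/*
 * A note on the macros above (sketch, not part of the upstream file):
 * I, B and K are STEP expressions evaluated once per segment with "v"
 * bound to the current iovec, bio_vec or kvec.  An iovec STEP yields
 * the number of bytes it could NOT process, so a user-space fault ends
 * the walk early; kvec and bvec STEPs cannot fail and their values are
 * discarded.  A degenerate caller that merely consumes segments:
 */
static void example_consume(struct iov_iter *i, size_t n)
{
	/* passing 0 for every STEP; this is exactly how
	 * iov_iter_advance() further down is implemented */
	iterate_and_advance(i, n, v, 0, 0, 0)
}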
 132
 133static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 134			 struct iov_iter *i)
 135{
 136	size_t skip, copy, left, wanted;
 137	const struct iovec *iov;
 138	char __user *buf;
 139	void *kaddr, *from;
 140
 141	if (unlikely(bytes > i->count))
 142		bytes = i->count;
 143
 144	if (unlikely(!bytes))
 145		return 0;
 146
 147	wanted = bytes;
 148	iov = i->iov;
 149	skip = i->iov_offset;
 150	buf = iov->iov_base + skip;
 151	copy = min(bytes, iov->iov_len - skip);
 152
 153	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 154		kaddr = kmap_atomic(page);
 155		from = kaddr + offset;
 156
 157		/* first chunk, usually the only one */
 158		left = __copy_to_user_inatomic(buf, from, copy);
 159		copy -= left;
 160		skip += copy;
 161		from += copy;
 162		bytes -= copy;
 163
 164		while (unlikely(!left && bytes)) {
 165			iov++;
 166			buf = iov->iov_base;
 167			copy = min(bytes, iov->iov_len);
 168			left = __copy_to_user_inatomic(buf, from, copy);
 169			copy -= left;
 170			skip = copy;
 171			from += copy;
 172			bytes -= copy;
 173		}
 174		if (likely(!bytes)) {
 175			kunmap_atomic(kaddr);
 176			goto done;
 177		}
 178		offset = from - kaddr;
 179		buf += copy;
 180		kunmap_atomic(kaddr);
 181		copy = min(bytes, iov->iov_len - skip);
 182	}
 183	/* Too bad - revert to non-atomic kmap */
 184
 185	kaddr = kmap(page);
 186	from = kaddr + offset;
 187	left = __copy_to_user(buf, from, copy);
 188	copy -= left;
 189	skip += copy;
 190	from += copy;
 191	bytes -= copy;
 192	while (unlikely(!left && bytes)) {
 193		iov++;
 194		buf = iov->iov_base;
 195		copy = min(bytes, iov->iov_len);
 196		left = __copy_to_user(buf, from, copy);
 197		copy -= left;
 198		skip = copy;
 199		from += copy;
 200		bytes -= copy;
 201	}
 202	kunmap(page);
 203
 204done:
 205	if (skip == iov->iov_len) {
 206		iov++;
 207		skip = 0;
 208	}
 209	i->count -= wanted - bytes;
 210	i->nr_segs -= iov - i->iov;
 211	i->iov = iov;
 212	i->iov_offset = skip;
 213	return wanted - bytes;
 214}
 215
 216static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 217			 struct iov_iter *i)
 218{
 219	size_t skip, copy, left, wanted;
 220	const struct iovec *iov;
 221	char __user *buf;
 222	void *kaddr, *to;
 223
 224	if (unlikely(bytes > i->count))
 225		bytes = i->count;
 226
 227	if (unlikely(!bytes))
 228		return 0;
 229
 230	wanted = bytes;
 231	iov = i->iov;
 232	skip = i->iov_offset;
 233	buf = iov->iov_base + skip;
 234	copy = min(bytes, iov->iov_len - skip);
 235
 236	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 237		kaddr = kmap_atomic(page);
 238		to = kaddr + offset;
 239
 240		/* first chunk, usually the only one */
 241		left = __copy_from_user_inatomic(to, buf, copy);
 242		copy -= left;
 243		skip += copy;
 244		to += copy;
 245		bytes -= copy;
 246
 247		while (unlikely(!left && bytes)) {
 248			iov++;
 249			buf = iov->iov_base;
 250			copy = min(bytes, iov->iov_len);
 251			left = __copy_from_user_inatomic(to, buf, copy);
 252			copy -= left;
 253			skip = copy;
 254			to += copy;
 255			bytes -= copy;
 256		}
 257		if (likely(!bytes)) {
 258			kunmap_atomic(kaddr);
 259			goto done;
 260		}
 261		offset = to - kaddr;
 262		buf += copy;
 263		kunmap_atomic(kaddr);
 264		copy = min(bytes, iov->iov_len - skip);
 265	}
 266	/* Too bad - revert to non-atomic kmap */
 267
 268	kaddr = kmap(page);
 269	to = kaddr + offset;
 270	left = __copy_from_user(to, buf, copy);
 271	copy -= left;
 272	skip += copy;
 273	to += copy;
 274	bytes -= copy;
 275	while (unlikely(!left && bytes)) {
 276		iov++;
 277		buf = iov->iov_base;
 278		copy = min(bytes, iov->iov_len);
 279		left = __copy_from_user(to, buf, copy);
 280		copy -= left;
 281		skip = copy;
 282		to += copy;
 283		bytes -= copy;
 284	}
 285	kunmap(page);
 286
 287done:
 288	if (skip == iov->iov_len) {
 289		iov++;
 290		skip = 0;
 291	}
 292	i->count -= wanted - bytes;
 293	i->nr_segs -= iov - i->iov;
 294	i->iov = iov;
 295	i->iov_offset = skip;
 296	return wanted - bytes;
 297}
 298
 299#ifdef PIPE_PARANOIA
 300static bool sanity(const struct iov_iter *i)
 301{
 302	struct pipe_inode_info *pipe = i->pipe;
 303	int idx = i->idx;
 304	int next = pipe->curbuf + pipe->nrbufs;
 305	if (i->iov_offset) {
 306		struct pipe_buffer *p;
 307		if (unlikely(!pipe->nrbufs))
 308			goto Bad;	// pipe must be non-empty
 309		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
 310			goto Bad;	// must be at the last buffer...
 311
 312		p = &pipe->bufs[idx];
 313		if (unlikely(p->offset + p->len != i->iov_offset))
 314			goto Bad;	// ... at the end of segment
 315	} else {
 316		if (idx != (next & (pipe->buffers - 1)))
 317			goto Bad;	// must be right after the last buffer
 318	}
 319	return true;
 320Bad:
 321	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
 322	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
 323			pipe->curbuf, pipe->nrbufs, pipe->buffers);
 324	for (idx = 0; idx < pipe->buffers; idx++)
 325		printk(KERN_ERR "[%p %p %d %d]\n",
 326			pipe->bufs[idx].ops,
 327			pipe->bufs[idx].page,
 328			pipe->bufs[idx].offset,
 329			pipe->bufs[idx].len);
 330	WARN_ON(1);
 331	return false;
 332}
 333#else
 334#define sanity(i) true
 335#endif
 336
 337static inline int next_idx(int idx, struct pipe_inode_info *pipe)
 338{
 339	return (idx + 1) & (pipe->buffers - 1);
 340}
 341
 342static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
 343			 struct iov_iter *i)
 344{
 345	struct pipe_inode_info *pipe = i->pipe;
 346	struct pipe_buffer *buf;
 347	size_t off;
 348	int idx;
 349
 350	if (unlikely(bytes > i->count))
 351		bytes = i->count;
 352
 353	if (unlikely(!bytes))
 354		return 0;
 355
 356	if (!sanity(i))
 357		return 0;
 358
 359	off = i->iov_offset;
 360	idx = i->idx;
 361	buf = &pipe->bufs[idx];
 362	if (off) {
 363		if (offset == off && buf->page == page) {
 364			/* merge with the last one */
 365			buf->len += bytes;
 366			i->iov_offset += bytes;
 367			goto out;
 368		}
 369		idx = next_idx(idx, pipe);
 370		buf = &pipe->bufs[idx];
 371	}
 372	if (idx == pipe->curbuf && pipe->nrbufs)
 373		return 0;
 374	pipe->nrbufs++;
 375	buf->ops = &page_cache_pipe_buf_ops;
 376	get_page(buf->page = page);
 377	buf->offset = offset;
 378	buf->len = bytes;
 379	i->iov_offset = offset + bytes;
 380	i->idx = idx;
 381out:
 382	i->count -= bytes;
 383	return bytes;
 384}
 385
 386/*
 387 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 388 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 389 *
 390 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 391 * because it is an invalid address).
 392 */
 393int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 394{
 395	size_t skip = i->iov_offset;
 396	const struct iovec *iov;
 397	int err;
 398	struct iovec v;
 399
 400	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 401		iterate_iovec(i, bytes, v, iov, skip, ({
 402			err = fault_in_pages_readable(v.iov_base, v.iov_len);
 403			if (unlikely(err))
 404			return err;
 405		0;}))
 406	}
 407	return 0;
 408}
 409EXPORT_SYMBOL(iov_iter_fault_in_readable);
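/*
 * Usage sketch (hedged, modelled on the generic perform-write loop,
 * not copied from it; declarations come from <linux/uio.h>): fault
 * the user pages in before taking locks, then do the atomic copy
 * that must not sleep.
 */
static ssize_t example_write_chunk(struct page *page, unsigned long offset,
				   size_t bytes, struct iov_iter *i)
{
	size_t copied;

	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return -EFAULT;
	/* pages should now be resident, so the atomic copy rarely faults */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	iov_iter_advance(i, copied);
	return copied;
}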
 410
 411void iov_iter_init(struct iov_iter *i, int direction,
 412			const struct iovec *iov, unsigned long nr_segs,
 413			size_t count)
 414{
 415	/* It will get better.  Eventually... */
 416	if (segment_eq(get_fs(), KERNEL_DS)) {
 417		direction |= ITER_KVEC;
 418		i->type = direction;
 419		i->kvec = (struct kvec *)iov;
 420	} else {
 421		i->type = direction;
 422		i->iov = iov;
 423	}
 424	i->nr_segs = nr_segs;
 425	i->iov_offset = 0;
 426	i->count = count;
 427}
 428EXPORT_SYMBOL(iov_iter_init);
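/*
 * Sketch: hand-rolling an iter over a single user buffer.  Real
 * callers normally use import_single_range()/import_iovec() at the
 * bottom of this file; this only makes the init contract visible.
 * READ means the iter is the destination of the transfer (as in
 * read(2)); WRITE means it is the source.
 */
static void example_single_iovec(struct iov_iter *i, struct iovec *iov,
				 void __user *buf, size_t len)
{
	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, READ, iov, 1, len);
}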
 429
 430static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
 431{
 432	char *from = kmap_atomic(page);
 433	memcpy(to, from + offset, len);
 434	kunmap_atomic(from);
 435}
 436
 437static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
 438{
 439	char *to = kmap_atomic(page);
 440	memcpy(to + offset, from, len);
 441	kunmap_atomic(to);
 442}
 443
 444static void memzero_page(struct page *page, size_t offset, size_t len)
 445{
 446	char *addr = kmap_atomic(page);
 447	memset(addr + offset, 0, len);
 448	kunmap_atomic(addr);
 449}
 450
 451static inline bool allocated(struct pipe_buffer *buf)
 452{
 453	return buf->ops == &default_pipe_buf_ops;
 454}
 455
 456static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
 457{
 458	size_t off = i->iov_offset;
 459	int idx = i->idx;
 460	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
 461		idx = next_idx(idx, i->pipe);
 462		off = 0;
 463	}
 464	*idxp = idx;
 465	*offp = off;
 466}
 467
 468static size_t push_pipe(struct iov_iter *i, size_t size,
 469			int *idxp, size_t *offp)
 470{
 471	struct pipe_inode_info *pipe = i->pipe;
 472	size_t off;
 473	int idx;
 474	ssize_t left;
 475
 476	if (unlikely(size > i->count))
 477		size = i->count;
 478	if (unlikely(!size))
 479		return 0;
 480
 481	left = size;
 482	data_start(i, &idx, &off);
 483	*idxp = idx;
 484	*offp = off;
 485	if (off) {
 486		left -= PAGE_SIZE - off;
 487		if (left <= 0) {
 488			pipe->bufs[idx].len += size;
 489			return size;
 490		}
 491		pipe->bufs[idx].len = PAGE_SIZE;
 492		idx = next_idx(idx, pipe);
 493	}
 494	while (idx != pipe->curbuf || !pipe->nrbufs) {
 495		struct page *page = alloc_page(GFP_USER);
 496		if (!page)
 497			break;
 498		pipe->nrbufs++;
 499		pipe->bufs[idx].ops = &default_pipe_buf_ops;
 500		pipe->bufs[idx].page = page;
 501		pipe->bufs[idx].offset = 0;
 502		if (left <= PAGE_SIZE) {
 503			pipe->bufs[idx].len = left;
 504			return size;
 505		}
 506		pipe->bufs[idx].len = PAGE_SIZE;
 507		left -= PAGE_SIZE;
 508		idx = next_idx(idx, pipe);
 509	}
 510	return size - left;
 511}
 512
 513static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 514				struct iov_iter *i)
 515{
 516	struct pipe_inode_info *pipe = i->pipe;
 517	size_t n, off;
 518	int idx;
 519
 520	if (!sanity(i))
 521		return 0;
 522
 523	bytes = n = push_pipe(i, bytes, &idx, &off);
 524	if (unlikely(!n))
 525		return 0;
 526	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
 527		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 528		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
 529		i->idx = idx;
 530		i->iov_offset = off + chunk;
 531		n -= chunk;
 532		addr += chunk;
 533	}
 534	i->count -= bytes;
 535	return bytes;
 536}
 537
 538size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 539{
 540	const char *from = addr;
 541	if (unlikely(i->type & ITER_PIPE))
 542		return copy_pipe_to_iter(addr, bytes, i);
 543	iterate_and_advance(i, bytes, v,
 544		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
 545			       v.iov_len),
 546		memcpy_to_page(v.bv_page, v.bv_offset,
 547			       (from += v.bv_len) - v.bv_len, v.bv_len),
 548		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 549	)
 550
 551	return bytes;
 552}
 553EXPORT_SYMBOL(copy_to_iter);
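/*
 * Usage sketch: a read-style producer pushing a kernel buffer into
 * whatever the iter wraps (user iovecs, kvecs, bvec pages or a
 * pipe).  The iter has already been advanced by the returned count.
 */
static ssize_t example_emit(const void *kbuf, size_t len, struct iov_iter *to)
{
	size_t copied = copy_to_iter(kbuf, len, to);

	return copied ? copied : -EFAULT;	/* 0: nothing could be copied */
}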
 554
 555size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 556{
 557	char *to = addr;
 558	if (unlikely(i->type & ITER_PIPE)) {
 559		WARN_ON(1);
 560		return 0;
 561	}
 562	iterate_and_advance(i, bytes, v,
 563		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
 564				 v.iov_len),
 565		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 566				 v.bv_offset, v.bv_len),
 567		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 568	)
 569
 570	return bytes;
 571}
 572EXPORT_SYMBOL(copy_from_iter);
 573
 574bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 575{
 576	char *to = addr;
 577	if (unlikely(i->type & ITER_PIPE)) {
 578		WARN_ON(1);
 579		return false;
 580	}
 581	if (unlikely(i->count < bytes))
 582		return false;
 583
 584	iterate_all_kinds(i, bytes, v, ({
 585		if (__copy_from_user((to += v.iov_len) - v.iov_len,
 586				      v.iov_base, v.iov_len))
 587			return false;
 588		0;}),
 589		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 590				 v.bv_offset, v.bv_len),
 591		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 592	)
 593
 594	iov_iter_advance(i, bytes);
 595	return true;
 596}
 597EXPORT_SYMBOL(copy_from_iter_full);
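/*
 * Usage sketch: all-or-nothing header extraction.  On success the
 * iter is advanced past the header; on failure the iter position is
 * untouched, which keeps the operation restartable.  The header
 * struct here is made up for illustration.
 */
struct example_hdr {
	u32 magic;
	u32 payload_len;
};

static int example_pull_header(struct iov_iter *from, struct example_hdr *hdr)
{
	if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
		return -EFAULT;
	return 0;
}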
 598
 599size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 600{
 601	char *to = addr;
 602	if (unlikely(i->type & ITER_PIPE)) {
 603		WARN_ON(1);
 604		return 0;
 605	}
 606	iterate_and_advance(i, bytes, v,
 607		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 608					 v.iov_base, v.iov_len),
 609		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 610				 v.bv_offset, v.bv_len),
 611		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 612	)
 613
 614	return bytes;
 615}
 616EXPORT_SYMBOL(copy_from_iter_nocache);
 617
 618bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 619{
 620	char *to = addr;
 621	if (unlikely(i->type & ITER_PIPE)) {
 622		WARN_ON(1);
 623		return false;
 624	}
 625	if (unlikely(i->count < bytes))
 626		return false;
 627	iterate_all_kinds(i, bytes, v, ({
 628		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 629					     v.iov_base, v.iov_len))
 630			return false;
 631		0;}),
 632		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 633				 v.bv_offset, v.bv_len),
 634		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 635	)
 636
 637	iov_iter_advance(i, bytes);
 638	return true;
 639}
 640EXPORT_SYMBOL(copy_from_iter_full_nocache);
 641
 642size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 643			 struct iov_iter *i)
 644{
 645	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 646		void *kaddr = kmap_atomic(page);
 647		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
 648		kunmap_atomic(kaddr);
 649		return wanted;
 650	} else if (likely(!(i->type & ITER_PIPE)))
 651		return copy_page_to_iter_iovec(page, offset, bytes, i);
 652	else
 653		return copy_page_to_iter_pipe(page, offset, bytes, i);
 654}
 655EXPORT_SYMBOL(copy_page_to_iter);
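/*
 * Usage sketch (hedged): the classic consumer is the page-cache
 * read path, handing one uptodate page at a time to the iter.
 * Locking, readahead and error handling are elided here.
 */
static size_t example_copy_cached_page(struct address_space *mapping,
				       pgoff_t index, size_t offset,
				       size_t bytes, struct iov_iter *to)
{
	struct page *page = find_get_page(mapping, index);
	size_t copied = 0;

	if (page) {
		copied = copy_page_to_iter(page, offset, bytes, to);
		put_page(page);
	}
	return copied;
}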
 656
 657size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 658			 struct iov_iter *i)
 659{
 660	if (unlikely(i->type & ITER_PIPE)) {
 661		WARN_ON(1);
 662		return 0;
 663	}
 664	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 665		void *kaddr = kmap_atomic(page);
 666		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
 667		kunmap_atomic(kaddr);
 668		return wanted;
 669	} else
 670		return copy_page_from_iter_iovec(page, offset, bytes, i);
 671}
 672EXPORT_SYMBOL(copy_page_from_iter);
 673
 674static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 675{
 676	struct pipe_inode_info *pipe = i->pipe;
 677	size_t n, off;
 678	int idx;
 679
 680	if (!sanity(i))
 681		return 0;
 682
 683	bytes = n = push_pipe(i, bytes, &idx, &off);
 684	if (unlikely(!n))
 685		return 0;
 686
 687	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
 688		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 689		memzero_page(pipe->bufs[idx].page, off, chunk);
 690		i->idx = idx;
 691		i->iov_offset = off + chunk;
 692		n -= chunk;
 693	}
 694	i->count -= bytes;
 695	return bytes;
 696}
 697
 698size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 699{
 700	if (unlikely(i->type & ITER_PIPE))
 701		return pipe_zero(bytes, i);
 702	iterate_and_advance(i, bytes, v,
 703		__clear_user(v.iov_base, v.iov_len),
 704		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 705		memset(v.iov_base, 0, v.iov_len)
 706	)
 707
 708	return bytes;
 709}
 710EXPORT_SYMBOL(iov_iter_zero);
 711
 712size_t iov_iter_copy_from_user_atomic(struct page *page,
 713		struct iov_iter *i, unsigned long offset, size_t bytes)
 714{
 715	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
 716	if (unlikely(i->type & ITER_PIPE)) {
 717		kunmap_atomic(kaddr);
 718		WARN_ON(1);
 719		return 0;
 720	}
 721	iterate_all_kinds(i, bytes, v,
 722		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
 723					  v.iov_base, v.iov_len),
 724		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 725				 v.bv_offset, v.bv_len),
 726		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 727	)
 728	kunmap_atomic(kaddr);
 729	return bytes;
 730}
 731EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 732
 733static inline void pipe_truncate(struct iov_iter *i)
 734{
 735	struct pipe_inode_info *pipe = i->pipe;
 736	if (pipe->nrbufs) {
 737		size_t off = i->iov_offset;
 738		int idx = i->idx;
 739		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
 740		if (off) {
 741			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
 742			idx = next_idx(idx, pipe);
 743			nrbufs++;
 744		}
 745		while (pipe->nrbufs > nrbufs) {
 746			pipe_buf_release(pipe, &pipe->bufs[idx]);
 747			idx = next_idx(idx, pipe);
 748			pipe->nrbufs--;
 749		}
 750	}
 751}
 752
 753static void pipe_advance(struct iov_iter *i, size_t size)
 754{
 755	struct pipe_inode_info *pipe = i->pipe;
 756	if (unlikely(i->count < size))
 757		size = i->count;
 758	if (size) {
 759		struct pipe_buffer *buf;
 760		size_t off = i->iov_offset, left = size;
 761		int idx = i->idx;
 762		if (off) /* make it relative to the beginning of buffer */
 763			left += off - pipe->bufs[idx].offset;
 764		while (1) {
 765			buf = &pipe->bufs[idx];
 766			if (left <= buf->len)
 767				break;
 768			left -= buf->len;
 769			idx = next_idx(idx, pipe);
 770		}
 771		i->idx = idx;
 772		i->iov_offset = buf->offset + left;
 773	}
 774	i->count -= size;
 775	/* ... and discard everything past that point */
 776	pipe_truncate(i);
 777}
 778
 779void iov_iter_advance(struct iov_iter *i, size_t size)
 780{
 781	if (unlikely(i->type & ITER_PIPE)) {
 782		pipe_advance(i, size);
 783		return;
 784	}
 785	iterate_and_advance(i, size, v, 0, 0, 0)
 786}
 787EXPORT_SYMBOL(iov_iter_advance);
 788
 789/*
 790 * Return the count of just the current iov_iter segment.
 791 */
 792size_t iov_iter_single_seg_count(const struct iov_iter *i)
 793{
 794	if (unlikely(i->type & ITER_PIPE))
 795		return i->count;	// it is a silly place, anyway
 796	if (i->nr_segs == 1)
 797		return i->count;
 798	else if (i->type & ITER_BVEC)
 799		return min(i->count, i->bvec->bv_len - i->iov_offset);
 800	else
 801		return min(i->count, i->iov->iov_len - i->iov_offset);
 802}
 803EXPORT_SYMBOL(iov_iter_single_seg_count);
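/*
 * Usage sketch: walking an iter one contiguous segment at a time,
 * the pattern this helper exists for (e.g. socket sendmsg paths).
 * "op" is a stand-in callback, not a real kernel hook.
 */
static void example_for_each_segment(struct iov_iter *i,
				     void (*op)(size_t seg_len))
{
	while (iov_iter_count(i)) {
		size_t seg = iov_iter_single_seg_count(i);

		if (!seg)	/* defensive: zero-length segment */
			break;
		op(seg);
		iov_iter_advance(i, seg);
	}
}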
 804
 805void iov_iter_kvec(struct iov_iter *i, int direction,
 806			const struct kvec *kvec, unsigned long nr_segs,
 807			size_t count)
 808{
 809	BUG_ON(!(direction & ITER_KVEC));
 810	i->type = direction;
 811	i->kvec = kvec;
 812	i->nr_segs = nr_segs;
 813	i->iov_offset = 0;
 814	i->count = count;
 815}
 816EXPORT_SYMBOL(iov_iter_kvec);
 817
 818void iov_iter_bvec(struct iov_iter *i, int direction,
 819			const struct bio_vec *bvec, unsigned long nr_segs,
 820			size_t count)
 821{
 822	BUG_ON(!(direction & ITER_BVEC));
 823	i->type = direction;
 824	i->bvec = bvec;
 825	i->nr_segs = nr_segs;
 826	i->iov_offset = 0;
 827	i->count = count;
 828}
 829EXPORT_SYMBOL(iov_iter_bvec);
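/*
 * Usage sketch: wrapping one kernel page in a bvec-backed iter, as
 * loop-device style code does when feeding pages through generic
 * read/write paths.  The direction must carry ITER_BVEC, per the
 * BUG_ON() above.
 */
static void example_bvec_iter(struct iov_iter *i, struct bio_vec *bv,
			      struct page *page, size_t len)
{
	bv->bv_page = page;
	bv->bv_offset = 0;
	bv->bv_len = len;
	iov_iter_bvec(i, ITER_BVEC | READ, bv, 1, len);
}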
 830
 831void iov_iter_pipe(struct iov_iter *i, int direction,
 832			struct pipe_inode_info *pipe,
 833			size_t count)
 834{
 835	BUG_ON(direction != ITER_PIPE);
 836	WARN_ON(pipe->nrbufs == pipe->buffers);
 837	i->type = direction;
 838	i->pipe = pipe;
 839	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 840	i->iov_offset = 0;
 841	i->count = count;
 842}
 843EXPORT_SYMBOL(iov_iter_pipe);
 844
 845unsigned long iov_iter_alignment(const struct iov_iter *i)
 846{
 847	unsigned long res = 0;
 848	size_t size = i->count;
 849
 850	if (unlikely(i->type & ITER_PIPE)) {
 851		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
 852			return size | i->iov_offset;
 853		return size;
 854	}
 855	iterate_all_kinds(i, size, v,
 856		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
 857		res |= v.bv_offset | v.bv_len,
 858		res |= (unsigned long)v.iov_base | v.iov_len
 859	)
 860	return res;
 861}
 862EXPORT_SYMBOL(iov_iter_alignment);
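/*
 * Usage sketch: the direct-I/O style gate.  If any segment base or
 * length breaks the device's alignment, callers fall back to
 * buffered I/O.  "blkbits" stands in for the real block-size logic.
 */
static bool example_dio_aligned(const struct iov_iter *i, unsigned blkbits)
{
	unsigned long mask = (1UL << blkbits) - 1;

	return (iov_iter_alignment(i) & mask) == 0;
}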
 863
 864unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 865{
 866	unsigned long res = 0;
 867	size_t size = i->count;
 868
 869	if (unlikely(i->type & ITER_PIPE)) {
 870		WARN_ON(1);
 871		return ~0U;
 872	}
 873
 874	iterate_all_kinds(i, size, v,
 875		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 876			(size != v.iov_len ? size : 0), 0),
 877		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
 878			(size != v.bv_len ? size : 0)),
 879		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 880			(size != v.iov_len ? size : 0))
 881		);
 882	return res;
 883}
 884EXPORT_SYMBOL(iov_iter_gap_alignment);
 885
 886static inline size_t __pipe_get_pages(struct iov_iter *i,
 887				size_t maxsize,
 888				struct page **pages,
 889				int idx,
 890				size_t *start)
 891{
 892	struct pipe_inode_info *pipe = i->pipe;
 893	ssize_t n = push_pipe(i, maxsize, &idx, start);
 894	if (!n)
 895		return -EFAULT;
 896
 897	maxsize = n;
 898	n += *start;
 899	while (n > 0) {
 900		get_page(*pages++ = pipe->bufs[idx].page);
 901		idx = next_idx(idx, pipe);
 902		n -= PAGE_SIZE;
 903	}
 904
 905	return maxsize;
 906}
 907
 908static ssize_t pipe_get_pages(struct iov_iter *i,
 909		   struct page **pages, size_t maxsize, unsigned maxpages,
 910		   size_t *start)
 911{
 912	unsigned npages;
 913	size_t capacity;
 914	int idx;
 915
 916	if (!maxsize)
 917		return 0;
 918
 919	if (!sanity(i))
 920		return -EFAULT;
 921
 922	data_start(i, &idx, start);
 923	/* some of this one + all after this one */
 924	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
  925	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
 926
 927	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
 928}
 929
 930ssize_t iov_iter_get_pages(struct iov_iter *i,
 931		   struct page **pages, size_t maxsize, unsigned maxpages,
 932		   size_t *start)
 933{
 934	if (maxsize > i->count)
 935		maxsize = i->count;
 936
 937	if (unlikely(i->type & ITER_PIPE))
 938		return pipe_get_pages(i, pages, maxsize, maxpages, start);
 939	iterate_all_kinds(i, maxsize, v, ({
 940		unsigned long addr = (unsigned long)v.iov_base;
 941		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
 942		int n;
 943		int res;
 944
 945		if (len > maxpages * PAGE_SIZE)
 946			len = maxpages * PAGE_SIZE;
 947		addr &= ~(PAGE_SIZE - 1);
 948		n = DIV_ROUND_UP(len, PAGE_SIZE);
 949		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
 950		if (unlikely(res < 0))
 951			return res;
 952		return (res == n ? len : res * PAGE_SIZE) - *start;
 953	0;}),({
 954		/* can't be more than PAGE_SIZE */
 955		*start = v.bv_offset;
 956		get_page(*pages = v.bv_page);
 957		return v.bv_len;
 958	}),({
 959		return -EFAULT;
 960	})
 961	)
 962	return 0;
 963}
 964EXPORT_SYMBOL(iov_iter_get_pages);
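/*
 * Usage sketch: pinning the first run of pages behind the iter for
 * zero-copy I/O.  The iter is NOT advanced; the caller advances by
 * the returned byte count and must put_page() every pinned page.
 */
static ssize_t example_pin_pages(struct iov_iter *i, struct page **pages,
				 unsigned maxpages)
{
	size_t start;	/* offset of the data within pages[0] */

	return iov_iter_get_pages(i, pages, SIZE_MAX, maxpages, &start);
}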
 965
 966static struct page **get_pages_array(size_t n)
 967{
 968	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
 969	if (!p)
 970		p = vmalloc(n * sizeof(struct page *));
 971	return p;
 972}
 973
 974static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
 975		   struct page ***pages, size_t maxsize,
 976		   size_t *start)
 977{
 978	struct page **p;
 979	size_t n;
 980	int idx;
 981	int npages;
 982
 983	if (!maxsize)
 984		return 0;
 985
 986	if (!sanity(i))
 987		return -EFAULT;
 988
 989	data_start(i, &idx, start);
 990	/* some of this one + all after this one */
 991	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
 992	n = npages * PAGE_SIZE - *start;
 993	if (maxsize > n)
 994		maxsize = n;
 995	else
 996		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
 997	p = get_pages_array(npages);
 998	if (!p)
 999		return -ENOMEM;
1000	n = __pipe_get_pages(i, maxsize, p, idx, start);
1001	if (n > 0)
1002		*pages = p;
1003	else
1004		kvfree(p);
1005	return n;
1006}
1007
1008ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1009		   struct page ***pages, size_t maxsize,
1010		   size_t *start)
1011{
1012	struct page **p;
1013
1014	if (maxsize > i->count)
1015		maxsize = i->count;
1016
1017	if (unlikely(i->type & ITER_PIPE))
1018		return pipe_get_pages_alloc(i, pages, maxsize, start);
1019	iterate_all_kinds(i, maxsize, v, ({
1020		unsigned long addr = (unsigned long)v.iov_base;
1021		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1022		int n;
1023		int res;
1024
1025		addr &= ~(PAGE_SIZE - 1);
1026		n = DIV_ROUND_UP(len, PAGE_SIZE);
1027		p = get_pages_array(n);
1028		if (!p)
1029			return -ENOMEM;
1030		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
1031		if (unlikely(res < 0)) {
1032			kvfree(p);
1033			return res;
1034		}
1035		*pages = p;
1036		return (res == n ? len : res * PAGE_SIZE) - *start;
1037	0;}),({
1038		/* can't be more than PAGE_SIZE */
1039		*start = v.bv_offset;
1040		*pages = p = get_pages_array(1);
1041		if (!p)
1042			return -ENOMEM;
1043		get_page(*p = v.bv_page);
1044		return v.bv_len;
1045	}),({
1046		return -EFAULT;
1047	})
1048	)
1049	return 0;
1050}
1051EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1052
1053size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1054			       struct iov_iter *i)
1055{
1056	char *to = addr;
1057	__wsum sum, next;
1058	size_t off = 0;
1059	sum = *csum;
1060	if (unlikely(i->type & ITER_PIPE)) {
1061		WARN_ON(1);
1062		return 0;
1063	}
1064	iterate_and_advance(i, bytes, v, ({
1065		int err = 0;
1066		next = csum_and_copy_from_user(v.iov_base,
1067					       (to += v.iov_len) - v.iov_len,
1068					       v.iov_len, 0, &err);
1069		if (!err) {
1070			sum = csum_block_add(sum, next, off);
1071			off += v.iov_len;
1072		}
1073		err ? v.iov_len : 0;
1074	}), ({
1075		char *p = kmap_atomic(v.bv_page);
1076		next = csum_partial_copy_nocheck(p + v.bv_offset,
1077						 (to += v.bv_len) - v.bv_len,
1078						 v.bv_len, 0);
1079		kunmap_atomic(p);
1080		sum = csum_block_add(sum, next, off);
1081		off += v.bv_len;
1082	}),({
1083		next = csum_partial_copy_nocheck(v.iov_base,
1084						 (to += v.iov_len) - v.iov_len,
1085						 v.iov_len, 0);
1086		sum = csum_block_add(sum, next, off);
1087		off += v.iov_len;
1088	})
1089	)
1090	*csum = sum;
1091	return bytes;
1092}
1093EXPORT_SYMBOL(csum_and_copy_from_iter);
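/*
 * Usage sketch (networking flavoured): copy a payload out of an
 * iter while folding it into a checksum.  Seeding with a pseudo-
 * header sum and the skb plumbing are elided.
 */
static int example_copy_and_csum(void *buf, size_t len, struct iov_iter *from)
{
	__wsum csum = 0;

	if (csum_and_copy_from_iter(buf, len, &csum, from) != len)
		return -EFAULT;
	/* csum now holds the partial sum over "buf" */
	return 0;
}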
1094
1095bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1096			       struct iov_iter *i)
1097{
1098	char *to = addr;
1099	__wsum sum, next;
1100	size_t off = 0;
1101	sum = *csum;
1102	if (unlikely(i->type & ITER_PIPE)) {
1103		WARN_ON(1);
1104		return false;
1105	}
1106	if (unlikely(i->count < bytes))
1107		return false;
1108	iterate_all_kinds(i, bytes, v, ({
1109		int err = 0;
1110		next = csum_and_copy_from_user(v.iov_base,
1111					       (to += v.iov_len) - v.iov_len,
1112					       v.iov_len, 0, &err);
1113		if (err)
1114			return false;
1115		sum = csum_block_add(sum, next, off);
1116		off += v.iov_len;
1117		0;
1118	}), ({
1119		char *p = kmap_atomic(v.bv_page);
1120		next = csum_partial_copy_nocheck(p + v.bv_offset,
1121						 (to += v.bv_len) - v.bv_len,
1122						 v.bv_len, 0);
1123		kunmap_atomic(p);
1124		sum = csum_block_add(sum, next, off);
1125		off += v.bv_len;
1126	}),({
1127		next = csum_partial_copy_nocheck(v.iov_base,
1128						 (to += v.iov_len) - v.iov_len,
1129						 v.iov_len, 0);
1130		sum = csum_block_add(sum, next, off);
1131		off += v.iov_len;
1132	})
1133	)
1134	*csum = sum;
1135	iov_iter_advance(i, bytes);
1136	return true;
1137}
1138EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1139
1140size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1141			     struct iov_iter *i)
1142{
1143	const char *from = addr;
1144	__wsum sum, next;
1145	size_t off = 0;
1146	sum = *csum;
1147	if (unlikely(i->type & ITER_PIPE)) {
1148		WARN_ON(1);	/* for now */
1149		return 0;
1150	}
1151	iterate_and_advance(i, bytes, v, ({
1152		int err = 0;
1153		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1154					     v.iov_base,
1155					     v.iov_len, 0, &err);
1156		if (!err) {
1157			sum = csum_block_add(sum, next, off);
1158			off += v.iov_len;
1159		}
1160		err ? v.iov_len : 0;
1161	}), ({
1162		char *p = kmap_atomic(v.bv_page);
1163		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
1164						 p + v.bv_offset,
1165						 v.bv_len, 0);
1166		kunmap_atomic(p);
1167		sum = csum_block_add(sum, next, off);
1168		off += v.bv_len;
1169	}),({
1170		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
1171						 v.iov_base,
1172						 v.iov_len, 0);
1173		sum = csum_block_add(sum, next, off);
1174		off += v.iov_len;
1175	})
1176	)
1177	*csum = sum;
1178	return bytes;
1179}
1180EXPORT_SYMBOL(csum_and_copy_to_iter);
1181
1182int iov_iter_npages(const struct iov_iter *i, int maxpages)
1183{
1184	size_t size = i->count;
1185	int npages = 0;
1186
1187	if (!size)
1188		return 0;
1189
1190	if (unlikely(i->type & ITER_PIPE)) {
1191		struct pipe_inode_info *pipe = i->pipe;
1192		size_t off;
1193		int idx;
1194
1195		if (!sanity(i))
1196			return 0;
1197
1198		data_start(i, &idx, &off);
1199		/* some of this one + all after this one */
1200		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
1201		if (npages >= maxpages)
1202			return maxpages;
1203	} else iterate_all_kinds(i, size, v, ({
1204		unsigned long p = (unsigned long)v.iov_base;
1205		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1206			- p / PAGE_SIZE;
1207		if (npages >= maxpages)
1208			return maxpages;
1209	0;}),({
1210		npages++;
1211		if (npages >= maxpages)
1212			return maxpages;
1213	}),({
1214		unsigned long p = (unsigned long)v.iov_base;
1215		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1216			- p / PAGE_SIZE;
1217		if (npages >= maxpages)
1218			return maxpages;
1219	})
1220	)
1221	return npages;
1222}
1223EXPORT_SYMBOL(iov_iter_npages);
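/*
 * Usage sketch: sizing a page-pointer array before pinning.
 * maxpages caps the answer, so a fixed-size caller can ask for
 * exactly what it can hold.  Pairs naturally with
 * iov_iter_get_pages() above.
 */
static struct page **example_alloc_page_array(const struct iov_iter *i,
					      int maxpages, int *npages)
{
	*npages = iov_iter_npages(i, maxpages);
	return kmalloc_array(*npages, sizeof(struct page *), GFP_KERNEL);
}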
1224
1225const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1226{
1227	*new = *old;
1228	if (unlikely(new->type & ITER_PIPE)) {
1229		WARN_ON(1);
1230		return NULL;
1231	}
1232	if (new->type & ITER_BVEC)
1233		return new->bvec = kmemdup(new->bvec,
1234				    new->nr_segs * sizeof(struct bio_vec),
1235				    flags);
1236	else
1237		/* iovec and kvec have identical layout */
1238		return new->iov = kmemdup(new->iov,
1239				   new->nr_segs * sizeof(struct iovec),
1240				   flags);
1241}
1242EXPORT_SYMBOL(dup_iter);
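/*
 * Usage sketch: snapshotting an iter for asynchronous completion
 * (async paths that outlive the submitter's stack do this), since
 * the original iovec array may be on that stack.  kfree() the
 * returned pointer when done.
 */
static int example_clone_iter(struct iov_iter *dst, struct iov_iter *src)
{
	return dup_iter(dst, src, GFP_KERNEL) ? 0 : -ENOMEM;
}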
1243
1244/**
1245 * import_iovec() - Copy an array of &struct iovec from userspace
1246 *     into the kernel, check that it is valid, and initialize a new
1247 *     &struct iov_iter iterator to access it.
1248 *
1249 * @type: One of %READ or %WRITE.
1250 * @uvector: Pointer to the userspace array.
1251 * @nr_segs: Number of elements in userspace array.
1252 * @fast_segs: Number of elements in @iov.
1253 * @iov: (input and output parameter) Pointer to pointer to (usually small
1254 *     on-stack) kernel array.
1255 * @i: Pointer to iterator that will be initialized on success.
1256 *
1257 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
1258 * then this function places %NULL in *@iov on return. Otherwise, a new
1259 * array will be allocated and the result placed in *@iov. This means that
1260 * the caller may call kfree() on *@iov regardless of whether the small
1261 * on-stack array was used or not (and regardless of whether this function
1262 * returns an error or not).
1263 *
1264 * Return: 0 on success or negative error code on error.
1265 */
1266int import_iovec(int type, const struct iovec __user * uvector,
1267		 unsigned nr_segs, unsigned fast_segs,
1268		 struct iovec **iov, struct iov_iter *i)
1269{
1270	ssize_t n;
1271	struct iovec *p;
1272	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1273				  *iov, &p);
1274	if (n < 0) {
1275		if (p != *iov)
1276			kfree(p);
1277		*iov = NULL;
1278		return n;
1279	}
1280	iov_iter_init(i, type, p, nr_segs, n);
1281	*iov = p == *iov ? NULL : p;
1282	return 0;
1283}
1284EXPORT_SYMBOL(import_iovec);
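/*
 * Usage sketch: the readv(2)-style entry pattern.  The on-stack
 * array covers the common small case; import_iovec() decides
 * whether to allocate, and kfree(iov) is safe either way.  The
 * ->read_iter() call is elided.
 */
static ssize_t example_readv(const struct iovec __user *uvec, unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... hand &iter to the actual ->read_iter() here ... */
	ret = iov_iter_count(&iter);	/* stand-in result */
	kfree(iov);			/* NULL if iovstack was used */
	return ret;
}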
1285
1286#ifdef CONFIG_COMPAT
1287#include <linux/compat.h>
1288
1289int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
1290		 unsigned nr_segs, unsigned fast_segs,
1291		 struct iovec **iov, struct iov_iter *i)
1292{
1293	ssize_t n;
1294	struct iovec *p;
1295	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1296				  *iov, &p);
1297	if (n < 0) {
1298		if (p != *iov)
1299			kfree(p);
1300		*iov = NULL;
1301		return n;
1302	}
1303	iov_iter_init(i, type, p, nr_segs, n);
1304	*iov = p == *iov ? NULL : p;
1305	return 0;
1306}
1307#endif
1308
1309int import_single_range(int rw, void __user *buf, size_t len,
1310		 struct iovec *iov, struct iov_iter *i)
1311{
1312	if (len > MAX_RW_COUNT)
1313		len = MAX_RW_COUNT;
1314	if (unlikely(!access_ok(!rw, buf, len)))
1315		return -EFAULT;
1316
1317	iov->iov_base = buf;
1318	iov->iov_len = len;
1319	iov_iter_init(i, rw, iov, 1, len);
1320	return 0;
1321}
1322EXPORT_SYMBOL(import_single_range);
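/*
 * Usage sketch: the read(2)/write(2) single-buffer counterpart of
 * import_iovec().  Nothing is allocated; the iovec just has to stay
 * in scope for as long as the iter is used.
 */
static ssize_t example_write_path(void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret = import_single_range(WRITE, buf, len, &iov, &iter);

	if (ret)
		return ret;
	/* ... hand &iter to the actual ->write_iter() here ... */
	return iov_iter_count(&iter);	/* stand-in result */
}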
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <crypto/hash.h>
   3#include <linux/export.h>
   4#include <linux/bvec.h>
   5#include <linux/uio.h>
   6#include <linux/pagemap.h>
   7#include <linux/slab.h>
   8#include <linux/vmalloc.h>
   9#include <linux/splice.h>
  10#include <net/checksum.h>
  11#include <linux/scatterlist.h>
  12#include <linux/instrumented.h>
  13
  14#define PIPE_PARANOIA /* for now */
  15
  16#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  17	size_t left;					\
  18	size_t wanted = n;				\
  19	__p = i->iov;					\
  20	__v.iov_len = min(n, __p->iov_len - skip);	\
  21	if (likely(__v.iov_len)) {			\
  22		__v.iov_base = __p->iov_base + skip;	\
  23		left = (STEP);				\
  24		__v.iov_len -= left;			\
  25		skip += __v.iov_len;			\
  26		n -= __v.iov_len;			\
  27	} else {					\
  28		left = 0;				\
  29	}						\
  30	while (unlikely(!left && n)) {			\
  31		__p++;					\
  32		__v.iov_len = min(n, __p->iov_len);	\
  33		if (unlikely(!__v.iov_len))		\
  34			continue;			\
  35		__v.iov_base = __p->iov_base;		\
  36		left = (STEP);				\
  37		__v.iov_len -= left;			\
  38		skip = __v.iov_len;			\
  39		n -= __v.iov_len;			\
  40	}						\
  41	n = wanted - n;					\
  42}
  43
  44#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
  45	size_t wanted = n;				\
  46	__p = i->kvec;					\
  47	__v.iov_len = min(n, __p->iov_len - skip);	\
  48	if (likely(__v.iov_len)) {			\
  49		__v.iov_base = __p->iov_base + skip;	\
  50		(void)(STEP);				\
  51		skip += __v.iov_len;			\
  52		n -= __v.iov_len;			\
  53	}						\
  54	while (unlikely(n)) {				\
  55		__p++;					\
  56		__v.iov_len = min(n, __p->iov_len);	\
  57		if (unlikely(!__v.iov_len))		\
  58			continue;			\
  59		__v.iov_base = __p->iov_base;		\
  60		(void)(STEP);				\
  61		skip = __v.iov_len;			\
  62		n -= __v.iov_len;			\
  63	}						\
  64	n = wanted;					\
  65}
  66
  67#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
  68	struct bvec_iter __start;			\
  69	__start.bi_size = n;				\
  70	__start.bi_bvec_done = skip;			\
  71	__start.bi_idx = 0;				\
  72	for_each_bvec(__v, i->bvec, __bi, __start) {	\
  73		if (!__v.bv_len)			\
  74			continue;			\
  75		(void)(STEP);				\
  76	}						\
  77}
  78
  79#define iterate_all_kinds(i, n, v, I, B, K) {			\
  80	if (likely(n)) {					\
  81		size_t skip = i->iov_offset;			\
  82		if (unlikely(i->type & ITER_BVEC)) {		\
  83			struct bio_vec v;			\
  84			struct bvec_iter __bi;			\
  85			iterate_bvec(i, n, v, __bi, skip, (B))	\
  86		} else if (unlikely(i->type & ITER_KVEC)) {	\
  87			const struct kvec *kvec;		\
  88			struct kvec v;				\
  89			iterate_kvec(i, n, v, kvec, skip, (K))	\
  90		} else if (unlikely(i->type & ITER_DISCARD)) {	\
  91		} else {					\
  92			const struct iovec *iov;		\
  93			struct iovec v;				\
  94			iterate_iovec(i, n, v, iov, skip, (I))	\
  95		}						\
  96	}							\
  97}
  98
  99#define iterate_and_advance(i, n, v, I, B, K) {			\
 100	if (unlikely(i->count < n))				\
 101		n = i->count;					\
 102	if (i->count) {						\
 103		size_t skip = i->iov_offset;			\
 104		if (unlikely(i->type & ITER_BVEC)) {		\
 105			const struct bio_vec *bvec = i->bvec;	\
 106			struct bio_vec v;			\
 107			struct bvec_iter __bi;			\
 108			iterate_bvec(i, n, v, __bi, skip, (B))	\
 109			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
 110			i->nr_segs -= i->bvec - bvec;		\
 111			skip = __bi.bi_bvec_done;		\
 112		} else if (unlikely(i->type & ITER_KVEC)) {	\
 113			const struct kvec *kvec;		\
 114			struct kvec v;				\
 115			iterate_kvec(i, n, v, kvec, skip, (K))	\
 116			if (skip == kvec->iov_len) {		\
 117				kvec++;				\
 118				skip = 0;			\
 119			}					\
 120			i->nr_segs -= kvec - i->kvec;		\
 121			i->kvec = kvec;				\
 122		} else if (unlikely(i->type & ITER_DISCARD)) {	\
 123			skip += n;				\
 124		} else {					\
 125			const struct iovec *iov;		\
 126			struct iovec v;				\
 127			iterate_iovec(i, n, v, iov, skip, (I))	\
 128			if (skip == iov->iov_len) {		\
 129				iov++;				\
 130				skip = 0;			\
 131			}					\
 132			i->nr_segs -= iov - i->iov;		\
 133			i->iov = iov;				\
 134		}						\
 135		i->count -= n;					\
 136		i->iov_offset = skip;				\
 137	}							\
 138}
 139
 140static int copyout(void __user *to, const void *from, size_t n)
 141{
 142	if (access_ok(to, n)) {
 143		instrument_copy_to_user(to, from, n);
 144		n = raw_copy_to_user(to, from, n);
 145	}
 146	return n;
 147}
 148
 149static int copyin(void *to, const void __user *from, size_t n)
 150{
 151	if (access_ok(from, n)) {
 152		instrument_copy_from_user(to, from, n);
 153		n = raw_copy_from_user(to, from, n);
 154	}
 155	return n;
 156}
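/*
 * Note: copyout()/copyin() are what v5.9 uses where the v4.10
 * listing above called __copy_to_user()/__copy_from_user(): an
 * access_ok() check plus memory-safety instrumentation hooks, then
 * the raw copy.  Like raw_copy_*_user(), they return the number of
 * bytes NOT copied.  Hypothetical illustration:
 */
static size_t example_copy_seg(void *to, const struct iovec *v)
{
	return copyin(to, v->iov_base, v->iov_len);	/* 0 on full success */
}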
 157
 158static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 159			 struct iov_iter *i)
 160{
 161	size_t skip, copy, left, wanted;
 162	const struct iovec *iov;
 163	char __user *buf;
 164	void *kaddr, *from;
 165
 166	if (unlikely(bytes > i->count))
 167		bytes = i->count;
 168
 169	if (unlikely(!bytes))
 170		return 0;
 171
 172	might_fault();
 173	wanted = bytes;
 174	iov = i->iov;
 175	skip = i->iov_offset;
 176	buf = iov->iov_base + skip;
 177	copy = min(bytes, iov->iov_len - skip);
 178
 179	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 180		kaddr = kmap_atomic(page);
 181		from = kaddr + offset;
 182
 183		/* first chunk, usually the only one */
 184		left = copyout(buf, from, copy);
 185		copy -= left;
 186		skip += copy;
 187		from += copy;
 188		bytes -= copy;
 189
 190		while (unlikely(!left && bytes)) {
 191			iov++;
 192			buf = iov->iov_base;
 193			copy = min(bytes, iov->iov_len);
 194			left = copyout(buf, from, copy);
 195			copy -= left;
 196			skip = copy;
 197			from += copy;
 198			bytes -= copy;
 199		}
 200		if (likely(!bytes)) {
 201			kunmap_atomic(kaddr);
 202			goto done;
 203		}
 204		offset = from - kaddr;
 205		buf += copy;
 206		kunmap_atomic(kaddr);
 207		copy = min(bytes, iov->iov_len - skip);
 208	}
 209	/* Too bad - revert to non-atomic kmap */
 210
 211	kaddr = kmap(page);
 212	from = kaddr + offset;
 213	left = copyout(buf, from, copy);
 214	copy -= left;
 215	skip += copy;
 216	from += copy;
 217	bytes -= copy;
 218	while (unlikely(!left && bytes)) {
 219		iov++;
 220		buf = iov->iov_base;
 221		copy = min(bytes, iov->iov_len);
 222		left = copyout(buf, from, copy);
 223		copy -= left;
 224		skip = copy;
 225		from += copy;
 226		bytes -= copy;
 227	}
 228	kunmap(page);
 229
 230done:
 231	if (skip == iov->iov_len) {
 232		iov++;
 233		skip = 0;
 234	}
 235	i->count -= wanted - bytes;
 236	i->nr_segs -= iov - i->iov;
 237	i->iov = iov;
 238	i->iov_offset = skip;
 239	return wanted - bytes;
 240}
 241
 242static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 243			 struct iov_iter *i)
 244{
 245	size_t skip, copy, left, wanted;
 246	const struct iovec *iov;
 247	char __user *buf;
 248	void *kaddr, *to;
 249
 250	if (unlikely(bytes > i->count))
 251		bytes = i->count;
 252
 253	if (unlikely(!bytes))
 254		return 0;
 255
 256	might_fault();
 257	wanted = bytes;
 258	iov = i->iov;
 259	skip = i->iov_offset;
 260	buf = iov->iov_base + skip;
 261	copy = min(bytes, iov->iov_len - skip);
 262
 263	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 264		kaddr = kmap_atomic(page);
 265		to = kaddr + offset;
 266
 267		/* first chunk, usually the only one */
 268		left = copyin(to, buf, copy);
 269		copy -= left;
 270		skip += copy;
 271		to += copy;
 272		bytes -= copy;
 273
 274		while (unlikely(!left && bytes)) {
 275			iov++;
 276			buf = iov->iov_base;
 277			copy = min(bytes, iov->iov_len);
 278			left = copyin(to, buf, copy);
 279			copy -= left;
 280			skip = copy;
 281			to += copy;
 282			bytes -= copy;
 283		}
 284		if (likely(!bytes)) {
 285			kunmap_atomic(kaddr);
 286			goto done;
 287		}
 288		offset = to - kaddr;
 289		buf += copy;
 290		kunmap_atomic(kaddr);
 291		copy = min(bytes, iov->iov_len - skip);
 292	}
 293	/* Too bad - revert to non-atomic kmap */
 294
 295	kaddr = kmap(page);
 296	to = kaddr + offset;
 297	left = copyin(to, buf, copy);
 298	copy -= left;
 299	skip += copy;
 300	to += copy;
 301	bytes -= copy;
 302	while (unlikely(!left && bytes)) {
 303		iov++;
 304		buf = iov->iov_base;
 305		copy = min(bytes, iov->iov_len);
 306		left = copyin(to, buf, copy);
 307		copy -= left;
 308		skip = copy;
 309		to += copy;
 310		bytes -= copy;
 311	}
 312	kunmap(page);
 313
 314done:
 315	if (skip == iov->iov_len) {
 316		iov++;
 317		skip = 0;
 318	}
 319	i->count -= wanted - bytes;
 320	i->nr_segs -= iov - i->iov;
 321	i->iov = iov;
 322	i->iov_offset = skip;
 323	return wanted - bytes;
 324}
 325
 326#ifdef PIPE_PARANOIA
 327static bool sanity(const struct iov_iter *i)
 328{
 329	struct pipe_inode_info *pipe = i->pipe;
 330	unsigned int p_head = pipe->head;
 331	unsigned int p_tail = pipe->tail;
 332	unsigned int p_mask = pipe->ring_size - 1;
 333	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
 334	unsigned int i_head = i->head;
 335	unsigned int idx;
 336
 337	if (i->iov_offset) {
 338		struct pipe_buffer *p;
 339		if (unlikely(p_occupancy == 0))
 340			goto Bad;	// pipe must be non-empty
 341		if (unlikely(i_head != p_head - 1))
 342			goto Bad;	// must be at the last buffer...
 343
 344		p = &pipe->bufs[i_head & p_mask];
 345		if (unlikely(p->offset + p->len != i->iov_offset))
 346			goto Bad;	// ... at the end of segment
 347	} else {
 348		if (i_head != p_head)
 349			goto Bad;	// must be right after the last buffer
 350	}
 351	return true;
 352Bad:
 353	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
 354	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
 355			p_head, p_tail, pipe->ring_size);
 356	for (idx = 0; idx < pipe->ring_size; idx++)
 357		printk(KERN_ERR "[%p %p %d %d]\n",
 358			pipe->bufs[idx].ops,
 359			pipe->bufs[idx].page,
 360			pipe->bufs[idx].offset,
 361			pipe->bufs[idx].len);
 362	WARN_ON(1);
 363	return false;
 364}
 365#else
 366#define sanity(i) true
 367#endif
 368
 369static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
 370			 struct iov_iter *i)
 371{
 372	struct pipe_inode_info *pipe = i->pipe;
 373	struct pipe_buffer *buf;
 374	unsigned int p_tail = pipe->tail;
 375	unsigned int p_mask = pipe->ring_size - 1;
 376	unsigned int i_head = i->head;
 377	size_t off;
 378
 379	if (unlikely(bytes > i->count))
 380		bytes = i->count;
 381
 382	if (unlikely(!bytes))
 383		return 0;
 384
 385	if (!sanity(i))
 386		return 0;
 387
 388	off = i->iov_offset;
 389	buf = &pipe->bufs[i_head & p_mask];
 390	if (off) {
 391		if (offset == off && buf->page == page) {
 392			/* merge with the last one */
 393			buf->len += bytes;
 394			i->iov_offset += bytes;
 395			goto out;
 396		}
 397		i_head++;
 398		buf = &pipe->bufs[i_head & p_mask];
 399	}
 400	if (pipe_full(i_head, p_tail, pipe->max_usage))
 401		return 0;
 402
 403	buf->ops = &page_cache_pipe_buf_ops;
 404	get_page(page);
 405	buf->page = page;
 406	buf->offset = offset;
 407	buf->len = bytes;
 408
 409	pipe->head = i_head + 1;
 410	i->iov_offset = offset + bytes;
 411	i->head = i_head;
 412out:
 413	i->count -= bytes;
 414	return bytes;
 415}
 416
 417/*
 418 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 419 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 420 *
 421 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 422 * because it is an invalid address).
 423 */
 424int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 425{
 426	size_t skip = i->iov_offset;
 427	const struct iovec *iov;
 428	int err;
 429	struct iovec v;
 430
 431	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 432		iterate_iovec(i, bytes, v, iov, skip, ({
 433			err = fault_in_pages_readable(v.iov_base, v.iov_len);
 434			if (unlikely(err))
 435			return err;
 436		0;}))
 437	}
 438	return 0;
 439}
 440EXPORT_SYMBOL(iov_iter_fault_in_readable);
 441
 442void iov_iter_init(struct iov_iter *i, unsigned int direction,
 443			const struct iovec *iov, unsigned long nr_segs,
 444			size_t count)
 445{
 446	WARN_ON(direction & ~(READ | WRITE));
 447	direction &= READ | WRITE;
 448
 449	/* It will get better.  Eventually... */
 450	if (uaccess_kernel()) {
 451		i->type = ITER_KVEC | direction;
 452		i->kvec = (struct kvec *)iov;
 453	} else {
 454		i->type = ITER_IOVEC | direction;
 455		i->iov = iov;
 456	}
 457	i->nr_segs = nr_segs;
 458	i->iov_offset = 0;
 459	i->count = count;
 460}
 461EXPORT_SYMBOL(iov_iter_init);
 462
 463static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
 464{
 465	char *from = kmap_atomic(page);
 466	memcpy(to, from + offset, len);
 467	kunmap_atomic(from);
 468}
 469
 470static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
 471{
 472	char *to = kmap_atomic(page);
 473	memcpy(to + offset, from, len);
 474	kunmap_atomic(to);
 475}
 476
 477static void memzero_page(struct page *page, size_t offset, size_t len)
 478{
 479	char *addr = kmap_atomic(page);
 480	memset(addr + offset, 0, len);
 481	kunmap_atomic(addr);
 482}
 483
 484static inline bool allocated(struct pipe_buffer *buf)
 485{
 486	return buf->ops == &default_pipe_buf_ops;
 487}
 488
 489static inline void data_start(const struct iov_iter *i,
 490			      unsigned int *iter_headp, size_t *offp)
 491{
 492	unsigned int p_mask = i->pipe->ring_size - 1;
 493	unsigned int iter_head = i->head;
 494	size_t off = i->iov_offset;
 495
 496	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
 497		    off == PAGE_SIZE)) {
 498		iter_head++;
 499		off = 0;
 500	}
 501	*iter_headp = iter_head;
 502	*offp = off;
 503}
 504
 505static size_t push_pipe(struct iov_iter *i, size_t size,
 506			int *iter_headp, size_t *offp)
 507{
 508	struct pipe_inode_info *pipe = i->pipe;
 509	unsigned int p_tail = pipe->tail;
 510	unsigned int p_mask = pipe->ring_size - 1;
 511	unsigned int iter_head;
 512	size_t off;
 513	ssize_t left;
 514
 515	if (unlikely(size > i->count))
 516		size = i->count;
 517	if (unlikely(!size))
 518		return 0;
 519
 520	left = size;
 521	data_start(i, &iter_head, &off);
 522	*iter_headp = iter_head;
 523	*offp = off;
 524	if (off) {
 525		left -= PAGE_SIZE - off;
 526		if (left <= 0) {
 527			pipe->bufs[iter_head & p_mask].len += size;
 528			return size;
 529		}
 530		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
 531		iter_head++;
 532	}
 533	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
 534		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
 535		struct page *page = alloc_page(GFP_USER);
 536		if (!page)
 537			break;
 538
 539		buf->ops = &default_pipe_buf_ops;
 540		buf->page = page;
 541		buf->offset = 0;
 542		buf->len = min_t(ssize_t, left, PAGE_SIZE);
 543		left -= buf->len;
 544		iter_head++;
 545		pipe->head = iter_head;
 546
 547		if (left == 0)
 548			return size;
 549	}
 550	return size - left;
 551}
 552
 553static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 554				struct iov_iter *i)
 555{
 556	struct pipe_inode_info *pipe = i->pipe;
 557	unsigned int p_mask = pipe->ring_size - 1;
 558	unsigned int i_head;
 559	size_t n, off;
 560
 561	if (!sanity(i))
 562		return 0;
 563
 564	bytes = n = push_pipe(i, bytes, &i_head, &off);
 565	if (unlikely(!n))
 566		return 0;
 567	do {
 568		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 569		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
 570		i->head = i_head;
 571		i->iov_offset = off + chunk;
 572		n -= chunk;
 573		addr += chunk;
 574		off = 0;
 575		i_head++;
 576	} while (n);
 577	i->count -= bytes;
 578	return bytes;
 579}
 580
 581static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
 582			      __wsum sum, size_t off)
 583{
 584	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
 585	return csum_block_add(sum, next, off);
 586}
 587
 588static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 589				__wsum *csum, struct iov_iter *i)
 590{
 591	struct pipe_inode_info *pipe = i->pipe;
 592	unsigned int p_mask = pipe->ring_size - 1;
 593	unsigned int i_head;
 594	size_t n, r;
 595	size_t off = 0;
 596	__wsum sum = *csum;
 597
 598	if (!sanity(i))
 599		return 0;
 600
 601	bytes = n = push_pipe(i, bytes, &i_head, &r);
 602	if (unlikely(!n))
 603		return 0;
 604	do {
 605		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
 606		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
 607		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
 608		kunmap_atomic(p);
 609		i->head = i_head;
 610		i->iov_offset = r + chunk;
 611		n -= chunk;
 612		off += chunk;
 613		addr += chunk;
 614		r = 0;
 615		i_head++;
 616	} while (n);
 617	i->count -= bytes;
 618	*csum = sum;
 619	return bytes;
 620}
 621
 622size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 623{
 624	const char *from = addr;
 625	if (unlikely(iov_iter_is_pipe(i)))
 626		return copy_pipe_to_iter(addr, bytes, i);
 627	if (iter_is_iovec(i))
 628		might_fault();
 629	iterate_and_advance(i, bytes, v,
 630		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
 631		memcpy_to_page(v.bv_page, v.bv_offset,
 632			       (from += v.bv_len) - v.bv_len, v.bv_len),
 633		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 634	)
 635
 636	return bytes;
 637}
 638EXPORT_SYMBOL(_copy_to_iter);
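/*
 * Note: by v5.9 the exported symbol is _copy_to_iter(); the
 * copy_to_iter() that callers use is a static inline in
 * include/linux/uio.h, roughly as below (reconstruction from
 * memory, hedged):
 */
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(!check_copy_size(addr, bytes, true)))
		return 0;
	return _copy_to_iter(addr, bytes, i);
}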
 639
 640#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
 641static int copyout_mcsafe(void __user *to, const void *from, size_t n)
 642{
 643	if (access_ok(to, n)) {
 644		instrument_copy_to_user(to, from, n);
 645		n = copy_to_user_mcsafe((__force void *) to, from, n);
 646	}
 647	return n;
 648}
 649
 650static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
 651		const char *from, size_t len)
 652{
 653	unsigned long ret;
 654	char *to;
 655
 656	to = kmap_atomic(page);
 657	ret = memcpy_mcsafe(to + offset, from, len);
 658	kunmap_atomic(to);
 659
 660	return ret;
 661}
 662
 663static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
 664				struct iov_iter *i)
 665{
 666	struct pipe_inode_info *pipe = i->pipe;
 667	unsigned int p_mask = pipe->ring_size - 1;
 668	unsigned int i_head;
 669	size_t n, off, xfer = 0;
 670
 671	if (!sanity(i))
 672		return 0;
 673
 674	bytes = n = push_pipe(i, bytes, &i_head, &off);
 675	if (unlikely(!n))
 676		return 0;
 677	do {
 678		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 679		unsigned long rem;
 680
 681		rem = memcpy_mcsafe_to_page(pipe->bufs[i_head & p_mask].page,
 682					    off, addr, chunk);
 683		i->head = i_head;
 684		i->iov_offset = off + chunk - rem;
 685		xfer += chunk - rem;
 686		if (rem)
 687			break;
 688		n -= chunk;
 689		addr += chunk;
 690		off = 0;
 691		i_head++;
 692	} while (n);
 693	i->count -= xfer;
 694	return xfer;
 695}
 696
 697/**
 698 * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
 699 * @addr: source kernel address
 700 * @bytes: total transfer length
 701 * @i: destination iterator
 702 *
 703 * The pmem driver arranges for filesystem-dax to use this facility via
 704 * dax_copy_to_iter() for protecting read/write to persistent memory.
 705 * Unless / until an architecture can guarantee identical performance
 706 * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
 707 * performance regression to switch more users to the mcsafe version.
 708 *
 709 * Otherwise, the main differences from a typical _copy_to_iter() are:
 710 *
 711 * * Typical tail/residue handling after a fault retries the copy
 712 *   byte-by-byte until the fault happens again. Re-triggering machine
 713 *   checks is potentially fatal so the implementation uses source
 714 *   alignment and poison alignment assumptions to avoid re-triggering
 715 *   hardware exceptions.
 716 *
 717 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 718 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 719 *   a short copy.
 720 *
 721 * See MCSAFE_TEST for self-test.
 722 */
 723size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
 724{
 725	const char *from = addr;
 726	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 727
 728	if (unlikely(iov_iter_is_pipe(i)))
 729		return copy_pipe_to_iter_mcsafe(addr, bytes, i);
 730	if (iter_is_iovec(i))
 731		might_fault();
 732	iterate_and_advance(i, bytes, v,
 733		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
 734		({
 735		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
 736					    (from += v.bv_len) - v.bv_len, v.bv_len);
 737		if (rem) {
 738			curr_addr = (unsigned long) from;
 739			bytes = curr_addr - s_addr - rem;
 740			return bytes;
 741		}
 742		}),
 743		({
 744		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
 745				v.iov_len);
 746		if (rem) {
 747			curr_addr = (unsigned long) from;
 748			bytes = curr_addr - s_addr - rem;
 749			return bytes;
 750		}
 751		})
 752	)
 753
 754	return bytes;
 755}
 756EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
 757#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
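/*
 * Illustrative sketch (editorial, not part of this file): how a dax/pmem
 * style read path might consume _copy_to_iter_mcsafe() and turn a short
 * copy into -EIO. "kaddr" is a kernel mapping supplied by the caller.
 */
#if 0
static ssize_t example_dax_read(void *kaddr, size_t len, struct iov_iter *i)
{
	size_t done = _copy_to_iter_mcsafe(kaddr, len, i);

	if (done != len)		/* source read hit poison mid-copy */
		return done ? done : -EIO;
	return done;
}
#endif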
 758
 759size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 760{
 761	char *to = addr;
 762	if (unlikely(iov_iter_is_pipe(i))) {
 763		WARN_ON(1);
 764		return 0;
 765	}
 766	if (iter_is_iovec(i))
 767		might_fault();
 768	iterate_and_advance(i, bytes, v,
 769		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
 770		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 771				 v.bv_offset, v.bv_len),
 772		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 773	)
 774
 775	return bytes;
 776}
 777EXPORT_SYMBOL(_copy_from_iter);
 778
 779bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 780{
 781	char *to = addr;
 782	if (unlikely(iov_iter_is_pipe(i))) {
 783		WARN_ON(1);
 784		return false;
 785	}
 786	if (unlikely(i->count < bytes))
 787		return false;
 788
 789	if (iter_is_iovec(i))
 790		might_fault();
 791	iterate_all_kinds(i, bytes, v, ({
 792		if (copyin((to += v.iov_len) - v.iov_len,
 793				      v.iov_base, v.iov_len))
 794			return false;
 795		0;}),
 796		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 797				 v.bv_offset, v.bv_len),
 798		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 799	)
 800
 801	iov_iter_advance(i, bytes);
 802	return true;
 803}
 804EXPORT_SYMBOL(_copy_from_iter_full);
 805
 806size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 807{
 808	char *to = addr;
 809	if (unlikely(iov_iter_is_pipe(i))) {
 810		WARN_ON(1);
 811		return 0;
 812	}
 813	iterate_and_advance(i, bytes, v,
 814		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 815					 v.iov_base, v.iov_len),
 816		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 817				 v.bv_offset, v.bv_len),
 818		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 819	)
 820
 821	return bytes;
 822}
 823EXPORT_SYMBOL(_copy_from_iter_nocache);
 824
 825#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 826/**
 827 * _copy_from_iter_flushcache - write destination through cpu cache
 828 * @addr: destination kernel address
 829 * @bytes: total transfer length
 830 * @i: source iterator
 831 *
 832 * The pmem driver arranges for filesystem-dax to use this facility via
 833 * dax_copy_from_iter() for ensuring that writes to persistent memory
 834 * are flushed through the CPU cache. It is differentiated from
 835 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 836 * for all iterator types. _copy_from_iter_nocache() only attempts to
 837 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 838 * instructions that strand dirty data in the cache.
 839 */
 840size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 841{
 842	char *to = addr;
 843	if (unlikely(iov_iter_is_pipe(i))) {
 844		WARN_ON(1);
 845		return 0;
 846	}
 847	iterate_and_advance(i, bytes, v,
 848		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
 849					 v.iov_base, v.iov_len),
 850		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
 851				 v.bv_offset, v.bv_len),
 852		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
 853			v.iov_len)
 854	)
 855
 856	return bytes;
 857}
 858EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 859#endif
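/*
 * Illustrative sketch (editorial, not part of this file): a pmem-style
 * write path relying on the flush-through guarantee described above; any
 * required store fencing is assumed to be issued by the caller afterwards.
 */
#if 0
static size_t example_pmem_write(void *kaddr, size_t len, struct iov_iter *i)
{
	/* every byte reported copied has been written through the cache */
	return _copy_from_iter_flushcache(kaddr, len, i);
}
#endif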
 860
 861bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 862{
 863	char *to = addr;
 864	if (unlikely(iov_iter_is_pipe(i))) {
 865		WARN_ON(1);
 866		return false;
 867	}
 868	if (unlikely(i->count < bytes))
 869		return false;
 870	iterate_all_kinds(i, bytes, v, ({
 871		if (__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
 872					     v.iov_base, v.iov_len))
 873			return false;
 874		0;}),
 875		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 876				 v.bv_offset, v.bv_len),
 877		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 878	)
 879
 880	iov_iter_advance(i, bytes);
 881	return true;
 882}
 883EXPORT_SYMBOL(_copy_from_iter_full_nocache);
 884
 885static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 886{
 887	struct page *head;
 888	size_t v = n + offset;
 889
 890	/*
 891	 * The general case needs to access the page order in order
 892	 * to compute the page size.
 893	 * However, we mostly deal with order-0 pages and thus can
 894	 * avoid a possible cache line miss for requests that fit in an
 895	 * order-0 page (and therefore in a page of any order).
 896	 */
 897	if (n <= v && v <= PAGE_SIZE)
 898		return true;
 899
 900	head = compound_head(page);
 901	v += (page - head) << PAGE_SHIFT;
 902
 903	if (likely(n <= v && v <= (page_size(head))))
 904		return true;
 905	WARN_ON(1);
 906	return false;
 907}
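/*
 * Worked example of the fast path above (editorial), assuming 4K pages:
 * offset == 4000 and n == 96 give v == 4096 <= PAGE_SIZE, so the copy is
 * accepted without touching the compound head; n == 200 pushes v past
 * PAGE_SIZE and falls through to the page_size(head) check instead.
 */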
 908
 909size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 910			 struct iov_iter *i)
 911{
 912	if (unlikely(!page_copy_sane(page, offset, bytes)))
 913		return 0;
 914	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 915		void *kaddr = kmap_atomic(page);
 916		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
 917		kunmap_atomic(kaddr);
 918		return wanted;
 919	} else if (unlikely(iov_iter_is_discard(i)))
 920		return bytes;
 921	else if (likely(!iov_iter_is_pipe(i)))
 922		return copy_page_to_iter_iovec(page, offset, bytes, i);
 923	else
 924		return copy_page_to_iter_pipe(page, offset, bytes, i);
 925}
 926EXPORT_SYMBOL(copy_page_to_iter);
 927
 928size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 929			 struct iov_iter *i)
 930{
 931	if (unlikely(!page_copy_sane(page, offset, bytes)))
 932		return 0;
 933	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
 934		WARN_ON(1);
 935		return 0;
 936	}
 937	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 938		void *kaddr = kmap_atomic(page);
 939		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
 940		kunmap_atomic(kaddr);
 941		return wanted;
 942	} else
 943		return copy_page_from_iter_iovec(page, offset, bytes, i);
 944}
 945EXPORT_SYMBOL(copy_page_from_iter);
 946
 947static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 948{
 949	struct pipe_inode_info *pipe = i->pipe;
 950	unsigned int p_mask = pipe->ring_size - 1;
 951	unsigned int i_head;
 952	size_t n, off;
 953
 954	if (!sanity(i))
 955		return 0;
 956
 957	bytes = n = push_pipe(i, bytes, &i_head, &off);
 958	if (unlikely(!n))
 959		return 0;
 960
 961	do {
 962		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 963		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
 964		i->head = i_head;
 965		i->iov_offset = off + chunk;
 966		n -= chunk;
 967		off = 0;
 968		i_head++;
 969	} while (n);
 970	i->count -= bytes;
 971	return bytes;
 972}
 973
 974size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 975{
 976	if (unlikely(iov_iter_is_pipe(i)))
 977		return pipe_zero(bytes, i);
 978	iterate_and_advance(i, bytes, v,
 979		clear_user(v.iov_base, v.iov_len),
 980		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 981		memset(v.iov_base, 0, v.iov_len)
 982	)
 983
 984	return bytes;
 985}
 986EXPORT_SYMBOL(iov_iter_zero);
 987
 988size_t iov_iter_copy_from_user_atomic(struct page *page,
 989		struct iov_iter *i, unsigned long offset, size_t bytes)
 990{
 991	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
 992	if (unlikely(!page_copy_sane(page, offset, bytes))) {
 993		kunmap_atomic(kaddr);
 994		return 0;
 995	}
 996	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
 997		kunmap_atomic(kaddr);
 998		WARN_ON(1);
 999		return 0;
1000	}
1001	iterate_all_kinds(i, bytes, v,
1002		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
1003		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
1004				 v.bv_offset, v.bv_len),
1005		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
1006	)
1007	kunmap_atomic(kaddr);
1008	return bytes;
1009}
1010EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
1011
1012static inline void pipe_truncate(struct iov_iter *i)
1013{
1014	struct pipe_inode_info *pipe = i->pipe;
1015	unsigned int p_tail = pipe->tail;
1016	unsigned int p_head = pipe->head;
1017	unsigned int p_mask = pipe->ring_size - 1;
1018
1019	if (!pipe_empty(p_head, p_tail)) {
1020		struct pipe_buffer *buf;
1021		unsigned int i_head = i->head;
1022		size_t off = i->iov_offset;
1023
1024		if (off) {
1025			buf = &pipe->bufs[i_head & p_mask];
1026			buf->len = off - buf->offset;
1027			i_head++;
1028		}
1029		while (p_head != i_head) {
1030			p_head--;
1031			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
1032		}
1033
1034		pipe->head = p_head;
1035	}
1036}
1037
1038static void pipe_advance(struct iov_iter *i, size_t size)
1039{
1040	struct pipe_inode_info *pipe = i->pipe;
1041	if (unlikely(i->count < size))
1042		size = i->count;
1043	if (size) {
1044		struct pipe_buffer *buf;
1045		unsigned int p_mask = pipe->ring_size - 1;
1046		unsigned int i_head = i->head;
1047		size_t off = i->iov_offset, left = size;
1048
1049		if (off) /* make it relative to the beginning of buffer */
1050			left += off - pipe->bufs[i_head & p_mask].offset;
1051		while (1) {
1052			buf = &pipe->bufs[i_head & p_mask];
1053			if (left <= buf->len)
1054				break;
1055			left -= buf->len;
1056			i_head++;
1057		}
1058		i->head = i_head;
1059		i->iov_offset = buf->offset + left;
1060	}
1061	i->count -= size;
1062	/* ... and discard everything past that point */
1063	pipe_truncate(i);
1064}
1065
1066void iov_iter_advance(struct iov_iter *i, size_t size)
1067{
1068	if (unlikely(iov_iter_is_pipe(i))) {
1069		pipe_advance(i, size);
1070		return;
1071	}
1072	if (unlikely(iov_iter_is_discard(i))) {
1073		i->count -= size;
1074		return;
1075	}
1076	iterate_and_advance(i, size, v, 0, 0, 0)
1077}
1078EXPORT_SYMBOL(iov_iter_advance);
1079
1080void iov_iter_revert(struct iov_iter *i, size_t unroll)
1081{
1082	if (!unroll)
1083		return;
1084	if (WARN_ON(unroll > MAX_RW_COUNT))
1085		return;
1086	i->count += unroll;
1087	if (unlikely(iov_iter_is_pipe(i))) {
1088		struct pipe_inode_info *pipe = i->pipe;
1089		unsigned int p_mask = pipe->ring_size - 1;
1090		unsigned int i_head = i->head;
1091		size_t off = i->iov_offset;
1092		while (1) {
1093			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
1094			size_t n = off - b->offset;
1095			if (unroll < n) {
1096				off -= unroll;
1097				break;
1098			}
1099			unroll -= n;
1100			if (!unroll && i_head == i->start_head) {
1101				off = 0;
1102				break;
1103			}
1104			i_head--;
1105			b = &pipe->bufs[i_head & p_mask];
1106			off = b->offset + b->len;
1107		}
1108		i->iov_offset = off;
1109		i->head = i_head;
1110		pipe_truncate(i);
1111		return;
1112	}
1113	if (unlikely(iov_iter_is_discard(i)))
1114		return;
1115	if (unroll <= i->iov_offset) {
1116		i->iov_offset -= unroll;
1117		return;
1118	}
1119	unroll -= i->iov_offset;
1120	if (iov_iter_is_bvec(i)) {
1121		const struct bio_vec *bvec = i->bvec;
1122		while (1) {
1123			size_t n = (--bvec)->bv_len;
1124			i->nr_segs++;
1125			if (unroll <= n) {
1126				i->bvec = bvec;
1127				i->iov_offset = n - unroll;
1128				return;
1129			}
1130			unroll -= n;
1131		}
1132	} else { /* same logics for iovec and kvec */
1133		const struct iovec *iov = i->iov;
1134		while (1) {
1135			size_t n = (--iov)->iov_len;
1136			i->nr_segs++;
1137			if (unroll <= n) {
1138				i->iov = iov;
1139				i->iov_offset = n - unroll;
1140				return;
1141			}
1142			unroll -= n;
1143		}
1144	}
1145}
1146EXPORT_SYMBOL(iov_iter_revert);
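/*
 * Illustrative sketch (editorial, not part of this file): the usual revert
 * pattern, undoing the advance performed by a copy that must be retried.
 */
#if 0
	size_t done = copy_to_iter(buf, len, i);

	if (need_retry)
		iov_iter_revert(i, done);	/* back to the pre-copy position */
#endif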
1147
1148/*
1149 * Return the count of just the current iov_iter segment.
1150 */
1151size_t iov_iter_single_seg_count(const struct iov_iter *i)
1152{
1153	if (unlikely(iov_iter_is_pipe(i)))
1154		return i->count;	// it is a silly place, anyway
1155	if (i->nr_segs == 1)
1156		return i->count;
1157	if (unlikely(iov_iter_is_discard(i)))
1158		return i->count;
1159	else if (iov_iter_is_bvec(i))
1160		return min(i->count, i->bvec->bv_len - i->iov_offset);
1161	else
1162		return min(i->count, i->iov->iov_len - i->iov_offset);
1163}
1164EXPORT_SYMBOL(iov_iter_single_seg_count);
1165
1166void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1167			const struct kvec *kvec, unsigned long nr_segs,
1168			size_t count)
1169{
1170	WARN_ON(direction & ~(READ | WRITE));
1171	i->type = ITER_KVEC | (direction & (READ | WRITE));
1172	i->kvec = kvec;
1173	i->nr_segs = nr_segs;
1174	i->iov_offset = 0;
1175	i->count = count;
1176}
1177EXPORT_SYMBOL(iov_iter_kvec);
1178
1179void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1180			const struct bio_vec *bvec, unsigned long nr_segs,
1181			size_t count)
1182{
1183	WARN_ON(direction & ~(READ | WRITE));
1184	i->type = ITER_BVEC | (direction & (READ | WRITE));
1185	i->bvec = bvec;
1186	i->nr_segs = nr_segs;
1187	i->iov_offset = 0;
1188	i->count = count;
1189}
1190EXPORT_SYMBOL(iov_iter_bvec);
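/*
 * Illustrative sketch (editorial, not part of this file): wrapping a
 * kernel buffer and a single page in iterators; "buf", "buflen" and
 * "page" are assumed to come from the caller. READ marks an iterator as
 * the destination of a transfer, WRITE as its source.
 */
#if 0
	struct kvec kv = { .iov_base = buf, .iov_len = buflen };
	struct bio_vec bv = { .bv_page = page, .bv_len = PAGE_SIZE,
			      .bv_offset = 0 };
	struct iov_iter dst, src;

	iov_iter_kvec(&dst, READ, &kv, 1, buflen);	/* data lands in buf */
	iov_iter_bvec(&src, WRITE, &bv, 1, PAGE_SIZE);	/* data read from page */
#endif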
1191
1192void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1193			struct pipe_inode_info *pipe,
1194			size_t count)
1195{
1196	BUG_ON(direction != READ);
1197	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1198	i->type = ITER_PIPE | READ;
1199	i->pipe = pipe;
1200	i->head = pipe->head;
1201	i->iov_offset = 0;
1202	i->count = count;
1203	i->start_head = i->head;
1204}
1205EXPORT_SYMBOL(iov_iter_pipe);
1206
1207/**
1208 * iov_iter_discard - Initialise an I/O iterator that discards data
1209 * @i: The iterator to initialise.
1210 * @direction: The direction of the transfer.
1211 * @count: The size of the I/O buffer in bytes.
1212 *
1213 * Set up an I/O iterator that just discards everything that's written to it.
1214 * It's only available as a READ iterator.
1215 */
1216void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1217{
1218	BUG_ON(direction != READ);
1219	i->type = ITER_DISCARD | READ;
1220	i->count = count;
1221	i->iov_offset = 0;
1222}
1223EXPORT_SYMBOL(iov_iter_discard);
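/*
 * Illustrative sketch (editorial, not part of this file): draining bytes
 * a caller wants skipped. Everything "copied" into a discard iterator is
 * thrown away; only the remaining count moves.
 */
#if 0
	struct iov_iter sink;

	iov_iter_discard(&sink, READ, to_skip);
	copy_to_iter(src, to_skip, &sink);
#endif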
1224
1225unsigned long iov_iter_alignment(const struct iov_iter *i)
1226{
1227	unsigned long res = 0;
1228	size_t size = i->count;
1229
1230	if (unlikely(iov_iter_is_pipe(i))) {
1231		unsigned int p_mask = i->pipe->ring_size - 1;
1232
1233		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
1234			return size | i->iov_offset;
1235		return size;
1236	}
1237	iterate_all_kinds(i, size, v,
1238		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
1239		res |= v.bv_offset | v.bv_len,
1240		res |= (unsigned long)v.iov_base | v.iov_len
1241	)
1242	return res;
1243}
1244EXPORT_SYMBOL(iov_iter_alignment);
1245
1246unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1247{
1248	unsigned long res = 0;
1249	size_t size = i->count;
1250
1251	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1252		WARN_ON(1);
1253		return ~0U;
1254	}
1255
1256	iterate_all_kinds(i, size, v,
1257		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
1258			(size != v.iov_len ? size : 0), 0),
1259		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
1260			(size != v.bv_len ? size : 0)),
1261		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
1262			(size != v.iov_len ? size : 0))
1263		);
1264	return res;
1265}
1266EXPORT_SYMBOL(iov_iter_gap_alignment);
1267
1268static inline ssize_t __pipe_get_pages(struct iov_iter *i,
1269				size_t maxsize,
1270				struct page **pages,
1271				int iter_head,
1272				size_t *start)
1273{
1274	struct pipe_inode_info *pipe = i->pipe;
1275	unsigned int p_mask = pipe->ring_size - 1;
1276	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
1277	if (!n)
1278		return -EFAULT;
1279
1280	maxsize = n;
1281	n += *start;
1282	while (n > 0) {
1283		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
1284		iter_head++;
1285		n -= PAGE_SIZE;
1286	}
1287
1288	return maxsize;
1289}
1290
1291static ssize_t pipe_get_pages(struct iov_iter *i,
1292		   struct page **pages, size_t maxsize, unsigned maxpages,
1293		   size_t *start)
1294{
1295	unsigned int iter_head, npages;
1296	size_t capacity;
1297
1298	if (!maxsize)
1299		return 0;
1300
1301	if (!sanity(i))
1302		return -EFAULT;
1303
1304	data_start(i, &iter_head, start);
1305	/* Amount of free space: some of this one + all after this one */
1306	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1307	capacity = min(npages, maxpages) * PAGE_SIZE - *start;
1308
1309	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
1310}
1311
1312ssize_t iov_iter_get_pages(struct iov_iter *i,
1313		   struct page **pages, size_t maxsize, unsigned maxpages,
1314		   size_t *start)
1315{
1316	if (maxsize > i->count)
1317		maxsize = i->count;
1318
1319	if (unlikely(iov_iter_is_pipe(i)))
1320		return pipe_get_pages(i, pages, maxsize, maxpages, start);
1321	if (unlikely(iov_iter_is_discard(i)))
1322		return -EFAULT;
1323
1324	iterate_all_kinds(i, maxsize, v, ({
1325		unsigned long addr = (unsigned long)v.iov_base;
1326		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1327		int n;
1328		int res;
1329
1330		if (len > maxpages * PAGE_SIZE)
1331			len = maxpages * PAGE_SIZE;
1332		addr &= ~(PAGE_SIZE - 1);
1333		n = DIV_ROUND_UP(len, PAGE_SIZE);
1334		res = get_user_pages_fast(addr, n,
1335				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1336				pages);
1337		if (unlikely(res < 0))
1338			return res;
1339		return (res == n ? len : res * PAGE_SIZE) - *start;
1340	0;}),({
1341		/* can't be more than PAGE_SIZE */
1342		*start = v.bv_offset;
1343		get_page(*pages = v.bv_page);
1344		return v.bv_len;
1345	}),({
1346		return -EFAULT;
1347	})
1348	)
1349	return 0;
1350}
1351EXPORT_SYMBOL(iov_iter_get_pages);
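/*
 * Illustrative sketch (editorial, not part of this file): pinning up to
 * 16 pages of an iterator for I/O. iov_iter_get_pages() does not advance
 * the iterator, and the caller owns the resulting page references.
 */
#if 0
	struct page *pages[16];
	size_t off;
	ssize_t got;

	got = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &off);
	if (got > 0) {
		/* "got" bytes start at offset "off" within pages[0];
		 * release each page with put_page() when done */
	}
#endif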
1352
1353static struct page **get_pages_array(size_t n)
1354{
1355	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
1356}
1357
1358static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
1359		   struct page ***pages, size_t maxsize,
1360		   size_t *start)
1361{
1362	struct page **p;
1363	unsigned int iter_head, npages;
1364	ssize_t n;
1365
1366	if (!maxsize)
1367		return 0;
1368
1369	if (!sanity(i))
1370		return -EFAULT;
1371
1372	data_start(i, &iter_head, start);
1373	/* Amount of free space: some of this one + all after this one */
1374	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
1375	n = npages * PAGE_SIZE - *start;
1376	if (maxsize > n)
1377		maxsize = n;
1378	else
1379		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
1380	p = get_pages_array(npages);
1381	if (!p)
1382		return -ENOMEM;
1383	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
1384	if (n > 0)
1385		*pages = p;
1386	else
1387		kvfree(p);
1388	return n;
1389}
1390
1391ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1392		   struct page ***pages, size_t maxsize,
1393		   size_t *start)
1394{
1395	struct page **p;
1396
1397	if (maxsize > i->count)
1398		maxsize = i->count;
1399
1400	if (unlikely(iov_iter_is_pipe(i)))
1401		return pipe_get_pages_alloc(i, pages, maxsize, start);
1402	if (unlikely(iov_iter_is_discard(i)))
1403		return -EFAULT;
1404
1405	iterate_all_kinds(i, maxsize, v, ({
1406		unsigned long addr = (unsigned long)v.iov_base;
1407		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1408		int n;
1409		int res;
1410
1411		addr &= ~(PAGE_SIZE - 1);
1412		n = DIV_ROUND_UP(len, PAGE_SIZE);
1413		p = get_pages_array(n);
1414		if (!p)
1415			return -ENOMEM;
1416		res = get_user_pages_fast(addr, n,
1417				iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1418		if (unlikely(res < 0)) {
1419			kvfree(p);
1420			return res;
1421		}
1422		*pages = p;
1423		return (res == n ? len : res * PAGE_SIZE) - *start;
1424	0;}),({
1425		/* can't be more than PAGE_SIZE */
1426		*start = v.bv_offset;
1427		*pages = p = get_pages_array(1);
1428		if (!p)
1429			return -ENOMEM;
1430		get_page(*p = v.bv_page);
1431		return v.bv_len;
1432	}),({
1433		return -EFAULT;
1434	})
1435	)
1436	return 0;
1437}
1438EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1439
1440size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1441			       struct iov_iter *i)
1442{
1443	char *to = addr;
1444	__wsum sum, next;
1445	size_t off = 0;
1446	sum = *csum;
1447	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1448		WARN_ON(1);
1449		return 0;
1450	}
1451	iterate_and_advance(i, bytes, v, ({
1452		int err = 0;
1453		next = csum_and_copy_from_user(v.iov_base,
1454					       (to += v.iov_len) - v.iov_len,
1455					       v.iov_len, 0, &err);
1456		if (!err) {
1457			sum = csum_block_add(sum, next, off);
1458			off += v.iov_len;
1459		}
1460		err ? v.iov_len : 0;
1461	}), ({
1462		char *p = kmap_atomic(v.bv_page);
1463		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1464				      p + v.bv_offset, v.bv_len,
1465				      sum, off);
1466		kunmap_atomic(p);
1467		off += v.bv_len;
1468	}),({
1469		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1470				      v.iov_base, v.iov_len,
1471				      sum, off);
1472		off += v.iov_len;
1473	})
1474	)
1475	*csum = sum;
1476	return bytes;
1477}
1478EXPORT_SYMBOL(csum_and_copy_from_iter);
1479
1480bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1481			       struct iov_iter *i)
1482{
1483	char *to = addr;
1484	__wsum sum, next;
1485	size_t off = 0;
1486	sum = *csum;
1487	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1488		WARN_ON(1);
1489		return false;
1490	}
1491	if (unlikely(i->count < bytes))
1492		return false;
1493	iterate_all_kinds(i, bytes, v, ({
1494		int err = 0;
1495		next = csum_and_copy_from_user(v.iov_base,
1496					       (to += v.iov_len) - v.iov_len,
1497					       v.iov_len, 0, &err);
1498		if (err)
1499			return false;
1500		sum = csum_block_add(sum, next, off);
1501		off += v.iov_len;
1502		0;
1503	}), ({
1504		char *p = kmap_atomic(v.bv_page);
1505		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1506				      p + v.bv_offset, v.bv_len,
1507				      sum, off);
1508		kunmap_atomic(p);
1509		off += v.bv_len;
1510	}),({
1511		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1512				      v.iov_base, v.iov_len,
1513				      sum, off);
1514		off += v.iov_len;
1515	})
1516	)
1517	*csum = sum;
1518	iov_iter_advance(i, bytes);
1519	return true;
1520}
1521EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1522
1523size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
1524			     struct iov_iter *i)
1525{
1526	const char *from = addr;
1527	__wsum *csum = csump;
1528	__wsum sum, next;
1529	size_t off = 0;
1530
1531	if (unlikely(iov_iter_is_pipe(i)))
1532		return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
1533
1534	sum = *csum;
1535	if (unlikely(iov_iter_is_discard(i))) {
1536		WARN_ON(1);	/* for now */
1537		return 0;
1538	}
1539	iterate_and_advance(i, bytes, v, ({
1540		int err = 0;
1541		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1542					     v.iov_base,
1543					     v.iov_len, 0, &err);
1544		if (!err) {
1545			sum = csum_block_add(sum, next, off);
1546			off += v.iov_len;
1547		}
1548		err ? v.iov_len : 0;
1549	}), ({
1550		char *p = kmap_atomic(v.bv_page);
1551		sum = csum_and_memcpy(p + v.bv_offset,
1552				      (from += v.bv_len) - v.bv_len,
1553				      v.bv_len, sum, off);
1554		kunmap_atomic(p);
1555		off += v.bv_len;
1556	}),({
1557		sum = csum_and_memcpy(v.iov_base,
1558				     (from += v.iov_len) - v.iov_len,
1559				     v.iov_len, sum, off);
1560		off += v.iov_len;
1561	})
1562	)
1563	*csum = sum;
1564	return bytes;
1565}
1566EXPORT_SYMBOL(csum_and_copy_to_iter);
1567
1568size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1569		struct iov_iter *i)
1570{
1571#ifdef CONFIG_CRYPTO_HASH
1572	struct ahash_request *hash = hashp;
1573	struct scatterlist sg;
1574	size_t copied;
1575
1576	copied = copy_to_iter(addr, bytes, i);
1577	sg_init_one(&sg, addr, copied);
1578	ahash_request_set_crypt(hash, &sg, NULL, copied);
1579	crypto_ahash_update(hash);
1580	return copied;
1581#else
1582	return 0;
1583#endif
1584}
1585EXPORT_SYMBOL(hash_and_copy_to_iter);
1586
1587int iov_iter_npages(const struct iov_iter *i, int maxpages)
1588{
1589	size_t size = i->count;
1590	int npages = 0;
1591
1592	if (!size)
1593		return 0;
1594	if (unlikely(iov_iter_is_discard(i)))
1595		return 0;
1596
1597	if (unlikely(iov_iter_is_pipe(i))) {
1598		struct pipe_inode_info *pipe = i->pipe;
1599		unsigned int iter_head;
1600		size_t off;
1601
1602		if (!sanity(i))
1603			return 0;
1604
1605		data_start(i, &iter_head, &off);
1606		/* some of this one + all after this one */
1607		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
1608		if (npages >= maxpages)
1609			return maxpages;
1610	} else iterate_all_kinds(i, size, v, ({
1611		unsigned long p = (unsigned long)v.iov_base;
1612		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1613			- p / PAGE_SIZE;
1614		if (npages >= maxpages)
1615			return maxpages;
1616	0;}),({
1617		npages++;
1618		if (npages >= maxpages)
1619			return maxpages;
1620	}),({
1621		unsigned long p = (unsigned long)v.iov_base;
1622		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1623			- p / PAGE_SIZE;
1624		if (npages >= maxpages)
1625			return maxpages;
1626	})
1627	)
1628	return npages;
1629}
1630EXPORT_SYMBOL(iov_iter_npages);
1631
1632const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1633{
1634	*new = *old;
1635	if (unlikely(iov_iter_is_pipe(new))) {
1636		WARN_ON(1);
1637		return NULL;
1638	}
1639	if (unlikely(iov_iter_is_discard(new)))
1640		return NULL;
1641	if (iov_iter_is_bvec(new))
1642		return new->bvec = kmemdup(new->bvec,
1643				    new->nr_segs * sizeof(struct bio_vec),
1644				    flags);
1645	else
1646		/* iovec and kvec have identical layout */
1647		return new->iov = kmemdup(new->iov,
1648				   new->nr_segs * sizeof(struct iovec),
1649				   flags);
1650}
1651EXPORT_SYMBOL(dup_iter);
1652
1653/**
1654 * import_iovec() - Copy an array of &struct iovec from userspace
1655 *     into the kernel, check that it is valid, and initialize a new
1656 *     &struct iov_iter iterator to access it.
1657 *
1658 * @type: One of %READ or %WRITE.
1659 * @uvector: Pointer to the userspace array.
1660 * @nr_segs: Number of elements in userspace array.
1661 * @fast_segs: Number of elements in @iov.
1662 * @iov: (input and output parameter) Pointer to pointer to (usually small
1663 *     on-stack) kernel array.
1664 * @i: Pointer to iterator that will be initialized on success.
1665 *
1666 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
1667 * then this function places %NULL in *@iov on return. Otherwise, a new
1668 * array will be allocated and the result placed in *@iov. This means that
1669 * the caller may call kfree() on *@iov regardless of whether the small
1670 * on-stack array was used or not (and regardless of whether this function
1671 * returns an error or not).
1672 *
1673 * Return: Negative error code on error, bytes imported on success
1674 */
1675ssize_t import_iovec(int type, const struct iovec __user * uvector,
1676		 unsigned nr_segs, unsigned fast_segs,
1677		 struct iovec **iov, struct iov_iter *i)
1678{
1679	ssize_t n;
1680	struct iovec *p;
1681	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1682				  *iov, &p);
1683	if (n < 0) {
1684		if (p != *iov)
1685			kfree(p);
1686		*iov = NULL;
1687		return n;
1688	}
1689	iov_iter_init(i, type, p, nr_segs, n);
1690	*iov = p == *iov ? NULL : p;
1691	return n;
1692}
1693EXPORT_SYMBOL(import_iovec);
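/*
 * Illustrative sketch (editorial, not part of this file): the calling
 * convention the kernel-doc above describes. kfree(iov) is always safe
 * because *iov is left NULL whenever the on-stack array was used.
 */
#if 0
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... consume up to "ret" bytes via iter ... */
	kfree(iov);
#endif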
1694
1695#ifdef CONFIG_COMPAT
1696#include <linux/compat.h>
1697
1698ssize_t compat_import_iovec(int type,
1699		const struct compat_iovec __user * uvector,
1700		unsigned nr_segs, unsigned fast_segs,
1701		struct iovec **iov, struct iov_iter *i)
1702{
1703	ssize_t n;
1704	struct iovec *p;
1705	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1706				  *iov, &p);
1707	if (n < 0) {
1708		if (p != *iov)
1709			kfree(p);
1710		*iov = NULL;
1711		return n;
1712	}
1713	iov_iter_init(i, type, p, nr_segs, n);
1714	*iov = p == *iov ? NULL : p;
1715	return n;
1716}
1717EXPORT_SYMBOL(compat_import_iovec);
1718#endif
1719
1720int import_single_range(int rw, void __user *buf, size_t len,
1721		 struct iovec *iov, struct iov_iter *i)
1722{
1723	if (len > MAX_RW_COUNT)
1724		len = MAX_RW_COUNT;
1725	if (unlikely(!access_ok(buf, len)))
1726		return -EFAULT;
1727
1728	iov->iov_base = buf;
1729	iov->iov_len = len;
1730	iov_iter_init(i, rw, iov, 1, len);
1731	return 0;
1732}
1733EXPORT_SYMBOL(import_single_range);
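/*
 * Illustrative sketch (editorial, not part of this file): the
 * single-buffer variant. "iov" must outlive "iter", since the iterator
 * keeps pointing into it.
 */
#if 0
	struct iovec iov;
	struct iov_iter iter;

	if (import_single_range(WRITE, ubuf, len, &iov, &iter))
		return -EFAULT;
	/* iter now covers min(len, MAX_RW_COUNT) bytes of ubuf */
#endif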
1734
1735int iov_iter_for_each_range(struct iov_iter *i, size_t bytes,
1736			    int (*f)(struct kvec *vec, void *context),
1737			    void *context)
1738{
1739	struct kvec w;
1740	int err = -EINVAL;
1741	if (!bytes)
1742		return 0;
1743
1744	iterate_all_kinds(i, bytes, v, -EINVAL, ({
1745		w.iov_base = kmap(v.bv_page) + v.bv_offset;
1746		w.iov_len = v.bv_len;
1747		err = f(&w, context);
1748		kunmap(v.bv_page);
1749		err;}), ({
1750		w = v;
1751		err = f(&w, context);})
1752	)
1753	return err;
1754}
1755EXPORT_SYMBOL(iov_iter_for_each_range);
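/*
 * Illustrative sketch (editorial, not part of this file): a callback that
 * tallies the length of every mapped range; the return value of the last
 * invocation is what iov_iter_for_each_range() hands back.
 */
#if 0
static int example_tally(struct kvec *vec, void *context)
{
	*(size_t *)context += vec->iov_len;
	return 0;
}
#endif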