v4.10.11
   1#include <linux/export.h>
   2#include <linux/bvec.h>
   3#include <linux/uio.h>
   4#include <linux/pagemap.h>
   5#include <linux/slab.h>
   6#include <linux/vmalloc.h>
   7#include <linux/splice.h>
   8#include <net/checksum.h>
   9
  10#define PIPE_PARANOIA /* for now */
  11
  12#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  13	size_t left;					\
  14	size_t wanted = n;				\
  15	__p = i->iov;					\
  16	__v.iov_len = min(n, __p->iov_len - skip);	\
  17	if (likely(__v.iov_len)) {			\
  18		__v.iov_base = __p->iov_base + skip;	\
  19		left = (STEP);				\
  20		__v.iov_len -= left;			\
  21		skip += __v.iov_len;			\
  22		n -= __v.iov_len;			\
  23	} else {					\
  24		left = 0;				\
  25	}						\
  26	while (unlikely(!left && n)) {			\
  27		__p++;					\
  28		__v.iov_len = min(n, __p->iov_len);	\
  29		if (unlikely(!__v.iov_len))		\
  30			continue;			\
  31		__v.iov_base = __p->iov_base;		\
  32		left = (STEP);				\
  33		__v.iov_len -= left;			\
  34		skip = __v.iov_len;			\
  35		n -= __v.iov_len;			\
  36	}						\
  37	n = wanted - n;					\
  38}
  39
  40#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
  41	size_t wanted = n;				\
  42	__p = i->kvec;					\
  43	__v.iov_len = min(n, __p->iov_len - skip);	\
  44	if (likely(__v.iov_len)) {			\
  45		__v.iov_base = __p->iov_base + skip;	\
  46		(void)(STEP);				\
  47		skip += __v.iov_len;			\
  48		n -= __v.iov_len;			\
  49	}						\
  50	while (unlikely(n)) {				\
  51		__p++;					\
  52		__v.iov_len = min(n, __p->iov_len);	\
  53		if (unlikely(!__v.iov_len))		\
  54			continue;			\
  55		__v.iov_base = __p->iov_base;		\
  56		(void)(STEP);				\
  57		skip = __v.iov_len;			\
  58		n -= __v.iov_len;			\
  59	}						\
  60	n = wanted;					\
  61}
  62
  63#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
  64	struct bvec_iter __start;			\
  65	__start.bi_size = n;				\
  66	__start.bi_bvec_done = skip;			\
  67	__start.bi_idx = 0;				\
  68	for_each_bvec(__v, i->bvec, __bi, __start) {	\
  69		if (!__v.bv_len)			\
  70			continue;			\
  71		(void)(STEP);				\
  72	}						\
  73}
  74
  75#define iterate_all_kinds(i, n, v, I, B, K) {			\
  76	if (likely(n)) {					\
  77		size_t skip = i->iov_offset;			\
  78		if (unlikely(i->type & ITER_BVEC)) {		\
  79			struct bio_vec v;			\
  80			struct bvec_iter __bi;			\
  81			iterate_bvec(i, n, v, __bi, skip, (B))	\
  82		} else if (unlikely(i->type & ITER_KVEC)) {	\
  83			const struct kvec *kvec;		\
  84			struct kvec v;				\
  85			iterate_kvec(i, n, v, kvec, skip, (K))	\
  86		} else {					\
  87			const struct iovec *iov;		\
  88			struct iovec v;				\
  89			iterate_iovec(i, n, v, iov, skip, (I))	\
  90		}						\
  91	}							\
  92}
  93
  94#define iterate_and_advance(i, n, v, I, B, K) {			\
  95	if (unlikely(i->count < n))				\
  96		n = i->count;					\
  97	if (i->count) {						\
  98		size_t skip = i->iov_offset;			\
  99		if (unlikely(i->type & ITER_BVEC)) {		\
 100			const struct bio_vec *bvec = i->bvec;	\
 101			struct bio_vec v;			\
 102			struct bvec_iter __bi;			\
 103			iterate_bvec(i, n, v, __bi, skip, (B))	\
 104			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
 105			i->nr_segs -= i->bvec - bvec;		\
 106			skip = __bi.bi_bvec_done;		\
 107		} else if (unlikely(i->type & ITER_KVEC)) {	\
 108			const struct kvec *kvec;		\
 109			struct kvec v;				\
 110			iterate_kvec(i, n, v, kvec, skip, (K))	\
 111			if (skip == kvec->iov_len) {		\
 112				kvec++;				\
 113				skip = 0;			\
 114			}					\
 115			i->nr_segs -= kvec - i->kvec;		\
 116			i->kvec = kvec;				\
 117		} else {					\
 118			const struct iovec *iov;		\
 119			struct iovec v;				\
 120			iterate_iovec(i, n, v, iov, skip, (I))	\
 121			if (skip == iov->iov_len) {		\
 122				iov++;				\
 123				skip = 0;			\
 124			}					\
 125			i->nr_segs -= iov - i->iov;		\
 126			i->iov = iov;				\
 127		}						\
 128		i->count -= n;					\
 129		i->iov_offset = skip;				\
 130	}							\
 131}
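/*
 * Illustrative sketch, not part of the original file: a caller passes
 * one STEP expression per iterator flavour and the macro selects the
 * right one from i->type (ITER_PIPE is not handled here; callers
 * check for it first, as the functions below do).  The iovec STEP
 * must evaluate to the number of bytes it failed to process (0 on
 * full success); the bvec and kvec STEPs are evaluated for their
 * side effects only.  A hypothetical byte-counting helper:
 */
static size_t count_bytes_example(struct iov_iter *i, size_t bytes)
{
	size_t total = 0;
	iterate_all_kinds(i, bytes, v,
		({ total += v.iov_len; 0; }),	/* userspace iovec */
		total += v.bv_len,		/* bio_vec page */
		total += v.iov_len		/* kernel kvec */
	)
	/* unlike iterate_and_advance(), the iterator is left untouched */
	return total;
}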
 132
 133static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 134			 struct iov_iter *i)
 135{
 136	size_t skip, copy, left, wanted;
 137	const struct iovec *iov;
 138	char __user *buf;
 139	void *kaddr, *from;
 140
 141	if (unlikely(bytes > i->count))
 142		bytes = i->count;
 143
 144	if (unlikely(!bytes))
 145		return 0;
 146
 147	wanted = bytes;
 148	iov = i->iov;
 149	skip = i->iov_offset;
 150	buf = iov->iov_base + skip;
 151	copy = min(bytes, iov->iov_len - skip);
 152
 153	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 154		kaddr = kmap_atomic(page);
 155		from = kaddr + offset;
 156
 157		/* first chunk, usually the only one */
 158		left = __copy_to_user_inatomic(buf, from, copy);
 159		copy -= left;
 160		skip += copy;
 161		from += copy;
 162		bytes -= copy;
 163
 164		while (unlikely(!left && bytes)) {
 165			iov++;
 166			buf = iov->iov_base;
 167			copy = min(bytes, iov->iov_len);
 168			left = __copy_to_user_inatomic(buf, from, copy);
 169			copy -= left;
 170			skip = copy;
 171			from += copy;
 172			bytes -= copy;
 173		}
 174		if (likely(!bytes)) {
 175			kunmap_atomic(kaddr);
 176			goto done;
 177		}
 178		offset = from - kaddr;
 179		buf += copy;
 180		kunmap_atomic(kaddr);
 181		copy = min(bytes, iov->iov_len - skip);
 182	}
 183	/* Too bad - revert to non-atomic kmap */
 184
 185	kaddr = kmap(page);
 186	from = kaddr + offset;
 187	left = __copy_to_user(buf, from, copy);
 188	copy -= left;
 189	skip += copy;
 190	from += copy;
 191	bytes -= copy;
 192	while (unlikely(!left && bytes)) {
 193		iov++;
 194		buf = iov->iov_base;
 195		copy = min(bytes, iov->iov_len);
 196		left = __copy_to_user(buf, from, copy);
 197		copy -= left;
 198		skip = copy;
 199		from += copy;
 200		bytes -= copy;
 201	}
 202	kunmap(page);
 203
 204done:
 205	if (skip == iov->iov_len) {
 206		iov++;
 207		skip = 0;
 208	}
 209	i->count -= wanted - bytes;
 210	i->nr_segs -= iov - i->iov;
 211	i->iov = iov;
 212	i->iov_offset = skip;
 213	return wanted - bytes;
 214}
 215
 216static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 217			 struct iov_iter *i)
 218{
 219	size_t skip, copy, left, wanted;
 220	const struct iovec *iov;
 221	char __user *buf;
 222	void *kaddr, *to;
 223
 224	if (unlikely(bytes > i->count))
 225		bytes = i->count;
 226
 227	if (unlikely(!bytes))
 228		return 0;
 229
 230	wanted = bytes;
 231	iov = i->iov;
 232	skip = i->iov_offset;
 233	buf = iov->iov_base + skip;
 234	copy = min(bytes, iov->iov_len - skip);
 235
 236	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 237		kaddr = kmap_atomic(page);
 238		to = kaddr + offset;
 239
 240		/* first chunk, usually the only one */
 241		left = __copy_from_user_inatomic(to, buf, copy);
 242		copy -= left;
 243		skip += copy;
 244		to += copy;
 245		bytes -= copy;
 246
 247		while (unlikely(!left && bytes)) {
 248			iov++;
 249			buf = iov->iov_base;
 250			copy = min(bytes, iov->iov_len);
 251			left = __copy_from_user_inatomic(to, buf, copy);
 252			copy -= left;
 253			skip = copy;
 254			to += copy;
 255			bytes -= copy;
 256		}
 257		if (likely(!bytes)) {
 258			kunmap_atomic(kaddr);
 259			goto done;
 260		}
 261		offset = to - kaddr;
 262		buf += copy;
 263		kunmap_atomic(kaddr);
 264		copy = min(bytes, iov->iov_len - skip);
 265	}
 266	/* Too bad - revert to non-atomic kmap */
 267
 268	kaddr = kmap(page);
 269	to = kaddr + offset;
 270	left = __copy_from_user(to, buf, copy);
 271	copy -= left;
 272	skip += copy;
 273	to += copy;
 274	bytes -= copy;
 275	while (unlikely(!left && bytes)) {
 276		iov++;
 277		buf = iov->iov_base;
 278		copy = min(bytes, iov->iov_len);
 279		left = __copy_from_user(to, buf, copy);
 280		copy -= left;
 281		skip = copy;
 282		to += copy;
 283		bytes -= copy;
 284	}
 285	kunmap(page);
 286
 287done:
 288	if (skip == iov->iov_len) {
 289		iov++;
 290		skip = 0;
 291	}
 292	i->count -= wanted - bytes;
 293	i->nr_segs -= iov - i->iov;
 294	i->iov = iov;
 295	i->iov_offset = skip;
 296	return wanted - bytes;
 297}
 298
 299#ifdef PIPE_PARANOIA
 300static bool sanity(const struct iov_iter *i)
 301{
 302	struct pipe_inode_info *pipe = i->pipe;
 303	int idx = i->idx;
 304	int next = pipe->curbuf + pipe->nrbufs;
 305	if (i->iov_offset) {
 306		struct pipe_buffer *p;
 307		if (unlikely(!pipe->nrbufs))
 308			goto Bad;	// pipe must be non-empty
 309		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
 310			goto Bad;	// must be at the last buffer...
 311
 312		p = &pipe->bufs[idx];
 313		if (unlikely(p->offset + p->len != i->iov_offset))
 314			goto Bad;	// ... at the end of segment
 315	} else {
 316		if (idx != (next & (pipe->buffers - 1)))
 317			goto Bad;	// must be right after the last buffer
 318	}
 319	return true;
 320Bad:
 321	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
 322	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
 323			pipe->curbuf, pipe->nrbufs, pipe->buffers);
 324	for (idx = 0; idx < pipe->buffers; idx++)
 325		printk(KERN_ERR "[%p %p %d %d]\n",
 326			pipe->bufs[idx].ops,
 327			pipe->bufs[idx].page,
 328			pipe->bufs[idx].offset,
 329			pipe->bufs[idx].len);
 330	WARN_ON(1);
 331	return false;
 332}
 333#else
 334#define sanity(i) true
 335#endif
 336
 337static inline int next_idx(int idx, struct pipe_inode_info *pipe)
 338{
 339	return (idx + 1) & (pipe->buffers - 1);
 340}
 341
 342static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
 343			 struct iov_iter *i)
 344{
 345	struct pipe_inode_info *pipe = i->pipe;
 346	struct pipe_buffer *buf;
 347	size_t off;
 348	int idx;
 349
 350	if (unlikely(bytes > i->count))
 351		bytes = i->count;
 352
 353	if (unlikely(!bytes))
 354		return 0;
 355
 356	if (!sanity(i))
 357		return 0;
 358
 359	off = i->iov_offset;
 360	idx = i->idx;
 361	buf = &pipe->bufs[idx];
 362	if (off) {
 363		if (offset == off && buf->page == page) {
 364			/* merge with the last one */
 365			buf->len += bytes;
 366			i->iov_offset += bytes;
 367			goto out;
 368		}
 369		idx = next_idx(idx, pipe);
 370		buf = &pipe->bufs[idx];
 371	}
 372	if (idx == pipe->curbuf && pipe->nrbufs)
 373		return 0;
 374	pipe->nrbufs++;
 375	buf->ops = &page_cache_pipe_buf_ops;
 376	get_page(buf->page = page);
 377	buf->offset = offset;
 378	buf->len = bytes;
 379	i->iov_offset = offset + bytes;
 380	i->idx = idx;
 381out:
 382	i->count -= bytes;
 383	return bytes;
 384}
 385
 386/*
 387 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 388 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 389 *
 390 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 391 * because it is an invalid address).
 392 */
 393int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 394{
 395	size_t skip = i->iov_offset;
 396	const struct iovec *iov;
 397	int err;
 398	struct iovec v;
 399
 400	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 401		iterate_iovec(i, bytes, v, iov, skip, ({
 402			err = fault_in_pages_readable(v.iov_base, v.iov_len);
 403			if (unlikely(err))
 404			return err;
 405		0;}))
 406	}
 407	return 0;
 408}
 409EXPORT_SYMBOL(iov_iter_fault_in_readable);
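/*
 * Illustrative sketch, not part of the original file: buffered-write
 * paths prefault the source pages before taking the page lock, so the
 * atomic copy below is unlikely to fault.  This is roughly the shape
 * of the loop in generic_perform_write() (mm/filemap.c):
 */
static ssize_t example_write_chunk(struct page *page, unsigned long offset,
				   size_t bytes, struct iov_iter *i)
{
	size_t copied;

	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return -EFAULT;
	/* the page is assumed locked here, so the copy must not sleep */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	iov_iter_advance(i, copied);
	return copied;
}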
 410
 411void iov_iter_init(struct iov_iter *i, int direction,
 412			const struct iovec *iov, unsigned long nr_segs,
 413			size_t count)
 414{
 415	/* It will get better.  Eventually... */
 416	if (segment_eq(get_fs(), KERNEL_DS)) {
 417		direction |= ITER_KVEC;
 418		i->type = direction;
 419		i->kvec = (struct kvec *)iov;
 420	} else {
 421		i->type = direction;
 422		i->iov = iov;
 423	}
 424	i->nr_segs = nr_segs;
 425	i->iov_offset = 0;
 426	i->count = count;
 427}
 428EXPORT_SYMBOL(iov_iter_init);
 429
 430static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
 431{
 432	char *from = kmap_atomic(page);
 433	memcpy(to, from + offset, len);
 434	kunmap_atomic(from);
 435}
 436
 437static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
 438{
 439	char *to = kmap_atomic(page);
 440	memcpy(to + offset, from, len);
 441	kunmap_atomic(to);
 442}
 443
 444static void memzero_page(struct page *page, size_t offset, size_t len)
 445{
 446	char *addr = kmap_atomic(page);
 447	memset(addr + offset, 0, len);
 448	kunmap_atomic(addr);
 449}
 450
 451static inline bool allocated(struct pipe_buffer *buf)
 452{
 453	return buf->ops == &default_pipe_buf_ops;
 454}
 455
 456static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
 457{
 458	size_t off = i->iov_offset;
 459	int idx = i->idx;
 460	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
 461		idx = next_idx(idx, i->pipe);
 462		off = 0;
 463	}
 464	*idxp = idx;
 465	*offp = off;
 466}
 467
 468static size_t push_pipe(struct iov_iter *i, size_t size,
 469			int *idxp, size_t *offp)
 470{
 471	struct pipe_inode_info *pipe = i->pipe;
 472	size_t off;
 473	int idx;
 474	ssize_t left;
 475
 476	if (unlikely(size > i->count))
 477		size = i->count;
 478	if (unlikely(!size))
 479		return 0;
 480
 481	left = size;
 482	data_start(i, &idx, &off);
 483	*idxp = idx;
 484	*offp = off;
 485	if (off) {
 486		left -= PAGE_SIZE - off;
 487		if (left <= 0) {
 488			pipe->bufs[idx].len += size;
 489			return size;
 490		}
 491		pipe->bufs[idx].len = PAGE_SIZE;
 492		idx = next_idx(idx, pipe);
 493	}
 494	while (idx != pipe->curbuf || !pipe->nrbufs) {
 495		struct page *page = alloc_page(GFP_USER);
 496		if (!page)
 497			break;
 498		pipe->nrbufs++;
 499		pipe->bufs[idx].ops = &default_pipe_buf_ops;
 500		pipe->bufs[idx].page = page;
 501		pipe->bufs[idx].offset = 0;
 502		if (left <= PAGE_SIZE) {
 503			pipe->bufs[idx].len = left;
 504			return size;
 505		}
 506		pipe->bufs[idx].len = PAGE_SIZE;
 507		left -= PAGE_SIZE;
 508		idx = next_idx(idx, pipe);
 509	}
 510	return size - left;
 511}
 512
 513static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 514				struct iov_iter *i)
 515{
 516	struct pipe_inode_info *pipe = i->pipe;
 517	size_t n, off;
 518	int idx;
 519
 520	if (!sanity(i))
 521		return 0;
 522
 523	bytes = n = push_pipe(i, bytes, &idx, &off);
 524	if (unlikely(!n))
 525		return 0;
 526	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
 527		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 528		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
 529		i->idx = idx;
 530		i->iov_offset = off + chunk;
 531		n -= chunk;
 532		addr += chunk;
 533	}
 534	i->count -= bytes;
 535	return bytes;
 536}
 537
 538size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 539{
 540	const char *from = addr;
 541	if (unlikely(i->type & ITER_PIPE))
 542		return copy_pipe_to_iter(addr, bytes, i);
 543	iterate_and_advance(i, bytes, v,
 544		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
 545			       v.iov_len),
 546		memcpy_to_page(v.bv_page, v.bv_offset,
 547			       (from += v.bv_len) - v.bv_len, v.bv_len),
 548		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 549	)
 550
 551	return bytes;
 552}
 553EXPORT_SYMBOL(copy_to_iter);
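/*
 * Illustrative sketch, not part of the original file: a ->read_iter()
 * method can hand back device data with a single call, regardless of
 * whether the destination is an iovec, kvec, bvec or pipe iterator
 * (mydev_buf is a hypothetical driver buffer):
 */
static const char mydev_buf[64];	/* hypothetical device data */

static ssize_t mydev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* advances 'to' and returns the byte count, which may be short
	 * if userspace memory faults part-way through */
	return copy_to_iter(mydev_buf, sizeof(mydev_buf), to);
}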
 554
 555size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 556{
 557	char *to = addr;
 558	if (unlikely(i->type & ITER_PIPE)) {
 559		WARN_ON(1);
 560		return 0;
 561	}
 562	iterate_and_advance(i, bytes, v,
 563		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
 564				 v.iov_len),
 565		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 566				 v.bv_offset, v.bv_len),
 567		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 568	)
 569
 570	return bytes;
 571}
 572EXPORT_SYMBOL(copy_from_iter);
 573
 574bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 575{
 576	char *to = addr;
 577	if (unlikely(i->type & ITER_PIPE)) {
 578		WARN_ON(1);
 579		return false;
 580	}
 581	if (unlikely(i->count < bytes))
 582		return false;
 583
 584	iterate_all_kinds(i, bytes, v, ({
 585		if (__copy_from_user((to += v.iov_len) - v.iov_len,
 586				      v.iov_base, v.iov_len))
 587			return false;
 588		0;}),
 589		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 590				 v.bv_offset, v.bv_len),
 591		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 592	)
 593
 594	iov_iter_advance(i, bytes);
 595	return true;
 596}
 597EXPORT_SYMBOL(copy_from_iter_full);
 598
 599size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 600{
 601	char *to = addr;
 602	if (unlikely(i->type & ITER_PIPE)) {
 603		WARN_ON(1);
 604		return 0;
 605	}
 606	iterate_and_advance(i, bytes, v,
 607		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 608					 v.iov_base, v.iov_len),
 609		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 610				 v.bv_offset, v.bv_len),
 611		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 612	)
 613
 614	return bytes;
 615}
 616EXPORT_SYMBOL(copy_from_iter_nocache);
 617
 618bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 619{
 620	char *to = addr;
 621	if (unlikely(i->type & ITER_PIPE)) {
 622		WARN_ON(1);
 623		return false;
 624	}
 625	if (unlikely(i->count < bytes))
 626		return false;
 627	iterate_all_kinds(i, bytes, v, ({
 628		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 629					     v.iov_base, v.iov_len))
 630			return false;
 631		0;}),
 632		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 633				 v.bv_offset, v.bv_len),
 634		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 635	)
 636
 637	iov_iter_advance(i, bytes);
 638	return true;
 639}
 640EXPORT_SYMBOL(copy_from_iter_full_nocache);
 641
 642size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 643			 struct iov_iter *i)
 644{
 645	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 646		void *kaddr = kmap_atomic(page);
 647		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
 648		kunmap_atomic(kaddr);
 649		return wanted;
 650	} else if (likely(!(i->type & ITER_PIPE)))
 651		return copy_page_to_iter_iovec(page, offset, bytes, i);
 652	else
 653		return copy_page_to_iter_pipe(page, offset, bytes, i);
 654}
 655EXPORT_SYMBOL(copy_page_to_iter);
 656
 657size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 658			 struct iov_iter *i)
 659{
 660	if (unlikely(i->type & ITER_PIPE)) {
 661		WARN_ON(1);
 662		return 0;
 663	}
 664	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 665		void *kaddr = kmap_atomic(page);
 666		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
 667		kunmap_atomic(kaddr);
 668		return wanted;
 669	} else
 670		return copy_page_from_iter_iovec(page, offset, bytes, i);
 671}
 672EXPORT_SYMBOL(copy_page_from_iter);
 673
 674static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 675{
 676	struct pipe_inode_info *pipe = i->pipe;
 677	size_t n, off;
 678	int idx;
 679
 680	if (!sanity(i))
 681		return 0;
 682
 683	bytes = n = push_pipe(i, bytes, &idx, &off);
 684	if (unlikely(!n))
 685		return 0;
 686
 687	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
 688		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 689		memzero_page(pipe->bufs[idx].page, off, chunk);
 690		i->idx = idx;
 691		i->iov_offset = off + chunk;
 692		n -= chunk;
 693	}
 694	i->count -= bytes;
 695	return bytes;
 696}
 697
 698size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 699{
 700	if (unlikely(i->type & ITER_PIPE))
 701		return pipe_zero(bytes, i);
 702	iterate_and_advance(i, bytes, v,
 703		__clear_user(v.iov_base, v.iov_len),
 704		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 705		memset(v.iov_base, 0, v.iov_len)
 706	)
 707
 708	return bytes;
 709}
 710EXPORT_SYMBOL(iov_iter_zero);
 711
 712size_t iov_iter_copy_from_user_atomic(struct page *page,
 713		struct iov_iter *i, unsigned long offset, size_t bytes)
 714{
 715	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
 716	if (unlikely(i->type & ITER_PIPE)) {
 717		kunmap_atomic(kaddr);
 718		WARN_ON(1);
 719		return 0;
 720	}
 721	iterate_all_kinds(i, bytes, v,
 722		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
 723					  v.iov_base, v.iov_len),
 724		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 725				 v.bv_offset, v.bv_len),
 726		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 727	)
 728	kunmap_atomic(kaddr);
 729	return bytes;
 730}
 731EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 732
 733static inline void pipe_truncate(struct iov_iter *i)
 734{
 735	struct pipe_inode_info *pipe = i->pipe;
 736	if (pipe->nrbufs) {
 737		size_t off = i->iov_offset;
 738		int idx = i->idx;
 739		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
 740		if (off) {
 741			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
 742			idx = next_idx(idx, pipe);
 743			nrbufs++;
 744		}
 745		while (pipe->nrbufs > nrbufs) {
 746			pipe_buf_release(pipe, &pipe->bufs[idx]);
 747			idx = next_idx(idx, pipe);
 748			pipe->nrbufs--;
 749		}
 750	}
 751}
 752
 753static void pipe_advance(struct iov_iter *i, size_t size)
 754{
 755	struct pipe_inode_info *pipe = i->pipe;
 756	if (unlikely(i->count < size))
 757		size = i->count;
 758	if (size) {
 759		struct pipe_buffer *buf;
 760		size_t off = i->iov_offset, left = size;
 761		int idx = i->idx;
 762		if (off) /* make it relative to the beginning of buffer */
 763			left += off - pipe->bufs[idx].offset;
 764		while (1) {
 765			buf = &pipe->bufs[idx];
 766			if (left <= buf->len)
 767				break;
 768			left -= buf->len;
 769			idx = next_idx(idx, pipe);
 770		}
 771		i->idx = idx;
 772		i->iov_offset = buf->offset + left;
 773	}
 774	i->count -= size;
 775	/* ... and discard everything past that point */
 776	pipe_truncate(i);
 777}
 778
 779void iov_iter_advance(struct iov_iter *i, size_t size)
 780{
 781	if (unlikely(i->type & ITER_PIPE)) {
 782		pipe_advance(i, size);
 783		return;
 784	}
 785	iterate_and_advance(i, size, v, 0, 0, 0)
 786}
 787EXPORT_SYMBOL(iov_iter_advance);
 788
 789/*
 790 * Return the count of just the current iov_iter segment.
 791 */
 792size_t iov_iter_single_seg_count(const struct iov_iter *i)
 793{
 794	if (unlikely(i->type & ITER_PIPE))
 795		return i->count;	// it is a silly place, anyway
 796	if (i->nr_segs == 1)
 797		return i->count;
 798	else if (i->type & ITER_BVEC)
 799		return min(i->count, i->bvec->bv_len - i->iov_offset);
 800	else
 801		return min(i->count, i->iov->iov_len - i->iov_offset);
 802}
 803EXPORT_SYMBOL(iov_iter_single_seg_count);
 804
 805void iov_iter_kvec(struct iov_iter *i, int direction,
 806			const struct kvec *kvec, unsigned long nr_segs,
 807			size_t count)
 808{
 809	BUG_ON(!(direction & ITER_KVEC));
 810	i->type = direction;
 811	i->kvec = kvec;
 812	i->nr_segs = nr_segs;
 813	i->iov_offset = 0;
 814	i->count = count;
 815}
 816EXPORT_SYMBOL(iov_iter_kvec);
 817
 818void iov_iter_bvec(struct iov_iter *i, int direction,
 819			const struct bio_vec *bvec, unsigned long nr_segs,
 820			size_t count)
 821{
 822	BUG_ON(!(direction & ITER_BVEC));
 823	i->type = direction;
 824	i->bvec = bvec;
 825	i->nr_segs = nr_segs;
 826	i->iov_offset = 0;
 827	i->count = count;
 828}
 829EXPORT_SYMBOL(iov_iter_bvec);
 830
 831void iov_iter_pipe(struct iov_iter *i, int direction,
 832			struct pipe_inode_info *pipe,
 833			size_t count)
 834{
 835	BUG_ON(direction != ITER_PIPE);
 836	WARN_ON(pipe->nrbufs == pipe->buffers);
 837	i->type = direction;
 838	i->pipe = pipe;
 839	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 840	i->iov_offset = 0;
 841	i->count = count;
 842}
 843EXPORT_SYMBOL(iov_iter_pipe);
 844
 845unsigned long iov_iter_alignment(const struct iov_iter *i)
 846{
 847	unsigned long res = 0;
 848	size_t size = i->count;
 849
 850	if (unlikely(i->type & ITER_PIPE)) {
 851		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
 852			return size | i->iov_offset;
 853		return size;
 854	}
 855	iterate_all_kinds(i, size, v,
 856		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
 857		res |= v.bv_offset | v.bv_len,
 858		res |= (unsigned long)v.iov_base | v.iov_len
 859	)
 860	return res;
 861}
 862EXPORT_SYMBOL(iov_iter_alignment);
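/*
 * Illustrative sketch, not part of the original file: because the
 * result ORs together every segment's base and length, direct-I/O
 * code can reject a misaligned request with a single test
 * (blocksize is assumed to be a power of two):
 */
static int example_check_dio(const struct iov_iter *iter, unsigned blocksize)
{
	if (iov_iter_alignment(iter) & (blocksize - 1))
		return -EINVAL;	/* some base or length breaks alignment */
	return 0;
}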
 863
 864unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 865{
 866	unsigned long res = 0;
 867	size_t size = i->count;
 868
 869	if (unlikely(i->type & ITER_PIPE)) {
 870		WARN_ON(1);
 871		return ~0U;
 872	}
 873
 874	iterate_all_kinds(i, size, v,
 875		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 876			(size != v.iov_len ? size : 0), 0),
 877		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
 878			(size != v.bv_len ? size : 0)),
 879		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 880			(size != v.iov_len ? size : 0))
 881		);
 882	return res;
 883}
 884EXPORT_SYMBOL(iov_iter_gap_alignment);
 885
 886static inline size_t __pipe_get_pages(struct iov_iter *i,
 887				size_t maxsize,
 888				struct page **pages,
 889				int idx,
 890				size_t *start)
 891{
 892	struct pipe_inode_info *pipe = i->pipe;
 893	ssize_t n = push_pipe(i, maxsize, &idx, start);
 894	if (!n)
 895		return -EFAULT;
 896
 897	maxsize = n;
 898	n += *start;
 899	while (n > 0) {
 900		get_page(*pages++ = pipe->bufs[idx].page);
 901		idx = next_idx(idx, pipe);
 902		n -= PAGE_SIZE;
 903	}
 904
 905	return maxsize;
 906}
 907
 908static ssize_t pipe_get_pages(struct iov_iter *i,
 909		   struct page **pages, size_t maxsize, unsigned maxpages,
 910		   size_t *start)
 911{
 912	unsigned npages;
 913	size_t capacity;
 914	int idx;
 915
 916	if (!maxsize)
 917		return 0;
 918
 919	if (!sanity(i))
 920		return -EFAULT;
 921
 922	data_start(i, &idx, start);
 923	/* some of this one + all after this one */
 924	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
 925	capacity = min(npages,maxpages) * PAGE_SIZE - *start;
 926
 927	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
 928}
 929
 930ssize_t iov_iter_get_pages(struct iov_iter *i,
 931		   struct page **pages, size_t maxsize, unsigned maxpages,
 932		   size_t *start)
 933{
 934	if (maxsize > i->count)
 935		maxsize = i->count;
 936
 937	if (unlikely(i->type & ITER_PIPE))
 938		return pipe_get_pages(i, pages, maxsize, maxpages, start);
 939	iterate_all_kinds(i, maxsize, v, ({
 940		unsigned long addr = (unsigned long)v.iov_base;
 941		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
 942		int n;
 943		int res;
 944
 945		if (len > maxpages * PAGE_SIZE)
 946			len = maxpages * PAGE_SIZE;
 947		addr &= ~(PAGE_SIZE - 1);
 948		n = DIV_ROUND_UP(len, PAGE_SIZE);
 949		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
 950		if (unlikely(res < 0))
 951			return res;
 952		return (res == n ? len : res * PAGE_SIZE) - *start;
 953	0;}),({
 954		/* can't be more than PAGE_SIZE */
 955		*start = v.bv_offset;
 956		get_page(*pages = v.bv_page);
 957		return v.bv_len;
 958	}),({
 959		return -EFAULT;
 960	})
 961	)
 962	return 0;
 963}
 964EXPORT_SYMBOL(iov_iter_get_pages);
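/*
 * Illustrative sketch, not part of the original file: a zero-copy
 * consumer pins up to 16 pages, advances the iterator by what was
 * actually covered, and drops the page references it was handed:
 */
static ssize_t example_pin_pages(struct iov_iter *i)
{
	struct page *pages[16];
	size_t start;
	ssize_t n;
	int k;

	n = iov_iter_get_pages(i, pages, 16 * PAGE_SIZE, 16, &start);
	if (n <= 0)
		return n;
	/* the data begins 'start' bytes into pages[0] */
	iov_iter_advance(i, n);
	/* ... use the pages, then release the references ... */
	for (k = 0; k < DIV_ROUND_UP(start + n, PAGE_SIZE); k++)
		put_page(pages[k]);
	return n;
}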
 965
 966static struct page **get_pages_array(size_t n)
 967{
 968	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
 969	if (!p)
 970		p = vmalloc(n * sizeof(struct page *));
 971	return p;
 972}
 973
 974static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
 975		   struct page ***pages, size_t maxsize,
 976		   size_t *start)
 977{
 978	struct page **p;
 979	size_t n;
 980	int idx;
 981	int npages;
 982
 983	if (!maxsize)
 984		return 0;
 985
 986	if (!sanity(i))
 987		return -EFAULT;
 988
 989	data_start(i, &idx, start);
 990	/* some of this one + all after this one */
 991	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
 992	n = npages * PAGE_SIZE - *start;
 993	if (maxsize > n)
 994		maxsize = n;
 995	else
 996		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
 997	p = get_pages_array(npages);
 998	if (!p)
 999		return -ENOMEM;
1000	n = __pipe_get_pages(i, maxsize, p, idx, start);
1001	if (n > 0)
1002		*pages = p;
1003	else
1004		kvfree(p);
1005	return n;
1006}
1007
1008ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1009		   struct page ***pages, size_t maxsize,
1010		   size_t *start)
1011{
1012	struct page **p;
1013
1014	if (maxsize > i->count)
1015		maxsize = i->count;
1016
1017	if (unlikely(i->type & ITER_PIPE))
1018		return pipe_get_pages_alloc(i, pages, maxsize, start);
1019	iterate_all_kinds(i, maxsize, v, ({
1020		unsigned long addr = (unsigned long)v.iov_base;
1021		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1022		int n;
1023		int res;
1024
1025		addr &= ~(PAGE_SIZE - 1);
1026		n = DIV_ROUND_UP(len, PAGE_SIZE);
1027		p = get_pages_array(n);
1028		if (!p)
1029			return -ENOMEM;
1030		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
1031		if (unlikely(res < 0)) {
1032			kvfree(p);
1033			return res;
1034		}
1035		*pages = p;
1036		return (res == n ? len : res * PAGE_SIZE) - *start;
1037	0;}),({
1038		/* can't be more than PAGE_SIZE */
1039		*start = v.bv_offset;
1040		*pages = p = get_pages_array(1);
1041		if (!p)
1042			return -ENOMEM;
1043		get_page(*p = v.bv_page);
1044		return v.bv_len;
1045	}),({
1046		return -EFAULT;
1047	})
1048	)
1049	return 0;
1050}
1051EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1052
1053size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1054			       struct iov_iter *i)
1055{
1056	char *to = addr;
1057	__wsum sum, next;
1058	size_t off = 0;
1059	sum = *csum;
1060	if (unlikely(i->type & ITER_PIPE)) {
1061		WARN_ON(1);
1062		return 0;
1063	}
1064	iterate_and_advance(i, bytes, v, ({
1065		int err = 0;
1066		next = csum_and_copy_from_user(v.iov_base,
1067					       (to += v.iov_len) - v.iov_len,
1068					       v.iov_len, 0, &err);
1069		if (!err) {
1070			sum = csum_block_add(sum, next, off);
1071			off += v.iov_len;
1072		}
1073		err ? v.iov_len : 0;
1074	}), ({
1075		char *p = kmap_atomic(v.bv_page);
1076		next = csum_partial_copy_nocheck(p + v.bv_offset,
1077						 (to += v.bv_len) - v.bv_len,
1078						 v.bv_len, 0);
1079		kunmap_atomic(p);
1080		sum = csum_block_add(sum, next, off);
1081		off += v.bv_len;
1082	}),({
1083		next = csum_partial_copy_nocheck(v.iov_base,
1084						 (to += v.iov_len) - v.iov_len,
1085						 v.iov_len, 0);
1086		sum = csum_block_add(sum, next, off);
1087		off += v.iov_len;
1088	})
1089	)
1090	*csum = sum;
1091	return bytes;
1092}
1093EXPORT_SYMBOL(csum_and_copy_from_iter);
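/*
 * Illustrative sketch, not part of the original file: networking
 * folds checksumming into the copy from userspace, much like
 * ip_generic_getfrag() (net/ipv4/ip_output.c); a short copy is
 * reported as -EFAULT:
 */
static int example_getfrag(void *to, int len, struct iov_iter *from)
{
	__wsum csum = 0;

	if (csum_and_copy_from_iter(to, len, &csum, from) != len)
		return -EFAULT;
	/* csum now covers the len bytes just copied */
	return 0;
}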
1094
1095bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1096			       struct iov_iter *i)
1097{
1098	char *to = addr;
1099	__wsum sum, next;
1100	size_t off = 0;
1101	sum = *csum;
1102	if (unlikely(i->type & ITER_PIPE)) {
1103		WARN_ON(1);
1104		return false;
1105	}
1106	if (unlikely(i->count < bytes))
1107		return false;
1108	iterate_all_kinds(i, bytes, v, ({
1109		int err = 0;
1110		next = csum_and_copy_from_user(v.iov_base,
1111					       (to += v.iov_len) - v.iov_len,
1112					       v.iov_len, 0, &err);
1113		if (err)
1114			return false;
1115		sum = csum_block_add(sum, next, off);
1116		off += v.iov_len;
1117		0;
1118	}), ({
1119		char *p = kmap_atomic(v.bv_page);
1120		next = csum_partial_copy_nocheck(p + v.bv_offset,
1121						 (to += v.bv_len) - v.bv_len,
1122						 v.bv_len, 0);
1123		kunmap_atomic(p);
1124		sum = csum_block_add(sum, next, off);
1125		off += v.bv_len;
1126	}),({
1127		next = csum_partial_copy_nocheck(v.iov_base,
1128						 (to += v.iov_len) - v.iov_len,
1129						 v.iov_len, 0);
1130		sum = csum_block_add(sum, next, off);
1131		off += v.iov_len;
1132	})
1133	)
1134	*csum = sum;
1135	iov_iter_advance(i, bytes);
1136	return true;
1137}
1138EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1139
1140size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1141			     struct iov_iter *i)
1142{
1143	const char *from = addr;
1144	__wsum sum, next;
1145	size_t off = 0;
1146	sum = *csum;
1147	if (unlikely(i->type & ITER_PIPE)) {
1148		WARN_ON(1);	/* for now */
1149		return 0;
1150	}
1151	iterate_and_advance(i, bytes, v, ({
1152		int err = 0;
1153		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1154					     v.iov_base,
1155					     v.iov_len, 0, &err);
1156		if (!err) {
1157			sum = csum_block_add(sum, next, off);
1158			off += v.iov_len;
1159		}
1160		err ? v.iov_len : 0;
1161	}), ({
1162		char *p = kmap_atomic(v.bv_page);
1163		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
1164						 p + v.bv_offset,
1165						 v.bv_len, 0);
1166		kunmap_atomic(p);
1167		sum = csum_block_add(sum, next, off);
1168		off += v.bv_len;
1169	}),({
1170		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
1171						 v.iov_base,
1172						 v.iov_len, 0);
1173		sum = csum_block_add(sum, next, off);
1174		off += v.iov_len;
1175	})
1176	)
1177	*csum = sum;
1178	return bytes;
1179}
1180EXPORT_SYMBOL(csum_and_copy_to_iter);
1181
1182int iov_iter_npages(const struct iov_iter *i, int maxpages)
1183{
1184	size_t size = i->count;
1185	int npages = 0;
1186
1187	if (!size)
1188		return 0;
1189
1190	if (unlikely(i->type & ITER_PIPE)) {
1191		struct pipe_inode_info *pipe = i->pipe;
1192		size_t off;
1193		int idx;
1194
1195		if (!sanity(i))
1196			return 0;
1197
1198		data_start(i, &idx, &off);
1199		/* some of this one + all after this one */
1200		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
1201		if (npages >= maxpages)
1202			return maxpages;
1203	} else iterate_all_kinds(i, size, v, ({
1204		unsigned long p = (unsigned long)v.iov_base;
1205		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1206			- p / PAGE_SIZE;
1207		if (npages >= maxpages)
1208			return maxpages;
1209	0;}),({
1210		npages++;
1211		if (npages >= maxpages)
1212			return maxpages;
1213	}),({
1214		unsigned long p = (unsigned long)v.iov_base;
1215		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1216			- p / PAGE_SIZE;
1217		if (npages >= maxpages)
1218			return maxpages;
1219	})
1220	)
1221	return npages;
1222}
1223EXPORT_SYMBOL(iov_iter_npages);
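/*
 * Illustrative sketch, not part of the original file: direct-I/O code
 * sizes its bio by how many pages the iterator can touch, clamped to
 * the bio limit (bio_alloc() and BIO_MAX_PAGES are assumptions from
 * the block layer, <linux/bio.h>):
 */
static struct bio *example_alloc_bio(const struct iov_iter *iter)
{
	int nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);

	return bio_alloc(GFP_KERNEL, nr_pages);
}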
1224
1225const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1226{
1227	*new = *old;
1228	if (unlikely(new->type & ITER_PIPE)) {
1229		WARN_ON(1);
1230		return NULL;
1231	}
1232	if (new->type & ITER_BVEC)
1233		return new->bvec = kmemdup(new->bvec,
1234				    new->nr_segs * sizeof(struct bio_vec),
1235				    flags);
1236	else
1237		/* iovec and kvec have identical layout */
1238		return new->iov = kmemdup(new->iov,
1239				   new->nr_segs * sizeof(struct iovec),
1240				   flags);
1241}
1242EXPORT_SYMBOL(dup_iter);
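/*
 * Illustrative sketch, not part of the original file: asynchronous
 * submitters snapshot the iterator so the caller's copy can keep
 * advancing; the return value is the duplicated segment array and
 * must be kfree()d when the async work completes:
 */
static int example_snapshot(struct iov_iter *dst, struct iov_iter *src)
{
	const void *segs = dup_iter(dst, src, GFP_KERNEL);

	if (!segs)
		return -ENOMEM;
	/* ... queue work that consumes *dst ... */
	kfree(segs);	/* once that work is done */
	return 0;
}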
1243
1244/**
1245 * import_iovec() - Copy an array of &struct iovec from userspace
1246 *     into the kernel, check that it is valid, and initialize a new
1247 *     &struct iov_iter iterator to access it.
1248 *
1249 * @type: One of %READ or %WRITE.
1250 * @uvector: Pointer to the userspace array.
1251 * @nr_segs: Number of elements in userspace array.
1252 * @fast_segs: Number of elements in @iov.
1253 * @iov: (input and output parameter) Pointer to pointer to (usually small
1254 *     on-stack) kernel array.
1255 * @i: Pointer to iterator that will be initialized on success.
1256 *
1257 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
1258 * then this function places %NULL in *@iov on return. Otherwise, a new
1259 * array will be allocated and the result placed in *@iov. This means that
1260 * the caller may call kfree() on *@iov regardless of whether the small
1261 * on-stack array was used or not (and regardless of whether this function
1262 * returns an error or not).
1263 *
1264 * Return: 0 on success or negative error code on error.
1265 */
1266int import_iovec(int type, const struct iovec __user * uvector,
1267		 unsigned nr_segs, unsigned fast_segs,
1268		 struct iovec **iov, struct iov_iter *i)
1269{
1270	ssize_t n;
1271	struct iovec *p;
1272	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1273				  *iov, &p);
1274	if (n < 0) {
1275		if (p != *iov)
1276			kfree(p);
1277		*iov = NULL;
1278		return n;
1279	}
1280	iov_iter_init(i, type, p, nr_segs, n);
1281	*iov = p == *iov ? NULL : p;
1282	return 0;
1283}
1284EXPORT_SYMBOL(import_iovec);
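/*
 * Illustrative sketch, not part of the original file: the usual
 * calling convention, as in the readv()/writev() paths.  Point *iov
 * at a small stack array and kfree() whatever comes back; kfree(NULL)
 * is a no-op when the stack array was sufficient:
 */
static int example_import(int type, const struct iovec __user *uvec,
			  unsigned nr_segs, struct iov_iter *iter)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	int ret;

	ret = import_iovec(type, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, iter);
	if (ret < 0)
		return ret;
	/* ... perform the I/O described by *iter ... */
	kfree(iov);
	return 0;
}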
1285
1286#ifdef CONFIG_COMPAT
1287#include <linux/compat.h>
1288
1289int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
1290		 unsigned nr_segs, unsigned fast_segs,
1291		 struct iovec **iov, struct iov_iter *i)
1292{
1293	ssize_t n;
1294	struct iovec *p;
1295	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1296				  *iov, &p);
1297	if (n < 0) {
1298		if (p != *iov)
1299			kfree(p);
1300		*iov = NULL;
1301		return n;
1302	}
1303	iov_iter_init(i, type, p, nr_segs, n);
1304	*iov = p == *iov ? NULL : p;
1305	return 0;
1306}
1307#endif
1308
1309int import_single_range(int rw, void __user *buf, size_t len,
1310		 struct iovec *iov, struct iov_iter *i)
1311{
1312	if (len > MAX_RW_COUNT)
1313		len = MAX_RW_COUNT;
1314	if (unlikely(!access_ok(!rw, buf, len)))
1315		return -EFAULT;
1316
1317	iov->iov_base = buf;
1318	iov->iov_len = len;
1319	iov_iter_init(i, rw, iov, 1, len);
1320	return 0;
1321}
1322EXPORT_SYMBOL(import_single_range);
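/*
 * Illustrative sketch, not part of the original file: the
 * single-buffer convenience used by plain read(2)/write(2) style
 * entry points.  Note that 'iov' must stay alive for as long as
 * 'iter' is in use, so both normally live in the caller's frame:
 */
static ssize_t example_single(int rw, void __user *buf, size_t len)
{
	struct iovec iov;
	struct iov_iter iter;
	int ret;

	ret = import_single_range(rw, buf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	/* ... hand &iter to a ->read_iter()/->write_iter() method ... */
	return 0;
}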
v4.6
  1#include <linux/export.h>
  2#include <linux/uio.h>
  3#include <linux/pagemap.h>
  4#include <linux/slab.h>
  5#include <linux/vmalloc.h>
  6#include <net/checksum.h>
  7
  8#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  9	size_t left;					\
 10	size_t wanted = n;				\
 11	__p = i->iov;					\
 12	__v.iov_len = min(n, __p->iov_len - skip);	\
 13	if (likely(__v.iov_len)) {			\
 14		__v.iov_base = __p->iov_base + skip;	\
 15		left = (STEP);				\
 16		__v.iov_len -= left;			\
 17		skip += __v.iov_len;			\
 18		n -= __v.iov_len;			\
 19	} else {					\
 20		left = 0;				\
 21	}						\
 22	while (unlikely(!left && n)) {			\
 23		__p++;					\
 24		__v.iov_len = min(n, __p->iov_len);	\
 25		if (unlikely(!__v.iov_len))		\
 26			continue;			\
 27		__v.iov_base = __p->iov_base;		\
 28		left = (STEP);				\
 29		__v.iov_len -= left;			\
 30		skip = __v.iov_len;			\
 31		n -= __v.iov_len;			\
 32	}						\
 33	n = wanted - n;					\
 34}
 35
 36#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
 37	size_t wanted = n;				\
 38	__p = i->kvec;					\
 39	__v.iov_len = min(n, __p->iov_len - skip);	\
 40	if (likely(__v.iov_len)) {			\
 41		__v.iov_base = __p->iov_base + skip;	\
 42		(void)(STEP);				\
 43		skip += __v.iov_len;			\
 44		n -= __v.iov_len;			\
 45	}						\
 46	while (unlikely(n)) {				\
 47		__p++;					\
 48		__v.iov_len = min(n, __p->iov_len);	\
 49		if (unlikely(!__v.iov_len))		\
 50			continue;			\
 51		__v.iov_base = __p->iov_base;		\
 52		(void)(STEP);				\
 53		skip = __v.iov_len;			\
 54		n -= __v.iov_len;			\
 55	}						\
 56	n = wanted;					\
 57}
 58
 59#define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
 60	size_t wanted = n;				\
 61	__p = i->bvec;					\
 62	__v.bv_len = min_t(size_t, n, __p->bv_len - skip);	\
 63	if (likely(__v.bv_len)) {			\
 64		__v.bv_page = __p->bv_page;		\
 65		__v.bv_offset = __p->bv_offset + skip; 	\
 66		(void)(STEP);				\
 67		skip += __v.bv_len;			\
 68		n -= __v.bv_len;			\
 69	}						\
 70	while (unlikely(n)) {				\
 71		__p++;					\
 72		__v.bv_len = min_t(size_t, n, __p->bv_len);	\
 73		if (unlikely(!__v.bv_len))		\
 74			continue;			\
 75		__v.bv_page = __p->bv_page;		\
 76		__v.bv_offset = __p->bv_offset;		\
 77		(void)(STEP);				\
 78		skip = __v.bv_len;			\
 79		n -= __v.bv_len;			\
 80	}						\
 81	n = wanted;					\
 82}
 83
 84#define iterate_all_kinds(i, n, v, I, B, K) {			\
 85	size_t skip = i->iov_offset;				\
 86	if (unlikely(i->type & ITER_BVEC)) {			\
 87		const struct bio_vec *bvec;			\
 88		struct bio_vec v;				\
 89		iterate_bvec(i, n, v, bvec, skip, (B))		\
 90	} else if (unlikely(i->type & ITER_KVEC)) {		\
 91		const struct kvec *kvec;			\
 92		struct kvec v;					\
 93		iterate_kvec(i, n, v, kvec, skip, (K))		\
 94	} else {						\
 95		const struct iovec *iov;			\
 96		struct iovec v;					\
 97		iterate_iovec(i, n, v, iov, skip, (I))		\
 98	}							\
 99}
100
101#define iterate_and_advance(i, n, v, I, B, K) {			\
102	size_t skip = i->iov_offset;				\
103	if (unlikely(i->type & ITER_BVEC)) {			\
104		const struct bio_vec *bvec;			\
105		struct bio_vec v;				\
106		iterate_bvec(i, n, v, bvec, skip, (B))		\
107		if (skip == bvec->bv_len) {			\
108			bvec++;					\
109			skip = 0;				\
110		}						\
111		i->nr_segs -= bvec - i->bvec;			\
112		i->bvec = bvec;					\
113	} else if (unlikely(i->type & ITER_KVEC)) {		\
114		const struct kvec *kvec;			\
115		struct kvec v;					\
116		iterate_kvec(i, n, v, kvec, skip, (K))		\
117		if (skip == kvec->iov_len) {			\
118			kvec++;					\
119			skip = 0;				\
120		}						\
121		i->nr_segs -= kvec - i->kvec;			\
122		i->kvec = kvec;					\
123	} else {						\
124		const struct iovec *iov;			\
125		struct iovec v;					\
126		iterate_iovec(i, n, v, iov, skip, (I))		\
127		if (skip == iov->iov_len) {			\
128			iov++;					\
129			skip = 0;				\
130		}						\
131		i->nr_segs -= iov - i->iov;			\
132		i->iov = iov;					\
133	}							\
134	i->count -= n;						\
135	i->iov_offset = skip;					\
136}
137
138static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
139			 struct iov_iter *i)
140{
141	size_t skip, copy, left, wanted;
142	const struct iovec *iov;
143	char __user *buf;
144	void *kaddr, *from;
145
146	if (unlikely(bytes > i->count))
147		bytes = i->count;
148
149	if (unlikely(!bytes))
150		return 0;
151
152	wanted = bytes;
153	iov = i->iov;
154	skip = i->iov_offset;
155	buf = iov->iov_base + skip;
156	copy = min(bytes, iov->iov_len - skip);
157
158	if (!fault_in_pages_writeable(buf, copy)) {
159		kaddr = kmap_atomic(page);
160		from = kaddr + offset;
161
162		/* first chunk, usually the only one */
163		left = __copy_to_user_inatomic(buf, from, copy);
164		copy -= left;
165		skip += copy;
166		from += copy;
167		bytes -= copy;
168
169		while (unlikely(!left && bytes)) {
170			iov++;
171			buf = iov->iov_base;
172			copy = min(bytes, iov->iov_len);
173			left = __copy_to_user_inatomic(buf, from, copy);
174			copy -= left;
175			skip = copy;
176			from += copy;
177			bytes -= copy;
178		}
179		if (likely(!bytes)) {
180			kunmap_atomic(kaddr);
181			goto done;
182		}
183		offset = from - kaddr;
184		buf += copy;
185		kunmap_atomic(kaddr);
186		copy = min(bytes, iov->iov_len - skip);
187	}
188	/* Too bad - revert to non-atomic kmap */
189	kaddr = kmap(page);
190	from = kaddr + offset;
191	left = __copy_to_user(buf, from, copy);
192	copy -= left;
193	skip += copy;
194	from += copy;
195	bytes -= copy;
196	while (unlikely(!left && bytes)) {
197		iov++;
198		buf = iov->iov_base;
199		copy = min(bytes, iov->iov_len);
200		left = __copy_to_user(buf, from, copy);
201		copy -= left;
202		skip = copy;
203		from += copy;
204		bytes -= copy;
205	}
206	kunmap(page);
207done:
208	if (skip == iov->iov_len) {
209		iov++;
210		skip = 0;
211	}
212	i->count -= wanted - bytes;
213	i->nr_segs -= iov - i->iov;
214	i->iov = iov;
215	i->iov_offset = skip;
216	return wanted - bytes;
217}
218
219static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
220			 struct iov_iter *i)
221{
222	size_t skip, copy, left, wanted;
223	const struct iovec *iov;
224	char __user *buf;
225	void *kaddr, *to;
226
227	if (unlikely(bytes > i->count))
228		bytes = i->count;
229
230	if (unlikely(!bytes))
231		return 0;
232
233	wanted = bytes;
234	iov = i->iov;
235	skip = i->iov_offset;
236	buf = iov->iov_base + skip;
237	copy = min(bytes, iov->iov_len - skip);
238
239	if (!fault_in_pages_readable(buf, copy)) {
240		kaddr = kmap_atomic(page);
241		to = kaddr + offset;
242
243		/* first chunk, usually the only one */
244		left = __copy_from_user_inatomic(to, buf, copy);
245		copy -= left;
246		skip += copy;
247		to += copy;
248		bytes -= copy;
249
250		while (unlikely(!left && bytes)) {
251			iov++;
252			buf = iov->iov_base;
253			copy = min(bytes, iov->iov_len);
254			left = __copy_from_user_inatomic(to, buf, copy);
255			copy -= left;
256			skip = copy;
257			to += copy;
258			bytes -= copy;
259		}
260		if (likely(!bytes)) {
261			kunmap_atomic(kaddr);
262			goto done;
263		}
264		offset = to - kaddr;
265		buf += copy;
266		kunmap_atomic(kaddr);
267		copy = min(bytes, iov->iov_len - skip);
268	}
269	/* Too bad - revert to non-atomic kmap */
270	kaddr = kmap(page);
271	to = kaddr + offset;
272	left = __copy_from_user(to, buf, copy);
273	copy -= left;
274	skip += copy;
275	to += copy;
276	bytes -= copy;
277	while (unlikely(!left && bytes)) {
278		iov++;
279		buf = iov->iov_base;
280		copy = min(bytes, iov->iov_len);
281		left = __copy_from_user(to, buf, copy);
282		copy -= left;
283		skip = copy;
284		to += copy;
285		bytes -= copy;
286	}
287	kunmap(page);
288done:
289	if (skip == iov->iov_len) {
290		iov++;
291		skip = 0;
292	}
293	i->count -= wanted - bytes;
294	i->nr_segs -= iov - i->iov;
295	i->iov = iov;
296	i->iov_offset = skip;
297	return wanted - bytes;
298}
299
300/*
301 * Fault in the first iovec of the given iov_iter, to a maximum length
302 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 303 * accessed (i.e. because it is an invalid address).
304 *
305 * writev-intensive code may want this to prefault several iovecs -- that
306 * would be possible (callers must not rely on the fact that _only_ the
307 * first iovec will be faulted with the current implementation).
308 */
309int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
310{
311	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
312		char __user *buf = i->iov->iov_base + i->iov_offset;
313		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
314		return fault_in_pages_readable(buf, bytes);
315	}
316	return 0;
317}
318EXPORT_SYMBOL(iov_iter_fault_in_readable);
319
320/*
321 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
322 * bytes.  For each iovec, fault in each page that constitutes the iovec.
323 *
324 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
325 * because it is an invalid address).
326 */
327int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
328{
329	size_t skip = i->iov_offset;
330	const struct iovec *iov;
331	int err;
332	struct iovec v;
333
334	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
335		iterate_iovec(i, bytes, v, iov, skip, ({
336			err = fault_in_multipages_readable(v.iov_base,
337					v.iov_len);
338			if (unlikely(err))
339			return err;
340		0;}))
341	}
342	return 0;
343}
344EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
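/*
 * Illustrative sketch, not part of the original file: the difference
 * from iov_iter_fault_in_readable() above is scope.  That helper only
 * touches the start (and end) of the first segment, while this one
 * walks every page of every iovec covered by 'bytes', which is what
 * callers copying a large range under a page lock want:
 */
static int example_prefault(struct iov_iter *i, size_t bytes)
{
	if (iov_iter_fault_in_multipages_readable(i, bytes))
		return -EFAULT;
	return 0;
}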
345
346void iov_iter_init(struct iov_iter *i, int direction,
347			const struct iovec *iov, unsigned long nr_segs,
348			size_t count)
349{
350	/* It will get better.  Eventually... */
351	if (segment_eq(get_fs(), KERNEL_DS)) {
352		direction |= ITER_KVEC;
353		i->type = direction;
354		i->kvec = (struct kvec *)iov;
355	} else {
356		i->type = direction;
357		i->iov = iov;
358	}
359	i->nr_segs = nr_segs;
360	i->iov_offset = 0;
361	i->count = count;
362}
363EXPORT_SYMBOL(iov_iter_init);
364
365static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
366{
367	char *from = kmap_atomic(page);
368	memcpy(to, from + offset, len);
369	kunmap_atomic(from);
370}
371
372static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
373{
374	char *to = kmap_atomic(page);
375	memcpy(to + offset, from, len);
376	kunmap_atomic(to);
377}
378
379static void memzero_page(struct page *page, size_t offset, size_t len)
380{
381	char *addr = kmap_atomic(page);
382	memset(addr + offset, 0, len);
383	kunmap_atomic(addr);
384}
385
386size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
387{
388	const char *from = addr;
389	if (unlikely(bytes > i->count))
 390		bytes = i->count;
 391
 392	if (unlikely(!bytes))
 393		return 0;
 394
395	iterate_and_advance(i, bytes, v,
396		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
397			       v.iov_len),
398		memcpy_to_page(v.bv_page, v.bv_offset,
399			       (from += v.bv_len) - v.bv_len, v.bv_len),
400		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
401	)
402
403	return bytes;
404}
405EXPORT_SYMBOL(copy_to_iter);
406
407size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
408{
409	char *to = addr;
410	if (unlikely(bytes > i->count))
411		bytes = i->count;
412
413	if (unlikely(!bytes))
414		return 0;
415
416	iterate_and_advance(i, bytes, v,
417		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
418				 v.iov_len),
419		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
420				 v.bv_offset, v.bv_len),
421		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
422	)
423
424	return bytes;
425}
426EXPORT_SYMBOL(copy_from_iter);
427
428size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
429{
430	char *to = addr;
431	if (unlikely(bytes > i->count))
432		bytes = i->count;
433
434	if (unlikely(!bytes))
435		return 0;
436
437	iterate_and_advance(i, bytes, v,
438		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
439					 v.iov_base, v.iov_len),
440		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
441				 v.bv_offset, v.bv_len),
442		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
443	)
444
445	return bytes;
446}
447EXPORT_SYMBOL(copy_from_iter_nocache);
448
449size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
450			 struct iov_iter *i)
451{
452	if (i->type & (ITER_BVEC|ITER_KVEC)) {
453		void *kaddr = kmap_atomic(page);
454		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
455		kunmap_atomic(kaddr);
456		return wanted;
457	} else
458		return copy_page_to_iter_iovec(page, offset, bytes, i);
459}
460EXPORT_SYMBOL(copy_page_to_iter);
461
462size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
463			 struct iov_iter *i)
464{
465	if (i->type & (ITER_BVEC|ITER_KVEC)) {
466		void *kaddr = kmap_atomic(page);
467		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
468		kunmap_atomic(kaddr);
469		return wanted;
470	} else
471		return copy_page_from_iter_iovec(page, offset, bytes, i);
472}
473EXPORT_SYMBOL(copy_page_from_iter);
474
475size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
476{
477	if (unlikely(bytes > i->count))
 478		bytes = i->count;
 479
 480	if (unlikely(!bytes))
 481		return 0;
 482
483	iterate_and_advance(i, bytes, v,
484		__clear_user(v.iov_base, v.iov_len),
485		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
486		memset(v.iov_base, 0, v.iov_len)
487	)
488
489	return bytes;
490}
491EXPORT_SYMBOL(iov_iter_zero);
492
493size_t iov_iter_copy_from_user_atomic(struct page *page,
494		struct iov_iter *i, unsigned long offset, size_t bytes)
495{
496	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
497	iterate_all_kinds(i, bytes, v,
498		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
499					  v.iov_base, v.iov_len),
500		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
501				 v.bv_offset, v.bv_len),
502		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
503	)
504	kunmap_atomic(kaddr);
505	return bytes;
506}
507EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
508
509void iov_iter_advance(struct iov_iter *i, size_t size)
510{
511	iterate_and_advance(i, size, v, 0, 0, 0)
512}
513EXPORT_SYMBOL(iov_iter_advance);
514
515/*
516 * Return the count of just the current iov_iter segment.
517 */
518size_t iov_iter_single_seg_count(const struct iov_iter *i)
519{
520	if (i->nr_segs == 1)
521		return i->count;
522	else if (i->type & ITER_BVEC)
523		return min(i->count, i->bvec->bv_len - i->iov_offset);
524	else
525		return min(i->count, i->iov->iov_len - i->iov_offset);
526}
527EXPORT_SYMBOL(iov_iter_single_seg_count);
528
529void iov_iter_kvec(struct iov_iter *i, int direction,
530			const struct kvec *kvec, unsigned long nr_segs,
531			size_t count)
532{
533	BUG_ON(!(direction & ITER_KVEC));
534	i->type = direction;
535	i->kvec = kvec;
536	i->nr_segs = nr_segs;
537	i->iov_offset = 0;
538	i->count = count;
539}
540EXPORT_SYMBOL(iov_iter_kvec);
541
542void iov_iter_bvec(struct iov_iter *i, int direction,
543			const struct bio_vec *bvec, unsigned long nr_segs,
544			size_t count)
545{
546	BUG_ON(!(direction & ITER_BVEC));
547	i->type = direction;
548	i->bvec = bvec;
549	i->nr_segs = nr_segs;
550	i->iov_offset = 0;
551	i->count = count;
552}
553EXPORT_SYMBOL(iov_iter_bvec);
554
555unsigned long iov_iter_alignment(const struct iov_iter *i)
556{
557	unsigned long res = 0;
558	size_t size = i->count;
559
560	if (!size)
561		return 0;
562
563	iterate_all_kinds(i, size, v,
564		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
565		res |= v.bv_offset | v.bv_len,
566		res |= (unsigned long)v.iov_base | v.iov_len
567	)
568	return res;
569}
570EXPORT_SYMBOL(iov_iter_alignment);
571
572unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
573{
 574	unsigned long res = 0;
575	size_t size = i->count;
576	if (!size)
577		return 0;
578
579	iterate_all_kinds(i, size, v,
580		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
581			(size != v.iov_len ? size : 0), 0),
582		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
583			(size != v.bv_len ? size : 0)),
584		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
585			(size != v.iov_len ? size : 0))
586		);
 587	return res;
588}
589EXPORT_SYMBOL(iov_iter_gap_alignment);
590
591ssize_t iov_iter_get_pages(struct iov_iter *i,
592		   struct page **pages, size_t maxsize, unsigned maxpages,
593		   size_t *start)
594{
595	if (maxsize > i->count)
596		maxsize = i->count;
597
598	if (!maxsize)
599		return 0;
600
601	iterate_all_kinds(i, maxsize, v, ({
602		unsigned long addr = (unsigned long)v.iov_base;
603		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
604		int n;
605		int res;
606
607		if (len > maxpages * PAGE_SIZE)
608			len = maxpages * PAGE_SIZE;
609		addr &= ~(PAGE_SIZE - 1);
610		n = DIV_ROUND_UP(len, PAGE_SIZE);
611		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
612		if (unlikely(res < 0))
613			return res;
614		return (res == n ? len : res * PAGE_SIZE) - *start;
615	0;}),({
616		/* can't be more than PAGE_SIZE */
617		*start = v.bv_offset;
618		get_page(*pages = v.bv_page);
619		return v.bv_len;
620	}),({
621		return -EFAULT;
622	})
623	)
624	return 0;
625}
626EXPORT_SYMBOL(iov_iter_get_pages);
627
628static struct page **get_pages_array(size_t n)
629{
630	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
631	if (!p)
632		p = vmalloc(n * sizeof(struct page *));
633	return p;
634}
635
636ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
637		   struct page ***pages, size_t maxsize,
638		   size_t *start)
639{
640	struct page **p;
641
642	if (maxsize > i->count)
643		maxsize = i->count;
644
645	if (!maxsize)
646		return 0;
647
648	iterate_all_kinds(i, maxsize, v, ({
649		unsigned long addr = (unsigned long)v.iov_base;
650		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
651		int n;
652		int res;
653
654		addr &= ~(PAGE_SIZE - 1);
655		n = DIV_ROUND_UP(len, PAGE_SIZE);
656		p = get_pages_array(n);
657		if (!p)
658			return -ENOMEM;
659		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
660		if (unlikely(res < 0)) {
661			kvfree(p);
662			return res;
663		}
664		*pages = p;
665		return (res == n ? len : res * PAGE_SIZE) - *start;
666	0;}),({
667		/* can't be more than PAGE_SIZE */
668		*start = v.bv_offset;
669		*pages = p = get_pages_array(1);
670		if (!p)
671			return -ENOMEM;
672		get_page(*p = v.bv_page);
673		return v.bv_len;
674	}),({
675		return -EFAULT;
676	})
677	)
678	return 0;
679}
680EXPORT_SYMBOL(iov_iter_get_pages_alloc);
681
682size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
683			       struct iov_iter *i)
684{
685	char *to = addr;
686	__wsum sum, next;
687	size_t off = 0;
688	if (unlikely(bytes > i->count))
689		bytes = i->count;
690
691	if (unlikely(!bytes))
692		return 0;
693
694	sum = *csum;
695	iterate_and_advance(i, bytes, v, ({
696		int err = 0;
697		next = csum_and_copy_from_user(v.iov_base, 
698					       (to += v.iov_len) - v.iov_len,
699					       v.iov_len, 0, &err);
700		if (!err) {
701			sum = csum_block_add(sum, next, off);
702			off += v.iov_len;
703		}
704		err ? v.iov_len : 0;
705	}), ({
706		char *p = kmap_atomic(v.bv_page);
707		next = csum_partial_copy_nocheck(p + v.bv_offset,
708						 (to += v.bv_len) - v.bv_len,
709						 v.bv_len, 0);
710		kunmap_atomic(p);
711		sum = csum_block_add(sum, next, off);
712		off += v.bv_len;
713	}),({
714		next = csum_partial_copy_nocheck(v.iov_base,
715						 (to += v.iov_len) - v.iov_len,
716						 v.iov_len, 0);
717		sum = csum_block_add(sum, next, off);
718		off += v.iov_len;
719	})
720	)
721	*csum = sum;
722	return bytes;
723}
724EXPORT_SYMBOL(csum_and_copy_from_iter);
725
726size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
727			     struct iov_iter *i)
728{
729	const char *from = addr;
730	__wsum sum, next;
731	size_t off = 0;
732	if (unlikely(bytes > i->count))
733		bytes = i->count;
734
735	if (unlikely(!bytes))
736		return 0;
737
738	sum = *csum;
739	iterate_and_advance(i, bytes, v, ({
740		int err = 0;
741		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
742					     v.iov_base, 
743					     v.iov_len, 0, &err);
744		if (!err) {
745			sum = csum_block_add(sum, next, off);
746			off += v.iov_len;
747		}
748		err ? v.iov_len : 0;
749	}), ({
750		char *p = kmap_atomic(v.bv_page);
751		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
752						 p + v.bv_offset,
753						 v.bv_len, 0);
754		kunmap_atomic(p);
755		sum = csum_block_add(sum, next, off);
756		off += v.bv_len;
757	}),({
758		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
759						 v.iov_base,
760						 v.iov_len, 0);
761		sum = csum_block_add(sum, next, off);
762		off += v.iov_len;
763	})
764	)
765	*csum = sum;
766	return bytes;
767}
768EXPORT_SYMBOL(csum_and_copy_to_iter);
769
770int iov_iter_npages(const struct iov_iter *i, int maxpages)
771{
772	size_t size = i->count;
773	int npages = 0;
774
775	if (!size)
776		return 0;
777
778	iterate_all_kinds(i, size, v, ({
779		unsigned long p = (unsigned long)v.iov_base;
780		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
781			- p / PAGE_SIZE;
782		if (npages >= maxpages)
783			return maxpages;
784	0;}),({
785		npages++;
786		if (npages >= maxpages)
787			return maxpages;
788	}),({
789		unsigned long p = (unsigned long)v.iov_base;
790		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
791			- p / PAGE_SIZE;
792		if (npages >= maxpages)
793			return maxpages;
794	})
795	)
796	return npages;
797}
798EXPORT_SYMBOL(iov_iter_npages);
799
800const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
801{
802	*new = *old;
803	if (new->type & ITER_BVEC)
804		return new->bvec = kmemdup(new->bvec,
805				    new->nr_segs * sizeof(struct bio_vec),
806				    flags);
807	else
808		/* iovec and kvec have identical layout */
809		return new->iov = kmemdup(new->iov,
810				   new->nr_segs * sizeof(struct iovec),
811				   flags);
812}
813EXPORT_SYMBOL(dup_iter);
814
815int import_iovec(int type, const struct iovec __user * uvector,
816		 unsigned nr_segs, unsigned fast_segs,
817		 struct iovec **iov, struct iov_iter *i)
818{
819	ssize_t n;
820	struct iovec *p;
821	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
822				  *iov, &p);
823	if (n < 0) {
824		if (p != *iov)
825			kfree(p);
826		*iov = NULL;
827		return n;
828	}
829	iov_iter_init(i, type, p, nr_segs, n);
830	*iov = p == *iov ? NULL : p;
831	return 0;
832}
833EXPORT_SYMBOL(import_iovec);
834
835#ifdef CONFIG_COMPAT
836#include <linux/compat.h>
837
838int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
839		 unsigned nr_segs, unsigned fast_segs,
840		 struct iovec **iov, struct iov_iter *i)
841{
842	ssize_t n;
843	struct iovec *p;
844	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
845				  *iov, &p);
846	if (n < 0) {
847		if (p != *iov)
848			kfree(p);
849		*iov = NULL;
850		return n;
851	}
852	iov_iter_init(i, type, p, nr_segs, n);
853	*iov = p == *iov ? NULL : p;
854	return 0;
855}
856#endif
857
858int import_single_range(int rw, void __user *buf, size_t len,
859		 struct iovec *iov, struct iov_iter *i)
860{
861	if (len > MAX_RW_COUNT)
862		len = MAX_RW_COUNT;
863	if (unlikely(!access_ok(!rw, buf, len)))
864		return -EFAULT;
865
866	iov->iov_base = buf;
867	iov->iov_len = len;
868	iov_iter_init(i, rw, iov, 1, len);
869	return 0;
870}
871EXPORT_SYMBOL(import_single_range);