lib/iov_iter.c (Linux v4.10.11)
   1#include <linux/export.h>
   2#include <linux/bvec.h>
   3#include <linux/uio.h>
   4#include <linux/pagemap.h>
   5#include <linux/slab.h>
   6#include <linux/vmalloc.h>
   7#include <linux/splice.h>
   8#include <net/checksum.h>
   9
  10#define PIPE_PARANOIA /* for now */
  11
  12#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
  13	size_t left;					\
  14	size_t wanted = n;				\
  15	__p = i->iov;					\
  16	__v.iov_len = min(n, __p->iov_len - skip);	\
  17	if (likely(__v.iov_len)) {			\
  18		__v.iov_base = __p->iov_base + skip;	\
  19		left = (STEP);				\
  20		__v.iov_len -= left;			\
  21		skip += __v.iov_len;			\
  22		n -= __v.iov_len;			\
  23	} else {					\
  24		left = 0;				\
  25	}						\
  26	while (unlikely(!left && n)) {			\
  27		__p++;					\
  28		__v.iov_len = min(n, __p->iov_len);	\
  29		if (unlikely(!__v.iov_len))		\
  30			continue;			\
  31		__v.iov_base = __p->iov_base;		\
  32		left = (STEP);				\
  33		__v.iov_len -= left;			\
  34		skip = __v.iov_len;			\
  35		n -= __v.iov_len;			\
  36	}						\
  37	n = wanted - n;					\
  38}
  39
  40#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
  41	size_t wanted = n;				\
  42	__p = i->kvec;					\
  43	__v.iov_len = min(n, __p->iov_len - skip);	\
  44	if (likely(__v.iov_len)) {			\
  45		__v.iov_base = __p->iov_base + skip;	\
  46		(void)(STEP);				\
  47		skip += __v.iov_len;			\
  48		n -= __v.iov_len;			\
  49	}						\
  50	while (unlikely(n)) {				\
  51		__p++;					\
  52		__v.iov_len = min(n, __p->iov_len);	\
  53		if (unlikely(!__v.iov_len))		\
  54			continue;			\
  55		__v.iov_base = __p->iov_base;		\
  56		(void)(STEP);				\
  57		skip = __v.iov_len;			\
  58		n -= __v.iov_len;			\
  59	}						\
  60	n = wanted;					\
  61}
  62
  63#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
  64	struct bvec_iter __start;			\
  65	__start.bi_size = n;				\
  66	__start.bi_bvec_done = skip;			\
  67	__start.bi_idx = 0;				\
  68	for_each_bvec(__v, i->bvec, __bi, __start) {	\
  69		if (!__v.bv_len)			\
  70			continue;			\
  71		(void)(STEP);				\
  72	}						\
  73}
  74
  75#define iterate_all_kinds(i, n, v, I, B, K) {			\
  76	if (likely(n)) {					\
  77		size_t skip = i->iov_offset;			\
  78		if (unlikely(i->type & ITER_BVEC)) {		\
  79			struct bio_vec v;			\
  80			struct bvec_iter __bi;			\
  81			iterate_bvec(i, n, v, __bi, skip, (B))	\
  82		} else if (unlikely(i->type & ITER_KVEC)) {	\
  83			const struct kvec *kvec;		\
  84			struct kvec v;				\
  85			iterate_kvec(i, n, v, kvec, skip, (K))	\
  86		} else {					\
  87			const struct iovec *iov;		\
  88			struct iovec v;				\
  89			iterate_iovec(i, n, v, iov, skip, (I))	\
  90		}						\
  91	}							\
  92}
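/*
 * Annotation (not part of the original file): a minimal sketch of how a
 * caller drives iterate_all_kinds().  The three STEP arguments handle
 * user iovecs, bvecs and kvecs in turn; the iovec step must evaluate to
 * the number of bytes it could not process (0 on full success), while
 * the bvec and kvec steps are plain expressions.  Pipe-backed iterators
 * never reach this macro; callers check ITER_PIPE first.
 */
static size_t example_sum_segments(struct iov_iter *i, size_t bytes)
{
        size_t seen = 0;
        iterate_all_kinds(i, bytes, v,
                ({ seen += v.iov_len; 0; }),    /* user-space iovec */
                ({ seen += v.bv_len; }),        /* page-based bvec */
                ({ seen += v.iov_len; })        /* kernel kvec */
        )
        return seen;
}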
  93
  94#define iterate_and_advance(i, n, v, I, B, K) {			\
  95	if (unlikely(i->count < n))				\
  96		n = i->count;					\
  97	if (i->count) {						\
  98		size_t skip = i->iov_offset;			\
  99		if (unlikely(i->type & ITER_BVEC)) {		\
 100			const struct bio_vec *bvec = i->bvec;	\
 101			struct bio_vec v;			\
 102			struct bvec_iter __bi;			\
 103			iterate_bvec(i, n, v, __bi, skip, (B))	\
 104			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
 105			i->nr_segs -= i->bvec - bvec;		\
 106			skip = __bi.bi_bvec_done;		\
 107		} else if (unlikely(i->type & ITER_KVEC)) {	\
 108			const struct kvec *kvec;		\
 109			struct kvec v;				\
 110			iterate_kvec(i, n, v, kvec, skip, (K))	\
 111			if (skip == kvec->iov_len) {		\
 112				kvec++;				\
 113				skip = 0;			\
 114			}					\
 115			i->nr_segs -= kvec - i->kvec;		\
 116			i->kvec = kvec;				\
 117		} else {					\
 118			const struct iovec *iov;		\
 119			struct iovec v;				\
 120			iterate_iovec(i, n, v, iov, skip, (I))	\
 121			if (skip == iov->iov_len) {		\
 122				iov++;				\
 123				skip = 0;			\
 124			}					\
 125			i->nr_segs -= iov - i->iov;		\
 126			i->iov = iov;				\
 127		}						\
 128		i->count -= n;					\
 129		i->iov_offset = skip;				\
 130	}							\
 131}
 132
 133static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 134			 struct iov_iter *i)
 135{
 136	size_t skip, copy, left, wanted;
 137	const struct iovec *iov;
 138	char __user *buf;
 139	void *kaddr, *from;
 140
 141	if (unlikely(bytes > i->count))
 142		bytes = i->count;
 143
 144	if (unlikely(!bytes))
 145		return 0;
 146
 147	wanted = bytes;
 148	iov = i->iov;
 149	skip = i->iov_offset;
 150	buf = iov->iov_base + skip;
 151	copy = min(bytes, iov->iov_len - skip);
 152
 153	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 154		kaddr = kmap_atomic(page);
 155		from = kaddr + offset;
 156
 157		/* first chunk, usually the only one */
 158		left = __copy_to_user_inatomic(buf, from, copy);
 159		copy -= left;
 160		skip += copy;
 161		from += copy;
 162		bytes -= copy;
 163
 164		while (unlikely(!left && bytes)) {
 165			iov++;
 166			buf = iov->iov_base;
 167			copy = min(bytes, iov->iov_len);
 168			left = __copy_to_user_inatomic(buf, from, copy);
 169			copy -= left;
 170			skip = copy;
 171			from += copy;
 172			bytes -= copy;
 173		}
 174		if (likely(!bytes)) {
 175			kunmap_atomic(kaddr);
 176			goto done;
 177		}
 178		offset = from - kaddr;
 179		buf += copy;
 180		kunmap_atomic(kaddr);
 181		copy = min(bytes, iov->iov_len - skip);
 182	}
 183	/* Too bad - revert to non-atomic kmap */
 184
 185	kaddr = kmap(page);
 186	from = kaddr + offset;
 187	left = __copy_to_user(buf, from, copy);
 188	copy -= left;
 189	skip += copy;
 190	from += copy;
 191	bytes -= copy;
 192	while (unlikely(!left && bytes)) {
 193		iov++;
 194		buf = iov->iov_base;
 195		copy = min(bytes, iov->iov_len);
 196		left = __copy_to_user(buf, from, copy);
 197		copy -= left;
 198		skip = copy;
 199		from += copy;
 200		bytes -= copy;
 201	}
 202	kunmap(page);
 203
 204done:
 205	if (skip == iov->iov_len) {
 206		iov++;
 207		skip = 0;
 208	}
 209	i->count -= wanted - bytes;
 210	i->nr_segs -= iov - i->iov;
 211	i->iov = iov;
 212	i->iov_offset = skip;
 213	return wanted - bytes;
 214}
 215
 216static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
 217			 struct iov_iter *i)
 218{
 219	size_t skip, copy, left, wanted;
 220	const struct iovec *iov;
 221	char __user *buf;
 222	void *kaddr, *to;
 223
 224	if (unlikely(bytes > i->count))
 225		bytes = i->count;
 226
 227	if (unlikely(!bytes))
 228		return 0;
 229
 230	wanted = bytes;
 231	iov = i->iov;
 232	skip = i->iov_offset;
 233	buf = iov->iov_base + skip;
 234	copy = min(bytes, iov->iov_len - skip);
 235
 236	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 237		kaddr = kmap_atomic(page);
 238		to = kaddr + offset;
 239
 240		/* first chunk, usually the only one */
 241		left = __copy_from_user_inatomic(to, buf, copy);
 242		copy -= left;
 243		skip += copy;
 244		to += copy;
 245		bytes -= copy;
 246
 247		while (unlikely(!left && bytes)) {
 248			iov++;
 249			buf = iov->iov_base;
 250			copy = min(bytes, iov->iov_len);
 251			left = __copy_from_user_inatomic(to, buf, copy);
 252			copy -= left;
 253			skip = copy;
 254			to += copy;
 255			bytes -= copy;
 256		}
 257		if (likely(!bytes)) {
 258			kunmap_atomic(kaddr);
 259			goto done;
 260		}
 261		offset = to - kaddr;
 262		buf += copy;
 263		kunmap_atomic(kaddr);
 264		copy = min(bytes, iov->iov_len - skip);
 265	}
 266	/* Too bad - revert to non-atomic kmap */
 267
 268	kaddr = kmap(page);
 269	to = kaddr + offset;
 270	left = __copy_from_user(to, buf, copy);
 271	copy -= left;
 272	skip += copy;
 273	to += copy;
 274	bytes -= copy;
 275	while (unlikely(!left && bytes)) {
 276		iov++;
 277		buf = iov->iov_base;
 278		copy = min(bytes, iov->iov_len);
 279		left = __copy_from_user(to, buf, copy);
 280		copy -= left;
 281		skip = copy;
 282		to += copy;
 283		bytes -= copy;
 284	}
 285	kunmap(page);
 286
 287done:
 288	if (skip == iov->iov_len) {
 289		iov++;
 290		skip = 0;
 291	}
 292	i->count -= wanted - bytes;
 293	i->nr_segs -= iov - i->iov;
 294	i->iov = iov;
 295	i->iov_offset = skip;
 296	return wanted - bytes;
 297}
 298
 299#ifdef PIPE_PARANOIA
 300static bool sanity(const struct iov_iter *i)
 301{
 302	struct pipe_inode_info *pipe = i->pipe;
 303	int idx = i->idx;
 304	int next = pipe->curbuf + pipe->nrbufs;
 305	if (i->iov_offset) {
 306		struct pipe_buffer *p;
 307		if (unlikely(!pipe->nrbufs))
 308			goto Bad;	// pipe must be non-empty
 309		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
 310			goto Bad;	// must be at the last buffer...
 311
 312		p = &pipe->bufs[idx];
 313		if (unlikely(p->offset + p->len != i->iov_offset))
 314			goto Bad;	// ... at the end of segment
 315	} else {
 316		if (idx != (next & (pipe->buffers - 1)))
 317			goto Bad;	// must be right after the last buffer
 318	}
 319	return true;
 320Bad:
 321	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
 322	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
 323			pipe->curbuf, pipe->nrbufs, pipe->buffers);
 324	for (idx = 0; idx < pipe->buffers; idx++)
 325		printk(KERN_ERR "[%p %p %d %d]\n",
 326			pipe->bufs[idx].ops,
 327			pipe->bufs[idx].page,
 328			pipe->bufs[idx].offset,
 329			pipe->bufs[idx].len);
 330	WARN_ON(1);
 331	return false;
 332}
 333#else
 334#define sanity(i) true
 335#endif
 336
 337static inline int next_idx(int idx, struct pipe_inode_info *pipe)
 338{
 339	return (idx + 1) & (pipe->buffers - 1);
 340}
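/*
 * Annotation: pipe->buffers is always a power of two, so the masked
 * increment in next_idx() wraps around the ring without a modulo.
 * Worked example with buffers == 16: next_idx(15, pipe) computes
 * (15 + 1) & 15 == 0, returning to the first slot.
 */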
 341
 342static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
 343			 struct iov_iter *i)
 344{
 345	struct pipe_inode_info *pipe = i->pipe;
 346	struct pipe_buffer *buf;
 347	size_t off;
 348	int idx;
 349
 350	if (unlikely(bytes > i->count))
 351		bytes = i->count;
 352
 353	if (unlikely(!bytes))
 354		return 0;
 355
 356	if (!sanity(i))
 357		return 0;
 358
 359	off = i->iov_offset;
 360	idx = i->idx;
 361	buf = &pipe->bufs[idx];
 362	if (off) {
 363		if (offset == off && buf->page == page) {
 364			/* merge with the last one */
 365			buf->len += bytes;
 366			i->iov_offset += bytes;
 367			goto out;
 368		}
 369		idx = next_idx(idx, pipe);
 370		buf = &pipe->bufs[idx];
 371	}
 372	if (idx == pipe->curbuf && pipe->nrbufs)
 373		return 0;
 374	pipe->nrbufs++;
 375	buf->ops = &page_cache_pipe_buf_ops;
 376	get_page(buf->page = page);
 377	buf->offset = offset;
 378	buf->len = bytes;
 379	i->iov_offset = offset + bytes;
 380	i->idx = idx;
 381out:
 382	i->count -= bytes;
 383	return bytes;
 384}
 385
 386/*
 387 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 388 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 389 *
 390 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 391 * because it is an invalid address).
 392 */
 393int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 394{
 395	size_t skip = i->iov_offset;
 396	const struct iovec *iov;
 397	int err;
 398	struct iovec v;
 399
 400	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 401		iterate_iovec(i, bytes, v, iov, skip, ({
 402			err = fault_in_pages_readable(v.iov_base, v.iov_len);
 403			if (unlikely(err))
 404			return err;
 405		0;}))
 406	}
 407	return 0;
 408}
 409EXPORT_SYMBOL(iov_iter_fault_in_readable);
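/*
 * Annotation: a sketch of the classic caller pattern (compare
 * generic_perform_write() in mm/filemap.c): fault the user pages in up
 * front so that a later atomic, non-faulting copy is likely to succeed.
 * The function name and surrounding logic are illustrative only.
 */
static int example_prefault_then_copy(struct iov_iter *i, size_t bytes)
{
        if (iov_iter_fault_in_readable(i, bytes))
                return -EFAULT;
        /* ... now attempt iov_iter_copy_from_user_atomic() under kmap ... */
        return 0;
}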
 410
 411void iov_iter_init(struct iov_iter *i, int direction,
 412			const struct iovec *iov, unsigned long nr_segs,
 413			size_t count)
 414{
 415	/* It will get better.  Eventually... */
 416	if (segment_eq(get_fs(), KERNEL_DS)) {
 417		direction |= ITER_KVEC;
 418		i->type = direction;
 419		i->kvec = (struct kvec *)iov;
 420	} else {
 421		i->type = direction;
 422		i->iov = iov;
 423	}
 424	i->nr_segs = nr_segs;
 425	i->iov_offset = 0;
 426	i->count = count;
 427}
 428EXPORT_SYMBOL(iov_iter_init);
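/*
 * Annotation: typical initialization for a read(2)-style transfer into a
 * single user segment; the helper below is purely illustrative.
 */
static void example_init_read_iter(struct iov_iter *i, struct iovec *iov,
                                   void __user *buf, size_t len)
{
        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, READ, iov, 1, len);    /* data will flow to userspace */
}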
 429
 430static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
 431{
 432	char *from = kmap_atomic(page);
 433	memcpy(to, from + offset, len);
 434	kunmap_atomic(from);
 435}
 436
 437static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
 438{
 439	char *to = kmap_atomic(page);
 440	memcpy(to + offset, from, len);
 441	kunmap_atomic(to);
 442}
 443
 444static void memzero_page(struct page *page, size_t offset, size_t len)
 445{
 446	char *addr = kmap_atomic(page);
 447	memset(addr + offset, 0, len);
 448	kunmap_atomic(addr);
 449}
 450
 451static inline bool allocated(struct pipe_buffer *buf)
 452{
 453	return buf->ops == &default_pipe_buf_ops;
 454}
 455
 456static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
 457{
 458	size_t off = i->iov_offset;
 459	int idx = i->idx;
 460	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
 461		idx = next_idx(idx, i->pipe);
 462		off = 0;
 463	}
 464	*idxp = idx;
 465	*offp = off;
 466}
 467
 468static size_t push_pipe(struct iov_iter *i, size_t size,
 469			int *idxp, size_t *offp)
 470{
 471	struct pipe_inode_info *pipe = i->pipe;
 472	size_t off;
 473	int idx;
 474	ssize_t left;
 475
 476	if (unlikely(size > i->count))
 477		size = i->count;
 478	if (unlikely(!size))
 479		return 0;
 480
 481	left = size;
 482	data_start(i, &idx, &off);
 483	*idxp = idx;
 484	*offp = off;
 485	if (off) {
 486		left -= PAGE_SIZE - off;
 487		if (left <= 0) {
 488			pipe->bufs[idx].len += size;
 489			return size;
 490		}
 491		pipe->bufs[idx].len = PAGE_SIZE;
 492		idx = next_idx(idx, pipe);
 493	}
 494	while (idx != pipe->curbuf || !pipe->nrbufs) {
 495		struct page *page = alloc_page(GFP_USER);
 496		if (!page)
 497			break;
 498		pipe->nrbufs++;
 499		pipe->bufs[idx].ops = &default_pipe_buf_ops;
 500		pipe->bufs[idx].page = page;
 501		pipe->bufs[idx].offset = 0;
 502		if (left <= PAGE_SIZE) {
 503			pipe->bufs[idx].len = left;
 504			return size;
 505		}
 506		pipe->bufs[idx].len = PAGE_SIZE;
 507		left -= PAGE_SIZE;
 508		idx = next_idx(idx, pipe);
 509	}
 510	return size - left;
 511}
 512
 513static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 514				struct iov_iter *i)
 515{
 516	struct pipe_inode_info *pipe = i->pipe;
 517	size_t n, off;
 518	int idx;
 519
 520	if (!sanity(i))
 521		return 0;
 522
 523	bytes = n = push_pipe(i, bytes, &idx, &off);
 524	if (unlikely(!n))
 525		return 0;
 526	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
 527		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 528		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
 529		i->idx = idx;
 530		i->iov_offset = off + chunk;
 531		n -= chunk;
 532		addr += chunk;
 533	}
 534	i->count -= bytes;
 535	return bytes;
 536}
 537
 538size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 539{
 540	const char *from = addr;
 541	if (unlikely(i->type & ITER_PIPE))
 542		return copy_pipe_to_iter(addr, bytes, i);
 543	iterate_and_advance(i, bytes, v,
 544		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
 545			       v.iov_len),
 546		memcpy_to_page(v.bv_page, v.bv_offset,
 547			       (from += v.bv_len) - v.bv_len, v.bv_len),
 548		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 549	)
 550
 551	return bytes;
 552}
 553EXPORT_SYMBOL(copy_to_iter);
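/*
 * Annotation: a sketch of the sort of call a driver's ->read_iter() hook
 * makes; the message buffer and function name are hypothetical.
 */
static ssize_t example_emit_greeting(struct iov_iter *to)
{
        static const char msg[] = "hello from the kernel\n";
        /* copies into however many segments 'to' happens to contain */
        return copy_to_iter(msg, sizeof(msg), to);
}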
 554
 555size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 556{
 557	char *to = addr;
 558	if (unlikely(i->type & ITER_PIPE)) {
 559		WARN_ON(1);
 560		return 0;
 561	}
 562	iterate_and_advance(i, bytes, v,
 563		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
 564				 v.iov_len),
 565		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 566				 v.bv_offset, v.bv_len),
 567		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 568	)
 569
 570	return bytes;
 571}
 572EXPORT_SYMBOL(copy_from_iter);
 573
 574bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 575{
 576	char *to = addr;
 577	if (unlikely(i->type & ITER_PIPE)) {
 578		WARN_ON(1);
 579		return false;
 580	}
 581	if (unlikely(i->count < bytes))
 582		return false;
 583
 584	iterate_all_kinds(i, bytes, v, ({
 585		if (__copy_from_user((to += v.iov_len) - v.iov_len,
 586				      v.iov_base, v.iov_len))
 587			return false;
 588		0;}),
 589		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 590				 v.bv_offset, v.bv_len),
 591		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 592	)
 593
 594	iov_iter_advance(i, bytes);
 595	return true;
 596}
 597EXPORT_SYMBOL(copy_from_iter_full);
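/*
 * Annotation: unlike copy_from_iter(), the _full variant is
 * all-or-nothing: on failure it returns false without advancing the
 * iterator, which suits pulling fixed-size headers.  The struct and
 * helper below are hypothetical.
 */
struct example_hdr {
        u32 type;
        u32 len;
};

static int example_pull_header(struct example_hdr *hdr, struct iov_iter *from)
{
        if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
                return -EFAULT;
        return 0;
}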
 598
 599size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 600{
 601	char *to = addr;
 602	if (unlikely(i->type & ITER_PIPE)) {
 603		WARN_ON(1);
 604		return 0;
 605	}
 606	iterate_and_advance(i, bytes, v,
 607		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 608					 v.iov_base, v.iov_len),
 609		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 610				 v.bv_offset, v.bv_len),
 611		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 612	)
 613
 614	return bytes;
 615}
 616EXPORT_SYMBOL(copy_from_iter_nocache);
 617
 618bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 619{
 620	char *to = addr;
 621	if (unlikely(i->type & ITER_PIPE)) {
 622		WARN_ON(1);
 623		return false;
 624	}
 625	if (unlikely(i->count < bytes))
 626		return false;
 627	iterate_all_kinds(i, bytes, v, ({
 628		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
 629					     v.iov_base, v.iov_len))
 630			return false;
 631		0;}),
 632		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
 633				 v.bv_offset, v.bv_len),
 634		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 635	)
 636
 637	iov_iter_advance(i, bytes);
 638	return true;
 639}
 640EXPORT_SYMBOL(copy_from_iter_full_nocache);
 641
 642size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 643			 struct iov_iter *i)
 644{
 645	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 646		void *kaddr = kmap_atomic(page);
 647		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
 648		kunmap_atomic(kaddr);
 649		return wanted;
 650	} else if (likely(!(i->type & ITER_PIPE)))
 651		return copy_page_to_iter_iovec(page, offset, bytes, i);
 652	else
 653		return copy_page_to_iter_pipe(page, offset, bytes, i);
 654}
 655EXPORT_SYMBOL(copy_page_to_iter);
 656
 657size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 658			 struct iov_iter *i)
 659{
 660	if (unlikely(i->type & ITER_PIPE)) {
 661		WARN_ON(1);
 662		return 0;
 663	}
 664	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 665		void *kaddr = kmap_atomic(page);
 666		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
 667		kunmap_atomic(kaddr);
 668		return wanted;
 669	} else
 670		return copy_page_from_iter_iovec(page, offset, bytes, i);
 671}
 672EXPORT_SYMBOL(copy_page_from_iter);
 673
 674static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 675{
 676	struct pipe_inode_info *pipe = i->pipe;
 677	size_t n, off;
 678	int idx;
 679
 680	if (!sanity(i))
 681		return 0;
 682
 683	bytes = n = push_pipe(i, bytes, &idx, &off);
 684	if (unlikely(!n))
 685		return 0;
 686
 687	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
 688		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
 689		memzero_page(pipe->bufs[idx].page, off, chunk);
 690		i->idx = idx;
 691		i->iov_offset = off + chunk;
 692		n -= chunk;
 693	}
 694	i->count -= bytes;
 695	return bytes;
 696}
 697
 698size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 699{
 700	if (unlikely(i->type & ITER_PIPE))
 701		return pipe_zero(bytes, i);
 702	iterate_and_advance(i, bytes, v,
 703		__clear_user(v.iov_base, v.iov_len),
 704		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
 705		memset(v.iov_base, 0, v.iov_len)
 706	)
 707
 708	return bytes;
 709}
 710EXPORT_SYMBOL(iov_iter_zero);
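/*
 * Annotation: iov_iter_zero() is what read paths use to fill holes,
 * e.g. when a sparse file read crosses an unwritten extent.  A minimal
 * illustrative wrapper:
 */
static size_t example_fill_hole(struct iov_iter *to, size_t hole_bytes)
{
        /* returns how many bytes were actually zeroed in 'to' */
        return iov_iter_zero(hole_bytes, to);
}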
 711
 712size_t iov_iter_copy_from_user_atomic(struct page *page,
 713		struct iov_iter *i, unsigned long offset, size_t bytes)
 714{
 715	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
 716	if (unlikely(i->type & ITER_PIPE)) {
 717		kunmap_atomic(kaddr);
 718		WARN_ON(1);
 719		return 0;
 720	}
 721	iterate_all_kinds(i, bytes, v,
 722		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
 723					  v.iov_base, v.iov_len),
 724		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
 725				 v.bv_offset, v.bv_len),
 726		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 727	)
 728	kunmap_atomic(kaddr);
 729	return bytes;
 730}
 731EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 732
 733static inline void pipe_truncate(struct iov_iter *i)
 734{
 735	struct pipe_inode_info *pipe = i->pipe;
 736	if (pipe->nrbufs) {
 737		size_t off = i->iov_offset;
 738		int idx = i->idx;
 739		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
 740		if (off) {
 741			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
 742			idx = next_idx(idx, pipe);
 743			nrbufs++;
 744		}
 745		while (pipe->nrbufs > nrbufs) {
 746			pipe_buf_release(pipe, &pipe->bufs[idx]);
 747			idx = next_idx(idx, pipe);
 748			pipe->nrbufs--;
 749		}
 750	}
 751}
 752
 753static void pipe_advance(struct iov_iter *i, size_t size)
 754{
 755	struct pipe_inode_info *pipe = i->pipe;
 756	if (unlikely(i->count < size))
 757		size = i->count;
 758	if (size) {
 759		struct pipe_buffer *buf;
 760		size_t off = i->iov_offset, left = size;
 761		int idx = i->idx;
 762		if (off) /* make it relative to the beginning of buffer */
 763			left += off - pipe->bufs[idx].offset;
 764		while (1) {
 765			buf = &pipe->bufs[idx];
 766			if (left <= buf->len)
 767				break;
 768			left -= buf->len;
 769			idx = next_idx(idx, pipe);
 770		}
 771		i->idx = idx;
 772		i->iov_offset = buf->offset + left;
 773	}
 774	i->count -= size;
 775	/* ... and discard everything past that point */
 776	pipe_truncate(i);
 777}
 778
 779void iov_iter_advance(struct iov_iter *i, size_t size)
 780{
 781	if (unlikely(i->type & ITER_PIPE)) {
 782		pipe_advance(i, size);
 783		return;
 784	}
 785	iterate_and_advance(i, size, v, 0, 0, 0)
 786}
 787EXPORT_SYMBOL(iov_iter_advance);
 788
 789/*
 790 * Return the count of just the current iov_iter segment.
 791 */
 792size_t iov_iter_single_seg_count(const struct iov_iter *i)
 793{
 794	if (unlikely(i->type & ITER_PIPE))
 795		return i->count;	// it is a silly place, anyway
 796	if (i->nr_segs == 1)
 797		return i->count;
 798	else if (i->type & ITER_BVEC)
 799		return min(i->count, i->bvec->bv_len - i->iov_offset);
 800	else
 801		return min(i->count, i->iov->iov_len - i->iov_offset);
 802}
 803EXPORT_SYMBOL(iov_iter_single_seg_count);
 804
 805void iov_iter_kvec(struct iov_iter *i, int direction,
 806			const struct kvec *kvec, unsigned long nr_segs,
 807			size_t count)
 808{
 809	BUG_ON(!(direction & ITER_KVEC));
 810	i->type = direction;
 811	i->kvec = kvec;
 812	i->nr_segs = nr_segs;
 813	i->iov_offset = 0;
 814	i->count = count;
 815}
 816EXPORT_SYMBOL(iov_iter_kvec);
 817
 818void iov_iter_bvec(struct iov_iter *i, int direction,
 819			const struct bio_vec *bvec, unsigned long nr_segs,
 820			size_t count)
 821{
 822	BUG_ON(!(direction & ITER_BVEC));
 823	i->type = direction;
 824	i->bvec = bvec;
 825	i->nr_segs = nr_segs;
 826	i->iov_offset = 0;
 827	i->count = count;
 828}
 829EXPORT_SYMBOL(iov_iter_bvec);
 830
 831void iov_iter_pipe(struct iov_iter *i, int direction,
 832			struct pipe_inode_info *pipe,
 833			size_t count)
 834{
 835	BUG_ON(direction != ITER_PIPE);
 836	WARN_ON(pipe->nrbufs == pipe->buffers);
 837	i->type = direction;
 838	i->pipe = pipe;
 839	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
 840	i->iov_offset = 0;
 841	i->count = count;
 842}
 843EXPORT_SYMBOL(iov_iter_pipe);
 844
 845unsigned long iov_iter_alignment(const struct iov_iter *i)
 846{
 847	unsigned long res = 0;
 848	size_t size = i->count;
 849
 850	if (unlikely(i->type & ITER_PIPE)) {
 851		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
 852			return size | i->iov_offset;
 853		return size;
 854	}
 855	iterate_all_kinds(i, size, v,
 856		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
 857		res |= v.bv_offset | v.bv_len,
 858		res |= (unsigned long)v.iov_base | v.iov_len
 859	)
 860	return res;
 861}
 862EXPORT_SYMBOL(iov_iter_alignment);
 863
 864unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 865{
 866	unsigned long res = 0;
 867	size_t size = i->count;
 868
 869	if (unlikely(i->type & ITER_PIPE)) {
 870		WARN_ON(1);
 871		return ~0U;
 872	}
 873
 874	iterate_all_kinds(i, size, v,
 875		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 876			(size != v.iov_len ? size : 0), 0),
 877		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
 878			(size != v.bv_len ? size : 0)),
 879		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
 880			(size != v.iov_len ? size : 0))
 881		);
 882	return res;
 883}
 884EXPORT_SYMBOL(iov_iter_gap_alignment);
 885
 886static inline size_t __pipe_get_pages(struct iov_iter *i,
 887				size_t maxsize,
 888				struct page **pages,
 889				int idx,
 890				size_t *start)
 891{
 892	struct pipe_inode_info *pipe = i->pipe;
 893	ssize_t n = push_pipe(i, maxsize, &idx, start);
 894	if (!n)
 895		return -EFAULT;
 896
 897	maxsize = n;
 898	n += *start;
 899	while (n > 0) {
 900		get_page(*pages++ = pipe->bufs[idx].page);
 901		idx = next_idx(idx, pipe);
 902		n -= PAGE_SIZE;
 903	}
 904
 905	return maxsize;
 906}
 907
 908static ssize_t pipe_get_pages(struct iov_iter *i,
 909		   struct page **pages, size_t maxsize, unsigned maxpages,
 910		   size_t *start)
 911{
 912	unsigned npages;
 913	size_t capacity;
 914	int idx;
 915
 916	if (!maxsize)
 917		return 0;
 918
 919	if (!sanity(i))
 920		return -EFAULT;
 921
 922	data_start(i, &idx, start);
 923	/* some of this one + all after this one */
 924	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
 925	capacity = min(npages,maxpages) * PAGE_SIZE - *start;
 926
 927	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
 928}
 929
 930ssize_t iov_iter_get_pages(struct iov_iter *i,
 931		   struct page **pages, size_t maxsize, unsigned maxpages,
 932		   size_t *start)
 933{
 934	if (maxsize > i->count)
 935		maxsize = i->count;
 936
 937	if (unlikely(i->type & ITER_PIPE))
 938		return pipe_get_pages(i, pages, maxsize, maxpages, start);
 939	iterate_all_kinds(i, maxsize, v, ({
 940		unsigned long addr = (unsigned long)v.iov_base;
 941		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
 942		int n;
 943		int res;
 944
 945		if (len > maxpages * PAGE_SIZE)
 946			len = maxpages * PAGE_SIZE;
 947		addr &= ~(PAGE_SIZE - 1);
 948		n = DIV_ROUND_UP(len, PAGE_SIZE);
 949		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
 950		if (unlikely(res < 0))
 951			return res;
 952		return (res == n ? len : res * PAGE_SIZE) - *start;
 953	0;}),({
 954		/* can't be more than PAGE_SIZE */
 955		*start = v.bv_offset;
 956		get_page(*pages = v.bv_page);
 957		return v.bv_len;
 958	}),({
 959		return -EFAULT;
 960	})
 961	)
 962	return 0;
 963}
 964EXPORT_SYMBOL(iov_iter_get_pages);
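/*
 * Annotation: a direct-I/O-style sketch: pin one batch of the user pages
 * backing the iterator.  The caller owns a 'pages' array of at least
 * 'maxpages' entries and must put_page() each pinned page later; passing
 * SIZE_MAX as maxsize simply lets i->count cap the request.
 */
static ssize_t example_pin_batch(struct iov_iter *i, struct page **pages,
                                 unsigned maxpages)
{
        size_t start;   /* offset of the data within pages[0] */
        ssize_t n = iov_iter_get_pages(i, pages, SIZE_MAX, maxpages, &start);
        if (n > 0)
                iov_iter_advance(i, n); /* consume what was pinned */
        return n;
}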
 965
 966static struct page **get_pages_array(size_t n)
 967{
 968	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
 969	if (!p)
 970		p = vmalloc(n * sizeof(struct page *));
 971	return p;
 972}
 973
 974static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
 975		   struct page ***pages, size_t maxsize,
 976		   size_t *start)
 977{
 978	struct page **p;
 979	size_t n;
 980	int idx;
 981	int npages;
 982
 983	if (!maxsize)
 984		return 0;
 985
 986	if (!sanity(i))
 987		return -EFAULT;
 988
 989	data_start(i, &idx, start);
 990	/* some of this one + all after this one */
 991	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
 992	n = npages * PAGE_SIZE - *start;
 993	if (maxsize > n)
 994		maxsize = n;
 995	else
 996		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
 997	p = get_pages_array(npages);
 998	if (!p)
 999		return -ENOMEM;
1000	n = __pipe_get_pages(i, maxsize, p, idx, start);
1001	if (n > 0)
1002		*pages = p;
1003	else
1004		kvfree(p);
1005	return n;
1006}
1007
1008ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1009		   struct page ***pages, size_t maxsize,
1010		   size_t *start)
1011{
1012	struct page **p;
1013
1014	if (maxsize > i->count)
1015		maxsize = i->count;
1016
1017	if (unlikely(i->type & ITER_PIPE))
1018		return pipe_get_pages_alloc(i, pages, maxsize, start);
1019	iterate_all_kinds(i, maxsize, v, ({
1020		unsigned long addr = (unsigned long)v.iov_base;
1021		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
1022		int n;
1023		int res;
1024
1025		addr &= ~(PAGE_SIZE - 1);
1026		n = DIV_ROUND_UP(len, PAGE_SIZE);
1027		p = get_pages_array(n);
1028		if (!p)
1029			return -ENOMEM;
1030		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
1031		if (unlikely(res < 0)) {
1032			kvfree(p);
1033			return res;
1034		}
1035		*pages = p;
1036		return (res == n ? len : res * PAGE_SIZE) - *start;
1037	0;}),({
1038		/* can't be more than PAGE_SIZE */
1039		*start = v.bv_offset;
1040		*pages = p = get_pages_array(1);
1041		if (!p)
1042			return -ENOMEM;
1043		get_page(*p = v.bv_page);
1044		return v.bv_len;
1045	}),({
1046		return -EFAULT;
1047	})
1048	)
1049	return 0;
1050}
1051EXPORT_SYMBOL(iov_iter_get_pages_alloc);
1052
1053size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1054			       struct iov_iter *i)
1055{
1056	char *to = addr;
1057	__wsum sum, next;
1058	size_t off = 0;
1059	sum = *csum;
1060	if (unlikely(i->type & ITER_PIPE)) {
1061		WARN_ON(1);
1062		return 0;
1063	}
1064	iterate_and_advance(i, bytes, v, ({
1065		int err = 0;
1066		next = csum_and_copy_from_user(v.iov_base,
1067					       (to += v.iov_len) - v.iov_len,
1068					       v.iov_len, 0, &err);
1069		if (!err) {
1070			sum = csum_block_add(sum, next, off);
1071			off += v.iov_len;
1072		}
1073		err ? v.iov_len : 0;
1074	}), ({
1075		char *p = kmap_atomic(v.bv_page);
1076		next = csum_partial_copy_nocheck(p + v.bv_offset,
1077						 (to += v.bv_len) - v.bv_len,
1078						 v.bv_len, 0);
1079		kunmap_atomic(p);
1080		sum = csum_block_add(sum, next, off);
1081		off += v.bv_len;
1082	}),({
1083		next = csum_partial_copy_nocheck(v.iov_base,
1084						 (to += v.iov_len) - v.iov_len,
1085						 v.iov_len, 0);
1086		sum = csum_block_add(sum, next, off);
1087		off += v.iov_len;
1088	})
1089	)
1090	*csum = sum;
1091	return bytes;
1092}
1093EXPORT_SYMBOL(csum_and_copy_from_iter);
1094
1095bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1096			       struct iov_iter *i)
1097{
1098	char *to = addr;
1099	__wsum sum, next;
1100	size_t off = 0;
1101	sum = *csum;
1102	if (unlikely(i->type & ITER_PIPE)) {
1103		WARN_ON(1);
1104		return false;
1105	}
1106	if (unlikely(i->count < bytes))
1107		return false;
1108	iterate_all_kinds(i, bytes, v, ({
1109		int err = 0;
1110		next = csum_and_copy_from_user(v.iov_base,
1111					       (to += v.iov_len) - v.iov_len,
1112					       v.iov_len, 0, &err);
1113		if (err)
1114			return false;
1115		sum = csum_block_add(sum, next, off);
1116		off += v.iov_len;
1117		0;
1118	}), ({
1119		char *p = kmap_atomic(v.bv_page);
1120		next = csum_partial_copy_nocheck(p + v.bv_offset,
1121						 (to += v.bv_len) - v.bv_len,
1122						 v.bv_len, 0);
1123		kunmap_atomic(p);
1124		sum = csum_block_add(sum, next, off);
1125		off += v.bv_len;
1126	}),({
1127		next = csum_partial_copy_nocheck(v.iov_base,
1128						 (to += v.iov_len) - v.iov_len,
1129						 v.iov_len, 0);
1130		sum = csum_block_add(sum, next, off);
1131		off += v.iov_len;
1132	})
1133	)
1134	*csum = sum;
1135	iov_iter_advance(i, bytes);
1136	return true;
1137}
1138EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1139
1140size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1141			     struct iov_iter *i)
1142{
1143	const char *from = addr;
1144	__wsum sum, next;
1145	size_t off = 0;
1146	sum = *csum;
1147	if (unlikely(i->type & ITER_PIPE)) {
1148		WARN_ON(1);	/* for now */
1149		return 0;
1150	}
1151	iterate_and_advance(i, bytes, v, ({
1152		int err = 0;
1153		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
1154					     v.iov_base,
1155					     v.iov_len, 0, &err);
1156		if (!err) {
1157			sum = csum_block_add(sum, next, off);
1158			off += v.iov_len;
1159		}
1160		err ? v.iov_len : 0;
1161	}), ({
1162		char *p = kmap_atomic(v.bv_page);
1163		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
1164						 p + v.bv_offset,
1165						 v.bv_len, 0);
1166		kunmap_atomic(p);
1167		sum = csum_block_add(sum, next, off);
1168		off += v.bv_len;
1169	}),({
1170		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
1171						 v.iov_base,
1172						 v.iov_len, 0);
1173		sum = csum_block_add(sum, next, off);
1174		off += v.iov_len;
1175	})
1176	)
1177	*csum = sum;
1178	return bytes;
1179}
1180EXPORT_SYMBOL(csum_and_copy_to_iter);
1181
1182int iov_iter_npages(const struct iov_iter *i, int maxpages)
1183{
1184	size_t size = i->count;
1185	int npages = 0;
1186
1187	if (!size)
1188		return 0;
1189
1190	if (unlikely(i->type & ITER_PIPE)) {
1191		struct pipe_inode_info *pipe = i->pipe;
1192		size_t off;
1193		int idx;
1194
1195		if (!sanity(i))
1196			return 0;
1197
1198		data_start(i, &idx, &off);
1199		/* some of this one + all after this one */
1200		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
1201		if (npages >= maxpages)
1202			return maxpages;
1203	} else iterate_all_kinds(i, size, v, ({
1204		unsigned long p = (unsigned long)v.iov_base;
1205		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1206			- p / PAGE_SIZE;
1207		if (npages >= maxpages)
1208			return maxpages;
1209	0;}),({
1210		npages++;
1211		if (npages >= maxpages)
1212			return maxpages;
1213	}),({
1214		unsigned long p = (unsigned long)v.iov_base;
1215		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
1216			- p / PAGE_SIZE;
1217		if (npages >= maxpages)
1218			return maxpages;
1219	})
1220	)
1221	return npages;
1222}
1223EXPORT_SYMBOL(iov_iter_npages);
1224
1225const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1226{
1227	*new = *old;
1228	if (unlikely(new->type & ITER_PIPE)) {
1229		WARN_ON(1);
1230		return NULL;
1231	}
1232	if (new->type & ITER_BVEC)
1233		return new->bvec = kmemdup(new->bvec,
1234				    new->nr_segs * sizeof(struct bio_vec),
1235				    flags);
1236	else
1237		/* iovec and kvec have identical layout */
1238		return new->iov = kmemdup(new->iov,
1239				   new->nr_segs * sizeof(struct iovec),
1240				   flags);
1241}
1242EXPORT_SYMBOL(dup_iter);
1243
1244/**
1245 * import_iovec() - Copy an array of &struct iovec from userspace
1246 *     into the kernel, check that it is valid, and initialize a new
1247 *     &struct iov_iter iterator to access it.
1248 *
1249 * @type: One of %READ or %WRITE.
1250 * @uvector: Pointer to the userspace array.
1251 * @nr_segs: Number of elements in userspace array.
1252 * @fast_segs: Number of elements in @iov.
1253 * @iov: (input and output parameter) Pointer to pointer to (usually small
1254 *     on-stack) kernel array.
1255 * @i: Pointer to iterator that will be initialized on success.
1256 *
1257 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
1258 * then this function places %NULL in *@iov on return. Otherwise, a new
1259 * array will be allocated and the result placed in *@iov. This means that
1260 * the caller may call kfree() on *@iov regardless of whether the small
1261 * on-stack array was used or not (and regardless of whether this function
1262 * returns an error or not).
1263 *
1264 * Return: 0 on success or negative error code on error.
1265 */
1266int import_iovec(int type, const struct iovec __user * uvector,
1267		 unsigned nr_segs, unsigned fast_segs,
1268		 struct iovec **iov, struct iov_iter *i)
1269{
1270	ssize_t n;
1271	struct iovec *p;
1272	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1273				  *iov, &p);
1274	if (n < 0) {
1275		if (p != *iov)
1276			kfree(p);
1277		*iov = NULL;
1278		return n;
1279	}
1280	iov_iter_init(i, type, p, nr_segs, n);
1281	*iov = p == *iov ? NULL : p;
1282	return 0;
1283}
1284EXPORT_SYMBOL(import_iovec);
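/*
 * Annotation: the usual readv(2)-style caller, modelled on
 * fs/read_write.c; names are illustrative.  import_iovec() only
 * allocates when nr_segs exceeds the on-stack array, and kfree(iov) is
 * safe either way because *iov is set to NULL in the on-stack case.
 */
static ssize_t example_do_readv(const struct iovec __user *uvec,
                                unsigned nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;
        /* ... hand 'iter' to the actual read path here ... */
        kfree(iov);
        return ret;
}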
1285
1286#ifdef CONFIG_COMPAT
1287#include <linux/compat.h>
1288
1289int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
1290		 unsigned nr_segs, unsigned fast_segs,
1291		 struct iovec **iov, struct iov_iter *i)
1292{
1293	ssize_t n;
1294	struct iovec *p;
1295	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
1296				  *iov, &p);
1297	if (n < 0) {
1298		if (p != *iov)
1299			kfree(p);
1300		*iov = NULL;
1301		return n;
1302	}
1303	iov_iter_init(i, type, p, nr_segs, n);
1304	*iov = p == *iov ? NULL : p;
1305	return 0;
1306}
1307#endif
1308
1309int import_single_range(int rw, void __user *buf, size_t len,
1310		 struct iovec *iov, struct iov_iter *i)
1311{
1312	if (len > MAX_RW_COUNT)
1313		len = MAX_RW_COUNT;
1314	if (unlikely(!access_ok(!rw, buf, len)))
1315		return -EFAULT;
1316
1317	iov->iov_base = buf;
1318	iov->iov_len = len;
1319	iov_iter_init(i, rw, iov, 1, len);
1320	return 0;
1321}
1322EXPORT_SYMBOL(import_single_range);
lib/iov_iter.c (Linux v6.2)
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <crypto/hash.h>
   3#include <linux/export.h>
   4#include <linux/bvec.h>
   5#include <linux/fault-inject-usercopy.h>
   6#include <linux/uio.h>
   7#include <linux/pagemap.h>
   8#include <linux/highmem.h>
   9#include <linux/slab.h>
  10#include <linux/vmalloc.h>
  11#include <linux/splice.h>
  12#include <linux/compat.h>
  13#include <net/checksum.h>
  14#include <linux/scatterlist.h>
  15#include <linux/instrumented.h>
  16
  17#define PIPE_PARANOIA /* for now */
  18
  19/* covers ubuf and kbuf alike */
  20#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
  21	size_t __maybe_unused off = 0;				\
  22	len = n;						\
  23	base = __p + i->iov_offset;				\
  24	len -= (STEP);						\
  25	i->iov_offset += len;					\
  26	n = len;						\
  27}
  28
  29/* covers iovec and kvec alike */
  30#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
  31	size_t off = 0;						\
  32	size_t skip = i->iov_offset;				\
  33	do {							\
  34		len = min(n, __p->iov_len - skip);		\
  35		if (likely(len)) {				\
  36			base = __p->iov_base + skip;		\
  37			len -= (STEP);				\
  38			off += len;				\
  39			skip += len;				\
  40			n -= len;				\
  41			if (skip < __p->iov_len)		\
  42				break;				\
  43		}						\
  44		__p++;						\
  45		skip = 0;					\
  46	} while (n);						\
  47	i->iov_offset = skip;					\
  48	n = off;						\
  49}
  50
  51#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
  52	size_t off = 0;						\
  53	unsigned skip = i->iov_offset;				\
  54	while (n) {						\
  55		unsigned offset = p->bv_offset + skip;		\
  56		unsigned left;					\
  57		void *kaddr = kmap_local_page(p->bv_page +	\
  58					offset / PAGE_SIZE);	\
  59		base = kaddr + offset % PAGE_SIZE;		\
  60		len = min(min(n, (size_t)(p->bv_len - skip)),	\
  61		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
  62		left = (STEP);					\
  63		kunmap_local(kaddr);				\
  64		len -= left;					\
  65		off += len;					\
  66		skip += len;					\
  67		if (skip == p->bv_len) {			\
  68			skip = 0;				\
  69			p++;					\
  70		}						\
  71		n -= len;					\
  72		if (left)					\
  73			break;					\
  74	}							\
  75	i->iov_offset = skip;					\
  76	n = off;						\
  77}
  78
  79#define iterate_xarray(i, n, base, len, __off, STEP) {		\
  80	__label__ __out;					\
  81	size_t __off = 0;					\
  82	struct folio *folio;					\
  83	loff_t start = i->xarray_start + i->iov_offset;		\
  84	pgoff_t index = start / PAGE_SIZE;			\
  85	XA_STATE(xas, i->xarray, index);			\
  86								\
  87	len = PAGE_SIZE - offset_in_page(start);		\
  88	rcu_read_lock();					\
  89	xas_for_each(&xas, folio, ULONG_MAX) {			\
  90		unsigned left;					\
  91		size_t offset;					\
  92		if (xas_retry(&xas, folio))			\
  93			continue;				\
  94		if (WARN_ON(xa_is_value(folio)))		\
  95			break;					\
  96		if (WARN_ON(folio_test_hugetlb(folio)))		\
  97			break;					\
  98		offset = offset_in_folio(folio, start + __off);	\
  99		while (offset < folio_size(folio)) {		\
 100			base = kmap_local_folio(folio, offset);	\
 101			len = min(n, len);			\
 102			left = (STEP);				\
 103			kunmap_local(base);			\
 104			len -= left;				\
 105			__off += len;				\
 106			n -= len;				\
 107			if (left || n == 0)			\
 108				goto __out;			\
 109			offset += len;				\
 110			len = PAGE_SIZE;			\
 111		}						\
 112	}							\
 113__out:								\
 114	rcu_read_unlock();					\
 115	i->iov_offset += __off;					\
 116	n = __off;						\
 117}
 118
 119#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
 120	if (unlikely(i->count < n))				\
 121		n = i->count;					\
 122	if (likely(n)) {					\
 123		if (likely(iter_is_ubuf(i))) {			\
 124			void __user *base;			\
 125			size_t len;				\
 126			iterate_buf(i, n, base, len, off,	\
 127						i->ubuf, (I)) 	\
 128		} else if (likely(iter_is_iovec(i))) {		\
 129			const struct iovec *iov = i->iov;	\
 130			void __user *base;			\
 131			size_t len;				\
 132			iterate_iovec(i, n, base, len, off,	\
 133						iov, (I))	\
 134			i->nr_segs -= iov - i->iov;		\
 135			i->iov = iov;				\
 136		} else if (iov_iter_is_bvec(i)) {		\
 137			const struct bio_vec *bvec = i->bvec;	\
 138			void *base;				\
 139			size_t len;				\
 140			iterate_bvec(i, n, base, len, off,	\
 141						bvec, (K))	\
 142			i->nr_segs -= bvec - i->bvec;		\
 143			i->bvec = bvec;				\
 144		} else if (iov_iter_is_kvec(i)) {		\
 145			const struct kvec *kvec = i->kvec;	\
 146			void *base;				\
 147			size_t len;				\
 148			iterate_iovec(i, n, base, len, off,	\
 149						kvec, (K))	\
 150			i->nr_segs -= kvec - i->kvec;		\
 151			i->kvec = kvec;				\
 152		} else if (iov_iter_is_xarray(i)) {		\
 153			void *base;				\
 154			size_t len;				\
 155			iterate_xarray(i, n, base, len, off,	\
 156							(K))	\
 157		}						\
 158		i->count -= n;					\
 159	}							\
 160}
 161#define iterate_and_advance(i, n, base, len, off, I, K) \
 162	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
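/*
 * Annotation: the ((void)(K),0) wrapper above forces the kernel-side
 * step to report that zero bytes were left over; plain memcpy() cannot
 * fail, so only direct users of __iterate_and_advance() (such as the
 * machine-check copy below) supply a K step with a meaningful residue.
 */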
 163
 164static int copyout(void __user *to, const void *from, size_t n)
 165{
 166	if (should_fail_usercopy())
 167		return n;
 168	if (access_ok(to, n)) {
 169		instrument_copy_to_user(to, from, n);
 170		n = raw_copy_to_user(to, from, n);
 171	}
 172	return n;
 173}
 174
 175static int copyin(void *to, const void __user *from, size_t n)
 176{
 177	size_t res = n;
 178
 179	if (should_fail_usercopy())
 180		return n;
 181	if (access_ok(from, n)) {
 182		instrument_copy_from_user_before(to, from, n);
 183		res = raw_copy_from_user(to, from, n);
 184		instrument_copy_from_user_after(to, from, n, res);
 185	}
 186	return res;
 187}
 188
 189static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
 190					   unsigned int slot)
 191{
 192	return &pipe->bufs[slot & (pipe->ring_size - 1)];
 193}
 194
 195#ifdef PIPE_PARANOIA
 196static bool sanity(const struct iov_iter *i)
 197{
 198	struct pipe_inode_info *pipe = i->pipe;
 199	unsigned int p_head = pipe->head;
 200	unsigned int p_tail = pipe->tail;
 201	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
 202	unsigned int i_head = i->head;
 203	unsigned int idx;
 204
 205	if (i->last_offset) {
 206		struct pipe_buffer *p;
 207		if (unlikely(p_occupancy == 0))
 208			goto Bad;	// pipe must be non-empty
 209		if (unlikely(i_head != p_head - 1))
 210			goto Bad;	// must be at the last buffer...
 211
 212		p = pipe_buf(pipe, i_head);
 213		if (unlikely(p->offset + p->len != abs(i->last_offset)))
 214			goto Bad;	// ... at the end of segment
 215	} else {
 216		if (i_head != p_head)
 217			goto Bad;	// must be right after the last buffer
 218	}
 219	return true;
 220Bad:
 221	printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
 222	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
 223			p_head, p_tail, pipe->ring_size);
 224	for (idx = 0; idx < pipe->ring_size; idx++)
 225		printk(KERN_ERR "[%p %p %d %d]\n",
 226			pipe->bufs[idx].ops,
 227			pipe->bufs[idx].page,
 228			pipe->bufs[idx].offset,
 229			pipe->bufs[idx].len);
 230	WARN_ON(1);
 231	return false;
 232}
 233#else
 234#define sanity(i) true
 235#endif
 236
 237static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
 238{
 239	struct page *page = alloc_page(GFP_USER);
 240	if (page) {
 241		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
 242		*buf = (struct pipe_buffer) {
 243			.ops = &default_pipe_buf_ops,
 244			.page = page,
 245			.offset = 0,
 246			.len = size
 247		};
 248	}
 249	return page;
 250}
 251
 252static void push_page(struct pipe_inode_info *pipe, struct page *page,
 253			unsigned int offset, unsigned int size)
 254{
 255	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
 256	*buf = (struct pipe_buffer) {
 257		.ops = &page_cache_pipe_buf_ops,
 258		.page = page,
 259		.offset = offset,
 260		.len = size
 261	};
 262	get_page(page);
 263}
 264
 265static inline int last_offset(const struct pipe_buffer *buf)
 266{
 267	if (buf->ops == &default_pipe_buf_ops)
 268		return buf->len;	// buf->offset is 0 for those
 269	else
 270		return -(buf->offset + buf->len);
 271}
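/*
 * Annotation: i->last_offset therefore encodes both the position and the
 * kind of the most recent buffer in a single int: positive for an
 * anonymous page the iterator may keep appending to, negative for a
 * zero-copy page-cache page (see copy_page_to_iter_pipe() below), and 0
 * when there is no partial buffer.
 */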
 272
 273static struct page *append_pipe(struct iov_iter *i, size_t size,
 274				unsigned int *off)
 275{
 276	struct pipe_inode_info *pipe = i->pipe;
 277	int offset = i->last_offset;
 278	struct pipe_buffer *buf;
 279	struct page *page;
 280
 281	if (offset > 0 && offset < PAGE_SIZE) {
 282		// some space in the last buffer; add to it
 283		buf = pipe_buf(pipe, pipe->head - 1);
 284		size = min_t(size_t, size, PAGE_SIZE - offset);
 285		buf->len += size;
 286		i->last_offset += size;
 287		i->count -= size;
 288		*off = offset;
 289		return buf->page;
 290	}
 291	// OK, we need a new buffer
 292	*off = 0;
 293	size = min_t(size_t, size, PAGE_SIZE);
 294	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
 295		return NULL;
 296	page = push_anon(pipe, size);
 297	if (!page)
 298		return NULL;
 299	i->head = pipe->head - 1;
 300	i->last_offset = size;
 301	i->count -= size;
 302	return page;
 303}
 304
 305static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
 306			 struct iov_iter *i)
 307{
 308	struct pipe_inode_info *pipe = i->pipe;
 309	unsigned int head = pipe->head;
 310
 311	if (unlikely(bytes > i->count))
 312		bytes = i->count;
 313
 314	if (unlikely(!bytes))
 315		return 0;
 316
 317	if (!sanity(i))
 318		return 0;
 319
 320	if (offset && i->last_offset == -offset) { // could we merge it?
 321		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
 322		if (buf->page == page) {
 323			buf->len += bytes;
 324			i->last_offset -= bytes;
 325			i->count -= bytes;
 326			return bytes;
 327		}
 328	}
 329	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
 330		return 0;
 331
 332	push_page(pipe, page, offset, bytes);
 333	i->last_offset = -(offset + bytes);
 334	i->head = head;
 335	i->count -= bytes;
 336	return bytes;
 337}
 338
 339/*
 340 * fault_in_iov_iter_readable - fault in iov iterator for reading
 341 * @i: iterator
 342 * @size: maximum length
 343 *
 344 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 345 * @size.  For each iovec, fault in each page that constitutes the iovec.
 346 *
 347 * Returns the number of bytes not faulted in (like copy_to_user() and
 348 * copy_from_user()).
 349 *
 350 * Always returns 0 for non-userspace iterators.
 351 */
 352size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
 353{
 354	if (iter_is_ubuf(i)) {
 355		size_t n = min(size, iov_iter_count(i));
 356		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
 357		return size - n;
 358	} else if (iter_is_iovec(i)) {
 359		size_t count = min(size, iov_iter_count(i));
 360		const struct iovec *p;
 361		size_t skip;
 362
 363		size -= count;
 364		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
 365			size_t len = min(count, p->iov_len - skip);
 366			size_t ret;
 367
 368			if (unlikely(!len))
 369				continue;
 370			ret = fault_in_readable(p->iov_base + skip, len);
 371			count -= len - ret;
 372			if (ret)
 373				break;
 374		}
 375		return count + size;
 376	}
 377	return 0;
 378}
 379EXPORT_SYMBOL(fault_in_iov_iter_readable);
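/*
 * Annotation: a sketch of the retry decision this enables in buffered
 * write paths (compare generic_perform_write()); the helper is
 * illustrative.  Note the return convention: bytes NOT faulted in.
 */
static ssize_t example_buffered_write_step(struct iov_iter *i, size_t bytes)
{
        /* give up only if nothing at all could be made resident */
        if (fault_in_iov_iter_readable(i, bytes) == bytes)
                return -EFAULT;
        /* ... proceed with the non-faulting copy, shrinking on short copies ... */
        return 0;
}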
 380
 381/*
 382 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 383 * @i: iterator
 384 * @size: maximum length
 385 *
 386 * Faults in the iterator using get_user_pages(), i.e., without triggering
 387 * hardware page faults.  This is primarily useful when we already know that
 388 * some or all of the pages in @i aren't in memory.
 389 *
 390 * Returns the number of bytes not faulted in, like copy_to_user() and
 391 * copy_from_user().
 392 *
 393 * Always returns 0 for non-user-space iterators.
 394 */
 395size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
 396{
 397	if (iter_is_ubuf(i)) {
 398		size_t n = min(size, iov_iter_count(i));
 399		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
 400		return size - n;
 401	} else if (iter_is_iovec(i)) {
 402		size_t count = min(size, iov_iter_count(i));
 403		const struct iovec *p;
 404		size_t skip;
 405
 406		size -= count;
 407		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
 408			size_t len = min(count, p->iov_len - skip);
 409			size_t ret;
 410
 411			if (unlikely(!len))
 412				continue;
 413			ret = fault_in_safe_writeable(p->iov_base + skip, len);
 414			count -= len - ret;
 415			if (ret)
 416				break;
 417		}
 418		return count + size;
 419	}
 420	return 0;
 421}
 422EXPORT_SYMBOL(fault_in_iov_iter_writeable);
 423
 424void iov_iter_init(struct iov_iter *i, unsigned int direction,
 425			const struct iovec *iov, unsigned long nr_segs,
 426			size_t count)
 427{
 428	WARN_ON(direction & ~(READ | WRITE));
 429	*i = (struct iov_iter) {
 430		.iter_type = ITER_IOVEC,
 431		.nofault = false,
 432		.user_backed = true,
 433		.data_source = direction,
 434		.iov = iov,
 435		.nr_segs = nr_segs,
 436		.iov_offset = 0,
 437		.count = count
 438	};
 439}
 440EXPORT_SYMBOL(iov_iter_init);
 441
 442// returns the offset in partial buffer (if any)
 443static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
 444{
 445	struct pipe_inode_info *pipe = i->pipe;
 446	int used = pipe->head - pipe->tail;
 447	int off = i->last_offset;
 448
 449	*npages = max((int)pipe->max_usage - used, 0);
 450
 451	if (off > 0 && off < PAGE_SIZE) { // anon and not full
 452		(*npages)++;
 453		return off;
 454	}
 455	return 0;
 456}
 457
 458static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 459				struct iov_iter *i)
 460{
 461	unsigned int off, chunk;
 462
 463	if (unlikely(bytes > i->count))
 464		bytes = i->count;
 465	if (unlikely(!bytes))
 466		return 0;
 467
 468	if (!sanity(i))
 469		return 0;
 470
 471	for (size_t n = bytes; n; n -= chunk) {
 472		struct page *page = append_pipe(i, n, &off);
 473		chunk = min_t(size_t, n, PAGE_SIZE - off);
 474		if (!page)
 475			return bytes - n;
 476		memcpy_to_page(page, off, addr, chunk);
 477		addr += chunk;
 478	}
 479	return bytes;
 480}
 481
 482static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
 483			      __wsum sum, size_t off)
 484{
 485	__wsum next = csum_partial_copy_nocheck(from, to, len);
 486	return csum_block_add(sum, next, off);
 487}
 488
 489static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 490					 struct iov_iter *i, __wsum *sump)
 491{
 492	__wsum sum = *sump;
 493	size_t off = 0;
 494	unsigned int chunk, r;
 495
 496	if (unlikely(bytes > i->count))
 497		bytes = i->count;
 498	if (unlikely(!bytes))
 499		return 0;
 500
 501	if (!sanity(i))
 502		return 0;
 503
 504	while (bytes) {
 505		struct page *page = append_pipe(i, bytes, &r);
 506		char *p;
 507
 508		if (!page)
 509			break;
 510		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
 511		p = kmap_local_page(page);
 512		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
 513		kunmap_local(p);
 514		off += chunk;
 515		bytes -= chunk;
 516	}
 517	*sump = sum;
 518	return off;
 519}
 520
 521size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 522{
 523	if (WARN_ON_ONCE(i->data_source))
 524		return 0;
 525	if (unlikely(iov_iter_is_pipe(i)))
 526		return copy_pipe_to_iter(addr, bytes, i);
 527	if (user_backed_iter(i))
 528		might_fault();
 529	iterate_and_advance(i, bytes, base, len, off,
 530		copyout(base, addr + off, len),
 531		memcpy(base, addr + off, len)
 532	)
 533
 534	return bytes;
 535}
 536EXPORT_SYMBOL(_copy_to_iter);
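/*
 * Annotation: callers normally reach this through the copy_to_iter()
 * inline wrapper in <linux/uio.h>, which performs a check_copy_size()
 * sanity check before calling _copy_to_iter(); the underscore-prefixed
 * symbol is exported so that wrapper can be inlined everywhere.
 */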
 537
 538#ifdef CONFIG_ARCH_HAS_COPY_MC
 539static int copyout_mc(void __user *to, const void *from, size_t n)
 540{
 541	if (access_ok(to, n)) {
 542		instrument_copy_to_user(to, from, n);
 543		n = copy_mc_to_user((__force void *) to, from, n);
 544	}
 545	return n;
 546}
 547
 548static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
 549				struct iov_iter *i)
 550{
 551	size_t xfer = 0;
 552	unsigned int off, chunk;
 553
 554	if (unlikely(bytes > i->count))
 555		bytes = i->count;
 556	if (unlikely(!bytes))
 557		return 0;
 558
 559	if (!sanity(i))
 560		return 0;
 561
 562	while (bytes) {
 563		struct page *page = append_pipe(i, bytes, &off);
 564		unsigned long rem;
 565		char *p;
 566
 567		if (!page)
 568			break;
 569		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
 570		p = kmap_local_page(page);
 571		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
 572		chunk -= rem;
 573		kunmap_local(p);
 574		xfer += chunk;
 575		bytes -= chunk;
 576		if (rem) {
 577			iov_iter_revert(i, rem);
 578			break;
 579		}
 580	}
 581	return xfer;
 582}
 583
 584/**
 585 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 586 * @addr: source kernel address
 587 * @bytes: total transfer length
 588 * @i: destination iterator
 589 *
 590 * The pmem driver deploys this for the dax operation
 591 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 592 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 593 * successfully copied.
 594 *
 595 * The main differences between this and typical _copy_to_iter() are:
 596 *
 597 * * Typical tail/residue handling after a fault retries the copy
 598 *   byte-by-byte until the fault happens again. Re-triggering machine
 599 *   checks is potentially fatal so the implementation uses source
 600 *   alignment and poison alignment assumptions to avoid re-triggering
 601 *   hardware exceptions.
 602 *
 603 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 604 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 605 *   a short copy.
 606 *
 607 * Return: number of bytes copied (may be %0)
 608 */
 609size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 610{
 611	if (WARN_ON_ONCE(i->data_source))
 612		return 0;
 613	if (unlikely(iov_iter_is_pipe(i)))
 614		return copy_mc_pipe_to_iter(addr, bytes, i);
 615	if (user_backed_iter(i))
 616		might_fault();
 617	__iterate_and_advance(i, bytes, base, len, off,
 618		copyout_mc(base, addr + off, len),
 619		copy_mc_to_kernel(base, addr + off, len)
 620	)
 621
 622	return bytes;
 623}
 624EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 625#endif /* CONFIG_ARCH_HAS_COPY_MC */
 626
 627size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 628{
 629	if (WARN_ON_ONCE(!i->data_source))
 630		return 0;
 631
 632	if (user_backed_iter(i))
 633		might_fault();
 634	iterate_and_advance(i, bytes, base, len, off,
 635		copyin(addr + off, base, len),
 636		memcpy(addr + off, base, len)
 637	)
 638
 639	return bytes;
 640}
 641EXPORT_SYMBOL(_copy_from_iter);
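
/*
 * Editor's illustrative sketch, not part of the original file: the
 * mirror-image write side, pulling one buffer's worth out of a source
 * iterator with copy_from_iter().  A short return means the source
 * faulted (possible for user-backed iterators).  The demo_* names are
 * hypothetical.
 */
static ssize_t demo_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	char buf[128];
	size_t want = min_t(size_t, sizeof(buf), iov_iter_count(from));
	size_t copied;

	copied = copy_from_iter(buf, want, from);
	if (copied != want)
		return -EFAULT;	/* faulted mid-copy; @from already advanced */
	/* ... hand buf[0..copied) on to the device/file ... */
	return copied;
}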
 642
 643size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 644{
 645	if (WARN_ON_ONCE(!i->data_source))
 646		return 0;
 647
 648	iterate_and_advance(i, bytes, base, len, off,
 649		__copy_from_user_inatomic_nocache(addr + off, base, len),
 650		memcpy(addr + off, base, len)
 651	)
 652
 653	return bytes;
 654}
 655EXPORT_SYMBOL(_copy_from_iter_nocache);
 656
 657#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 658/**
 659 * _copy_from_iter_flushcache - write destination through cpu cache
 660 * @addr: destination kernel address
 661 * @bytes: total transfer length
 662 * @i: source iterator
 663 *
 664 * The pmem driver arranges for filesystem-dax to use this facility via
 665 * dax_copy_from_iter() for ensuring that writes to persistent memory
 666 * are flushed through the CPU cache. It differs from
 667 * _copy_from_iter_nocache() in that it guarantees all data is flushed
 668 * for all iterator types. _copy_from_iter_nocache() only attempts to
 669 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 670 * instructions that strand dirty data in the cache.
 671 *
 672 * Return: number of bytes copied (may be %0)
 673 */
 674size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 675{
 676	if (WARN_ON_ONCE(!i->data_source))
 677		return 0;
 678
 679	iterate_and_advance(i, bytes, base, len, off,
 680		__copy_from_user_flushcache(addr + off, base, len),
 681		memcpy_flushcache(addr + off, base, len)
 682	)
 683
 684	return bytes;
 685}
 686EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 687#endif
 688
 689static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 690{
 691	struct page *head;
 692	size_t v = n + offset;
 693
 694	/*
 695	 * The general case needs to access the page order to
 696	 * compute the page size.
 697	 * However, we mostly deal with order-0 pages and thus can
 698	 * avoid a possible cache line miss for requests that fit all
 699	 * page orders.
 700	 */
 701	if (n <= v && v <= PAGE_SIZE)
 702		return true;
 703
 704	head = compound_head(page);
 705	v += (page - head) << PAGE_SHIFT;
 706
 707	if (WARN_ON(n > v || v > page_size(head)))
 708		return false;
 709	return true;
 710}
 711
 712size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 713			 struct iov_iter *i)
 714{
 715	size_t res = 0;
 716	if (!page_copy_sane(page, offset, bytes))
 717		return 0;
 718	if (WARN_ON_ONCE(i->data_source))
 719		return 0;
 720	if (unlikely(iov_iter_is_pipe(i)))
 721		return copy_page_to_iter_pipe(page, offset, bytes, i);
 722	page += offset / PAGE_SIZE; // first subpage
 723	offset %= PAGE_SIZE;
 724	while (1) {
 725		void *kaddr = kmap_local_page(page);
 726		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
 727		n = _copy_to_iter(kaddr + offset, n, i);
 728		kunmap_local(kaddr);
 729		res += n;
 730		bytes -= n;
 731		if (!bytes || !n)
 732			break;
 733		offset += n;
 734		if (offset == PAGE_SIZE) {
 735			page++;
 736			offset = 0;
 737		}
 738	}
 739	return res;
 740}
 741EXPORT_SYMBOL(copy_page_to_iter);
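
/*
 * Editor's illustrative sketch, not part of the original file: a read
 * path serving data straight from an up-to-date, referenced page-cache
 * page.  @offset may exceed PAGE_SIZE for compound pages; the subpage
 * loop above handles that.  demo_* is hypothetical and offset_in_page()
 * assumes <linux/mm.h>.
 */
static size_t demo_send_page_chunk(struct page *page, loff_t pos,
				   size_t len, struct iov_iter *to)
{
	/* maps, copies and advances @to; returns bytes actually copied */
	return copy_page_to_iter(page, offset_in_page(pos), len, to);
}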
 742
 743size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 744			 struct iov_iter *i)
 745{
 746	size_t res = 0;
 747	if (!page_copy_sane(page, offset, bytes))
 748		return 0;
 749	page += offset / PAGE_SIZE; // first subpage
 750	offset %= PAGE_SIZE;
 751	while (1) {
 752		void *kaddr = kmap_local_page(page);
 753		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
 754		n = _copy_from_iter(kaddr + offset, n, i);
 755		kunmap_local(kaddr);
 756		res += n;
 757		bytes -= n;
 758		if (!bytes || !n)
 759			break;
 760		offset += n;
 761		if (offset == PAGE_SIZE) {
 762			page++;
 763			offset = 0;
 764		}
 765	}
 766	return res;
 767}
 768EXPORT_SYMBOL(copy_page_from_iter);
 769
 770static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 771{
 772	unsigned int chunk, off;
 773
 774	if (unlikely(bytes > i->count))
 775		bytes = i->count;
 776	if (unlikely(!bytes))
 777		return 0;
 778
 779	if (!sanity(i))
 780		return 0;
 781
 782	for (size_t n = bytes; n; n -= chunk) {
 783		struct page *page = append_pipe(i, n, &off);
 784		char *p;
 785
 786		if (!page)
 787			return bytes - n;
 788		chunk = min_t(size_t, n, PAGE_SIZE - off);
 789		p = kmap_local_page(page);
 790		memset(p + off, 0, chunk);
 791		kunmap_local(p);
 792	}
 793	return bytes;
 794}
 795
 796size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 797{
 798	if (unlikely(iov_iter_is_pipe(i)))
 799		return pipe_zero(bytes, i);
 800	iterate_and_advance(i, bytes, base, len, count,
 801		clear_user(base, len),
 802		memset(base, 0, len)
 803	)
 804
 805	return bytes;
 806}
 807EXPORT_SYMBOL(iov_iter_zero);
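
/*
 * Editor's illustrative sketch, not part of the original file:
 * zero-filling the destination, e.g. when a read lands in a hole of a
 * sparse file and there is no page to copy from.  demo_* is hypothetical.
 */
static ssize_t demo_read_hole(struct iov_iter *to, size_t hole_len)
{
	size_t cleared = iov_iter_zero(hole_len, to);

	/* a short count means clear_user() faulted on a user iterator */
	return cleared ? cleared : -EFAULT;
}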
 808
 809size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
 810				  struct iov_iter *i)
 811{
 812	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
 813	if (!page_copy_sane(page, offset, bytes)) {
 814		kunmap_atomic(kaddr);
 815		return 0;
 816	}
 817	if (WARN_ON_ONCE(!i->data_source)) {
 818		kunmap_atomic(kaddr);
 819		return 0;
 820	}
 821	iterate_and_advance(i, bytes, base, len, off,
 822		copyin(p + off, base, len),
 823		memcpy(p + off, base, len)
 824	)
 825	kunmap_atomic(kaddr);
 826	return bytes;
 827}
 828EXPORT_SYMBOL(copy_page_from_iter_atomic);
 829
 830static void pipe_advance(struct iov_iter *i, size_t size)
 831{
 832	struct pipe_inode_info *pipe = i->pipe;
 833	int off = i->last_offset;
 834
 835	if (!off && !size) {
 836		pipe_discard_from(pipe, i->start_head); // discard everything
 837		return;
 838	}
 839	i->count -= size;
 840	while (1) {
 841		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
 842		if (off) /* make it relative to the beginning of buffer */
 843			size += abs(off) - buf->offset;
 844		if (size <= buf->len) {
 845			buf->len = size;
 846			i->last_offset = last_offset(buf);
 847			break;
 848		}
 849		size -= buf->len;
 850		i->head++;
 851		off = 0;
 852	}
 853	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
 854}
 855
 856static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
 857{
 858	const struct bio_vec *bvec, *end;
 859
 860	if (!i->count)
 861		return;
 862	i->count -= size;
 863
 864	size += i->iov_offset;
 865
 866	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
 867		if (likely(size < bvec->bv_len))
 868			break;
 869		size -= bvec->bv_len;
 870	}
 871	i->iov_offset = size;
 872	i->nr_segs -= bvec - i->bvec;
 873	i->bvec = bvec;
 874}
 875
 876static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
 877{
 878	const struct iovec *iov, *end;
 879
 880	if (!i->count)
 881		return;
 882	i->count -= size;
 883
 884	size += i->iov_offset; // from beginning of current segment
 885	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
 886		if (likely(size < iov->iov_len))
 887			break;
 888		size -= iov->iov_len;
 889	}
 890	i->iov_offset = size;
 891	i->nr_segs -= iov - i->iov;
 892	i->iov = iov;
 893}
 894
 895void iov_iter_advance(struct iov_iter *i, size_t size)
 896{
 897	if (unlikely(i->count < size))
 898		size = i->count;
 899	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
 900		i->iov_offset += size;
 901		i->count -= size;
 902	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
 903		/* iovec and kvec have identical layouts */
 904		iov_iter_iovec_advance(i, size);
 905	} else if (iov_iter_is_bvec(i)) {
 906		iov_iter_bvec_advance(i, size);
 907	} else if (iov_iter_is_pipe(i)) {
 908		pipe_advance(i, size);
 909	} else if (iov_iter_is_discard(i)) {
 910		i->count -= size;
 911	}
 912}
 913EXPORT_SYMBOL(iov_iter_advance);
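
/*
 * Editor's illustrative sketch, not part of the original file: skipping
 * data without copying it, here a fixed-size header the caller does not
 * care about.  @size is clamped to i->count, so over-advancing just
 * consumes whatever is left.  The DEMO_*/demo_* names are hypothetical.
 */
#define DEMO_HDR_LEN	16
static void demo_skip_header(struct iov_iter *from)
{
	iov_iter_advance(from, DEMO_HDR_LEN);
	/* iov_iter_count(from) now reflects the payload only */
}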
 914
 915void iov_iter_revert(struct iov_iter *i, size_t unroll)
 916{
 917	if (!unroll)
 918		return;
 919	if (WARN_ON(unroll > MAX_RW_COUNT))
 920		return;
 921	i->count += unroll;
 922	if (unlikely(iov_iter_is_pipe(i))) {
 923		struct pipe_inode_info *pipe = i->pipe;
 924		unsigned int head = pipe->head;
 925
 926		while (head > i->start_head) {
 927			struct pipe_buffer *b = pipe_buf(pipe, --head);
 928			if (unroll < b->len) {
 929				b->len -= unroll;
 930				i->last_offset = last_offset(b);
 931				i->head = head;
 932				return;
 933			}
 934			unroll -= b->len;
 935			pipe_buf_release(pipe, b);
 936			pipe->head--;
 937		}
 938		i->last_offset = 0;
 939		i->head = head;
 940		return;
 941	}
 942	if (unlikely(iov_iter_is_discard(i)))
 943		return;
 944	if (unroll <= i->iov_offset) {
 945		i->iov_offset -= unroll;
 946		return;
 947	}
 948	unroll -= i->iov_offset;
 949	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
 950		BUG(); /* We should never go beyond the start of the specified
 951			* range since we might then be straying into pages that
 952			* aren't pinned.
 953			*/
 954	} else if (iov_iter_is_bvec(i)) {
 955		const struct bio_vec *bvec = i->bvec;
 956		while (1) {
 957			size_t n = (--bvec)->bv_len;
 958			i->nr_segs++;
 959			if (unroll <= n) {
 960				i->bvec = bvec;
 961				i->iov_offset = n - unroll;
 962				return;
 963			}
 964			unroll -= n;
 965		}
 966	} else { /* same logic for iovec and kvec */
 967		const struct iovec *iov = i->iov;
 968		while (1) {
 969			size_t n = (--iov)->iov_len;
 970			i->nr_segs++;
 971			if (unroll <= n) {
 972				i->iov = iov;
 973				i->iov_offset = n - unroll;
 974				return;
 975			}
 976			unroll -= n;
 977		}
 978	}
 979}
 980EXPORT_SYMBOL(iov_iter_revert);
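
/*
 * Editor's illustrative sketch, not part of the original file: giving
 * consumed bytes back so the caller can retry.  Reverting past the point
 * where the iterator started is a bug (see the BUG() above for ITER_UBUF
 * and ITER_XARRAY).  demo_verify() is hypothetical.
 */
static bool demo_verify(const void *buf, size_t len);

static ssize_t demo_consume(struct iov_iter *from, void *buf, size_t len)
{
	size_t copied = copy_from_iter(buf, len, from);

	if (copied && !demo_verify(buf, copied)) {
		iov_iter_revert(from, copied);	/* un-consume for a retry */
		return -EAGAIN;
	}
	return copied;
}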
 981
 982/*
 983 * Return the count of just the current iov_iter segment.
 984 */
 985size_t iov_iter_single_seg_count(const struct iov_iter *i)
 986{
 987	if (i->nr_segs > 1) {
 988		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
 989			return min(i->count, i->iov->iov_len - i->iov_offset);
 990		if (iov_iter_is_bvec(i))
 991			return min(i->count, i->bvec->bv_len - i->iov_offset);
 992	}
 993	return i->count;
 994}
 995EXPORT_SYMBOL(iov_iter_single_seg_count);
 996
 997void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
 998			const struct kvec *kvec, unsigned long nr_segs,
 999			size_t count)
1000{
1001	WARN_ON(direction & ~(READ | WRITE));
1002	*i = (struct iov_iter){
1003		.iter_type = ITER_KVEC,
1004		.data_source = direction,
1005		.kvec = kvec,
1006		.nr_segs = nr_segs,
1007		.iov_offset = 0,
1008		.count = count
1009	};
1010}
1011EXPORT_SYMBOL(iov_iter_kvec);
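
/*
 * Editor's illustrative sketch, not part of the original file: wrapping
 * a plain kernel buffer in an iterator, roughly what __kernel_read()
 * does, so code written against iov_iter can fill it.  READ means the
 * iterator is the *destination* of the transfer.  demo_* is hypothetical;
 * struct kiocb, init_sync_kiocb() and call_read_iter() assume
 * <linux/fs.h>.
 */
static ssize_t demo_read_into_kbuf(struct file *file, void *buf,
				   size_t len, loff_t *pos)
{
	struct kvec kv = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = *pos;
	iov_iter_kvec(&iter, READ, &kv, 1, len);
	ret = call_read_iter(file, &kiocb, &iter);
	if (ret > 0)
		*pos = kiocb.ki_pos;
	return ret;
}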
1012
1013void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1014			const struct bio_vec *bvec, unsigned long nr_segs,
1015			size_t count)
1016{
1017	WARN_ON(direction & ~(READ | WRITE));
1018	*i = (struct iov_iter){
1019		.iter_type = ITER_BVEC,
1020		.data_source = direction,
1021		.bvec = bvec,
1022		.nr_segs = nr_segs,
1023		.iov_offset = 0,
1024		.count = count
1025	};
1026}
1027EXPORT_SYMBOL(iov_iter_bvec);
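
/*
 * Editor's illustrative sketch, not part of the original file: describing
 * a single page as the data *source* of a transfer (direction WRITE) with
 * a bvec-backed iterator.  The bio_vec array must stay alive for as long
 * as the iterator is in use, so the caller owns it here.  demo_* is
 * hypothetical.
 */
static void demo_iter_over_page(struct iov_iter *iter, struct bio_vec *bv,
				struct page *page, size_t len)
{
	bv->bv_page = page;
	bv->bv_offset = 0;
	bv->bv_len = len;
	iov_iter_bvec(iter, WRITE, bv, 1, len);
}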
1028
1029void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1030			struct pipe_inode_info *pipe,
1031			size_t count)
1032{
1033	BUG_ON(direction != READ);
1034	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
1035	*i = (struct iov_iter){
1036		.iter_type = ITER_PIPE,
1037		.data_source = false,
1038		.pipe = pipe,
1039		.head = pipe->head,
1040		.start_head = pipe->head,
1041		.last_offset = 0,
1042		.count = count
1043	};
1044}
1045EXPORT_SYMBOL(iov_iter_pipe);
1046
1047/**
1048 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
1049 * @i: The iterator to initialise.
1050 * @direction: The direction of the transfer.
1051 * @xarray: The xarray to access.
1052 * @start: The start file position.
1053 * @count: The size of the I/O buffer in bytes.
1054 *
1055 * Set up an I/O iterator to either draw data out of the pages attached to an
1056 * inode or to inject data into those pages.  The caller *must* prevent
1057 * the pages from being released, either by taking a reference on them
1058 * or by locking them.
1059 */
1060void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
1061		     struct xarray *xarray, loff_t start, size_t count)
1062{
1063	BUG_ON(direction & ~1);
1064	*i = (struct iov_iter) {
1065		.iter_type = ITER_XARRAY,
1066		.data_source = direction,
1067		.xarray = xarray,
1068		.xarray_start = start,
1069		.count = count,
1070		.iov_offset = 0
1071	};
1072}
1073EXPORT_SYMBOL(iov_iter_xarray);
1074
1075/**
1076 * iov_iter_discard - Initialise an I/O iterator that discards data
1077 * @i: The iterator to initialise.
1078 * @direction: The direction of the transfer.
1079 * @count: The size of the I/O buffer in bytes.
1080 *
1081 * Set up an I/O iterator that just discards everything that's written to it.
1082 * It's only available as a READ iterator.
1083 */
1084void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1085{
1086	BUG_ON(direction != READ);
1087	*i = (struct iov_iter){
1088		.iter_type = ITER_DISCARD,
1089		.data_source = false,
1090		.count = count,
1091		.iov_offset = 0
1092	};
1093}
1094EXPORT_SYMBOL(iov_iter_discard);
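
/*
 * Editor's illustrative sketch, not part of the original file: draining
 * bytes a caller wants thrown away, e.g. skipping part of a stream.
 * Everything "copied" into a discard iterator simply disappears; only the
 * count moves.  demo_source_read() is hypothetical.
 */
static ssize_t demo_source_read(struct iov_iter *to);

static ssize_t demo_drain(size_t count)
{
	struct iov_iter junk;

	iov_iter_discard(&junk, READ, count);
	return demo_source_read(&junk);	/* data is consumed and dropped */
}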
1095
1096static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
1097				   unsigned len_mask)
1098{
1099	size_t size = i->count;
1100	size_t skip = i->iov_offset;
1101	unsigned k;
1102
1103	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1104		size_t len = i->iov[k].iov_len - skip;
1105
1106		if (len > size)
1107			len = size;
1108		if (len & len_mask)
1109			return false;
1110		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
1111			return false;
1112
1113		size -= len;
1114		if (!size)
1115			break;
1116	}
1117	return true;
1118}
1119
1120static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
1121				  unsigned len_mask)
1122{
1123	size_t size = i->count;
1124	unsigned skip = i->iov_offset;
1125	unsigned k;
1126
1127	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1128		size_t len = i->bvec[k].bv_len - skip;
1129
1130		if (len > size)
1131			len = size;
1132		if (len & len_mask)
1133			return false;
1134		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
1135			return false;
1136
1137		size -= len;
1138		if (!size)
1139			break;
1140	}
1141	return true;
1142}
1143
1144/**
1145 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
1146 * 	are aligned to the parameters.
1147 *
1148 * @i: &struct iov_iter to check
1149 * @addr_mask: bit mask to check against the iov element's addresses
1150 * @len_mask: bit mask to check against the iov element's lengths
1151 *
1152 * Return: false if any address or length intersects with the provided masks
1153 */
1154bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
1155			 unsigned len_mask)
1156{
1157	if (likely(iter_is_ubuf(i))) {
1158		if (i->count & len_mask)
1159			return false;
1160		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
1161			return false;
1162		return true;
1163	}
1164
1165	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1166		return iov_iter_aligned_iovec(i, addr_mask, len_mask);
1167
1168	if (iov_iter_is_bvec(i))
1169		return iov_iter_aligned_bvec(i, addr_mask, len_mask);
1170
1171	if (iov_iter_is_pipe(i)) {
1172		size_t size = i->count;
1173
1174		if (size & len_mask)
1175			return false;
1176		if (size && i->last_offset > 0) {
1177			if (i->last_offset & addr_mask)
1178				return false;
1179		}
1180
1181		return true;
1182	}
1183
1184	if (iov_iter_is_xarray(i)) {
1185		if (i->count & len_mask)
1186			return false;
1187		if ((i->xarray_start + i->iov_offset) & addr_mask)
1188			return false;
1189	}
1190
1191	return true;
1192}
1193EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
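
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * direct-I/O style gate - every segment must start on a 512-byte boundary
 * and be a multiple of 512 bytes long, so a mask of (512 - 1) is passed
 * for both addresses and lengths.  demo_* is hypothetical.
 */
static int demo_check_dio_alignment(const struct iov_iter *iter)
{
	if (!iov_iter_is_aligned(iter, 511, 511))
		return -EINVAL;
	return 0;
}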
1194
1195static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
1196{
1197	unsigned long res = 0;
1198	size_t size = i->count;
1199	size_t skip = i->iov_offset;
1200	unsigned k;
1201
1202	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1203		size_t len = i->iov[k].iov_len - skip;
1204		if (len) {
1205			res |= (unsigned long)i->iov[k].iov_base + skip;
1206			if (len > size)
1207				len = size;
1208			res |= len;
1209			size -= len;
1210			if (!size)
1211				break;
1212		}
1213	}
1214	return res;
1215}
1216
1217static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
1218{
1219	unsigned res = 0;
1220	size_t size = i->count;
1221	unsigned skip = i->iov_offset;
1222	unsigned k;
1223
1224	for (k = 0; k < i->nr_segs; k++, skip = 0) {
1225		size_t len = i->bvec[k].bv_len - skip;
1226		res |= (unsigned long)i->bvec[k].bv_offset + skip;
1227		if (len > size)
1228			len = size;
1229		res |= len;
1230		size -= len;
1231		if (!size)
1232			break;
1233	}
1234	return res;
1235}
1236
1237unsigned long iov_iter_alignment(const struct iov_iter *i)
1238{
1239	if (likely(iter_is_ubuf(i))) {
1240		size_t size = i->count;
1241		if (size)
1242			return ((unsigned long)i->ubuf + i->iov_offset) | size;
1243		return 0;
1244	}
1245
1246	/* iovec and kvec have identical layouts */
1247	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1248		return iov_iter_alignment_iovec(i);
1249
1250	if (iov_iter_is_bvec(i))
1251		return iov_iter_alignment_bvec(i);
1252
1253	if (iov_iter_is_pipe(i)) {
1254		size_t size = i->count;
1255
1256		if (size && i->last_offset > 0)
1257			return size | i->last_offset;
1258		return size;
1259	}
1260
1261	if (iov_iter_is_xarray(i))
1262		return (i->xarray_start + i->iov_offset) | i->count;
1263
1264	return 0;
1265}
1266EXPORT_SYMBOL(iov_iter_alignment);
1267
1268unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1269{
1270	unsigned long res = 0;
1271	unsigned long v = 0;
1272	size_t size = i->count;
1273	unsigned k;
1274
1275	if (iter_is_ubuf(i))
1276		return 0;
1277
1278	if (WARN_ON(!iter_is_iovec(i)))
1279		return ~0U;
1280
1281	for (k = 0; k < i->nr_segs; k++) {
1282		if (i->iov[k].iov_len) {
1283			unsigned long base = (unsigned long)i->iov[k].iov_base;
1284			if (v) // if not the first one
1285				res |= base | v; // this start | previous end
1286			v = base + i->iov[k].iov_len;
1287			if (size <= i->iov[k].iov_len)
1288				break;
1289			size -= i->iov[k].iov_len;
1290		}
1291	}
1292	return res;
1293}
1294EXPORT_SYMBOL(iov_iter_gap_alignment);
1295
1296static int want_pages_array(struct page ***res, size_t size,
1297			    size_t start, unsigned int maxpages)
1298{
1299	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);
1300
1301	if (count > maxpages)
1302		count = maxpages;
1303	WARN_ON(!count);	// caller should've prevented that
1304	if (!*res) {
1305		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
1306		if (!*res)
1307			return 0;
1308	}
1309	return count;
1310}
1311
1312static ssize_t pipe_get_pages(struct iov_iter *i,
1313		   struct page ***pages, size_t maxsize, unsigned maxpages,
1314		   size_t *start)
1315{
1316	unsigned int npages, count, off, chunk;
1317	struct page **p;
1318	size_t left;
1319
1320	if (!sanity(i))
1321		return -EFAULT;
1322
1323	*start = off = pipe_npages(i, &npages);
1324	if (!npages)
1325		return -EFAULT;
1326	count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
1327	if (!count)
1328		return -ENOMEM;
1329	p = *pages;
1330	for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
1331		struct page *page = append_pipe(i, left, &off);
1332		if (!page)
1333			break;
1334		chunk = min_t(size_t, left, PAGE_SIZE - off);
1335		get_page(*p++ = page);
1336	}
1337	if (!npages)
1338		return -EFAULT;
1339	return maxsize - left;
1340}
1341
1342static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
1343					  pgoff_t index, unsigned int nr_pages)
1344{
1345	XA_STATE(xas, xa, index);
1346	struct page *page;
1347	unsigned int ret = 0;
1348
1349	rcu_read_lock();
1350	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
1351		if (xas_retry(&xas, page))
1352			continue;
1353
1354		/* Has the page moved or been split? */
1355		if (unlikely(page != xas_reload(&xas))) {
1356			xas_reset(&xas);
1357			continue;
1358		}
1359
1360		pages[ret] = find_subpage(page, xas.xa_index);
1361		get_page(pages[ret]);
1362		if (++ret == nr_pages)
1363			break;
1364	}
1365	rcu_read_unlock();
1366	return ret;
1367}
1368
1369static ssize_t iter_xarray_get_pages(struct iov_iter *i,
1370				     struct page ***pages, size_t maxsize,
1371				     unsigned maxpages, size_t *_start_offset)
1372{
1373	unsigned nr, offset, count;
1374	pgoff_t index;
1375	loff_t pos;
1376
1377	pos = i->xarray_start + i->iov_offset;
1378	index = pos >> PAGE_SHIFT;
1379	offset = pos & ~PAGE_MASK;
1380	*_start_offset = offset;
1381
1382	count = want_pages_array(pages, maxsize, offset, maxpages);
1383	if (!count)
1384		return -ENOMEM;
1385	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
1386	if (nr == 0)
1387		return 0;
1388
1389	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
1390	i->iov_offset += maxsize;
1391	i->count -= maxsize;
1392	return maxsize;
1393}
1394
1395/* must only be called on a non-empty ITER_UBUF or ITER_IOVEC iterator */
1396static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
1397{
1398	size_t skip;
1399	long k;
1400
1401	if (iter_is_ubuf(i))
1402		return (unsigned long)i->ubuf + i->iov_offset;
1403
1404	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
1405		size_t len = i->iov[k].iov_len - skip;
1406
1407		if (unlikely(!len))
1408			continue;
1409		if (*size > len)
1410			*size = len;
1411		return (unsigned long)i->iov[k].iov_base + skip;
1412	}
1413	BUG(); // if it had been empty, we wouldn't get called
1414}
1415
1416/* must only be called on a non-empty ITER_BVEC iterator */
1417static struct page *first_bvec_segment(const struct iov_iter *i,
1418				       size_t *size, size_t *start)
1419{
1420	struct page *page;
1421	size_t skip = i->iov_offset, len;
1422
1423	len = i->bvec->bv_len - skip;
1424	if (*size > len)
1425		*size = len;
1426	skip += i->bvec->bv_offset;
1427	page = i->bvec->bv_page + skip / PAGE_SIZE;
1428	*start = skip % PAGE_SIZE;
1429	return page;
1430}
1431
1432static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
1433		   struct page ***pages, size_t maxsize,
1434		   unsigned int maxpages, size_t *start,
1435		   unsigned int gup_flags)
1436{
1437	unsigned int n;
1438
1439	if (maxsize > i->count)
1440		maxsize = i->count;
1441	if (!maxsize)
1442		return 0;
1443	if (maxsize > MAX_RW_COUNT)
1444		maxsize = MAX_RW_COUNT;
1445
1446	if (likely(user_backed_iter(i))) {
1447		unsigned long addr;
1448		int res;
1449
1450		if (iov_iter_rw(i) != WRITE)
1451			gup_flags |= FOLL_WRITE;
1452		if (i->nofault)
1453			gup_flags |= FOLL_NOFAULT;
1454
1455		addr = first_iovec_segment(i, &maxsize);
1456		*start = addr % PAGE_SIZE;
1457		addr &= PAGE_MASK;
1458		n = want_pages_array(pages, maxsize, *start, maxpages);
1459		if (!n)
1460			return -ENOMEM;
1461		res = get_user_pages_fast(addr, n, gup_flags, *pages);
1462		if (unlikely(res <= 0))
1463			return res;
1464		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
1465		iov_iter_advance(i, maxsize);
1466		return maxsize;
1467	}
1468	if (iov_iter_is_bvec(i)) {
1469		struct page **p;
1470		struct page *page;
1471
1472		page = first_bvec_segment(i, &maxsize, start);
1473		n = want_pages_array(pages, maxsize, *start, maxpages);
1474		if (!n)
1475			return -ENOMEM;
1476		p = *pages;
1477		for (int k = 0; k < n; k++)
1478			get_page(p[k] = page + k);
1479		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
1480		i->count -= maxsize;
1481		i->iov_offset += maxsize;
1482		if (i->iov_offset == i->bvec->bv_len) {
1483			i->iov_offset = 0;
1484			i->bvec++;
1485			i->nr_segs--;
1486		}
1487		return maxsize;
1488	}
1489	if (iov_iter_is_pipe(i))
1490		return pipe_get_pages(i, pages, maxsize, maxpages, start);
1491	if (iov_iter_is_xarray(i))
1492		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
1493	return -EFAULT;
1494}
1495
1496ssize_t iov_iter_get_pages(struct iov_iter *i,
1497		   struct page **pages, size_t maxsize, unsigned maxpages,
1498		   size_t *start, unsigned gup_flags)
1499{
1500	if (!maxpages)
1501		return 0;
1502	BUG_ON(!pages);
1503
1504	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
1505					  start, gup_flags);
1506}
1507EXPORT_SYMBOL_GPL(iov_iter_get_pages);
1508
1509ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
1510		size_t maxsize, unsigned maxpages, size_t *start)
1511{
1512	return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
1513}
1514EXPORT_SYMBOL(iov_iter_get_pages2);
1515
1516ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1517		   struct page ***pages, size_t maxsize,
1518		   size_t *start, unsigned gup_flags)
1519{
1520	ssize_t len;
1521
1522	*pages = NULL;
1523
1524	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
1525					 gup_flags);
1526	if (len <= 0) {
1527		kvfree(*pages);
1528		*pages = NULL;
1529	}
1530	return len;
1531}
1532EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
1533
1534ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
1535		struct page ***pages, size_t maxsize, size_t *start)
1536{
1537	return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
1538}
1539EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
1540
1541size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1542			       struct iov_iter *i)
1543{
1544	__wsum sum, next;
1545	sum = *csum;
1546	if (WARN_ON_ONCE(!i->data_source))
1547		return 0;
1548
1549	iterate_and_advance(i, bytes, base, len, off, ({
1550		next = csum_and_copy_from_user(base, addr + off, len);
1551		sum = csum_block_add(sum, next, off);
1552		next ? 0 : len;
1553	}), ({
1554		sum = csum_and_memcpy(addr + off, base, len, sum, off);
1555	})
1556	)
1557	*csum = sum;
1558	return bytes;
1559}
1560EXPORT_SYMBOL(csum_and_copy_from_iter);
1561
1562size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1563			     struct iov_iter *i)
1564{
1565	struct csum_state *csstate = _csstate;
1566	__wsum sum, next;
1567
1568	if (WARN_ON_ONCE(i->data_source))
1569		return 0;
1570	if (unlikely(iov_iter_is_discard(i))) {
1571		// can't use csum_and_memcpy() for that one - data is not copied
1572		csstate->csum = csum_block_add(csstate->csum,
1573					       csum_partial(addr, bytes, 0),
1574					       csstate->off);
1575		csstate->off += bytes;
1576		return bytes;
1577	}
1578
1579	sum = csum_shift(csstate->csum, csstate->off);
1580	if (unlikely(iov_iter_is_pipe(i)))
1581		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
1582	else iterate_and_advance(i, bytes, base, len, off, ({
1583		next = csum_and_copy_to_user(addr + off, base, len);
1584		sum = csum_block_add(sum, next, off);
1585		next ? 0 : len;
1586	}), ({
1587		sum = csum_and_memcpy(base, addr + off, len, sum, off);
1588	})
1589	)
1590	csstate->csum = csum_shift(sum, csstate->off);
1591	csstate->off += bytes;
1592	return bytes;
1593}
1594EXPORT_SYMBOL(csum_and_copy_to_iter);
1595
1596size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1597		struct iov_iter *i)
1598{
1599#ifdef CONFIG_CRYPTO_HASH
1600	struct ahash_request *hash = hashp;
1601	struct scatterlist sg;
1602	size_t copied;
1603
1604	copied = copy_to_iter(addr, bytes, i);
1605	sg_init_one(&sg, addr, copied);
1606	ahash_request_set_crypt(hash, &sg, NULL, copied);
1607	crypto_ahash_update(hash);
1608	return copied;
1609#else
1610	return 0;
1611#endif
1612}
1613EXPORT_SYMBOL(hash_and_copy_to_iter);
1614
1615static int iov_npages(const struct iov_iter *i, int maxpages)
1616{
1617	size_t skip = i->iov_offset, size = i->count;
1618	const struct iovec *p;
1619	int npages = 0;
1620
1621	for (p = i->iov; size; skip = 0, p++) {
1622		unsigned offs = offset_in_page(p->iov_base + skip);
1623		size_t len = min(p->iov_len - skip, size);
1624
1625		if (len) {
1626			size -= len;
1627			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1628			if (unlikely(npages > maxpages))
1629				return maxpages;
1630		}
1631	}
1632	return npages;
1633}
1634
1635static int bvec_npages(const struct iov_iter *i, int maxpages)
1636{
1637	size_t skip = i->iov_offset, size = i->count;
1638	const struct bio_vec *p;
1639	int npages = 0;
1640
1641	for (p = i->bvec; size; skip = 0, p++) {
1642		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
1643		size_t len = min(p->bv_len - skip, size);
1644
1645		size -= len;
1646		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
1647		if (unlikely(npages > maxpages))
1648			return maxpages;
1649	}
1650	return npages;
1651}
1652
1653int iov_iter_npages(const struct iov_iter *i, int maxpages)
1654{
1655	if (unlikely(!i->count))
1656		return 0;
1657	if (likely(iter_is_ubuf(i))) {
1658		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
1659		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
1660		return min(npages, maxpages);
1661	}
1662	/* iovec and kvec have identical layouts */
1663	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
1664		return iov_npages(i, maxpages);
1665	if (iov_iter_is_bvec(i))
1666		return bvec_npages(i, maxpages);
1667	if (iov_iter_is_pipe(i)) {
1668		int npages;
1669
1670		if (!sanity(i))
1671			return 0;
1672
1673		pipe_npages(i, &npages);
1674		return min(npages, maxpages);
1675	}
1676	if (iov_iter_is_xarray(i)) {
1677		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
1678		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
1679		return min(npages, maxpages);
1680	}
1681	return 0;
1682}
1683EXPORT_SYMBOL(iov_iter_npages);
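
/*
 * Editor's illustrative sketch, not part of the original file: sizing a
 * page array before pinning.  The result is clamped at @maxpages, so a
 * bounded allocation can be made up front; the cap of 256 is arbitrary.
 * demo_* is hypothetical.
 */
static struct page **demo_alloc_page_array(const struct iov_iter *iter,
					   int *npages)
{
	*npages = iov_iter_npages(iter, 256);
	if (!*npages)
		return NULL;
	return kcalloc(*npages, sizeof(struct page *), GFP_KERNEL);
}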
1684
1685const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1686{
1687	*new = *old;
1688	if (unlikely(iov_iter_is_pipe(new))) {
1689		WARN_ON(1);
1690		return NULL;
1691	}
1692	if (iov_iter_is_bvec(new))
1693		return new->bvec = kmemdup(new->bvec,
1694				    new->nr_segs * sizeof(struct bio_vec),
1695				    flags);
1696	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
1697		/* iovec and kvec have identical layout */
1698		return new->iov = kmemdup(new->iov,
1699				   new->nr_segs * sizeof(struct iovec),
1700				   flags);
1701	return NULL;
1702}
1703EXPORT_SYMBOL(dup_iter);
1704
1705static int copy_compat_iovec_from_user(struct iovec *iov,
1706		const struct iovec __user *uvec, unsigned long nr_segs)
1707{
1708	const struct compat_iovec __user *uiov =
1709		(const struct compat_iovec __user *)uvec;
1710	int ret = -EFAULT, i;
1711
1712	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
1713		return -EFAULT;
1714
1715	for (i = 0; i < nr_segs; i++) {
1716		compat_uptr_t buf;
1717		compat_ssize_t len;
1718
1719		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
1720		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);
1721
1722		/* check for compat_size_t not fitting in compat_ssize_t .. */
1723		if (len < 0) {
1724			ret = -EINVAL;
1725			goto uaccess_end;
1726		}
1727		iov[i].iov_base = compat_ptr(buf);
1728		iov[i].iov_len = len;
1729	}
1730
1731	ret = 0;
1732uaccess_end:
1733	user_access_end();
1734	return ret;
1735}
1736
1737static int copy_iovec_from_user(struct iovec *iov,
1738		const struct iovec __user *uvec, unsigned long nr_segs)
1739{
1740	unsigned long seg;
1741
1742	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
1743		return -EFAULT;
1744	for (seg = 0; seg < nr_segs; seg++) {
1745		if ((ssize_t)iov[seg].iov_len < 0)
1746			return -EINVAL;
1747	}
1748
1749	return 0;
1750}
1751
1752struct iovec *iovec_from_user(const struct iovec __user *uvec,
1753		unsigned long nr_segs, unsigned long fast_segs,
1754		struct iovec *fast_iov, bool compat)
1755{
1756	struct iovec *iov = fast_iov;
1757	int ret;
1758
1759	/*
1760	 * SuS says "The readv() function *may* fail if the iovcnt argument was
1761	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
1762	 * traditionally returned zero for zero segments, so...
1763	 */
1764	if (nr_segs == 0)
1765		return iov;
1766	if (nr_segs > UIO_MAXIOV)
1767		return ERR_PTR(-EINVAL);
1768	if (nr_segs > fast_segs) {
1769		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
1770		if (!iov)
1771			return ERR_PTR(-ENOMEM);
1772	}
1773
1774	if (compat)
1775		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
1776	else
1777		ret = copy_iovec_from_user(iov, uvec, nr_segs);
1778	if (ret) {
1779		if (iov != fast_iov)
1780			kfree(iov);
1781		return ERR_PTR(ret);
1782	}
1783
1784	return iov;
1785}
1786
1787ssize_t __import_iovec(int type, const struct iovec __user *uvec,
1788		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
1789		 struct iov_iter *i, bool compat)
1790{
1791	ssize_t total_len = 0;
1792	unsigned long seg;
1793	struct iovec *iov;
1794
1795	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
1796	if (IS_ERR(iov)) {
1797		*iovp = NULL;
1798		return PTR_ERR(iov);
1799	}
1800
1801	/*
1802	 * According to the Single Unix Specification we should return EINVAL if
1803	 * an element length is < 0 when cast to ssize_t or if the total length
1804	 * would overflow the ssize_t return value of the system call.
1805	 *
1806	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
1807	 * overflow case.
1808	 */
1809	for (seg = 0; seg < nr_segs; seg++) {
1810		ssize_t len = (ssize_t)iov[seg].iov_len;
1811
1812		if (!access_ok(iov[seg].iov_base, len)) {
1813			if (iov != *iovp)
1814				kfree(iov);
1815			*iovp = NULL;
1816			return -EFAULT;
1817		}
1818
1819		if (len > MAX_RW_COUNT - total_len) {
1820			len = MAX_RW_COUNT - total_len;
1821			iov[seg].iov_len = len;
1822		}
1823		total_len += len;
1824	}
1825
1826	iov_iter_init(i, type, iov, nr_segs, total_len);
1827	if (iov == *iovp)
1828		*iovp = NULL;
1829	else
1830		*iovp = iov;
1831	return total_len;
1832}
1833
1834/**
1835 * import_iovec() - Copy an array of &struct iovec from userspace
1836 *     into the kernel, check that it is valid, and initialize a new
1837 *     &struct iov_iter iterator to access it.
1838 *
1839 * @type: One of %READ or %WRITE.
1840 * @uvec: Pointer to the userspace array.
1841 * @nr_segs: Number of elements in userspace array.
1842 * @fast_segs: Number of elements in @iov.
1843 * @iovp: (input and output parameter) Pointer to pointer to (usually small
1844 *     on-stack) kernel array.
1845 * @i: Pointer to iterator that will be initialized on success.
1846 *
1847 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
1848 * then this function places %NULL in *@iov on return. Otherwise, a new
1849 * array will be allocated and the result placed in *@iov. This means that
1850 * the caller may call kfree() on *@iov regardless of whether the small
1851 * on-stack array was used or not (and regardless of whether this function
1852 * returns an error or not).
1853 *
1854 * Return: Negative error code on error, bytes imported on success
1855 */
1856ssize_t import_iovec(int type, const struct iovec __user *uvec,
1857		 unsigned nr_segs, unsigned fast_segs,
1858		 struct iovec **iovp, struct iov_iter *i)
1859{
1860	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
1861			      in_compat_syscall());
1862}
1863EXPORT_SYMBOL(import_iovec);
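
/*
 * Editor's illustrative sketch, not part of the original file: the
 * canonical syscall-side pattern, as used by readv(2) and friends.  On
 * return, *iov either points at a kmalloc'ed array or is NULL when the
 * on-stack fast array sufficed, so the unconditional kfree() below is
 * always safe.  demo_* is hypothetical.
 */
static ssize_t demo_readv_prep(const struct iovec __user *uvec, int nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;
	/* ... perform the I/O against &iter ... */
	kfree(iov);
	return ret;
}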
1864
1865int import_single_range(int rw, void __user *buf, size_t len,
1866		 struct iovec *iov, struct iov_iter *i)
1867{
1868	if (len > MAX_RW_COUNT)
1869		len = MAX_RW_COUNT;
1870	if (unlikely(!access_ok(buf, len)))
1871		return -EFAULT;
1872
1873	iov->iov_base = buf;
1874	iov->iov_len = len;
1875	iov_iter_init(i, rw, iov, 1, len);
1876	return 0;
1877}
1878EXPORT_SYMBOL(import_single_range);
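
/*
 * Editor's illustrative sketch, not part of the original file: the
 * single-buffer variant used by plain read(2)/write(2) paths.  Unlike
 * import_iovec() there is no allocation; @iov merely has to outlive the
 * iterator.  demo_* is hypothetical.
 */
static int demo_write_prep(const char __user *buf, size_t len,
			   struct iovec *iov, struct iov_iter *iter)
{
	int ret = import_single_range(WRITE, (void __user *)buf, len,
				      iov, iter);
	if (ret)
		return ret;
	/* *iter now covers the user range, capped at MAX_RW_COUNT */
	return 0;
}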
1879
1880/**
1881 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
1882 *     iov_iter_save_state() was called.
1883 *
1884 * @i: &struct iov_iter to restore
1885 * @state: state to restore from
1886 *
1887 * Used after iov_iter_save_state() to restore @i, if operations may
1888 * have advanced it.
1889 *
1890 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC, and ITER_UBUF
1891 */
1892void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
1893{
1894	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
1895			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
1896		return;
1897	i->iov_offset = state->iov_offset;
1898	i->count = state->count;
1899	if (iter_is_ubuf(i))
1900		return;
1901	/*
1902	 * For the *vec iters, nr_segs + iov is constant - if we increment
1903	 * the vec, then we also decrement the nr_segs count. Hence we don't
1904	 * need to track both of these, just one is enough and we can derive
1905	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
1906	 * size, so we can just increment the iov pointer as they are unionized.
1907	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
1908	 * not. Be safe and handle it separately.
1909	 */
1910	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
1911	if (iov_iter_is_bvec(i))
1912		i->bvec -= state->nr_segs - i->nr_segs;
1913	else
1914		i->iov -= state->nr_segs - i->nr_segs;
1915	i->nr_segs = state->nr_segs;
1916}
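
/*
 * Editor's illustrative sketch, not part of the original file: the
 * save/restore pattern, e.g. when a non-blocking attempt may partially
 * consume the iterator and must be replayed.  demo_do_io() is
 * hypothetical; IOCB_NOWAIT assumes <linux/fs.h>.
 */
static ssize_t demo_do_io(struct iov_iter *iter, int flags);

static ssize_t demo_retryable_io(struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = demo_do_io(iter, IOCB_NOWAIT);	/* may partially consume */
	if (ret == -EAGAIN) {
		iov_iter_restore(iter, &state);	/* rewind for the retry */
		ret = demo_do_io(iter, 0);
	}
	return ret;
}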