// SPDX-License-Identifier: GPL-2.0-only
/* I/O iterator tests.  This can only test kernel-backed iterator types.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/bvec.h>
#include <linux/folio_queue.h>
#include <kunit/test.h>

MODULE_DESCRIPTION("iov_iter testing");
MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
MODULE_LICENSE("GPL");

struct kvec_test_range {
	int	from, to;
};

static const struct kvec_test_range kvec_test_ranges[] = {
	{ 0x00002, 0x00002 },
	{ 0x00027, 0x03000 },
	{ 0x05193, 0x18794 },
	{ 0x20000, 0x20000 },
	{ 0x20000, 0x24000 },
	{ 0x24000, 0x27001 },
	{ 0x29000, 0xffffb },
	{ 0xffffd, 0xffffe },
	{ -1 }
};

static inline u8 pattern(unsigned long x)
{
	return x & 0xff;
}

static void iov_kunit_unmap(void *data)
{
	vunmap(data);
}

static void *__init iov_kunit_create_buffer(struct kunit *test,
					    struct page ***ppages,
					    size_t npages)
{
	struct page **pages;
	unsigned long got;
	void *buffer;

	pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
	*ppages = pages;

	got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
	if (got != npages) {
		release_pages(pages, got);
		KUNIT_ASSERT_EQ(test, got, npages);
	}

	for (int i = 0; i < npages; i++)
		pages[i]->index = i;

	buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);

	kunit_add_action_or_reset(test, iov_kunit_unmap, buffer);
	return buffer;
}
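
/*
 * Illustrative sketch (hypothetical helper, not part of the upstream tests):
 * how a test might consume iov_kunit_create_buffer().  The helper hands back
 * both a linear mapping and the backing page array; because the mapping is
 * created with VM_MAP_PUT_PAGES and torn down by the deferred
 * kunit_add_action_or_reset() callback above, the test never frees anything
 * itself.  The function name below is made up for illustration.
 */
static void __init iov_kunit_buffer_usage_sketch(struct kunit *test)
{
	struct page **pages;
	u8 *buf;

	buf = iov_kunit_create_buffer(test, &pages, 4);	/* 4 pages, vmapped */
	memset(buf, 0, 4 * PAGE_SIZE);			/* linear access */
	KUNIT_EXPECT_EQ(test, buf[0], 0);
	/* pages[0..3] back the mapping and can feed bvec/folioq/xarray setup. */
}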

static void __init iov_kunit_load_kvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct kvec *kvec, unsigned int kvmax,
				       void *buffer, size_t bufsize,
				       const struct kvec_test_range *pr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < kvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, bufsize);
		kvec[i].iov_base = buffer + pr->from;
		kvec[i].iov_len = pr->to - pr->from;
		size += pr->to - pr->from;
	}
	KUNIT_ASSERT_LE(test, size, bufsize);

	iov_iter_kvec(iter, dir, kvec, i, size);
}
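
/*
 * Minimal sketch of the API exercised below, independent of the range tables
 * (assumes only what <linux/uio.h> provides): build an ITER_KVEC over two
 * kernel buffers, copy into it with copy_to_iter(), then rebuild it as a
 * source and read the data back with copy_from_iter().  The function and
 * buffer names are illustrative only.
 */
static void __init iov_kunit_kvec_roundtrip_sketch(struct kunit *test)
{
	u8 a[64], b[64], out[128], in[128];
	struct kvec kv[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	struct iov_iter iter;
	size_t n;

	memset(out, 0x5a, sizeof(out));

	/* Destination iterator: copy_to_iter() fills a[] then b[]. */
	iov_iter_kvec(&iter, READ, kv, 2, sizeof(a) + sizeof(b));
	n = copy_to_iter(out, sizeof(out), &iter);
	KUNIT_EXPECT_EQ(test, n, sizeof(out));

	/* Source iterator: copy_from_iter() drains a[] then b[] back out. */
	iov_iter_kvec(&iter, WRITE, kv, 2, sizeof(a) + sizeof(b));
	n = copy_from_iter(in, sizeof(in), &iter);
	KUNIT_EXPECT_EQ(test, n, sizeof(in));
	KUNIT_EXPECT_EQ(test, memcmp(in, out, sizeof(in)), 0);
}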

/*
 * Test copying to an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_to_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_KVEC-type iterator.
 */
static void __init iov_kunit_copy_from_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **spages, **bpages;
	struct kvec kvec[8];
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_kvec(test, &iter, WRITE, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = min(iter.count, bufsize);

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

struct bvec_test_range {
	int	page, from, to;
};

static const struct bvec_test_range bvec_test_ranges[] = {
	{ 0, 0x0002, 0x0002 },
	{ 1, 0x0027, 0x0893 },
	{ 2, 0x0193, 0x0794 },
	{ 3, 0x0000, 0x1000 },
	{ 4, 0x0000, 0x1000 },
	{ 5, 0x0000, 0x1000 },
	{ 6, 0x0000, 0x0ffb },
	{ 6, 0x0ffd, 0x0ffe },
	{ -1, -1, -1 }
};

static void __init iov_kunit_load_bvec(struct kunit *test,
				       struct iov_iter *iter, int dir,
				       struct bio_vec *bvec, unsigned int bvmax,
				       struct page **pages, size_t npages,
				       size_t bufsize,
				       const struct bvec_test_range *pr)
{
	struct page *can_merge = NULL, *page;
	size_t size = 0;
	int i;

	for (i = 0; i < bvmax; i++, pr++) {
		if (pr->from < 0)
			break;
		KUNIT_ASSERT_LT(test, pr->page, npages);
		KUNIT_ASSERT_LT(test, pr->page * PAGE_SIZE, bufsize);
		KUNIT_ASSERT_GE(test, pr->from, 0);
		KUNIT_ASSERT_GE(test, pr->to, pr->from);
		KUNIT_ASSERT_LE(test, pr->to, PAGE_SIZE);

		page = pages[pr->page];
		if (pr->from == 0 && pr->from != pr->to && page == can_merge) {
			i--;
			bvec[i].bv_len += pr->to;
		} else {
			bvec_set_page(&bvec[i], page, pr->to - pr->from, pr->from);
		}

		size += pr->to - pr->from;
		if ((pr->to & ~PAGE_MASK) == 0)
			can_merge = page + pr->to / PAGE_SIZE;
		else
			can_merge = NULL;
	}

	iov_iter_bvec(iter, dir, bvec, i, size);
}
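
/*
 * Sketch of the merge rule applied by iov_kunit_load_bvec() above, plus the
 * plain (unmerged) way to build an ITER_BVEC.  With bvec_test_ranges[] rows
 * { 3, 0x0000, 0x1000 } and { 4, 0x0000, 0x1000 }, the second row is folded
 * into the first bio_vec only if the previous range ended on a page boundary
 * and pages[4] is physically the page after pages[3] (can_merge ==
 * pages[3] + 1); otherwise each row gets its own segment, as in the
 * hypothetical helper below (names and parameters are illustrative).
 */
static void __init iov_kunit_bvec_sketch(struct iov_iter *iter, int dir,
					 struct bio_vec *bv, /* at least 2 */
					 struct page *p0, struct page *p1)
{
	/* One full page followed by the first half of a second page. */
	bvec_set_page(&bv[0], p0, PAGE_SIZE, 0);
	bvec_set_page(&bv[1], p1, PAGE_SIZE / 2, 0);
	iov_iter_bvec(iter, dir, bv, 2, PAGE_SIZE + PAGE_SIZE / 2);
}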

/*
 * Test copying to an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_to_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, b, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_to_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the scratch buffer. */
	b = 0;
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++, b++) {
		u8 *p = scratch + pr->page * PAGE_SIZE;

		for (i = pr->from; i < pr->to; i++)
			p[i] = pattern(patt++);
	}

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_BVEC-type iterator.
 */
static void __init iov_kunit_copy_from_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct bio_vec bvec[8];
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_bvec(test, &iter, WRITE, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	copied = copy_from_iter(scratch, size, &iter);

	KUNIT_EXPECT_EQ(test, copied, size);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_EXPECT_EQ(test, iter.nr_segs, 0);

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = bvec_test_ranges; pr->from >= 0; pr++) {
		size_t patt = pr->page * PAGE_SIZE;

		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(patt + j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

static void iov_kunit_destroy_folioq(void *data)
{
	struct folio_queue *folioq, *next;

	for (folioq = data; folioq; folioq = next) {
		next = folioq->next;
		for (int i = 0; i < folioq_nr_slots(folioq); i++)
			if (folioq_folio(folioq, i))
				folio_put(folioq_folio(folioq, i));
		kfree(folioq);
	}
}

static void __init iov_kunit_load_folioq(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct folio_queue *folioq,
					 struct page **pages, size_t npages)
{
	struct folio_queue *p = folioq;
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		if (folioq_full(p)) {
			p->next = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
			KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p->next);
			folioq_init(p->next);
			p->next->prev = p;
			p = p->next;
		}
		folioq_append(p, page_folio(pages[i]));
		size += PAGE_SIZE;
	}
	iov_iter_folio_queue(iter, dir, folioq, 0, 0, size);
}

static struct folio_queue *iov_kunit_create_folioq(struct kunit *test)
{
	struct folio_queue *folioq;

	folioq = kzalloc(sizeof(struct folio_queue), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, folioq);
	kunit_add_action_or_reset(test, iov_kunit_destroy_folioq, folioq);
	folioq_init(folioq);
	return folioq;
}
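
/*
 * Sketch (hypothetical helper, mirroring the loaders above): wrap a handful
 * of order-0 folios in a single folio_queue segment and point an ITER_FOLIOQ
 * iterator at it.  folioq_append() fills successive slots; the iterator is
 * then told the starting segment, starting slot, byte offset within that
 * slot's folio and the total byte count.  The tests above instead pass
 * slot 0/offset 0 and use iov_iter_advance() to reach the interesting range.
 */
static void __init iov_kunit_folioq_sketch(struct kunit *test,
					   struct iov_iter *iter, int dir,
					   struct folio_queue *fq,
					   struct page **pages, size_t n)
{
	size_t bytes = 0;

	KUNIT_ASSERT_LE(test, n, folioq_nr_slots(fq));
	for (size_t i = 0; i < n; i++) {
		folioq_append(fq, page_folio(pages[i]));
		bytes += PAGE_SIZE;
	}
	/* Segment fq, slot 0, offset 0, covering 'bytes' bytes in total. */
	iov_iter_folio_queue(iter, dir, fq, 0, 0, bytes);
}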

/*
 * Test copying to an ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_to_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, READ, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
		if (test->status == KUNIT_FAILURE)
			goto stop;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

stop:
	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_FOLIOQ-type iterator.
 */
static void __init iov_kunit_copy_from_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct folio_queue *folioq;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, pr->from);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to % PAGE_SIZE);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

static void iov_kunit_destroy_xarray(void *data)
{
	struct xarray *xarray = data;

	xa_destroy(xarray);
	kfree(xarray);
}

static void __init iov_kunit_load_xarray(struct kunit *test,
					 struct iov_iter *iter, int dir,
					 struct xarray *xarray,
					 struct page **pages, size_t npages)
{
	size_t size = 0;
	int i;

	for (i = 0; i < npages; i++) {
		void *x = xa_store(xarray, i, pages[i], GFP_KERNEL);

		KUNIT_ASSERT_FALSE(test, xa_is_err(x));
		size += PAGE_SIZE;
	}
	iov_iter_xarray(iter, dir, xarray, 0, size);
}

static struct xarray *iov_kunit_create_xarray(struct kunit *test)
{
	struct xarray *xarray;

	xarray = kzalloc(sizeof(struct xarray), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xarray);
	xa_init(xarray);
	kunit_add_action_or_reset(test, iov_kunit_destroy_xarray, xarray);
	return xarray;
}
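
/*
 * Sketch of the ITER_XARRAY path used below (illustrative only, hypothetical
 * helper): the xarray is indexed by page-cache-style page indices, and the
 * iterator is given a byte start position and length.  Unlike the kvec/bvec
 * cases, the start is an absolute offset into the whole mapping rather than
 * an offset into segment 0.
 */
static void __init iov_kunit_xarray_sketch(struct kunit *test,
					   struct iov_iter *iter, int dir,
					   struct xarray *xa, struct page *p)
{
	void *old = xa_store(xa, 0, p, GFP_KERNEL);	/* index 0 -> page */

	KUNIT_ASSERT_FALSE(test, xa_is_err(old));
	/* Iterate over bytes [0x100, 0x300) of the page stored at index 0. */
	iov_iter_xarray(iter, dir, xa, 0x100, 0x200);
}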

/*
 * Test copying to an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_to_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, patt;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	for (i = 0; i < bufsize; i++)
		scratch[i] = pattern(i);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	memset(buffer, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, READ, xarray, pr->from, size);
		copied = copy_to_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the scratch buffer. */
	patt = 0;
	memset(scratch, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++)
		for (i = pr->from; i < pr->to; i++)
			scratch[i] = pattern(patt++);

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, buffer[i], scratch[i], "at i=%x", i);
		if (buffer[i] != scratch[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test copying from an ITER_XARRAY-type iterator.
 */
static void __init iov_kunit_copy_from_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **spages, **bpages;
	u8 *scratch, *buffer;
	size_t bufsize, npages, size, copied;
	int i, j;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	buffer = iov_kunit_create_buffer(test, &bpages, npages);
	for (i = 0; i < bufsize; i++)
		buffer[i] = pattern(i);

	scratch = iov_kunit_create_buffer(test, &spages, npages);
	memset(scratch, 0, bufsize);

	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	i = 0;
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		size = pr->to - pr->from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, pr->from, size);
		copied = copy_from_iter(scratch + i, size, &iter);

		KUNIT_EXPECT_EQ(test, copied, size);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, size);
		i += size;
	}

	/* Build the expected image in the main buffer. */
	i = 0;
	memset(buffer, 0, bufsize);
	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		for (j = pr->from; j < pr->to; j++) {
			buffer[i++] = pattern(j);
			if (i >= bufsize)
				goto stop;
		}
	}
stop:

	/* Compare the images */
	for (i = 0; i < bufsize; i++) {
		KUNIT_EXPECT_EQ_MSG(test, scratch[i], buffer[i], "at i=%x", i);
		if (scratch[i] != buffer[i])
			return;
	}

	KUNIT_SUCCEED(test);
}

/*
 * Test the extraction of ITER_KVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct kvec kvec[8];
	u8 *buffer;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	buffer = iov_kunit_create_buffer(test, &bpages, npages);

	iov_kunit_load_kvec(test, &iter, READ, kvec, ARRAY_SIZE(kvec),
			    buffer, bufsize, kvec_test_ranges);
	size = iter.count;

	pr = kvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}
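
/*
 * Sketch of the extraction pattern shared by the tests above and below
 * (hypothetical fragment, not a test case): iov_iter_extract_pages() fills a
 * caller-supplied page array, returns the number of bytes it covered and
 * sets *offset0 to the byte offset into the first page.  For the
 * kernel-backed iterator types exercised in this file,
 * iov_iter_extract_will_pin() is false, so the extracted pages are only
 * borrowed from the iterator's backing store and need no unpin or put.
 */
static ssize_t __init iov_kunit_extract_sketch(struct iov_iter *iter,
					       struct page **pagelist,
					       unsigned int maxpages,
					       size_t *first_offset)
{
	struct page **pages = pagelist;
	size_t offset0 = LONG_MAX;
	ssize_t len;

	len = iov_iter_extract_pages(iter, &pages, maxpages * PAGE_SIZE,
				     maxpages, 0, &offset0);
	/* On success, pagelist[0..n) and *first_offset describe the bytes. */
	if (len > 0)
		*first_offset = offset0;
	return len;
}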

/*
 * Test the extraction of ITER_BVEC-type iterators.
 */
static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
{
	const struct bvec_test_range *pr;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	struct bio_vec bvec[8];
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_bvec(test, &iter, READ, bvec, ARRAY_SIZE(bvec),
			    bpages, npages, bufsize, bvec_test_ranges);
	size = iter.count;

	pr = bvec_test_ranges;
	from = pr->from;
	do {
		size_t offset0 = LONG_MAX;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++)
			pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

		len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
					     ARRAY_SIZE(pagelist), 0, &offset0);
		KUNIT_EXPECT_GE(test, len, 0);
		if (len < 0)
			break;
		KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
		KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);
		KUNIT_EXPECT_LE(test, len, size);
		KUNIT_EXPECT_EQ(test, iter.count, size - len);
		size -= len;

		if (len == 0)
			break;

		for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
			struct page *p;
			ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
			int ix;

			KUNIT_ASSERT_GE(test, part, 0);
			while (from == pr->to) {
				pr++;
				from = pr->from;
				if (from < 0)
					goto stop;
			}
			ix = pr->page + from / PAGE_SIZE;
			KUNIT_ASSERT_LT(test, ix, npages);
			p = bpages[ix];
			KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
			KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
			from += part;
			len -= part;
			KUNIT_ASSERT_GE(test, len, 0);
			if (len == 0)
				break;
			offset0 = 0;
		}

		if (test->status == KUNIT_FAILURE)
			break;
	} while (iov_iter_count(&iter) > 0);

stop:
	KUNIT_EXPECT_EQ(test, size, 0);
	KUNIT_EXPECT_EQ(test, iter.count, 0);
	KUNIT_SUCCEED(test);
}

/*
 * Test the extraction of ITER_FOLIOQ-type iterators.
 */
static void __init iov_kunit_extract_pages_folioq(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct folio_queue *folioq;
	struct iov_iter iter;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	folioq = iov_kunit_create_folioq(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_folioq(test, &iter, READ, folioq, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_folio_queue(&iter, WRITE, folioq, 0, 0, pr->to);
		iov_iter_advance(&iter, from);

		do {
			size_t offset0 = LONG_MAX;

			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
	}

stop:
	KUNIT_SUCCEED(test);
}

/*
 * Test the extraction of ITER_XARRAY-type iterators.
 */
static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
{
	const struct kvec_test_range *pr;
	struct iov_iter iter;
	struct xarray *xarray;
	struct page **bpages, *pagelist[8], **pages = pagelist;
	ssize_t len;
	size_t bufsize, size = 0, npages;
	int i, from;

	bufsize = 0x100000;
	npages = bufsize / PAGE_SIZE;

	xarray = iov_kunit_create_xarray(test);

	iov_kunit_create_buffer(test, &bpages, npages);
	iov_kunit_load_xarray(test, &iter, READ, xarray, bpages, npages);

	for (pr = kvec_test_ranges; pr->from >= 0; pr++) {
		from = pr->from;
		size = pr->to - from;
		KUNIT_ASSERT_LE(test, pr->to, bufsize);

		iov_iter_xarray(&iter, WRITE, xarray, from, size);

		do {
			size_t offset0 = LONG_MAX;

			for (i = 0; i < ARRAY_SIZE(pagelist); i++)
				pagelist[i] = (void *)(unsigned long)0xaa55aa55aa55aa55ULL;

			len = iov_iter_extract_pages(&iter, &pages, 100 * 1024,
						     ARRAY_SIZE(pagelist), 0, &offset0);
			KUNIT_EXPECT_GE(test, len, 0);
			if (len < 0)
				break;
			KUNIT_EXPECT_LE(test, len, size);
			KUNIT_EXPECT_EQ(test, iter.count, size - len);
			if (len == 0)
				break;
			size -= len;
			KUNIT_EXPECT_GE(test, (ssize_t)offset0, 0);
			KUNIT_EXPECT_LT(test, offset0, PAGE_SIZE);

			for (i = 0; i < ARRAY_SIZE(pagelist); i++) {
				struct page *p;
				ssize_t part = min_t(ssize_t, len, PAGE_SIZE - offset0);
				int ix;

				KUNIT_ASSERT_GE(test, part, 0);
				ix = from / PAGE_SIZE;
				KUNIT_ASSERT_LT(test, ix, npages);
				p = bpages[ix];
				KUNIT_EXPECT_PTR_EQ(test, pagelist[i], p);
				KUNIT_EXPECT_EQ(test, offset0, from % PAGE_SIZE);
				from += part;
				len -= part;
				KUNIT_ASSERT_GE(test, len, 0);
				if (len == 0)
					break;
				offset0 = 0;
			}

			if (test->status == KUNIT_FAILURE)
				goto stop;
		} while (iov_iter_count(&iter) > 0);

		KUNIT_EXPECT_EQ(test, size, 0);
		KUNIT_EXPECT_EQ(test, iter.count, 0);
		KUNIT_EXPECT_EQ(test, iter.iov_offset, pr->to - pr->from);
	}

stop:
	KUNIT_SUCCEED(test);
}

static struct kunit_case __refdata iov_kunit_cases[] = {
	KUNIT_CASE(iov_kunit_copy_to_kvec),
	KUNIT_CASE(iov_kunit_copy_from_kvec),
	KUNIT_CASE(iov_kunit_copy_to_bvec),
	KUNIT_CASE(iov_kunit_copy_from_bvec),
	KUNIT_CASE(iov_kunit_copy_to_folioq),
	KUNIT_CASE(iov_kunit_copy_from_folioq),
	KUNIT_CASE(iov_kunit_copy_to_xarray),
	KUNIT_CASE(iov_kunit_copy_from_xarray),
	KUNIT_CASE(iov_kunit_extract_pages_kvec),
	KUNIT_CASE(iov_kunit_extract_pages_bvec),
	KUNIT_CASE(iov_kunit_extract_pages_folioq),
	KUNIT_CASE(iov_kunit_extract_pages_xarray),
	{}
};

static struct kunit_suite iov_kunit_suite = {
	.name = "iov_iter",
	.test_cases = iov_kunit_cases,
};

kunit_test_suites(&iov_kunit_suite);
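
/*
 * Running the suite (a sketch of the usual KUnit workflow, not part of the
 * file): enable this module's Kconfig option together with CONFIG_KUNIT in a
 * .kunitconfig, then invoke the KUnit wrapper from the top of the kernel
 * tree, filtering on the suite name declared above, e.g.:
 *
 *	./tools/testing/kunit/kunit.py run 'iov_iter*'
 *
 * The suite can also be built as a module and loaded on a running kernel,
 * with results reported via the usual KUnit dmesg/debugfs output.
 */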