Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.6.
   1/*
   2 * Test cases for the drm_mm range manager
   3 */
   4
   5#define pr_fmt(fmt) "drm_mm: " fmt
   6
   7#include <linux/module.h>
   8#include <linux/prime_numbers.h>
   9#include <linux/slab.h>
  10#include <linux/random.h>
  11#include <linux/vmalloc.h>
  12
  13#include <drm/drm_mm.h>
  14
  15#include "../lib/drm_random.h"
  16
  17#define TESTS "drm_mm_selftests.h"
  18#include "drm_selftest.h"
  19
  20static unsigned int random_seed;
  21static unsigned int max_iterations = 8192;
  22static unsigned int max_prime = 128;
  23
/* Indices into insert_modes[] so tests can pick a specific strategy. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/*
 * Allocation strategies exercised by the tests: each entry pairs a
 * human-readable name (used in error messages) with the drm_mm insertion
 * mode. Both tables end with a zeroed sentinel entry.
 */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = { /* subset used by the eviction tests */
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
  45
/* Trivial first test: proves the selftest harness itself runs. */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
  51
/*
 * Check that @mm contains no free space at all: the hole iterator must
 * visit nothing, and no node may be followed by a hole. Returns true when
 * the range manager is completely filled.
 */
static bool assert_no_holes(const struct drm_mm *mm)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
		count++;
	if (count) {
		pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
		return false;
	}

	/* Cross-check using the per-node hole flag as well. */
	drm_mm_for_each_node(hole, mm) {
		if (drm_mm_hole_follows(hole)) {
			pr_err("Hole follows node, expected none!\n");
			return false;
		}
	}

	return true;
}
  75
/*
 * Check that @mm contains exactly one hole covering [start, end). An empty
 * requested range (end <= start) passes trivially. All holes are inspected
 * so the log reports every mismatch, not just the first.
 */
static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	unsigned long count;
	bool ok = true;

	if (end <= start)
		return true;

	count = 0;
	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		if (start != hole_start || end != hole_end) {
			/* Only print the first mismatch to avoid log spam. */
			if (ok)
				pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
				       hole_start, hole_end,
				       start, end);
			ok = false;
		}
		count++;
	}
	if (count != 1) {
		pr_err("Expected to find one hole, found %lu instead\n", count);
		ok = false;
	}

	return ok;
}
 104
/*
 * Verify @mm is completely filled with back-to-back nodes of @size bytes:
 * no holes anywhere, nodes in ascending address order starting at 0, and
 * every node findable through the interval lookup over its own range.
 * Returns true when the whole structure is consistent.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The interval search over [addr, addr + size) must return
		 * exactly this node and nothing else.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
 155
 156static u64 misalignment(struct drm_mm_node *node, u64 alignment)
 157{
 158	u64 rem;
 159
 160	if (!alignment)
 161		return 0;
 162
 163	div64_u64_rem(node->start, alignment, &rem);
 164	return rem;
 165}
 166
 167static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
 168			u64 size, u64 alignment, unsigned long color)
 169{
 170	bool ok = true;
 171
 172	if (!drm_mm_node_allocated(node) || node->mm != mm) {
 173		pr_err("node not allocated\n");
 174		ok = false;
 175	}
 176
 177	if (node->size != size) {
 178		pr_err("node has wrong size, found %llu, expected %llu\n",
 179		       node->size, size);
 180		ok = false;
 181	}
 182
 183	if (misalignment(node, alignment)) {
 184		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
 185		       node->start, misalignment(node, alignment), alignment);
 186		ok = false;
 187	}
 188
 189	if (node->color != color) {
 190		pr_err("node has wrong color, found %lu, expected %lu\n",
 191		       node->color, color);
 192		ok = false;
 193	}
 194
 195	return ok;
 196}
 197
/* Dump the current state of @mm through the debug printer, tagging the
 * output with the calling function's name. do/while(0) so it behaves as a
 * single statement.
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
 201
/*
 * Basic lifecycle test for struct drm_mm: a zeroed struct must not claim
 * to be initialised; drm_mm_init() must yield a clean mm that is one hole
 * covering the full range; reserving that whole range must leave no
 * holes; and removing the reservation must restore the single hole.
 * Returns 0 on success, -EINVAL (or the reserve error) on failure.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison the struct first to prove drm_mm_init() sets everything. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	/* Dump the mm state on failure to aid debugging. */
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
 262
 263static int igt_debug(void *ignored)
 264{
 265	struct drm_mm mm;
 266	struct drm_mm_node nodes[2];
 267	int ret;
 268
 269	/* Create a small drm_mm with a couple of nodes and a few holes, and
 270	 * check that the debug iterator doesn't explode over a trivial drm_mm.
 271	 */
 272
 273	drm_mm_init(&mm, 0, 4096);
 274
 275	memset(nodes, 0, sizeof(nodes));
 276	nodes[0].start = 512;
 277	nodes[0].size = 1024;
 278	ret = drm_mm_reserve_node(&mm, &nodes[0]);
 279	if (ret) {
 280		pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
 281		       nodes[0].start, nodes[0].size);
 282		return ret;
 283	}
 284
 285	nodes[1].size = 1024;
 286	nodes[1].start = 4096 - 512 - nodes[1].size;
 287	ret = drm_mm_reserve_node(&mm, &nodes[1]);
 288	if (ret) {
 289		pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
 290		       nodes[1].start, nodes[1].size);
 291		return ret;
 292	}
 293
 294	show_mm(&mm);
 295	return 0;
 296}
 297
 298static struct drm_mm_node *set_node(struct drm_mm_node *node,
 299				    u64 start, u64 size)
 300{
 301	node->start = start;
 302	node->size = size;
 303	return node;
 304}
 305
 306static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
 307{
 308	int err;
 309
 310	err = drm_mm_reserve_node(mm, node);
 311	if (likely(err == -ENOSPC))
 312		return true;
 313
 314	if (!err) {
 315		pr_err("impossible reserve succeeded, node %llu + %llu\n",
 316		       node->start, node->size);
 317		drm_mm_remove_node(node);
 318	} else {
 319		pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
 320		       err, -ENOSPC, node->start, node->size);
 321	}
 322	return false;
 323}
 324
/*
 * Try to reserve nodes at and around the limits of an mm that covers
 * [0, count * size): zero-sized requests, ranges that wrap the u64 space
 * (the negated sizes rely on unsigned wraparound), and requests starting
 * at or beyond the end. Every one must be rejected. Returns true when all
 * boundary reservations failed as expected.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
 369
 370static int __igt_reserve(unsigned int count, u64 size)
 371{
 372	DRM_RND_STATE(prng, random_seed);
 373	struct drm_mm mm;
 374	struct drm_mm_node tmp, *nodes, *node, *next;
 375	unsigned int *order, n, m, o = 0;
 376	int ret, err;
 377
 378	/* For exercising drm_mm_reserve_node(), we want to check that
 379	 * reservations outside of the drm_mm range are rejected, and to
 380	 * overlapping and otherwise already occupied ranges. Afterwards,
 381	 * the tree and nodes should be intact.
 382	 */
 383
 384	DRM_MM_BUG_ON(!count);
 385	DRM_MM_BUG_ON(!size);
 386
 387	ret = -ENOMEM;
 388	order = drm_random_order(count, &prng);
 389	if (!order)
 390		goto err;
 391
 392	nodes = vzalloc(sizeof(*nodes) * count);
 393	if (!nodes)
 394		goto err_order;
 395
 396	ret = -EINVAL;
 397	drm_mm_init(&mm, 0, count * size);
 398
 399	if (!check_reserve_boundaries(&mm, count, size))
 400		goto out;
 401
 402	for (n = 0; n < count; n++) {
 403		nodes[n].start = order[n] * size;
 404		nodes[n].size = size;
 405
 406		err = drm_mm_reserve_node(&mm, &nodes[n]);
 407		if (err) {
 408			pr_err("reserve failed, step %d, start %llu\n",
 409			       n, nodes[n].start);
 410			ret = err;
 411			goto out;
 412		}
 413
 414		if (!drm_mm_node_allocated(&nodes[n])) {
 415			pr_err("reserved node not allocated! step %d, start %llu\n",
 416			       n, nodes[n].start);
 417			goto out;
 418		}
 419
 420		if (!expect_reserve_fail(&mm, &nodes[n]))
 421			goto out;
 422	}
 423
 424	/* After random insertion the nodes should be in order */
 425	if (!assert_continuous(&mm, size))
 426		goto out;
 427
 428	/* Repeated use should then fail */
 429	drm_random_reorder(order, count, &prng);
 430	for (n = 0; n < count; n++) {
 431		if (!expect_reserve_fail(&mm,
 432					 set_node(&tmp, order[n] * size, 1)))
 433			goto out;
 434
 435		/* Remove and reinsert should work */
 436		drm_mm_remove_node(&nodes[order[n]]);
 437		err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
 438		if (err) {
 439			pr_err("reserve failed, step %d, start %llu\n",
 440			       n, nodes[n].start);
 441			ret = err;
 442			goto out;
 443		}
 444	}
 445
 446	if (!assert_continuous(&mm, size))
 447		goto out;
 448
 449	/* Overlapping use should then fail */
 450	for (n = 0; n < count; n++) {
 451		if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
 452			goto out;
 453	}
 454	for (n = 0; n < count; n++) {
 455		if (!expect_reserve_fail(&mm,
 456					 set_node(&tmp,
 457						  size * n,
 458						  size * (count - n))))
 459			goto out;
 460	}
 461
 462	/* Remove several, reinsert, check full */
 463	for_each_prime_number(n, min(max_prime, count)) {
 464		for (m = 0; m < n; m++) {
 465			node = &nodes[order[(o + m) % count]];
 466			drm_mm_remove_node(node);
 467		}
 468
 469		for (m = 0; m < n; m++) {
 470			node = &nodes[order[(o + m) % count]];
 471			err = drm_mm_reserve_node(&mm, node);
 472			if (err) {
 473				pr_err("reserve failed, step %d/%d, start %llu\n",
 474				       m, n, node->start);
 475				ret = err;
 476				goto out;
 477			}
 478		}
 479
 480		o += n;
 481
 482		if (!assert_continuous(&mm, size))
 483			goto out;
 484	}
 485
 486	ret = 0;
 487out:
 488	drm_mm_for_each_node_safe(node, next, &mm)
 489		drm_mm_remove_node(node);
 490	drm_mm_takedown(&mm);
 491	vfree(nodes);
 492err_order:
 493	kfree(order);
 494err:
 495	return ret;
 496}
 497
 498static int igt_reserve(void *ignored)
 499{
 500	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
 501	int n, ret;
 502
 503	for_each_prime_number_from(n, 1, 54) {
 504		u64 size = BIT_ULL(n);
 505
 506		ret = __igt_reserve(count, size - 1);
 507		if (ret)
 508			return ret;
 509
 510		ret = __igt_reserve(count, size);
 511		if (ret)
 512			return ret;
 513
 514		ret = __igt_reserve(count, size + 1);
 515		if (ret)
 516			return ret;
 517
 518		cond_resched();
 519	}
 520
 521	return 0;
 522}
 523
 524static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
 525			  u64 size, u64 alignment, unsigned long color,
 526			  const struct insert_mode *mode)
 527{
 528	int err;
 529
 530	err = drm_mm_insert_node_generic(mm, node,
 531					 size, alignment, color,
 532					 mode->mode);
 533	if (err) {
 534		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
 535		       size, alignment, color, mode->name, err);
 536		return false;
 537	}
 538
 539	if (!assert_node(node, mm, size, alignment, color)) {
 540		drm_mm_remove_node(node);
 541		return false;
 542	}
 543
 544	return true;
 545}
 546
 547static bool expect_insert_fail(struct drm_mm *mm, u64 size)
 548{
 549	struct drm_mm_node tmp = {};
 550	int err;
 551
 552	err = drm_mm_insert_node(mm, &tmp, size);
 553	if (likely(err == -ENOSPC))
 554		return true;
 555
 556	if (!err) {
 557		pr_err("impossible insert succeeded, node %llu + %llu\n",
 558		       tmp.start, tmp.size);
 559		drm_mm_remove_node(&tmp);
 560	} else {
 561		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
 562		       err, -ENOSPC, size);
 563	}
 564	return false;
 565}
 566
/*
 * Fill the entire mm with @count nodes of @size bytes using each insertion
 * mode in turn, then verify that further inserts fail, that single and
 * batched remove+reinsert cycles refill the same space, and (with @replace
 * set) that drm_mm_replace_node() transfers an allocation intact.
 * Returns 0 on success, negative error code otherwise.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	/* No vzalloc needed: each node is memset before insertion below. */
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert a scratch node first and
			 * transfer its allocation to nodes[n] afterwards.
			 */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			/* Slide the window so each batch hits new nodes. */
			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
 700
 701static int igt_insert(void *ignored)
 702{
 703	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
 704	unsigned int n;
 705	int ret;
 706
 707	for_each_prime_number_from(n, 1, 54) {
 708		u64 size = BIT_ULL(n);
 709
 710		ret = __igt_insert(count, size - 1, false);
 711		if (ret)
 712			return ret;
 713
 714		ret = __igt_insert(count, size, false);
 715		if (ret)
 716			return ret;
 717
 718		ret = __igt_insert(count, size + 1, false);
 719		if (ret)
 720			return ret;
 721
 722		cond_resched();
 723	}
 724
 725	return 0;
 726}
 727
 728static int igt_replace(void *ignored)
 729{
 730	const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
 731	unsigned int n;
 732	int ret;
 733
 734	/* Reuse igt_insert to exercise replacement by inserting a dummy node,
 735	 * then replacing it with the intended node. We want to check that
 736	 * the tree is intact and all the information we need is carried
 737	 * across to the target node.
 738	 */
 739
 740	for_each_prime_number_from(n, 1, 54) {
 741		u64 size = BIT_ULL(n);
 742
 743		ret = __igt_insert(count, size - 1, true);
 744		if (ret)
 745			return ret;
 746
 747		ret = __igt_insert(count, size, true);
 748		if (ret)
 749			return ret;
 750
 751		ret = __igt_insert(count, size + 1, true);
 752		if (ret)
 753			return ret;
 754
 755		cond_resched();
 756	}
 757
 758	return 0;
 759}
 760
 761static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
 762				   u64 size, u64 alignment, unsigned long color,
 763				   u64 range_start, u64 range_end,
 764				   const struct insert_mode *mode)
 765{
 766	int err;
 767
 768	err = drm_mm_insert_node_in_range(mm, node,
 769					  size, alignment, color,
 770					  range_start, range_end,
 771					  mode->mode);
 772	if (err) {
 773		pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
 774		       size, alignment, color, mode->name,
 775		       range_start, range_end, err);
 776		return false;
 777	}
 778
 779	if (!assert_node(node, mm, size, alignment, color)) {
 780		drm_mm_remove_node(node);
 781		return false;
 782	}
 783
 784	return true;
 785}
 786
 787static bool expect_insert_in_range_fail(struct drm_mm *mm,
 788					u64 size,
 789					u64 range_start,
 790					u64 range_end)
 791{
 792	struct drm_mm_node tmp = {};
 793	int err;
 794
 795	err = drm_mm_insert_node_in_range(mm, &tmp,
 796					  size, 0, 0,
 797					  range_start, range_end,
 798					  0);
 799	if (likely(err == -ENOSPC))
 800		return true;
 801
 802	if (!err) {
 803		pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
 804		       tmp.start, tmp.size, range_start, range_end);
 805		drm_mm_remove_node(&tmp);
 806	} else {
 807		pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
 808		       err, -ENOSPC, size, range_start, range_end);
 809	}
 810
 811	return false;
 812}
 813
/*
 * Verify that [start, end) is completely and exclusively filled with
 * @size-byte nodes at size-aligned offsets, that one more insertion into
 * the range fails, and that no allocated node lies outside the range.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	/* The range must be full, so another insert has to fail. */
	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first size-aligned slot at or above start. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* A trailing hole is only permitted at the range boundary. */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	/* NOTE(review): these lookups read node->allocated directly;
	 * presumably equivalent to drm_mm_node_allocated() - confirm
	 * against the drm_mm implementation in this tree.
	 */
	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (node->allocated) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (node->allocated) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
 874
/*
 * Like __igt_insert(), but restricted to [start, end) inside an mm that
 * covers [0, count * size): fill the sub-range with size-aligned nodes,
 * check it ends up exactly full, and verify remove+reinsert cycles refill
 * the same addresses. Returns 0 on success, negative error code otherwise.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last size-aligned slots that fit inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next mode from an empty mm. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
 962
 963static int insert_outside_range(void)
 964{
 965	struct drm_mm mm;
 966	const unsigned int start = 1024;
 967	const unsigned int end = 2048;
 968	const unsigned int size = end - start;
 969
 970	drm_mm_init(&mm, start, size);
 971
 972	if (!expect_insert_in_range_fail(&mm, 1, 0, start))
 973		return -EINVAL;
 974
 975	if (!expect_insert_in_range_fail(&mm, size,
 976					 start - size/2, start + (size+1)/2))
 977		return -EINVAL;
 978
 979	if (!expect_insert_in_range_fail(&mm, size,
 980					 end - (size+1)/2, end + size/2))
 981		return -EINVAL;
 982
 983	if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
 984		return -EINVAL;
 985
 986	drm_mm_takedown(&mm);
 987	return 0;
 988}
 989
 990static int igt_insert_range(void *ignored)
 991{
 992	const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
 993	unsigned int n;
 994	int ret;
 995
 996	/* Check that requests outside the bounds of drm_mm are rejected. */
 997	ret = insert_outside_range();
 998	if (ret)
 999		return ret;
1000
1001	for_each_prime_number_from(n, 1, 50) {
1002		const u64 size = BIT_ULL(n);
1003		const u64 max = count * size;
1004
1005		ret = __igt_insert_range(count, size, 0, max);
1006		if (ret)
1007			return ret;
1008
1009		ret = __igt_insert_range(count, size, 1, max);
1010		if (ret)
1011			return ret;
1012
1013		ret = __igt_insert_range(count, size, 0, max - 1);
1014		if (ret)
1015			return ret;
1016
1017		ret = __igt_insert_range(count, size, 0, max/2);
1018		if (ret)
1019			return ret;
1020
1021		ret = __igt_insert_range(count, size, max/2, max);
1022		if (ret)
1023			return ret;
1024
1025		ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1026		if (ret)
1027			return ret;
1028
1029		cond_resched();
1030	}
1031
1032	return 0;
1033}
1034
1035static int igt_align(void *ignored)
1036{
1037	const struct insert_mode *mode;
1038	const unsigned int max_count = min(8192u, max_prime);
1039	struct drm_mm mm;
1040	struct drm_mm_node *nodes, *node, *next;
1041	unsigned int prime;
1042	int ret = -EINVAL;
1043
1044	/* For each of the possible insertion modes, we pick a few
1045	 * arbitrary alignments and check that the inserted node
1046	 * meets our requirements.
1047	 */
1048
1049	nodes = vzalloc(max_count * sizeof(*nodes));
1050	if (!nodes)
1051		goto err;
1052
1053	drm_mm_init(&mm, 1, U64_MAX - 2);
1054
1055	for (mode = insert_modes; mode->name; mode++) {
1056		unsigned int i = 0;
1057
1058		for_each_prime_number_from(prime, 1, max_count) {
1059			u64 size = next_prime_number(prime);
1060
1061			if (!expect_insert(&mm, &nodes[i],
1062					   size, prime, i,
1063					   mode)) {
1064				pr_err("%s insert failed with alignment=%d",
1065				       mode->name, prime);
1066				goto out;
1067			}
1068
1069			i++;
1070		}
1071
1072		drm_mm_for_each_node_safe(node, next, &mm)
1073			drm_mm_remove_node(node);
1074		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1075
1076		cond_resched();
1077	}
1078
1079	ret = 0;
1080out:
1081	drm_mm_for_each_node_safe(node, next, &mm)
1082		drm_mm_remove_node(node);
1083	drm_mm_takedown(&mm);
1084	vfree(nodes);
1085err:
1086	return ret;
1087}
1088
1089static int igt_align_pot(int max)
1090{
1091	struct drm_mm mm;
1092	struct drm_mm_node *node, *next;
1093	int bit;
1094	int ret = -EINVAL;
1095
1096	/* Check that we can align to the full u64 address space */
1097
1098	drm_mm_init(&mm, 1, U64_MAX - 2);
1099
1100	for (bit = max - 1; bit; bit--) {
1101		u64 align, size;
1102
1103		node = kzalloc(sizeof(*node), GFP_KERNEL);
1104		if (!node) {
1105			ret = -ENOMEM;
1106			goto out;
1107		}
1108
1109		align = BIT_ULL(bit);
1110		size = BIT_ULL(bit-1) + 1;
1111		if (!expect_insert(&mm, node,
1112				   size, align, bit,
1113				   &insert_modes[0])) {
1114			pr_err("insert failed with alignment=%llx [%d]",
1115			       align, bit);
1116			goto out;
1117		}
1118
1119		cond_resched();
1120	}
1121
1122	ret = 0;
1123out:
1124	drm_mm_for_each_node_safe(node, next, &mm) {
1125		drm_mm_remove_node(node);
1126		kfree(node);
1127	}
1128	drm_mm_takedown(&mm);
1129	return ret;
1130}
1131
/* Power-of-two alignment test over the 32-bit range. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1136
/* Power-of-two alignment test over the full 64-bit range. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1141
/* Log the parameters and hit window of an eviction scan for debugging. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1148
/*
 * Log up to @count holes in @mm, each bracketed by its neighbouring
 * allocated nodes (when present) for context. Best-effort: the
 * kasprintf()ed neighbour strings may be NULL on allocation failure,
 * which the kernel's %s handling renders harmlessly.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1180
/* A drm_mm_node plus a list link so eviction tests can track victims. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1185
/*
 * Drive an eviction scan to completion: feed @count nodes (in @order if
 * given) into @scan until it reports a suitable hole, then roll back,
 * keeping on @evict_list only the blocks the scanner actually selected.
 * Those blocks are removed from the mm; with @use_color any extra nodes
 * demanded by the color-adjustment pass are evicted too, otherwise none
 * may be reported. Returns false when no eviction solution exists.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop the blocks the scan did not select for eviction. */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Evict any additional nodes the color pass requires. */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1232
/*
 * evict_nothing - check that scanning then aborting leaves the mm intact
 * @mm: a fully populated range manager
 * @total_size: number of nodes in @nodes
 * @nodes: the nodes currently occupying @mm
 *
 * Adds every node to a scan, then removes them all again without
 * evicting anything, and verifies that no node was lost or unlinked.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Back out of the scan completely; nothing should be evicted. */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear a marker; it is set again below for every node
		 * still reachable by walking the mm.
		 */
		e->link.next = NULL;
	}

	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	/* Any node whose marker is still NULL fell out of the mm's list. */
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1279
/*
 * evict_everything - check a scan that must evict every node
 * @mm: a fully populated range manager
 * @total_size: number of nodes in @nodes (and the requested hole size)
 * @nodes: the nodes currently occupying @mm
 *
 * Requests a hole as large as the whole mm, so every node must be marked
 * for eviction. Verifies the resulting single hole, then reinserts all
 * nodes and checks the mm is continuous again.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every single node must be reported as a victim. */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	/* The mm should now be one empty hole covering [0, total_size). */
	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Put everything back where it was for the next subtest. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1328
/*
 * evict_something - evict nodes to make room for one insertion, then undo
 * @mm: a fully populated range manager
 * @range_start: lower bound of the restricted search range
 * @range_end: upper bound of the restricted search range
 * @nodes: array of eviction candidates occupying @mm
 * @order: permutation controlling the order candidates are scanned
 * @count: number of candidates in @nodes
 * @size: size of the node to insert into the eviction hole
 * @alignment: required alignment of the inserted node
 * @mode: insertion/eviction strategy under test
 *
 * Returns 0 on success or a negative error code. On success the mm is
 * restored to its original fully-populated state.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	/* The freshly created hole must now fit our request exactly. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The insertion must exactly fill the hole the scan carved out. */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Restore the evicted nodes so the mm is full again. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1399
/*
 * igt_evict - stress eviction over the full drm_mm address range
 *
 * Returns 0 on success or a negative error code.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the mm completely with unit-sized nodes. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with correspondingly shrinking sizes. */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1509
/*
 * igt_evict_range - stress eviction restricted to a subrange of the mm
 *
 * Returns 0 on success or a negative error code.
 */
static int igt_evict_range(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const unsigned int range_size = size / 2;
	const unsigned int range_start = size / 4;
	const unsigned int range_end = range_start + range_size;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Like igt_evict() but now we are limiting the search to a
	 * small portion of the full drm_mm.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	/* Fill the mm completely with unit-sized nodes. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
				       mode->name, n, range_start, range_end);
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n <= range_size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      range_size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
				       mode->name, range_size/2, n, range_start, range_end);
				goto out;
			}
		}

		/* Prime alignments with correspondingly shrinking sizes. */
		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
			unsigned int nsize = (range_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, range_start, range_end,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
				       mode->name, nsize, n, range_start, range_end);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1606
1607static unsigned int node_index(const struct drm_mm_node *node)
1608{
1609	return div64_u64(node->start, node->size);
1610}
1611
1612static int igt_topdown(void *ignored)
1613{
1614	const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1615	DRM_RND_STATE(prng, random_seed);
1616	const unsigned int count = 8192;
1617	unsigned int size;
1618	unsigned long *bitmap = NULL;
1619	struct drm_mm mm;
1620	struct drm_mm_node *nodes, *node, *next;
1621	unsigned int *order, n, m, o = 0;
1622	int ret;
1623
1624	/* When allocating top-down, we expect to be returned a node
1625	 * from a suitable hole at the top of the drm_mm. We check that
1626	 * the returned node does match the highest available slot.
1627	 */
1628
1629	ret = -ENOMEM;
1630	nodes = vzalloc(count * sizeof(*nodes));
1631	if (!nodes)
1632		goto err;
1633
1634	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1635			 GFP_KERNEL);
1636	if (!bitmap)
1637		goto err_nodes;
1638
1639	order = drm_random_order(count, &prng);
1640	if (!order)
1641		goto err_bitmap;
1642
1643	ret = -EINVAL;
1644	for (size = 1; size <= 64; size <<= 1) {
1645		drm_mm_init(&mm, 0, size*count);
1646		for (n = 0; n < count; n++) {
1647			if (!expect_insert(&mm, &nodes[n],
1648					   size, 0, n,
1649					   topdown)) {
1650				pr_err("insert failed, size %u step %d\n", size, n);
1651				goto out;
1652			}
1653
1654			if (drm_mm_hole_follows(&nodes[n])) {
1655				pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1656				       n, nodes[n].start, size);
1657				goto out;
1658			}
1659
1660			if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1661				goto out;
1662		}
1663
1664		if (!assert_continuous(&mm, size))
1665			goto out;
1666
1667		drm_random_reorder(order, count, &prng);
1668		for_each_prime_number_from(n, 1, min(count, max_prime)) {
1669			for (m = 0; m < n; m++) {
1670				node = &nodes[order[(o + m) % count]];
1671				drm_mm_remove_node(node);
1672				__set_bit(node_index(node), bitmap);
1673			}
1674
1675			for (m = 0; m < n; m++) {
1676				unsigned int last;
1677
1678				node = &nodes[order[(o + m) % count]];
1679				if (!expect_insert(&mm, node,
1680						   size, 0, 0,
1681						   topdown)) {
1682					pr_err("insert failed, step %d/%d\n", m, n);
1683					goto out;
1684				}
1685
1686				if (drm_mm_hole_follows(node)) {
1687					pr_err("hole after topdown insert %d/%d, start=%llx\n",
1688					       m, n, node->start);
1689					goto out;
1690				}
1691
1692				last = find_last_bit(bitmap, count);
1693				if (node_index(node) != last) {
1694					pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1695					       m, n, size, last, node_index(node));
1696					goto out;
1697				}
1698
1699				__clear_bit(last, bitmap);
1700			}
1701
1702			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1703
1704			o += n;
1705		}
1706
1707		drm_mm_for_each_node_safe(node, next, &mm)
1708			drm_mm_remove_node(node);
1709		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1710		cond_resched();
1711	}
1712
1713	ret = 0;
1714out:
1715	drm_mm_for_each_node_safe(node, next, &mm)
1716		drm_mm_remove_node(node);
1717	drm_mm_takedown(&mm);
1718	kfree(order);
1719err_bitmap:
1720	kfree(bitmap);
1721err_nodes:
1722	vfree(nodes);
1723err:
1724	return ret;
1725}
1726
1727static int igt_bottomup(void *ignored)
1728{
1729	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
1730	DRM_RND_STATE(prng, random_seed);
1731	const unsigned int count = 8192;
1732	unsigned int size;
1733	unsigned long *bitmap;
1734	struct drm_mm mm;
1735	struct drm_mm_node *nodes, *node, *next;
1736	unsigned int *order, n, m, o = 0;
1737	int ret;
1738
1739	/* Like igt_topdown, but instead of searching for the last hole,
1740	 * we search for the first.
1741	 */
1742
1743	ret = -ENOMEM;
1744	nodes = vzalloc(count * sizeof(*nodes));
1745	if (!nodes)
1746		goto err;
1747
1748	bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1749			 GFP_KERNEL);
1750	if (!bitmap)
1751		goto err_nodes;
1752
1753	order = drm_random_order(count, &prng);
1754	if (!order)
1755		goto err_bitmap;
1756
1757	ret = -EINVAL;
1758	for (size = 1; size <= 64; size <<= 1) {
1759		drm_mm_init(&mm, 0, size*count);
1760		for (n = 0; n < count; n++) {
1761			if (!expect_insert(&mm, &nodes[n],
1762					   size, 0, n,
1763					   bottomup)) {
1764				pr_err("bottomup insert failed, size %u step %d\n", size, n);
1765				goto out;
1766			}
1767
1768			if (!assert_one_hole(&mm, size*(n + 1), size*count))
1769				goto out;
1770		}
1771
1772		if (!assert_continuous(&mm, size))
1773			goto out;
1774
1775		drm_random_reorder(order, count, &prng);
1776		for_each_prime_number_from(n, 1, min(count, max_prime)) {
1777			for (m = 0; m < n; m++) {
1778				node = &nodes[order[(o + m) % count]];
1779				drm_mm_remove_node(node);
1780				__set_bit(node_index(node), bitmap);
1781			}
1782
1783			for (m = 0; m < n; m++) {
1784				unsigned int first;
1785
1786				node = &nodes[order[(o + m) % count]];
1787				if (!expect_insert(&mm, node,
1788						   size, 0, 0,
1789						   bottomup)) {
1790					pr_err("insert failed, step %d/%d\n", m, n);
1791					goto out;
1792				}
1793
1794				first = find_first_bit(bitmap, count);
1795				if (node_index(node) != first) {
1796					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
1797					       m, n, first, node_index(node));
1798					goto out;
1799				}
1800				__clear_bit(first, bitmap);
1801			}
1802
1803			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1804
1805			o += n;
1806		}
1807
1808		drm_mm_for_each_node_safe(node, next, &mm)
1809			drm_mm_remove_node(node);
1810		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1811		cond_resched();
1812	}
1813
1814	ret = 0;
1815out:
1816	drm_mm_for_each_node_safe(node, next, &mm)
1817		drm_mm_remove_node(node);
1818	drm_mm_takedown(&mm);
1819	kfree(order);
1820err_bitmap:
1821	kfree(bitmap);
1822err_nodes:
1823	vfree(nodes);
1824err:
1825	return ret;
1826}
1827
/*
 * separate_adjacent_colors - color_adjust callback forcing a gap between
 * any two neighbouring nodes of different color
 *
 * Shrinks the usable hole by one unit at each end that touches an
 * allocated node of a different color, so such neighbours can never abut.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	/* @node precedes the hole under consideration. */
	if (node->allocated && node->color != color)
		++*start;

	/* And its list successor is the node just after the hole. */
	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		--*end;
}
1840
1841static bool colors_abutt(const struct drm_mm_node *node)
1842{
1843	if (!drm_mm_hole_follows(node) &&
1844	    list_next_entry(node, node_list)->allocated) {
1845		pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1846		       node->color, node->start, node->size,
1847		       list_next_entry(node, node_list)->color,
1848		       list_next_entry(node, node_list)->start,
1849		       list_next_entry(node, node_list)->size);
1850		return true;
1851	}
1852
1853	return false;
1854}
1855
1856static int igt_color(void *ignored)
1857{
1858	const unsigned int count = min(4096u, max_iterations);
1859	const struct insert_mode *mode;
1860	struct drm_mm mm;
1861	struct drm_mm_node *node, *nn;
1862	unsigned int n;
1863	int ret = -EINVAL, err;
1864
1865	/* Color adjustment complicates everything. First we just check
1866	 * that when we insert a node we apply any color_adjustment callback.
1867	 * The callback we use should ensure that there is a gap between
1868	 * any two nodes, and so after each insertion we check that those
1869	 * holes are inserted and that they are preserved.
1870	 */
1871
1872	drm_mm_init(&mm, 0, U64_MAX);
1873
1874	for (n = 1; n <= count; n++) {
1875		node = kzalloc(sizeof(*node), GFP_KERNEL);
1876		if (!node) {
1877			ret = -ENOMEM;
1878			goto out;
1879		}
1880
1881		if (!expect_insert(&mm, node,
1882				   n, 0, n,
1883				   &insert_modes[0])) {
1884			pr_err("insert failed, step %d\n", n);
1885			kfree(node);
1886			goto out;
1887		}
1888	}
1889
1890	drm_mm_for_each_node_safe(node, nn, &mm) {
1891		if (node->color != node->size) {
1892			pr_err("invalid color stored: expected %lld, found %ld\n",
1893			       node->size, node->color);
1894
1895			goto out;
1896		}
1897
1898		drm_mm_remove_node(node);
1899		kfree(node);
1900	}
1901
1902	/* Now, let's start experimenting with applying a color callback */
1903	mm.color_adjust = separate_adjacent_colors;
1904	for (mode = insert_modes; mode->name; mode++) {
1905		u64 last;
1906
1907		node = kzalloc(sizeof(*node), GFP_KERNEL);
1908		if (!node) {
1909			ret = -ENOMEM;
1910			goto out;
1911		}
1912
1913		node->size = 1 + 2*count;
1914		node->color = node->size;
1915
1916		err = drm_mm_reserve_node(&mm, node);
1917		if (err) {
1918			pr_err("initial reserve failed!\n");
1919			ret = err;
1920			goto out;
1921		}
1922
1923		last = node->start + node->size;
1924
1925		for (n = 1; n <= count; n++) {
1926			int rem;
1927
1928			node = kzalloc(sizeof(*node), GFP_KERNEL);
1929			if (!node) {
1930				ret = -ENOMEM;
1931				goto out;
1932			}
1933
1934			node->start = last;
1935			node->size = n + count;
1936			node->color = node->size;
1937
1938			err = drm_mm_reserve_node(&mm, node);
1939			if (err != -ENOSPC) {
1940				pr_err("reserve %d did not report color overlap! err=%d\n",
1941				       n, err);
1942				goto out;
1943			}
1944
1945			node->start += n + 1;
1946			rem = misalignment(node, n + count);
1947			node->start += n + count - rem;
1948
1949			err = drm_mm_reserve_node(&mm, node);
1950			if (err) {
1951				pr_err("reserve %d failed, err=%d\n", n, err);
1952				ret = err;
1953				goto out;
1954			}
1955
1956			last = node->start + node->size;
1957		}
1958
1959		for (n = 1; n <= count; n++) {
1960			node = kzalloc(sizeof(*node), GFP_KERNEL);
1961			if (!node) {
1962				ret = -ENOMEM;
1963				goto out;
1964			}
1965
1966			if (!expect_insert(&mm, node,
1967					   n, n, n,
1968					   mode)) {
1969				pr_err("%s insert failed, step %d\n",
1970				       mode->name, n);
1971				kfree(node);
1972				goto out;
1973			}
1974		}
1975
1976		drm_mm_for_each_node_safe(node, nn, &mm) {
1977			u64 rem;
1978
1979			if (node->color != node->size) {
1980				pr_err("%s invalid color stored: expected %lld, found %ld\n",
1981				       mode->name, node->size, node->color);
1982
1983				goto out;
1984			}
1985
1986			if (colors_abutt(node))
1987				goto out;
1988
1989			div64_u64_rem(node->start, node->size, &rem);
1990			if (rem) {
1991				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
1992				       mode->name, node->start, node->size, rem);
1993				goto out;
1994			}
1995
1996			drm_mm_remove_node(node);
1997			kfree(node);
1998		}
1999
2000		cond_resched();
2001	}
2002
2003	ret = 0;
2004out:
2005	drm_mm_for_each_node_safe(node, nn, &mm) {
2006		drm_mm_remove_node(node);
2007		kfree(node);
2008	}
2009	drm_mm_takedown(&mm);
2010	return ret;
2011}
2012
/*
 * evict_color - like evict_something(), but with color adjustment active
 * @mm: a fully populated range manager using separate_adjacent_colors
 * @range_start: lower bound of the restricted search range
 * @range_end: upper bound of the restricted search range
 * @nodes: array of eviction candidates occupying @mm
 * @order: permutation controlling the order candidates are scanned
 * @count: number of candidates in @nodes
 * @size: size of the node to insert into the eviction hole
 * @alignment: required alignment of the inserted node
 * @color: color for the scan and the trial insertion
 * @mode: insertion/eviction strategy under test
 *
 * Returns 0 on success or a negative error code. On success the mm is
 * restored to its original state.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	/* The freshly created hole must now accept our colored request. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* Color adjustment must keep a gap to any neighbouring node. */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Restore the evicted nodes so the mm is full again. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2081
/*
 * igt_color_evict - stress eviction with color adjustment over the full mm
 *
 * Returns 0 on success or a negative error code.
 */
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(total_size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	/* Each unit node needs a one-unit gap, hence twice the space. */
	ret = -EINVAL;
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint. */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments. */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments with correspondingly shrinking sizes. */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2181
2182static int igt_color_evict_range(void *ignored)
2183{
2184	DRM_RND_STATE(prng, random_seed);
2185	const unsigned int total_size = 8192;
2186	const unsigned int range_size = total_size / 2;
2187	const unsigned int range_start = total_size / 4;
2188	const unsigned int range_end = range_start + range_size;
2189	const struct insert_mode *mode;
2190	unsigned long color = 0;
2191	struct drm_mm mm;
2192	struct evict_node *nodes;
2193	struct drm_mm_node *node, *next;
2194	unsigned int *order, n;
2195	int ret, err;
2196
2197	/* Like igt_color_evict(), but limited to small portion of the full
2198	 * drm_mm range.
2199	 */
2200
2201	ret = -ENOMEM;
2202	nodes = vzalloc(total_size * sizeof(*nodes));
2203	if (!nodes)
2204		goto err;
2205
2206	order = drm_random_order(total_size, &prng);
2207	if (!order)
2208		goto err_nodes;
2209
2210	ret = -EINVAL;
2211	drm_mm_init(&mm, 0, 2*total_size - 1);
2212	mm.color_adjust = separate_adjacent_colors;
2213	for (n = 0; n < total_size; n++) {
2214		if (!expect_insert(&mm, &nodes[n].node,
2215				   1, 0, color++,
2216				   &insert_modes[0])) {
2217			pr_err("insert failed, step %d\n", n);
2218			goto out;
2219		}
2220	}
2221
2222	for (mode = evict_modes; mode->name; mode++) {
2223		for (n = 1; n <= range_size; n <<= 1) {
2224			drm_random_reorder(order, range_size, &prng);
2225			err = evict_color(&mm, range_start, range_end,
2226					  nodes, order, total_size,
2227					  n, 1, color++,
2228					  mode);
2229			if (err) {
2230				pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2231				       mode->name, n, range_start, range_end);
2232				goto out;
2233			}
2234		}
2235
2236		for (n = 1; n < range_size; n <<= 1) {
2237			drm_random_reorder(order, total_size, &prng);
2238			err = evict_color(&mm, range_start, range_end,
2239					  nodes, order, total_size,
2240					  range_size/2, n, color++,
2241					  mode);
2242			if (err) {
2243				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2244				       mode->name, total_size/2, n, range_start, range_end);
2245				goto out;
2246			}
2247		}
2248
2249		for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2250			unsigned int nsize = (range_size - n + 1) / 2;
2251
2252			DRM_MM_BUG_ON(!nsize);
2253
2254			drm_random_reorder(order, total_size, &prng);
2255			err = evict_color(&mm, range_start, range_end,
2256					  nodes, order, total_size,
2257					  nsize, n, color++,
2258					  mode);
2259			if (err) {
2260				pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2261				       mode->name, nsize, n, range_start, range_end);
2262				goto out;
2263			}
2264		}
2265
2266		cond_resched();
2267	}
2268
2269	ret = 0;
2270out:
2271	if (ret)
2272		show_mm(&mm);
2273	drm_mm_for_each_node_safe(node, next, &mm)
2274		drm_mm_remove_node(node);
2275	drm_mm_takedown(&mm);
2276	kfree(order);
2277err_nodes:
2278	vfree(nodes);
2279err:
2280	return ret;
2281}
2282
2283#include "drm_selftest.c"
2284
2285static int __init test_drm_mm_init(void)
2286{
2287	int err;
2288
2289	while (!random_seed)
2290		random_seed = get_random_int();
2291
2292	pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2293		random_seed, max_iterations, max_prime);
2294	err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2295
2296	return err > 0 ? 0 : err;
2297}
2298
/* Nothing to tear down; each selftest releases its own state. */
static void __exit test_drm_mm_exit(void)
{
}
2302
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* Test knobs: settable at load time only, readable by root (perm 0400). */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");