Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Test cases for the drm_mm range manager
4 */
5
6#define pr_fmt(fmt) "drm_mm: " fmt
7
8#include <linux/module.h>
9#include <linux/prime_numbers.h>
10#include <linux/slab.h>
11#include <linux/random.h>
12#include <linux/vmalloc.h>
13
14#include <drm/drm_mm.h>
15
16#include "../lib/drm_random.h"
17
18#define TESTS "drm_mm_selftests.h"
19#include "drm_selftest.h"
20
/* Module parameters: PRNG seed and limits on test sizing/iterations. */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;

/* Symbolic indices into insert_modes[] below. */
enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/*
 * Tables of drm_mm insertion strategies exercised by the tests: each
 * entry pairs a human-readable name (used in error messages) with the
 * corresponding drm_mm_insert_mode. Both arrays end with a zeroed
 * sentinel so callers can iterate until mode->name == NULL.
 */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = {
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
46
static int igt_sanitycheck(void *ignored)
{
	/* Trivial smoke test: prove the selftest harness runs at all. */
	pr_info("%s - ok!\n", __func__);

	return 0;
}
52
53static bool assert_no_holes(const struct drm_mm *mm)
54{
55 struct drm_mm_node *hole;
56 u64 hole_start, hole_end;
57 unsigned long count;
58
59 count = 0;
60 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
61 count++;
62 if (count) {
63 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
64 return false;
65 }
66
67 drm_mm_for_each_node(hole, mm) {
68 if (drm_mm_hole_follows(hole)) {
69 pr_err("Hole follows node, expected none!\n");
70 return false;
71 }
72 }
73
74 return true;
75}
76
77static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
78{
79 struct drm_mm_node *hole;
80 u64 hole_start, hole_end;
81 unsigned long count;
82 bool ok = true;
83
84 if (end <= start)
85 return true;
86
87 count = 0;
88 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
89 if (start != hole_start || end != hole_end) {
90 if (ok)
91 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
92 hole_start, hole_end,
93 start, end);
94 ok = false;
95 }
96 count++;
97 }
98 if (count != 1) {
99 pr_err("Expected to find one hole, found %lu instead\n", count);
100 ok = false;
101 }
102
103 return ok;
104}
105
/*
 * Check that the mm is completely filled by equal @size nodes laid out
 * back-to-back from address 0, and that an interval lookup over each
 * node's extent finds exactly that node.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* The range search over [addr, addr + size) must return
		 * this node and nothing else.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
156
157static u64 misalignment(struct drm_mm_node *node, u64 alignment)
158{
159 u64 rem;
160
161 if (!alignment)
162 return 0;
163
164 div64_u64_rem(node->start, alignment, &rem);
165 return rem;
166}
167
168static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
169 u64 size, u64 alignment, unsigned long color)
170{
171 bool ok = true;
172
173 if (!drm_mm_node_allocated(node) || node->mm != mm) {
174 pr_err("node not allocated\n");
175 ok = false;
176 }
177
178 if (node->size != size) {
179 pr_err("node has wrong size, found %llu, expected %llu\n",
180 node->size, size);
181 ok = false;
182 }
183
184 if (misalignment(node, alignment)) {
185 pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
186 node->start, misalignment(node, alignment), alignment);
187 ok = false;
188 }
189
190 if (node->color != color) {
191 pr_err("node has wrong color, found %lu, expected %lu\n",
192 node->color, color);
193 ok = false;
194 }
195
196 return ok;
197}
198
/*
 * Dump the current state of @mm through the drm debug printer, tagged
 * with the calling function's name.
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
202
/*
 * Basic init/takedown sanity: a zeroed struct must not claim to be
 * initialized, a fresh mm is one big hole, and reserving/removing a
 * node covering the entire range toggles between "no holes" and the
 * single original hole.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison the struct first to check drm_mm_init() resets it fully. */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
263
264static int igt_debug(void *ignored)
265{
266 struct drm_mm mm;
267 struct drm_mm_node nodes[2];
268 int ret;
269
270 /* Create a small drm_mm with a couple of nodes and a few holes, and
271 * check that the debug iterator doesn't explode over a trivial drm_mm.
272 */
273
274 drm_mm_init(&mm, 0, 4096);
275
276 memset(nodes, 0, sizeof(nodes));
277 nodes[0].start = 512;
278 nodes[0].size = 1024;
279 ret = drm_mm_reserve_node(&mm, &nodes[0]);
280 if (ret) {
281 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
282 nodes[0].start, nodes[0].size);
283 return ret;
284 }
285
286 nodes[1].size = 1024;
287 nodes[1].start = 4096 - 512 - nodes[1].size;
288 ret = drm_mm_reserve_node(&mm, &nodes[1]);
289 if (ret) {
290 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
291 nodes[1].start, nodes[1].size);
292 return ret;
293 }
294
295 show_mm(&mm);
296 return 0;
297}
298
299static struct drm_mm_node *set_node(struct drm_mm_node *node,
300 u64 start, u64 size)
301{
302 node->start = start;
303 node->size = size;
304 return node;
305}
306
307static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
308{
309 int err;
310
311 err = drm_mm_reserve_node(mm, node);
312 if (likely(err == -ENOSPC))
313 return true;
314
315 if (!err) {
316 pr_err("impossible reserve succeeded, node %llu + %llu\n",
317 node->start, node->size);
318 drm_mm_remove_node(node);
319 } else {
320 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
321 err, -ENOSPC, node->start, node->size);
322 }
323 return false;
324}
325
/*
 * Try to reserve a set of deliberately invalid placements — zero-sized,
 * outside the mm, wrapping/overflowing, or straddling the edges of the
 * [0, count * size) managed range — and check every one is rejected.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
370
371static int __igt_reserve(unsigned int count, u64 size)
372{
373 DRM_RND_STATE(prng, random_seed);
374 struct drm_mm mm;
375 struct drm_mm_node tmp, *nodes, *node, *next;
376 unsigned int *order, n, m, o = 0;
377 int ret, err;
378
379 /* For exercising drm_mm_reserve_node(), we want to check that
380 * reservations outside of the drm_mm range are rejected, and to
381 * overlapping and otherwise already occupied ranges. Afterwards,
382 * the tree and nodes should be intact.
383 */
384
385 DRM_MM_BUG_ON(!count);
386 DRM_MM_BUG_ON(!size);
387
388 ret = -ENOMEM;
389 order = drm_random_order(count, &prng);
390 if (!order)
391 goto err;
392
393 nodes = vzalloc(array_size(count, sizeof(*nodes)));
394 if (!nodes)
395 goto err_order;
396
397 ret = -EINVAL;
398 drm_mm_init(&mm, 0, count * size);
399
400 if (!check_reserve_boundaries(&mm, count, size))
401 goto out;
402
403 for (n = 0; n < count; n++) {
404 nodes[n].start = order[n] * size;
405 nodes[n].size = size;
406
407 err = drm_mm_reserve_node(&mm, &nodes[n]);
408 if (err) {
409 pr_err("reserve failed, step %d, start %llu\n",
410 n, nodes[n].start);
411 ret = err;
412 goto out;
413 }
414
415 if (!drm_mm_node_allocated(&nodes[n])) {
416 pr_err("reserved node not allocated! step %d, start %llu\n",
417 n, nodes[n].start);
418 goto out;
419 }
420
421 if (!expect_reserve_fail(&mm, &nodes[n]))
422 goto out;
423 }
424
425 /* After random insertion the nodes should be in order */
426 if (!assert_continuous(&mm, size))
427 goto out;
428
429 /* Repeated use should then fail */
430 drm_random_reorder(order, count, &prng);
431 for (n = 0; n < count; n++) {
432 if (!expect_reserve_fail(&mm,
433 set_node(&tmp, order[n] * size, 1)))
434 goto out;
435
436 /* Remove and reinsert should work */
437 drm_mm_remove_node(&nodes[order[n]]);
438 err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
439 if (err) {
440 pr_err("reserve failed, step %d, start %llu\n",
441 n, nodes[n].start);
442 ret = err;
443 goto out;
444 }
445 }
446
447 if (!assert_continuous(&mm, size))
448 goto out;
449
450 /* Overlapping use should then fail */
451 for (n = 0; n < count; n++) {
452 if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
453 goto out;
454 }
455 for (n = 0; n < count; n++) {
456 if (!expect_reserve_fail(&mm,
457 set_node(&tmp,
458 size * n,
459 size * (count - n))))
460 goto out;
461 }
462
463 /* Remove several, reinsert, check full */
464 for_each_prime_number(n, min(max_prime, count)) {
465 for (m = 0; m < n; m++) {
466 node = &nodes[order[(o + m) % count]];
467 drm_mm_remove_node(node);
468 }
469
470 for (m = 0; m < n; m++) {
471 node = &nodes[order[(o + m) % count]];
472 err = drm_mm_reserve_node(&mm, node);
473 if (err) {
474 pr_err("reserve failed, step %d/%d, start %llu\n",
475 m, n, node->start);
476 ret = err;
477 goto out;
478 }
479 }
480
481 o += n;
482
483 if (!assert_continuous(&mm, size))
484 goto out;
485 }
486
487 ret = 0;
488out:
489 drm_mm_for_each_node_safe(node, next, &mm)
490 drm_mm_remove_node(node);
491 drm_mm_takedown(&mm);
492 vfree(nodes);
493err_order:
494 kfree(order);
495err:
496 return ret;
497}
498
499static int igt_reserve(void *ignored)
500{
501 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
502 int n, ret;
503
504 for_each_prime_number_from(n, 1, 54) {
505 u64 size = BIT_ULL(n);
506
507 ret = __igt_reserve(count, size - 1);
508 if (ret)
509 return ret;
510
511 ret = __igt_reserve(count, size);
512 if (ret)
513 return ret;
514
515 ret = __igt_reserve(count, size + 1);
516 if (ret)
517 return ret;
518
519 cond_resched();
520 }
521
522 return 0;
523}
524
525static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
526 u64 size, u64 alignment, unsigned long color,
527 const struct insert_mode *mode)
528{
529 int err;
530
531 err = drm_mm_insert_node_generic(mm, node,
532 size, alignment, color,
533 mode->mode);
534 if (err) {
535 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
536 size, alignment, color, mode->name, err);
537 return false;
538 }
539
540 if (!assert_node(node, mm, size, alignment, color)) {
541 drm_mm_remove_node(node);
542 return false;
543 }
544
545 return true;
546}
547
548static bool expect_insert_fail(struct drm_mm *mm, u64 size)
549{
550 struct drm_mm_node tmp = {};
551 int err;
552
553 err = drm_mm_insert_node(mm, &tmp, size);
554 if (likely(err == -ENOSPC))
555 return true;
556
557 if (!err) {
558 pr_err("impossible insert succeeded, node %llu + %llu\n",
559 tmp.start, tmp.size);
560 drm_mm_remove_node(&tmp);
561 } else {
562 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
563 err, -ENOSPC, size);
564 }
565 return false;
566}
567
/*
 * Fill a [0, count * size) mm with @count nodes of @size for every
 * insertion mode (optionally via insert-into-dummy + replace when
 * @replace is true), then check ordering, in-place refill after
 * removal, and repeated prime-sized remove/reinsert cycles.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* For replace, insert a stack-local dummy first. */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Reset the mm between modes. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
701
702static int igt_insert(void *ignored)
703{
704 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
705 unsigned int n;
706 int ret;
707
708 for_each_prime_number_from(n, 1, 54) {
709 u64 size = BIT_ULL(n);
710
711 ret = __igt_insert(count, size - 1, false);
712 if (ret)
713 return ret;
714
715 ret = __igt_insert(count, size, false);
716 if (ret)
717 return ret;
718
719 ret = __igt_insert(count, size + 1, false);
720 if (ret)
721 return ret;
722
723 cond_resched();
724 }
725
726 return 0;
727}
728
729static int igt_replace(void *ignored)
730{
731 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
732 unsigned int n;
733 int ret;
734
735 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
736 * then replacing it with the intended node. We want to check that
737 * the tree is intact and all the information we need is carried
738 * across to the target node.
739 */
740
741 for_each_prime_number_from(n, 1, 54) {
742 u64 size = BIT_ULL(n);
743
744 ret = __igt_insert(count, size - 1, true);
745 if (ret)
746 return ret;
747
748 ret = __igt_insert(count, size, true);
749 if (ret)
750 return ret;
751
752 ret = __igt_insert(count, size + 1, true);
753 if (ret)
754 return ret;
755
756 cond_resched();
757 }
758
759 return 0;
760}
761
762static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
763 u64 size, u64 alignment, unsigned long color,
764 u64 range_start, u64 range_end,
765 const struct insert_mode *mode)
766{
767 int err;
768
769 err = drm_mm_insert_node_in_range(mm, node,
770 size, alignment, color,
771 range_start, range_end,
772 mode->mode);
773 if (err) {
774 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
775 size, alignment, color, mode->name,
776 range_start, range_end, err);
777 return false;
778 }
779
780 if (!assert_node(node, mm, size, alignment, color)) {
781 drm_mm_remove_node(node);
782 return false;
783 }
784
785 return true;
786}
787
788static bool expect_insert_in_range_fail(struct drm_mm *mm,
789 u64 size,
790 u64 range_start,
791 u64 range_end)
792{
793 struct drm_mm_node tmp = {};
794 int err;
795
796 err = drm_mm_insert_node_in_range(mm, &tmp,
797 size, 0, 0,
798 range_start, range_end,
799 0);
800 if (likely(err == -ENOSPC))
801 return true;
802
803 if (!err) {
804 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
805 tmp.start, tmp.size, range_start, range_end);
806 drm_mm_remove_node(&tmp);
807 } else {
808 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
809 err, -ENOSPC, size, range_start, range_end);
810 }
811
812 return false;
813}
814
/*
 * Verify [start, end) is completely packed with @size nodes: no further
 * node fits, every node sits on a size-aligned slot inside the range,
 * and nothing is allocated outside the range.
 */
static bool assert_contiguous_in_range(struct drm_mm *mm,
				       u64 size,
				       u64 start,
				       u64 end)
{
	struct drm_mm_node *node;
	unsigned int n;

	if (!expect_insert_in_range_fail(mm, size, start, end))
		return false;

	/* Index of the first whole slot at or above @start. */
	n = div64_u64(start + size - 1, size);
	drm_mm_for_each_node(node, mm) {
		if (node->start < start || node->start + node->size > end) {
			pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
			       n, node->start, node->start + node->size, start, end);
			return false;
		}

		if (node->start != n * size) {
			pr_err("node %d out of order, expected start %llx, found %llx\n",
			       n, n * size, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node %d has wrong size, expected size %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		/* Only the trailing hole up to @end is permitted. */
		if (drm_mm_hole_follows(node) &&
		    drm_mm_hole_node_end(node) < end) {
			pr_err("node %d is followed by a hole!\n", n);
			return false;
		}

		n++;
	}

	if (start > 0) {
		node = __drm_mm_interval_first(mm, 0, start - 1);
		if (node->allocated) {
			pr_err("node before start: node=%llx+%llu, start=%llx\n",
			       node->start, node->size, start);
			return false;
		}
	}

	if (end < U64_MAX) {
		node = __drm_mm_interval_first(mm, end, U64_MAX);
		if (node->allocated) {
			pr_err("node after end: node=%llx+%llu, end=%llx\n",
			       node->start, node->size, end);
			return false;
		}
	}

	return true;
}
875
/*
 * Like __igt_insert(), but restrict insertions to the sub-range
 * [start, end) of a [0, count * size) mm, for every insertion mode,
 * checking the range gets fully packed and that remove/reinsert
 * refills the identical slot.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last whole size-multiple slots inside [start, end). */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Reset the mm between modes. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
963
964static int insert_outside_range(void)
965{
966 struct drm_mm mm;
967 const unsigned int start = 1024;
968 const unsigned int end = 2048;
969 const unsigned int size = end - start;
970
971 drm_mm_init(&mm, start, size);
972
973 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
974 return -EINVAL;
975
976 if (!expect_insert_in_range_fail(&mm, size,
977 start - size/2, start + (size+1)/2))
978 return -EINVAL;
979
980 if (!expect_insert_in_range_fail(&mm, size,
981 end - (size+1)/2, end + size/2))
982 return -EINVAL;
983
984 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
985 return -EINVAL;
986
987 drm_mm_takedown(&mm);
988 return 0;
989}
990
991static int igt_insert_range(void *ignored)
992{
993 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
994 unsigned int n;
995 int ret;
996
997 /* Check that requests outside the bounds of drm_mm are rejected. */
998 ret = insert_outside_range();
999 if (ret)
1000 return ret;
1001
1002 for_each_prime_number_from(n, 1, 50) {
1003 const u64 size = BIT_ULL(n);
1004 const u64 max = count * size;
1005
1006 ret = __igt_insert_range(count, size, 0, max);
1007 if (ret)
1008 return ret;
1009
1010 ret = __igt_insert_range(count, size, 1, max);
1011 if (ret)
1012 return ret;
1013
1014 ret = __igt_insert_range(count, size, 0, max - 1);
1015 if (ret)
1016 return ret;
1017
1018 ret = __igt_insert_range(count, size, 0, max/2);
1019 if (ret)
1020 return ret;
1021
1022 ret = __igt_insert_range(count, size, max/2, max);
1023 if (ret)
1024 return ret;
1025
1026 ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1027 if (ret)
1028 return ret;
1029
1030 cond_resched();
1031 }
1032
1033 return 0;
1034}
1035
/*
 * For each insertion mode, insert nodes with successive prime
 * alignments (and prime sizes) into a near-maximal mm and verify each
 * node lands correctly aligned.
 */
static int igt_align(void *ignored)
{
	const struct insert_mode *mode;
	const unsigned int max_count = min(8192u, max_prime);
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int prime;
	int ret = -EINVAL;

	/* For each of the possible insertion modes, we pick a few
	 * arbitrary alignments and check that the inserted node
	 * meets our requirements.
	 */

	nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	drm_mm_init(&mm, 1, U64_MAX - 2);

	for (mode = insert_modes; mode->name; mode++) {
		unsigned int i = 0;

		for_each_prime_number_from(prime, 1, max_count) {
			/* Size is the next prime above the alignment. */
			u64 size = next_prime_number(prime);

			if (!expect_insert(&mm, &nodes[i],
					   size, prime, i,
					   mode)) {
				pr_err("%s insert failed with alignment=%d",
				       mode->name, prime);
				goto out;
			}

			i++;
		}

		/* Reset the mm between modes. */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
1089
1090static int igt_align_pot(int max)
1091{
1092 struct drm_mm mm;
1093 struct drm_mm_node *node, *next;
1094 int bit;
1095 int ret = -EINVAL;
1096
1097 /* Check that we can align to the full u64 address space */
1098
1099 drm_mm_init(&mm, 1, U64_MAX - 2);
1100
1101 for (bit = max - 1; bit; bit--) {
1102 u64 align, size;
1103
1104 node = kzalloc(sizeof(*node), GFP_KERNEL);
1105 if (!node) {
1106 ret = -ENOMEM;
1107 goto out;
1108 }
1109
1110 align = BIT_ULL(bit);
1111 size = BIT_ULL(bit-1) + 1;
1112 if (!expect_insert(&mm, node,
1113 size, align, bit,
1114 &insert_modes[0])) {
1115 pr_err("insert failed with alignment=%llx [%d]",
1116 align, bit);
1117 goto out;
1118 }
1119
1120 cond_resched();
1121 }
1122
1123 ret = 0;
1124out:
1125 drm_mm_for_each_node_safe(node, next, &mm) {
1126 drm_mm_remove_node(node);
1127 kfree(node);
1128 }
1129 drm_mm_takedown(&mm);
1130 return ret;
1131}
1132
/* Exercise power-of-two alignments over a 32-bit-sized span. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1137
/* Exercise power-of-two alignments over the full 64-bit span. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1142
/* Dump the parameters and hit range of an eviction scan for debugging. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1149
/*
 * Print up to @count holes, each together with its allocated
 * neighbours (if any) on either side, for post-mortem debugging.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		/* NOTE(review): node1/node2 may be NULL here (free
		 * neighbour, or kasprintf failure); this relies on the
		 * kernel's printk printing "(null)" for a NULL %s.
		 */
		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1181
/* A drm_mm_node plus a list link used to track eviction candidates. */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1186
/*
 * Feed up to @count nodes (optionally shuffled via @order) into an
 * initialised eviction @scan until it reports a fit, trim the candidate
 * list down to the blocks the scan actually selected, and remove those
 * from the mm. Evicted nodes are collected on @evict_list. Returns
 * false if the scan never found a usable hole.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop candidates the scan did not mark for eviction. */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Color adjustment may force extra overlapping evictions. */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1233
/*
 * Run a scan that is immediately aborted (every block added is removed
 * again) and verify no node was harmed: all remain allocated, all are
 * still linked into the mm, and the layout stays continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Abort the scan: back every block out again. */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Clear the link so the mm walk below can mark membership. */
		e->link.next = NULL;
	}

	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1280
/*
 * Scan for a hole the size of the entire mm, forcing every node out,
 * then verify the eviction produced one maximal hole and that all
 * nodes can be reinserted at their old positions.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	/* Every candidate must have been selected for eviction. */
	err = 0;
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1329
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	/*
	 * Run an eviction scan over [range_start, range_end) for a hole of
	 * size/alignment, evict the chosen victims, and check that a new
	 * node exactly fills the freed hole.  Finally reinsert the victims
	 * so the drm_mm returns to its original packed state.
	 */
	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* The new node must lie wholly inside the requested range... */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* ...and exactly fill the hole created by the eviction */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Restore the evicted nodes to their previous locations */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1400
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Here we populate a full drm_mm and then try and insert a new node
	 * by evicting other nodes in a random order. The drm_mm_scan should
	 * pick the first matching hole it finds from the random list. We
	 * repeat that for different allocation strategies, alignments and
	 * sizes to try and stress the hole finder.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* Fill the entire range with unit-sized nodes */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with a complementary size */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1510
1511static int igt_evict_range(void *ignored)
1512{
1513 DRM_RND_STATE(prng, random_seed);
1514 const unsigned int size = 8192;
1515 const unsigned int range_size = size / 2;
1516 const unsigned int range_start = size / 4;
1517 const unsigned int range_end = range_start + range_size;
1518 const struct insert_mode *mode;
1519 struct drm_mm mm;
1520 struct evict_node *nodes;
1521 struct drm_mm_node *node, *next;
1522 unsigned int *order, n;
1523 int ret, err;
1524
1525 /* Like igt_evict() but now we are limiting the search to a
1526 * small portion of the full drm_mm.
1527 */
1528
1529 ret = -ENOMEM;
1530 nodes = vzalloc(array_size(size, sizeof(*nodes)));
1531 if (!nodes)
1532 goto err;
1533
1534 order = drm_random_order(size, &prng);
1535 if (!order)
1536 goto err_nodes;
1537
1538 ret = -EINVAL;
1539 drm_mm_init(&mm, 0, size);
1540 for (n = 0; n < size; n++) {
1541 err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1542 if (err) {
1543 pr_err("insert failed, step %d\n", n);
1544 ret = err;
1545 goto out;
1546 }
1547 }
1548
1549 for (mode = evict_modes; mode->name; mode++) {
1550 for (n = 1; n <= range_size; n <<= 1) {
1551 drm_random_reorder(order, size, &prng);
1552 err = evict_something(&mm, range_start, range_end,
1553 nodes, order, size,
1554 n, 1,
1555 mode);
1556 if (err) {
1557 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1558 mode->name, n, range_start, range_end);
1559 goto out;
1560 }
1561 }
1562
1563 for (n = 1; n <= range_size; n <<= 1) {
1564 drm_random_reorder(order, size, &prng);
1565 err = evict_something(&mm, range_start, range_end,
1566 nodes, order, size,
1567 range_size/2, n,
1568 mode);
1569 if (err) {
1570 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1571 mode->name, range_size/2, n, range_start, range_end);
1572 goto out;
1573 }
1574 }
1575
1576 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1577 unsigned int nsize = (range_size - n + 1) / 2;
1578
1579 DRM_MM_BUG_ON(!nsize);
1580
1581 drm_random_reorder(order, size, &prng);
1582 err = evict_something(&mm, range_start, range_end,
1583 nodes, order, size,
1584 nsize, n,
1585 mode);
1586 if (err) {
1587 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1588 mode->name, nsize, n, range_start, range_end);
1589 goto out;
1590 }
1591 }
1592
1593 cond_resched();
1594 }
1595
1596 ret = 0;
1597out:
1598 drm_mm_for_each_node_safe(node, next, &mm)
1599 drm_mm_remove_node(node);
1600 drm_mm_takedown(&mm);
1601 kfree(order);
1602err_nodes:
1603 vfree(nodes);
1604err:
1605 return ret;
1606}
1607
1608static unsigned int node_index(const struct drm_mm_node *node)
1609{
1610 return div64_u64(node->start, node->size);
1611}
1612
1613static int igt_topdown(void *ignored)
1614{
1615 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1616 DRM_RND_STATE(prng, random_seed);
1617 const unsigned int count = 8192;
1618 unsigned int size;
1619 unsigned long *bitmap;
1620 struct drm_mm mm;
1621 struct drm_mm_node *nodes, *node, *next;
1622 unsigned int *order, n, m, o = 0;
1623 int ret;
1624
1625 /* When allocating top-down, we expect to be returned a node
1626 * from a suitable hole at the top of the drm_mm. We check that
1627 * the returned node does match the highest available slot.
1628 */
1629
1630 ret = -ENOMEM;
1631 nodes = vzalloc(array_size(count, sizeof(*nodes)));
1632 if (!nodes)
1633 goto err;
1634
1635 bitmap = bitmap_zalloc(count, GFP_KERNEL);
1636 if (!bitmap)
1637 goto err_nodes;
1638
1639 order = drm_random_order(count, &prng);
1640 if (!order)
1641 goto err_bitmap;
1642
1643 ret = -EINVAL;
1644 for (size = 1; size <= 64; size <<= 1) {
1645 drm_mm_init(&mm, 0, size*count);
1646 for (n = 0; n < count; n++) {
1647 if (!expect_insert(&mm, &nodes[n],
1648 size, 0, n,
1649 topdown)) {
1650 pr_err("insert failed, size %u step %d\n", size, n);
1651 goto out;
1652 }
1653
1654 if (drm_mm_hole_follows(&nodes[n])) {
1655 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1656 n, nodes[n].start, size);
1657 goto out;
1658 }
1659
1660 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1661 goto out;
1662 }
1663
1664 if (!assert_continuous(&mm, size))
1665 goto out;
1666
1667 drm_random_reorder(order, count, &prng);
1668 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1669 for (m = 0; m < n; m++) {
1670 node = &nodes[order[(o + m) % count]];
1671 drm_mm_remove_node(node);
1672 __set_bit(node_index(node), bitmap);
1673 }
1674
1675 for (m = 0; m < n; m++) {
1676 unsigned int last;
1677
1678 node = &nodes[order[(o + m) % count]];
1679 if (!expect_insert(&mm, node,
1680 size, 0, 0,
1681 topdown)) {
1682 pr_err("insert failed, step %d/%d\n", m, n);
1683 goto out;
1684 }
1685
1686 if (drm_mm_hole_follows(node)) {
1687 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1688 m, n, node->start);
1689 goto out;
1690 }
1691
1692 last = find_last_bit(bitmap, count);
1693 if (node_index(node) != last) {
1694 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1695 m, n, size, last, node_index(node));
1696 goto out;
1697 }
1698
1699 __clear_bit(last, bitmap);
1700 }
1701
1702 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1703
1704 o += n;
1705 }
1706
1707 drm_mm_for_each_node_safe(node, next, &mm)
1708 drm_mm_remove_node(node);
1709 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1710 cond_resched();
1711 }
1712
1713 ret = 0;
1714out:
1715 drm_mm_for_each_node_safe(node, next, &mm)
1716 drm_mm_remove_node(node);
1717 drm_mm_takedown(&mm);
1718 kfree(order);
1719err_bitmap:
1720 bitmap_free(bitmap);
1721err_nodes:
1722 vfree(nodes);
1723err:
1724 return ret;
1725}
1726
static int igt_bottomup(void *ignored)
{
	const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
	DRM_RND_STATE(prng, random_seed);
	const unsigned int count = 8192;
	unsigned int size;
	unsigned long *bitmap;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Like igt_topdown, but instead of searching for the last hole,
	 * we search for the first.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(count, sizeof(*nodes)));
	if (!nodes)
		goto err;

	bitmap = bitmap_zalloc(count, GFP_KERNEL);
	if (!bitmap)
		goto err_nodes;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_bitmap;

	ret = -EINVAL;
	for (size = 1; size <= 64; size <<= 1) {
		drm_mm_init(&mm, 0, size*count);
		for (n = 0; n < count; n++) {
			if (!expect_insert(&mm, &nodes[n],
					   size, 0, n,
					   bottomup)) {
				pr_err("bottomup insert failed, size %u step %d\n", size, n);
				goto out;
			}

			/* Each insert must pack against the low end, leaving
			 * a single hole above the used region.
			 */
			if (!assert_one_hole(&mm, size*(n + 1), size*count))
				goto out;
		}

		if (!assert_continuous(&mm, size))
			goto out;

		/* Remove a random batch, then check that each reinsertion
		 * lands in the lowest free slot (tracked via the bitmap).
		 */
		drm_random_reorder(order, count, &prng);
		for_each_prime_number_from(n, 1, min(count, max_prime)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
				__set_bit(node_index(node), bitmap);
			}

			for (m = 0; m < n; m++) {
				unsigned int first;

				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node,
						   size, 0, 0,
						   bottomup)) {
					pr_err("insert failed, step %d/%d\n", m, n);
					goto out;
				}

				first = find_first_bit(bitmap, count);
				if (node_index(node) != first) {
					pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
					       m, n, first, node_index(node));
					goto out;
				}
				__clear_bit(first, bitmap);
			}

			DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);

			o += n;
		}

		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));
		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_bitmap:
	bitmap_free(bitmap);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1826
1827static int __igt_once(unsigned int mode)
1828{
1829 struct drm_mm mm;
1830 struct drm_mm_node rsvd_lo, rsvd_hi, node;
1831 int err;
1832
1833 drm_mm_init(&mm, 0, 7);
1834
1835 memset(&rsvd_lo, 0, sizeof(rsvd_lo));
1836 rsvd_lo.start = 1;
1837 rsvd_lo.size = 1;
1838 err = drm_mm_reserve_node(&mm, &rsvd_lo);
1839 if (err) {
1840 pr_err("Could not reserve low node\n");
1841 goto err;
1842 }
1843
1844 memset(&rsvd_hi, 0, sizeof(rsvd_hi));
1845 rsvd_hi.start = 5;
1846 rsvd_hi.size = 1;
1847 err = drm_mm_reserve_node(&mm, &rsvd_hi);
1848 if (err) {
1849 pr_err("Could not reserve low node\n");
1850 goto err_lo;
1851 }
1852
1853 if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
1854 pr_err("Expected a hole after lo and high nodes!\n");
1855 err = -EINVAL;
1856 goto err_hi;
1857 }
1858
1859 memset(&node, 0, sizeof(node));
1860 err = drm_mm_insert_node_generic(&mm, &node,
1861 2, 0, 0,
1862 mode | DRM_MM_INSERT_ONCE);
1863 if (!err) {
1864 pr_err("Unexpectedly inserted the node into the wrong hole: node.start=%llx\n",
1865 node.start);
1866 err = -EINVAL;
1867 goto err_node;
1868 }
1869
1870 err = drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode);
1871 if (err) {
1872 pr_err("Could not insert the node into the available hole!\n");
1873 err = -EINVAL;
1874 goto err_hi;
1875 }
1876
1877err_node:
1878 drm_mm_remove_node(&node);
1879err_hi:
1880 drm_mm_remove_node(&rsvd_hi);
1881err_lo:
1882 drm_mm_remove_node(&rsvd_lo);
1883err:
1884 drm_mm_takedown(&mm);
1885 return err;
1886}
1887
/* Check that DRM_MM_INSERT_LOW honours DRM_MM_INSERT_ONCE */
static int igt_lowest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_LOW);
}
1892
/* Check that DRM_MM_INSERT_HIGH honours DRM_MM_INSERT_ONCE */
static int igt_highest(void *ignored)
{
	return __igt_once(DRM_MM_INSERT_HIGH);
}
1897
/*
 * color_adjust callback: shrink the hole by one unit on each side whose
 * neighbouring allocated node carries a different color, so differently
 * colored nodes are never placed directly adjacent.
 */
static void separate_adjacent_colors(const struct drm_mm_node *node,
				     unsigned long color,
				     u64 *start,
				     u64 *end)
{
	if (node->allocated && node->color != color)
		++*start;

	/* peek at the node on the far side of the hole */
	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		--*end;
}
1910
1911static bool colors_abutt(const struct drm_mm_node *node)
1912{
1913 if (!drm_mm_hole_follows(node) &&
1914 list_next_entry(node, node_list)->allocated) {
1915 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1916 node->color, node->start, node->size,
1917 list_next_entry(node, node_list)->color,
1918 list_next_entry(node, node_list)->start,
1919 list_next_entry(node, node_list)->size);
1920 return true;
1921 }
1922
1923 return false;
1924}
1925
static int igt_color(void *ignored)
{
	const unsigned int count = min(4096u, max_iterations);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *node, *nn;
	unsigned int n;
	int ret = -EINVAL, err;

	/* Color adjustment complicates everything. First we just check
	 * that when we insert a node we apply any color_adjustment callback.
	 * The callback we use should ensure that there is a gap between
	 * any two nodes, and so after each insertion we check that those
	 * holes are inserted and that they are preserved.
	 */

	drm_mm_init(&mm, 0, U64_MAX);

	/* Phase 1: no color_adjust installed; just verify that the color
	 * passed at insertion (== size here) is stored on the node.
	 */
	for (n = 1; n <= count; n++) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		if (!expect_insert(&mm, node,
				   n, 0, n,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			kfree(node);
			goto out;
		}
	}

	drm_mm_for_each_node_safe(node, nn, &mm) {
		if (node->color != node->size) {
			pr_err("invalid color stored: expected %lld, found %ld\n",
			       node->size, node->color);

			goto out;
		}

		drm_mm_remove_node(node);
		kfree(node);
	}

	/* Now, let's start experimenting with applying a color callback */
	mm.color_adjust = separate_adjacent_colors;
	for (mode = insert_modes; mode->name; mode++) {
		u64 last;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node) {
			ret = -ENOMEM;
			goto out;
		}

		node->size = 1 + 2*count;
		node->color = node->size;

		err = drm_mm_reserve_node(&mm, node);
		if (err) {
			pr_err("initial reserve failed!\n");
			ret = err;
			goto out;
		}

		last = node->start + node->size;

		/* Reserving flush against the previous (differently colored)
		 * node must be rejected by the callback; shifted past the
		 * enforced gap and realigned, it must succeed.
		 */
		for (n = 1; n <= count; n++) {
			int rem;

			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			node->start = last;
			node->size = n + count;
			node->color = node->size;

			err = drm_mm_reserve_node(&mm, node);
			if (err != -ENOSPC) {
				pr_err("reserve %d did not report color overlap! err=%d\n",
				       n, err);
				goto out;
			}

			/* skip past the color gap and round up to alignment */
			node->start += n + 1;
			rem = misalignment(node, n + count);
			node->start += n + count - rem;

			err = drm_mm_reserve_node(&mm, node);
			if (err) {
				pr_err("reserve %d failed, err=%d\n", n, err);
				ret = err;
				goto out;
			}

			last = node->start + node->size;
		}

		/* Plain insertions must equally honour the color gap */
		for (n = 1; n <= count; n++) {
			node = kzalloc(sizeof(*node), GFP_KERNEL);
			if (!node) {
				ret = -ENOMEM;
				goto out;
			}

			if (!expect_insert(&mm, node,
					   n, n, n,
					   mode)) {
				pr_err("%s insert failed, step %d\n",
				       mode->name, n);
				kfree(node);
				goto out;
			}
		}

		/* Verify colors, gaps and alignment, then drain the mm */
		drm_mm_for_each_node_safe(node, nn, &mm) {
			u64 rem;

			if (node->color != node->size) {
				pr_err("%s invalid color stored: expected %lld, found %ld\n",
				       mode->name, node->size, node->color);

				goto out;
			}

			if (colors_abutt(node))
				goto out;

			div64_u64_rem(node->start, node->size, &rem);
			if (rem) {
				pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
				       mode->name, node->start, node->size, rem);
				goto out;
			}

			drm_mm_remove_node(node);
			kfree(node);
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, nn, &mm) {
		drm_mm_remove_node(node);
		kfree(node);
	}
	drm_mm_takedown(&mm);
	return ret;
}
2082
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	/*
	 * Colored variant of evict_something(): run an eviction scan for a
	 * size/alignment/color request over [range_start, range_end), insert
	 * into the freed hole, validate the placement, then reinsert the
	 * evicted victims.
	 */
	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The color_adjust callback must keep differently colored
	 * neighbours apart, so the new node may not abut one.
	 */
	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back for the next iteration */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2151
static int igt_color_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int total_size = min(8192u, max_iterations);
	const struct insert_mode *mode;
	unsigned long color = 0;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	/* Check that the drm_mm_scan also honours color adjustment when
	 * choosing its victims to create a hole. Our color_adjust does not
	 * allow two nodes to be placed together without an intervening hole
	 * enlarging the set of victims that must be evicted.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
	if (!nodes)
		goto err;

	order = drm_random_order(total_size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* Double-sized range so the mandatory color gaps fit as well */
	drm_mm_init(&mm, 0, 2*total_size - 1);
	mm.color_adjust = separate_adjacent_colors;
	for (n = 0; n < total_size; n++) {
		if (!expect_insert(&mm, &nodes[n].node,
				   1, 0, color++,
				   &insert_modes[0])) {
			pr_err("insert failed, step %d\n", n);
			goto out;
		}
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint */
		for (n = 1; n <= total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  n, 1, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u) failed\n",
				       mode->name, n);
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments */
		for (n = 1; n < total_size; n <<= 1) {
			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  total_size/2, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, total_size/2, n);
				goto out;
			}
		}

		/* Prime alignments with a complementary size */
		for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
			unsigned int nsize = (total_size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, total_size, &prng);
			err = evict_color(&mm, 0, U64_MAX,
					  nodes, order, total_size,
					  nsize, n, color++,
					  mode);
			if (err) {
				pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	if (ret)
		show_mm(&mm);
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
2251
2252static int igt_color_evict_range(void *ignored)
2253{
2254 DRM_RND_STATE(prng, random_seed);
2255 const unsigned int total_size = 8192;
2256 const unsigned int range_size = total_size / 2;
2257 const unsigned int range_start = total_size / 4;
2258 const unsigned int range_end = range_start + range_size;
2259 const struct insert_mode *mode;
2260 unsigned long color = 0;
2261 struct drm_mm mm;
2262 struct evict_node *nodes;
2263 struct drm_mm_node *node, *next;
2264 unsigned int *order, n;
2265 int ret, err;
2266
2267 /* Like igt_color_evict(), but limited to small portion of the full
2268 * drm_mm range.
2269 */
2270
2271 ret = -ENOMEM;
2272 nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
2273 if (!nodes)
2274 goto err;
2275
2276 order = drm_random_order(total_size, &prng);
2277 if (!order)
2278 goto err_nodes;
2279
2280 ret = -EINVAL;
2281 drm_mm_init(&mm, 0, 2*total_size - 1);
2282 mm.color_adjust = separate_adjacent_colors;
2283 for (n = 0; n < total_size; n++) {
2284 if (!expect_insert(&mm, &nodes[n].node,
2285 1, 0, color++,
2286 &insert_modes[0])) {
2287 pr_err("insert failed, step %d\n", n);
2288 goto out;
2289 }
2290 }
2291
2292 for (mode = evict_modes; mode->name; mode++) {
2293 for (n = 1; n <= range_size; n <<= 1) {
2294 drm_random_reorder(order, range_size, &prng);
2295 err = evict_color(&mm, range_start, range_end,
2296 nodes, order, total_size,
2297 n, 1, color++,
2298 mode);
2299 if (err) {
2300 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2301 mode->name, n, range_start, range_end);
2302 goto out;
2303 }
2304 }
2305
2306 for (n = 1; n < range_size; n <<= 1) {
2307 drm_random_reorder(order, total_size, &prng);
2308 err = evict_color(&mm, range_start, range_end,
2309 nodes, order, total_size,
2310 range_size/2, n, color++,
2311 mode);
2312 if (err) {
2313 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2314 mode->name, total_size/2, n, range_start, range_end);
2315 goto out;
2316 }
2317 }
2318
2319 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2320 unsigned int nsize = (range_size - n + 1) / 2;
2321
2322 DRM_MM_BUG_ON(!nsize);
2323
2324 drm_random_reorder(order, total_size, &prng);
2325 err = evict_color(&mm, range_start, range_end,
2326 nodes, order, total_size,
2327 nsize, n, color++,
2328 mode);
2329 if (err) {
2330 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2331 mode->name, nsize, n, range_start, range_end);
2332 goto out;
2333 }
2334 }
2335
2336 cond_resched();
2337 }
2338
2339 ret = 0;
2340out:
2341 if (ret)
2342 show_mm(&mm);
2343 drm_mm_for_each_node_safe(node, next, &mm)
2344 drm_mm_remove_node(node);
2345 drm_mm_takedown(&mm);
2346 kfree(order);
2347err_nodes:
2348 vfree(nodes);
2349err:
2350 return ret;
2351}
2352
2353#include "drm_selftest.c"
2354
2355static int __init test_drm_mm_init(void)
2356{
2357 int err;
2358
2359 while (!random_seed)
2360 random_seed = get_random_int();
2361
2362 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2363 random_seed, max_iterations, max_prime);
2364 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2365
2366 return err > 0 ? 0 : err;
2367}
2368
/* Nothing to tear down: each selftest cleans up after itself */
static void __exit test_drm_mm_exit(void)
{
}
2372
module_init(test_drm_mm_init);
module_exit(test_drm_mm_exit);

/* 0400: parameters are set at load time and read-only thereafter */
module_param(random_seed, uint, 0400);
module_param(max_iterations, uint, 0400);
module_param(max_prime, uint, 0400);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
1/*
2 * Test cases for the drm_mm range manager
3 */
4
5#define pr_fmt(fmt) "drm_mm: " fmt
6
7#include <linux/module.h>
8#include <linux/prime_numbers.h>
9#include <linux/slab.h>
10#include <linux/random.h>
11#include <linux/vmalloc.h>
12
13#include <drm/drm_mm.h>
14
15#include "../lib/drm_random.h"
16
17#define TESTS "drm_mm_selftests.h"
18#include "drm_selftest.h"
19
/* Tunables, overridable via module parameters */
static unsigned int random_seed;
static unsigned int max_iterations = 8192;
static unsigned int max_prime = 128;

enum {
	BEST,
	BOTTOMUP,
	TOPDOWN,
	EVICT,
};

/* Insertion strategies exercised by the tests; tables are {}-terminated */
static const struct insert_mode {
	const char *name;
	enum drm_mm_insert_mode mode;
} insert_modes[] = {
	[BEST] = { "best", DRM_MM_INSERT_BEST },
	[BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
	[TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
	[EVICT] = { "evict", DRM_MM_INSERT_EVICT },
	{}
}, evict_modes[] = {
	{ "bottom-up", DRM_MM_INSERT_LOW },
	{ "top-down", DRM_MM_INSERT_HIGH },
	{}
};
45
/* Trivial selftest proving the harness itself runs and reports */
static int igt_sanitycheck(void *ignored)
{
	pr_info("%s - ok!\n", __func__);
	return 0;
}
51
52static bool assert_no_holes(const struct drm_mm *mm)
53{
54 struct drm_mm_node *hole;
55 u64 hole_start, hole_end;
56 unsigned long count;
57
58 count = 0;
59 drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
60 count++;
61 if (count) {
62 pr_err("Expected to find no holes (after reserve), found %lu instead\n", count);
63 return false;
64 }
65
66 drm_mm_for_each_node(hole, mm) {
67 if (drm_mm_hole_follows(hole)) {
68 pr_err("Hole follows node, expected none!\n");
69 return false;
70 }
71 }
72
73 return true;
74}
75
76static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end)
77{
78 struct drm_mm_node *hole;
79 u64 hole_start, hole_end;
80 unsigned long count;
81 bool ok = true;
82
83 if (end <= start)
84 return true;
85
86 count = 0;
87 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
88 if (start != hole_start || end != hole_end) {
89 if (ok)
90 pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
91 hole_start, hole_end,
92 start, end);
93 ok = false;
94 }
95 count++;
96 }
97 if (count != 1) {
98 pr_err("Expected to find one hole, found %lu instead\n", count);
99 ok = false;
100 }
101
102 return ok;
103}
104
/* Verify that the drm_mm consists solely of equally sized (@size),
 * hole-free nodes packed back-to-back from address 0, and that a range
 * lookup finds each node.
 */
static bool assert_continuous(const struct drm_mm *mm, u64 size)
{
	struct drm_mm_node *node, *check, *found;
	unsigned long n;
	u64 addr;

	if (!assert_no_holes(mm))
		return false;

	n = 0;
	addr = 0;
	drm_mm_for_each_node(node, mm) {
		if (node->start != addr) {
			pr_err("node[%ld] list out of order, expected %llx found %llx\n",
			       n, addr, node->start);
			return false;
		}

		if (node->size != size) {
			pr_err("node[%ld].size incorrect, expected %llx, found %llx\n",
			       n, size, node->size);
			return false;
		}

		if (drm_mm_hole_follows(node)) {
			pr_err("node[%ld] is followed by a hole!\n", n);
			return false;
		}

		/* A walk over [addr, addr + size) must yield exactly this
		 * node and no other.
		 */
		found = NULL;
		drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
			if (node != check) {
				pr_err("lookup return wrong node, expected start %llx, found %llx\n",
				       node->start, check->start);
				return false;
			}
			found = check;
		}
		if (!found) {
			pr_err("lookup failed for node %llx + %llx\n",
			       addr, size);
			return false;
		}

		addr += size;
		n++;
	}

	return true;
}
155
156static u64 misalignment(struct drm_mm_node *node, u64 alignment)
157{
158 u64 rem;
159
160 if (!alignment)
161 return 0;
162
163 div64_u64_rem(node->start, alignment, &rem);
164 return rem;
165}
166
/* Check that @node is allocated from @mm with the expected size,
 * alignment and color; all mismatches are reported before returning.
 */
static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm,
			u64 size, u64 alignment, unsigned long color)
{
	bool ok = true;

	if (!drm_mm_node_allocated(node) || node->mm != mm) {
		pr_err("node not allocated\n");
		ok = false;
	}

	if (node->size != size) {
		pr_err("node has wrong size, found %llu, expected %llu\n",
		       node->size, size);
		ok = false;
	}

	if (misalignment(node, alignment)) {
		pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n",
		       node->start, misalignment(node, alignment), alignment);
		ok = false;
	}

	if (node->color != color) {
		pr_err("node has wrong color, found %lu, expected %lu\n",
		       node->color, color);
		ok = false;
	}

	return ok;
}
197
/* Dump the current state of @mm via the drm debug printer, tagging the
 * output with the calling function's name.
 */
#define show_mm(mm) do { \
	struct drm_printer __p = drm_debug_printer(__func__); \
	drm_mm_print((mm), &__p); } while (0)
201
/*
 * Basic lifecycle checks: a zeroed struct drm_mm must not claim to be
 * initialised; a freshly initialised mm must be clean and consist of a
 * single hole covering the whole range; reserving the entire range must
 * leave no holes; removing the reservation must restore the single hole.
 */
static int igt_init(void *ignored)
{
	const unsigned int size = 4096;
	struct drm_mm mm;
	struct drm_mm_node tmp;
	int ret = -EINVAL;

	/* Start with some simple checks on initialising the struct drm_mm */
	memset(&mm, 0, sizeof(mm));
	if (drm_mm_initialized(&mm)) {
		pr_err("zeroed mm claims to be initialized\n");
		return ret;
	}

	/* Poison first, so drm_mm_init() is proven to set every field */
	memset(&mm, 0xff, sizeof(mm));
	drm_mm_init(&mm, 0, size);
	if (!drm_mm_initialized(&mm)) {
		pr_err("mm claims not to be initialized\n");
		goto out;
	}

	if (!drm_mm_clean(&mm)) {
		pr_err("mm not empty on creation\n");
		goto out;
	}

	/* After creation, it should all be one massive hole */
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&tmp, 0, sizeof(tmp));
	tmp.start = 0;
	tmp.size = size;
	ret = drm_mm_reserve_node(&mm, &tmp);
	if (ret) {
		pr_err("failed to reserve whole drm_mm\n");
		goto out;
	}

	/* After filling the range entirely, there should be no holes */
	if (!assert_no_holes(&mm)) {
		ret = -EINVAL;
		goto out;
	}

	/* And then after emptying it again, the massive hole should be back */
	drm_mm_remove_node(&tmp);
	if (!assert_one_hole(&mm, 0, size)) {
		ret = -EINVAL;
		goto out;
	}

out:
	/* On failure, dump the mm state before tearing it down */
	if (ret)
		show_mm(&mm);
	drm_mm_takedown(&mm);
	return ret;
}
262
263static int igt_debug(void *ignored)
264{
265 struct drm_mm mm;
266 struct drm_mm_node nodes[2];
267 int ret;
268
269 /* Create a small drm_mm with a couple of nodes and a few holes, and
270 * check that the debug iterator doesn't explode over a trivial drm_mm.
271 */
272
273 drm_mm_init(&mm, 0, 4096);
274
275 memset(nodes, 0, sizeof(nodes));
276 nodes[0].start = 512;
277 nodes[0].size = 1024;
278 ret = drm_mm_reserve_node(&mm, &nodes[0]);
279 if (ret) {
280 pr_err("failed to reserve node[0] {start=%lld, size=%lld)\n",
281 nodes[0].start, nodes[0].size);
282 return ret;
283 }
284
285 nodes[1].size = 1024;
286 nodes[1].start = 4096 - 512 - nodes[1].size;
287 ret = drm_mm_reserve_node(&mm, &nodes[1]);
288 if (ret) {
289 pr_err("failed to reserve node[1] {start=%lld, size=%lld)\n",
290 nodes[1].start, nodes[1].size);
291 return ret;
292 }
293
294 show_mm(&mm);
295 return 0;
296}
297
298static struct drm_mm_node *set_node(struct drm_mm_node *node,
299 u64 start, u64 size)
300{
301 node->start = start;
302 node->size = size;
303 return node;
304}
305
306static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node)
307{
308 int err;
309
310 err = drm_mm_reserve_node(mm, node);
311 if (likely(err == -ENOSPC))
312 return true;
313
314 if (!err) {
315 pr_err("impossible reserve succeeded, node %llu + %llu\n",
316 node->start, node->size);
317 drm_mm_remove_node(node);
318 } else {
319 pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
320 err, -ENOSPC, node->start, node->size);
321 }
322 return false;
323}
324
/*
 * Feed drm_mm_reserve_node() a table of degenerate start/size pairs that
 * lie outside (or wrap around) the managed [0, count * size) range and
 * check that every one is rejected with -ENOSPC via expect_reserve_fail().
 * Note: the B(-size, ...) entries rely on u64 wrap-around, producing
 * addresses just below U64_MAX, i.e. far past the end of the range.
 */
static bool check_reserve_boundaries(struct drm_mm *mm,
				     unsigned int count,
				     u64 size)
{
	const struct boundary {
		u64 start, size;
		const char *name;
	} boundaries[] = {
#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
		B(0, 0),
		B(-size, 0),
		B(size, 0),
		B(size * count, 0),
		B(-size, size),
		B(-size, -size),
		B(-size, 2*size),
		B(0, -size),
		B(size, -size),
		B(count*size, size),
		B(count*size, -size),
		B(count*size, count*size),
		B(count*size, -count*size),
		B(count*size, -(count+1)*size),
		B((count+1)*size, size),
		B((count+1)*size, -size),
		B((count+1)*size, -2*size),
#undef B
	};
	struct drm_mm_node tmp = {};
	int n;

	for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
		if (!expect_reserve_fail(mm,
					 set_node(&tmp,
						  boundaries[n].start,
						  boundaries[n].size))) {
			pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n",
			       n, boundaries[n].name, count, size);
			return false;
		}
	}

	return true;
}
369
370static int __igt_reserve(unsigned int count, u64 size)
371{
372 DRM_RND_STATE(prng, random_seed);
373 struct drm_mm mm;
374 struct drm_mm_node tmp, *nodes, *node, *next;
375 unsigned int *order, n, m, o = 0;
376 int ret, err;
377
378 /* For exercising drm_mm_reserve_node(), we want to check that
379 * reservations outside of the drm_mm range are rejected, and to
380 * overlapping and otherwise already occupied ranges. Afterwards,
381 * the tree and nodes should be intact.
382 */
383
384 DRM_MM_BUG_ON(!count);
385 DRM_MM_BUG_ON(!size);
386
387 ret = -ENOMEM;
388 order = drm_random_order(count, &prng);
389 if (!order)
390 goto err;
391
392 nodes = vzalloc(sizeof(*nodes) * count);
393 if (!nodes)
394 goto err_order;
395
396 ret = -EINVAL;
397 drm_mm_init(&mm, 0, count * size);
398
399 if (!check_reserve_boundaries(&mm, count, size))
400 goto out;
401
402 for (n = 0; n < count; n++) {
403 nodes[n].start = order[n] * size;
404 nodes[n].size = size;
405
406 err = drm_mm_reserve_node(&mm, &nodes[n]);
407 if (err) {
408 pr_err("reserve failed, step %d, start %llu\n",
409 n, nodes[n].start);
410 ret = err;
411 goto out;
412 }
413
414 if (!drm_mm_node_allocated(&nodes[n])) {
415 pr_err("reserved node not allocated! step %d, start %llu\n",
416 n, nodes[n].start);
417 goto out;
418 }
419
420 if (!expect_reserve_fail(&mm, &nodes[n]))
421 goto out;
422 }
423
424 /* After random insertion the nodes should be in order */
425 if (!assert_continuous(&mm, size))
426 goto out;
427
428 /* Repeated use should then fail */
429 drm_random_reorder(order, count, &prng);
430 for (n = 0; n < count; n++) {
431 if (!expect_reserve_fail(&mm,
432 set_node(&tmp, order[n] * size, 1)))
433 goto out;
434
435 /* Remove and reinsert should work */
436 drm_mm_remove_node(&nodes[order[n]]);
437 err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
438 if (err) {
439 pr_err("reserve failed, step %d, start %llu\n",
440 n, nodes[n].start);
441 ret = err;
442 goto out;
443 }
444 }
445
446 if (!assert_continuous(&mm, size))
447 goto out;
448
449 /* Overlapping use should then fail */
450 for (n = 0; n < count; n++) {
451 if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count)))
452 goto out;
453 }
454 for (n = 0; n < count; n++) {
455 if (!expect_reserve_fail(&mm,
456 set_node(&tmp,
457 size * n,
458 size * (count - n))))
459 goto out;
460 }
461
462 /* Remove several, reinsert, check full */
463 for_each_prime_number(n, min(max_prime, count)) {
464 for (m = 0; m < n; m++) {
465 node = &nodes[order[(o + m) % count]];
466 drm_mm_remove_node(node);
467 }
468
469 for (m = 0; m < n; m++) {
470 node = &nodes[order[(o + m) % count]];
471 err = drm_mm_reserve_node(&mm, node);
472 if (err) {
473 pr_err("reserve failed, step %d/%d, start %llu\n",
474 m, n, node->start);
475 ret = err;
476 goto out;
477 }
478 }
479
480 o += n;
481
482 if (!assert_continuous(&mm, size))
483 goto out;
484 }
485
486 ret = 0;
487out:
488 drm_mm_for_each_node_safe(node, next, &mm)
489 drm_mm_remove_node(node);
490 drm_mm_takedown(&mm);
491 vfree(nodes);
492err_order:
493 kfree(order);
494err:
495 return ret;
496}
497
498static int igt_reserve(void *ignored)
499{
500 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
501 int n, ret;
502
503 for_each_prime_number_from(n, 1, 54) {
504 u64 size = BIT_ULL(n);
505
506 ret = __igt_reserve(count, size - 1);
507 if (ret)
508 return ret;
509
510 ret = __igt_reserve(count, size);
511 if (ret)
512 return ret;
513
514 ret = __igt_reserve(count, size + 1);
515 if (ret)
516 return ret;
517
518 cond_resched();
519 }
520
521 return 0;
522}
523
524static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node,
525 u64 size, u64 alignment, unsigned long color,
526 const struct insert_mode *mode)
527{
528 int err;
529
530 err = drm_mm_insert_node_generic(mm, node,
531 size, alignment, color,
532 mode->mode);
533 if (err) {
534 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
535 size, alignment, color, mode->name, err);
536 return false;
537 }
538
539 if (!assert_node(node, mm, size, alignment, color)) {
540 drm_mm_remove_node(node);
541 return false;
542 }
543
544 return true;
545}
546
547static bool expect_insert_fail(struct drm_mm *mm, u64 size)
548{
549 struct drm_mm_node tmp = {};
550 int err;
551
552 err = drm_mm_insert_node(mm, &tmp, size);
553 if (likely(err == -ENOSPC))
554 return true;
555
556 if (!err) {
557 pr_err("impossible insert succeeded, node %llu + %llu\n",
558 tmp.start, tmp.size);
559 drm_mm_remove_node(&tmp);
560 } else {
561 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n",
562 err, -ENOSPC, size);
563 }
564 return false;
565}
566
/*
 * Fill the whole drm_mm with @count nodes of @size, then exercise
 * reinsertion: one node at a time, and in random prime-sized batches.
 * When @replace is set, each node is first inserted via a stack
 * temporary and then transferred with drm_mm_replace_node(), checking
 * the target inherits the placement. Returns 0 on success, negative
 * errno on failure.
 */
static int __igt_insert(unsigned int count, u64 size, bool replace)
{
	DRM_RND_STATE(prng, random_seed);
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int *order, n, m, o = 0;
	int ret;

	/* Fill a range with lots of nodes, check it doesn't fail too early */

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);

	ret = -ENOMEM;
	nodes = vmalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(count, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = 0; n < count; n++) {
			struct drm_mm_node tmp;

			/* In replace mode, insert into a scratch node first */
			node = replace ? &tmp : &nodes[n];
			memset(node, 0, sizeof(*node));
			if (!expect_insert(&mm, node, size, 0, n, mode)) {
				pr_err("%s insert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (replace) {
				drm_mm_replace_node(&tmp, &nodes[n]);
				if (drm_mm_node_allocated(&tmp)) {
					pr_err("replaced old-node still allocated! step %d\n",
					       n);
					goto out;
				}

				if (!assert_node(&nodes[n], &mm, size, 0, n)) {
					pr_err("replaced node did not inherit parameters, size %llu step %d\n",
					       size, n);
					goto out;
				}

				if (tmp.start != nodes[n].start) {
					pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
					       tmp.start, size,
					       nodes[n].start, nodes[n].size);
					goto out;
				}
			}
		}

		/* After random insertion the nodes should be in order */
		if (!assert_continuous(&mm, size))
			goto out;

		/* Repeated use should then fail */
		if (!expect_insert_fail(&mm, size))
			goto out;

		/* Remove one and reinsert, as the only hole it should refill itself */
		for (n = 0; n < count; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) {
				pr_err("%s reinsert failed, size %llu step %d\n",
				       mode->name, size, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}

			if (!assert_continuous(&mm, size))
				goto out;
		}

		/* Remove several, reinsert, check full */
		for_each_prime_number(n, min(max_prime, count)) {
			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				drm_mm_remove_node(node);
			}

			for (m = 0; m < n; m++) {
				node = &nodes[order[(o + m) % count]];
				if (!expect_insert(&mm, node, size, 0, n, mode)) {
					pr_err("%s multiple reinsert failed, size %llu step %d\n",
					       mode->name, size, n);
					goto out;
				}
			}

			/* Advance the window so each prime hits fresh nodes */
			o += n;

			if (!assert_continuous(&mm, size))
				goto out;

			if (!expect_insert_fail(&mm, size))
				goto out;
		}

		/* Start the next insert mode from an empty drm_mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
700
701static int igt_insert(void *ignored)
702{
703 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
704 unsigned int n;
705 int ret;
706
707 for_each_prime_number_from(n, 1, 54) {
708 u64 size = BIT_ULL(n);
709
710 ret = __igt_insert(count, size - 1, false);
711 if (ret)
712 return ret;
713
714 ret = __igt_insert(count, size, false);
715 if (ret)
716 return ret;
717
718 ret = __igt_insert(count, size + 1, false);
719 if (ret)
720 return ret;
721
722 cond_resched();
723 }
724
725 return 0;
726}
727
728static int igt_replace(void *ignored)
729{
730 const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
731 unsigned int n;
732 int ret;
733
734 /* Reuse igt_insert to exercise replacement by inserting a dummy node,
735 * then replacing it with the intended node. We want to check that
736 * the tree is intact and all the information we need is carried
737 * across to the target node.
738 */
739
740 for_each_prime_number_from(n, 1, 54) {
741 u64 size = BIT_ULL(n);
742
743 ret = __igt_insert(count, size - 1, true);
744 if (ret)
745 return ret;
746
747 ret = __igt_insert(count, size, true);
748 if (ret)
749 return ret;
750
751 ret = __igt_insert(count, size + 1, true);
752 if (ret)
753 return ret;
754
755 cond_resched();
756 }
757
758 return 0;
759}
760
761static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node,
762 u64 size, u64 alignment, unsigned long color,
763 u64 range_start, u64 range_end,
764 const struct insert_mode *mode)
765{
766 int err;
767
768 err = drm_mm_insert_node_in_range(mm, node,
769 size, alignment, color,
770 range_start, range_end,
771 mode->mode);
772 if (err) {
773 pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
774 size, alignment, color, mode->name,
775 range_start, range_end, err);
776 return false;
777 }
778
779 if (!assert_node(node, mm, size, alignment, color)) {
780 drm_mm_remove_node(node);
781 return false;
782 }
783
784 return true;
785}
786
787static bool expect_insert_in_range_fail(struct drm_mm *mm,
788 u64 size,
789 u64 range_start,
790 u64 range_end)
791{
792 struct drm_mm_node tmp = {};
793 int err;
794
795 err = drm_mm_insert_node_in_range(mm, &tmp,
796 size, 0, 0,
797 range_start, range_end,
798 0);
799 if (likely(err == -ENOSPC))
800 return true;
801
802 if (!err) {
803 pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
804 tmp.start, tmp.size, range_start, range_end);
805 drm_mm_remove_node(&tmp);
806 } else {
807 pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
808 err, -ENOSPC, size, range_start, range_end);
809 }
810
811 return false;
812}
813
814static bool assert_contiguous_in_range(struct drm_mm *mm,
815 u64 size,
816 u64 start,
817 u64 end)
818{
819 struct drm_mm_node *node;
820 unsigned int n;
821
822 if (!expect_insert_in_range_fail(mm, size, start, end))
823 return false;
824
825 n = div64_u64(start + size - 1, size);
826 drm_mm_for_each_node(node, mm) {
827 if (node->start < start || node->start + node->size > end) {
828 pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
829 n, node->start, node->start + node->size, start, end);
830 return false;
831 }
832
833 if (node->start != n * size) {
834 pr_err("node %d out of order, expected start %llx, found %llx\n",
835 n, n * size, node->start);
836 return false;
837 }
838
839 if (node->size != size) {
840 pr_err("node %d has wrong size, expected size %llx, found %llx\n",
841 n, size, node->size);
842 return false;
843 }
844
845 if (drm_mm_hole_follows(node) &&
846 drm_mm_hole_node_end(node) < end) {
847 pr_err("node %d is followed by a hole!\n", n);
848 return false;
849 }
850
851 n++;
852 }
853
854 if (start > 0) {
855 node = __drm_mm_interval_first(mm, 0, start - 1);
856 if (node->allocated) {
857 pr_err("node before start: node=%llx+%llu, start=%llx\n",
858 node->start, node->size, start);
859 return false;
860 }
861 }
862
863 if (end < U64_MAX) {
864 node = __drm_mm_interval_first(mm, end, U64_MAX);
865 if (node->allocated) {
866 pr_err("node after end: node=%llx+%llu, end=%llx\n",
867 node->start, node->size, end);
868 return false;
869 }
870 }
871
872 return true;
873}
874
/*
 * Like __igt_insert(), but constrain insertions to the sub-range
 * [start, end) of a drm_mm spanning [0, count * size). Nodes are
 * inserted with alignment == size so each lands on a size-aligned slot;
 * start_n/end_n are the first and last slot indices that fit entirely
 * inside the range. Returns 0 on success, negative errno on failure.
 */
static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end)
{
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct drm_mm_node *nodes, *node, *next;
	unsigned int n, start_n, end_n;
	int ret;

	DRM_MM_BUG_ON(!count);
	DRM_MM_BUG_ON(!size);
	DRM_MM_BUG_ON(end <= start);

	/* Very similar to __igt_insert(), but now instead of populating the
	 * full range of the drm_mm, we try to fill a small portion of it.
	 */

	ret = -ENOMEM;
	nodes = vzalloc(count * sizeof(*nodes));
	if (!nodes)
		goto err;

	ret = -EINVAL;
	drm_mm_init(&mm, 0, count * size);

	/* First and last whole slots within [start, end) */
	start_n = div64_u64(start + size - 1, size);
	end_n = div64_u64(end - size, size);

	for (mode = insert_modes; mode->name; mode++) {
		for (n = start_n; n <= end_n; n++) {
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
				       mode->name, size, n,
				       start_n, end_n,
				       start, end);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Remove one and reinsert, it should refill itself */
		for (n = start_n; n <= end_n; n++) {
			u64 addr = nodes[n].start;

			drm_mm_remove_node(&nodes[n]);
			if (!expect_insert_in_range(&mm, &nodes[n],
						    size, size, n,
						    start, end, mode)) {
				pr_err("%s reinsert failed, step %d\n", mode->name, n);
				goto out;
			}

			if (nodes[n].start != addr) {
				pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n",
				       mode->name, n, addr, nodes[n].start);
				goto out;
			}
		}

		if (!assert_contiguous_in_range(&mm, size, start, end)) {
			pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
			       mode->name, start, end, size);
			goto out;
		}

		/* Start the next insert mode from an empty drm_mm */
		drm_mm_for_each_node_safe(node, next, &mm)
			drm_mm_remove_node(node);
		DRM_MM_BUG_ON(!drm_mm_clean(&mm));

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	vfree(nodes);
err:
	return ret;
}
962
963static int insert_outside_range(void)
964{
965 struct drm_mm mm;
966 const unsigned int start = 1024;
967 const unsigned int end = 2048;
968 const unsigned int size = end - start;
969
970 drm_mm_init(&mm, start, size);
971
972 if (!expect_insert_in_range_fail(&mm, 1, 0, start))
973 return -EINVAL;
974
975 if (!expect_insert_in_range_fail(&mm, size,
976 start - size/2, start + (size+1)/2))
977 return -EINVAL;
978
979 if (!expect_insert_in_range_fail(&mm, size,
980 end - (size+1)/2, end + size/2))
981 return -EINVAL;
982
983 if (!expect_insert_in_range_fail(&mm, 1, end, end + size))
984 return -EINVAL;
985
986 drm_mm_takedown(&mm);
987 return 0;
988}
989
990static int igt_insert_range(void *ignored)
991{
992 const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
993 unsigned int n;
994 int ret;
995
996 /* Check that requests outside the bounds of drm_mm are rejected. */
997 ret = insert_outside_range();
998 if (ret)
999 return ret;
1000
1001 for_each_prime_number_from(n, 1, 50) {
1002 const u64 size = BIT_ULL(n);
1003 const u64 max = count * size;
1004
1005 ret = __igt_insert_range(count, size, 0, max);
1006 if (ret)
1007 return ret;
1008
1009 ret = __igt_insert_range(count, size, 1, max);
1010 if (ret)
1011 return ret;
1012
1013 ret = __igt_insert_range(count, size, 0, max - 1);
1014 if (ret)
1015 return ret;
1016
1017 ret = __igt_insert_range(count, size, 0, max/2);
1018 if (ret)
1019 return ret;
1020
1021 ret = __igt_insert_range(count, size, max/2, max);
1022 if (ret)
1023 return ret;
1024
1025 ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1);
1026 if (ret)
1027 return ret;
1028
1029 cond_resched();
1030 }
1031
1032 return 0;
1033}
1034
1035static int igt_align(void *ignored)
1036{
1037 const struct insert_mode *mode;
1038 const unsigned int max_count = min(8192u, max_prime);
1039 struct drm_mm mm;
1040 struct drm_mm_node *nodes, *node, *next;
1041 unsigned int prime;
1042 int ret = -EINVAL;
1043
1044 /* For each of the possible insertion modes, we pick a few
1045 * arbitrary alignments and check that the inserted node
1046 * meets our requirements.
1047 */
1048
1049 nodes = vzalloc(max_count * sizeof(*nodes));
1050 if (!nodes)
1051 goto err;
1052
1053 drm_mm_init(&mm, 1, U64_MAX - 2);
1054
1055 for (mode = insert_modes; mode->name; mode++) {
1056 unsigned int i = 0;
1057
1058 for_each_prime_number_from(prime, 1, max_count) {
1059 u64 size = next_prime_number(prime);
1060
1061 if (!expect_insert(&mm, &nodes[i],
1062 size, prime, i,
1063 mode)) {
1064 pr_err("%s insert failed with alignment=%d",
1065 mode->name, prime);
1066 goto out;
1067 }
1068
1069 i++;
1070 }
1071
1072 drm_mm_for_each_node_safe(node, next, &mm)
1073 drm_mm_remove_node(node);
1074 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1075
1076 cond_resched();
1077 }
1078
1079 ret = 0;
1080out:
1081 drm_mm_for_each_node_safe(node, next, &mm)
1082 drm_mm_remove_node(node);
1083 drm_mm_takedown(&mm);
1084 vfree(nodes);
1085err:
1086 return ret;
1087}
1088
1089static int igt_align_pot(int max)
1090{
1091 struct drm_mm mm;
1092 struct drm_mm_node *node, *next;
1093 int bit;
1094 int ret = -EINVAL;
1095
1096 /* Check that we can align to the full u64 address space */
1097
1098 drm_mm_init(&mm, 1, U64_MAX - 2);
1099
1100 for (bit = max - 1; bit; bit--) {
1101 u64 align, size;
1102
1103 node = kzalloc(sizeof(*node), GFP_KERNEL);
1104 if (!node) {
1105 ret = -ENOMEM;
1106 goto out;
1107 }
1108
1109 align = BIT_ULL(bit);
1110 size = BIT_ULL(bit-1) + 1;
1111 if (!expect_insert(&mm, node,
1112 size, align, bit,
1113 &insert_modes[0])) {
1114 pr_err("insert failed with alignment=%llx [%d]",
1115 align, bit);
1116 goto out;
1117 }
1118
1119 cond_resched();
1120 }
1121
1122 ret = 0;
1123out:
1124 drm_mm_for_each_node_safe(node, next, &mm) {
1125 drm_mm_remove_node(node);
1126 kfree(node);
1127 }
1128 drm_mm_takedown(&mm);
1129 return ret;
1130}
1131
/* Exercise power-of-two alignments across a 32-bit address range. */
static int igt_align32(void *ignored)
{
	return igt_align_pot(32);
}
1136
/* Exercise power-of-two alignments across the full 64-bit address range. */
static int igt_align64(void *ignored)
{
	return igt_align_pot(64);
}
1141
/* Debug aid: dump the parameters and hit window of an eviction scan. */
static void show_scan(const struct drm_mm_scan *scan)
{
	pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
		scan->hit_start, scan->hit_end,
		scan->size, scan->alignment, scan->color);
}
1148
/*
 * Debug aid: print up to @count holes in @mm together with their
 * allocated neighbouring nodes (the node owning the hole and the next
 * node in address order), typically after a failed eviction.
 * NOTE(review): node1/node2 stay NULL when the neighbour is not
 * allocated or kasprintf() fails; printk renders a NULL "%s" as
 * "(null)" — confirm that output is acceptable here.
 */
static void show_holes(const struct drm_mm *mm, int count)
{
	u64 hole_start, hole_end;
	struct drm_mm_node *hole;

	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
		struct drm_mm_node *next = list_next_entry(hole, node_list);
		const char *node1 = NULL, *node2 = NULL;

		if (hole->allocated)
			node1 = kasprintf(GFP_KERNEL,
					  "[%llx + %lld, color=%ld], ",
					  hole->start, hole->size, hole->color);

		if (next->allocated)
			node2 = kasprintf(GFP_KERNEL,
					  ", [%llx + %lld, color=%ld]",
					  next->start, next->size, next->color);

		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
			node1,
			hole_start, hole_end, hole_end - hole_start,
			node2);

		kfree(node2);
		kfree(node1);

		if (!--count)
			break;
	}
}
1180
/* A drm_mm_node plus a list link so eviction candidates can be chained
 * onto a local evict_list by the scan helpers below.
 */
struct evict_node {
	struct drm_mm_node node;
	struct list_head link;
};
1185
/*
 * Feed @nodes (visited via @order if given, otherwise sequentially) into
 * the prepared @scan until it reports a fit, keep only the blocks the
 * scan selected on @evict_list, and remove them from the drm_mm. When
 * @use_color is set, additionally collect any neighbouring nodes the
 * colour adjustment forces out via drm_mm_scan_color_evict(). Returns
 * false if no eviction solution was found.
 */
static bool evict_nodes(struct drm_mm_scan *scan,
			struct evict_node *nodes,
			unsigned int *order,
			unsigned int count,
			bool use_color,
			struct list_head *evict_list)
{
	struct evict_node *e, *en;
	unsigned int i;

	for (i = 0; i < count; i++) {
		e = &nodes[order ? order[i] : i];
		list_add(&e->link, evict_list);
		if (drm_mm_scan_add_block(scan, &e->node))
			break;
	}
	/* Drop the blocks the scan did not pick for eviction */
	list_for_each_entry_safe(e, en, evict_list, link) {
		if (!drm_mm_scan_remove_block(scan, &e->node))
			list_del(&e->link);
	}
	if (list_empty(evict_list)) {
		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
		       scan->size, count, scan->alignment, scan->color);
		return false;
	}

	list_for_each_entry(e, evict_list, link)
		drm_mm_remove_node(&e->node);

	if (use_color) {
		struct drm_mm_node *node;

		/* Pull in extra victims required by the colour constraint */
		while ((node = drm_mm_scan_color_evict(scan))) {
			e = container_of(node, typeof(*e), node);
			drm_mm_remove_node(&e->node);
			list_add(&e->link, evict_list);
		}
	} else {
		/* Without color, the scan must not demand extra evictions */
		if (drm_mm_scan_color_evict(scan)) {
			pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
			return false;
		}
	}

	return true;
}
1232
/*
 * Run a scan over every node but then abort it (remove every scanned
 * block without evicting anything) and verify the drm_mm is untouched:
 * each node is still allocated, still linked into the mm, and the
 * address range is still continuous.
 */
static bool evict_nothing(struct drm_mm *mm,
			  unsigned int total_size,
			  struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node *node;
	unsigned int n;

	drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		drm_mm_scan_add_block(&scan, &e->node);
	}
	/* Abort the scan: restore every block instead of evicting */
	list_for_each_entry(e, &evict_list, link)
		drm_mm_scan_remove_block(&scan, &e->node);

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!drm_mm_node_allocated(&e->node)) {
			pr_err("node[%d] no longer allocated!\n", n);
			return false;
		}

		/* Reuse link.next as a "seen in the mm" marker: clear here... */
		e->link.next = NULL;
	}

	/* ...and set it for every node the mm iterator still reaches */
	drm_mm_for_each_node(node, mm) {
		e = container_of(node, typeof(*e), node);
		e->link.next = &e->link;
	}

	for (n = 0; n < total_size; n++) {
		e = &nodes[n];

		if (!e->link.next) {
			pr_err("node[%d] no longer connected!\n", n);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1279
/*
 * Size the scan to the whole drm_mm so every node becomes an eviction
 * candidate; check that all of them are marked for eviction, that
 * removing them leaves one hole spanning the entire range, and that
 * every node can then be reserved back into place.
 */
static bool evict_everything(struct drm_mm *mm,
			     unsigned int total_size,
			     struct evict_node *nodes)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	unsigned int n;
	int err;

	drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
	for (n = 0; n < total_size; n++) {
		e = &nodes[n];
		list_add(&e->link, &evict_list);
		if (drm_mm_scan_add_block(&scan, &e->node))
			break;
	}

	err = 0;
	/* Every scanned block must be selected for eviction */
	list_for_each_entry(e, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&scan, &e->node)) {
			if (!err) {
				pr_err("Node %lld not marked for eviction!\n",
				       e->node.start);
				err = -EINVAL;
			}
		}
	}
	if (err)
		return false;

	list_for_each_entry(e, &evict_list, link)
		drm_mm_remove_node(&e->node);

	if (!assert_one_hole(mm, 0, total_size))
		return false;

	/* Restore the original layout for subsequent tests */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return false;
		}
	}

	return assert_continuous(mm, nodes[0].node.size);
}
1328
/*
 * Evict just enough nodes within [range_start, range_end) to fit a new
 * size/alignment allocation, insert into the freed hole with
 * DRM_MM_INSERT_EVICT, verify the placement exactly fills the hole,
 * then restore the original layout by reinserting every victim.
 * Returns 0 on success, negative errno on failure.
 */
static int evict_something(struct drm_mm *mm,
			   u64 range_start, u64 range_end,
			   struct evict_node *nodes,
			   unsigned int *order,
			   unsigned int count,
			   unsigned int size,
			   unsigned int alignment,
			   const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, 0,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, false,
			 &evict_list))
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
		       size, alignment);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	/* The insertion must consume the hole exactly: no trailing hole */
	if (!assert_node(&tmp, mm, size, alignment, 0) ||
	    drm_mm_hole_follows(&tmp)) {
		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment),
		       tmp.start, drm_mm_hole_follows(&tmp));
		err = -EINVAL;
	}

	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put the evicted nodes back so the caller's layout is restored */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	if (!assert_continuous(mm, nodes[0].node.size)) {
		pr_err("range is no longer continuous\n");
		return -EINVAL;
	}

	return 0;
}
1399
/*
 * Here we populate a full drm_mm and then try and insert a new node
 * by evicting other nodes in a random order. The drm_mm_scan should
 * pick the first matching hole it finds from the random list. We
 * repeat that for different allocation strategies, alignments and
 * sizes to try and stress the hole finder.
 */
static int igt_evict(void *ignored)
{
	DRM_RND_STATE(prng, random_seed);
	const unsigned int size = 8192;
	const struct insert_mode *mode;
	struct drm_mm mm;
	struct evict_node *nodes;
	struct drm_mm_node *node, *next;
	unsigned int *order, n;
	int ret, err;

	ret = -ENOMEM;
	nodes = vzalloc(size * sizeof(*nodes));
	if (!nodes)
		goto err;

	order = drm_random_order(size, &prng);
	if (!order)
		goto err_nodes;

	ret = -EINVAL;
	/* Fill the mm completely with unit-sized nodes */
	drm_mm_init(&mm, 0, size);
	for (n = 0; n < size; n++) {
		err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
		if (err) {
			pr_err("insert failed, step %d\n", n);
			ret = err;
			goto out;
		}
	}

	/* First check that using the scanner doesn't break the mm */
	if (!evict_nothing(&mm, size, nodes)) {
		pr_err("evict_nothing() failed\n");
		goto out;
	}
	if (!evict_everything(&mm, size, nodes)) {
		pr_err("evict_everything() failed\n");
		goto out;
	}

	for (mode = evict_modes; mode->name; mode++) {
		/* Power-of-two sizes, no alignment constraint */
		for (n = 1; n <= size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      n, 1,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u) failed\n",
				       mode->name, n);
				ret = err;
				goto out;
			}
		}

		/* Fixed size, power-of-two alignments */
		for (n = 1; n < size; n <<= 1) {
			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      size/2, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, size/2, n);
				ret = err;
				goto out;
			}
		}

		/* Prime alignments with sizes shrinking as alignment grows */
		for_each_prime_number_from(n, 1, min(size, max_prime)) {
			unsigned int nsize = (size - n + 1) / 2;

			DRM_MM_BUG_ON(!nsize);

			drm_random_reorder(order, size, &prng);
			err = evict_something(&mm, 0, U64_MAX,
					      nodes, order, size,
					      nsize, n,
					      mode);
			if (err) {
				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
				       mode->name, nsize, n);
				ret = err;
				goto out;
			}
		}

		cond_resched();
	}

	ret = 0;
out:
	drm_mm_for_each_node_safe(node, next, &mm)
		drm_mm_remove_node(node);
	drm_mm_takedown(&mm);
	kfree(order);
err_nodes:
	vfree(nodes);
err:
	return ret;
}
1509
1510static int igt_evict_range(void *ignored)
1511{
1512 DRM_RND_STATE(prng, random_seed);
1513 const unsigned int size = 8192;
1514 const unsigned int range_size = size / 2;
1515 const unsigned int range_start = size / 4;
1516 const unsigned int range_end = range_start + range_size;
1517 const struct insert_mode *mode;
1518 struct drm_mm mm;
1519 struct evict_node *nodes;
1520 struct drm_mm_node *node, *next;
1521 unsigned int *order, n;
1522 int ret, err;
1523
1524 /* Like igt_evict() but now we are limiting the search to a
1525 * small portion of the full drm_mm.
1526 */
1527
1528 ret = -ENOMEM;
1529 nodes = vzalloc(size * sizeof(*nodes));
1530 if (!nodes)
1531 goto err;
1532
1533 order = drm_random_order(size, &prng);
1534 if (!order)
1535 goto err_nodes;
1536
1537 ret = -EINVAL;
1538 drm_mm_init(&mm, 0, size);
1539 for (n = 0; n < size; n++) {
1540 err = drm_mm_insert_node(&mm, &nodes[n].node, 1);
1541 if (err) {
1542 pr_err("insert failed, step %d\n", n);
1543 ret = err;
1544 goto out;
1545 }
1546 }
1547
1548 for (mode = evict_modes; mode->name; mode++) {
1549 for (n = 1; n <= range_size; n <<= 1) {
1550 drm_random_reorder(order, size, &prng);
1551 err = evict_something(&mm, range_start, range_end,
1552 nodes, order, size,
1553 n, 1,
1554 mode);
1555 if (err) {
1556 pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n",
1557 mode->name, n, range_start, range_end);
1558 goto out;
1559 }
1560 }
1561
1562 for (n = 1; n <= range_size; n <<= 1) {
1563 drm_random_reorder(order, size, &prng);
1564 err = evict_something(&mm, range_start, range_end,
1565 nodes, order, size,
1566 range_size/2, n,
1567 mode);
1568 if (err) {
1569 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1570 mode->name, range_size/2, n, range_start, range_end);
1571 goto out;
1572 }
1573 }
1574
1575 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
1576 unsigned int nsize = (range_size - n + 1) / 2;
1577
1578 DRM_MM_BUG_ON(!nsize);
1579
1580 drm_random_reorder(order, size, &prng);
1581 err = evict_something(&mm, range_start, range_end,
1582 nodes, order, size,
1583 nsize, n,
1584 mode);
1585 if (err) {
1586 pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
1587 mode->name, nsize, n, range_start, range_end);
1588 goto out;
1589 }
1590 }
1591
1592 cond_resched();
1593 }
1594
1595 ret = 0;
1596out:
1597 drm_mm_for_each_node_safe(node, next, &mm)
1598 drm_mm_remove_node(node);
1599 drm_mm_takedown(&mm);
1600 kfree(order);
1601err_nodes:
1602 vfree(nodes);
1603err:
1604 return ret;
1605}
1606
1607static unsigned int node_index(const struct drm_mm_node *node)
1608{
1609 return div64_u64(node->start, node->size);
1610}
1611
1612static int igt_topdown(void *ignored)
1613{
1614 const struct insert_mode *topdown = &insert_modes[TOPDOWN];
1615 DRM_RND_STATE(prng, random_seed);
1616 const unsigned int count = 8192;
1617 unsigned int size;
1618 unsigned long *bitmap = NULL;
1619 struct drm_mm mm;
1620 struct drm_mm_node *nodes, *node, *next;
1621 unsigned int *order, n, m, o = 0;
1622 int ret;
1623
1624 /* When allocating top-down, we expect to be returned a node
1625 * from a suitable hole at the top of the drm_mm. We check that
1626 * the returned node does match the highest available slot.
1627 */
1628
1629 ret = -ENOMEM;
1630 nodes = vzalloc(count * sizeof(*nodes));
1631 if (!nodes)
1632 goto err;
1633
1634 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1635 GFP_KERNEL);
1636 if (!bitmap)
1637 goto err_nodes;
1638
1639 order = drm_random_order(count, &prng);
1640 if (!order)
1641 goto err_bitmap;
1642
1643 ret = -EINVAL;
1644 for (size = 1; size <= 64; size <<= 1) {
1645 drm_mm_init(&mm, 0, size*count);
1646 for (n = 0; n < count; n++) {
1647 if (!expect_insert(&mm, &nodes[n],
1648 size, 0, n,
1649 topdown)) {
1650 pr_err("insert failed, size %u step %d\n", size, n);
1651 goto out;
1652 }
1653
1654 if (drm_mm_hole_follows(&nodes[n])) {
1655 pr_err("hole after topdown insert %d, start=%llx\n, size=%u",
1656 n, nodes[n].start, size);
1657 goto out;
1658 }
1659
1660 if (!assert_one_hole(&mm, 0, size*(count - n - 1)))
1661 goto out;
1662 }
1663
1664 if (!assert_continuous(&mm, size))
1665 goto out;
1666
1667 drm_random_reorder(order, count, &prng);
1668 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1669 for (m = 0; m < n; m++) {
1670 node = &nodes[order[(o + m) % count]];
1671 drm_mm_remove_node(node);
1672 __set_bit(node_index(node), bitmap);
1673 }
1674
1675 for (m = 0; m < n; m++) {
1676 unsigned int last;
1677
1678 node = &nodes[order[(o + m) % count]];
1679 if (!expect_insert(&mm, node,
1680 size, 0, 0,
1681 topdown)) {
1682 pr_err("insert failed, step %d/%d\n", m, n);
1683 goto out;
1684 }
1685
1686 if (drm_mm_hole_follows(node)) {
1687 pr_err("hole after topdown insert %d/%d, start=%llx\n",
1688 m, n, node->start);
1689 goto out;
1690 }
1691
1692 last = find_last_bit(bitmap, count);
1693 if (node_index(node) != last) {
1694 pr_err("node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
1695 m, n, size, last, node_index(node));
1696 goto out;
1697 }
1698
1699 __clear_bit(last, bitmap);
1700 }
1701
1702 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1703
1704 o += n;
1705 }
1706
1707 drm_mm_for_each_node_safe(node, next, &mm)
1708 drm_mm_remove_node(node);
1709 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1710 cond_resched();
1711 }
1712
1713 ret = 0;
1714out:
1715 drm_mm_for_each_node_safe(node, next, &mm)
1716 drm_mm_remove_node(node);
1717 drm_mm_takedown(&mm);
1718 kfree(order);
1719err_bitmap:
1720 kfree(bitmap);
1721err_nodes:
1722 vfree(nodes);
1723err:
1724 return ret;
1725}
1726
1727static int igt_bottomup(void *ignored)
1728{
1729 const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
1730 DRM_RND_STATE(prng, random_seed);
1731 const unsigned int count = 8192;
1732 unsigned int size;
1733 unsigned long *bitmap;
1734 struct drm_mm mm;
1735 struct drm_mm_node *nodes, *node, *next;
1736 unsigned int *order, n, m, o = 0;
1737 int ret;
1738
1739 /* Like igt_topdown, but instead of searching for the last hole,
1740 * we search for the first.
1741 */
1742
1743 ret = -ENOMEM;
1744 nodes = vzalloc(count * sizeof(*nodes));
1745 if (!nodes)
1746 goto err;
1747
1748 bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long),
1749 GFP_KERNEL);
1750 if (!bitmap)
1751 goto err_nodes;
1752
1753 order = drm_random_order(count, &prng);
1754 if (!order)
1755 goto err_bitmap;
1756
1757 ret = -EINVAL;
1758 for (size = 1; size <= 64; size <<= 1) {
1759 drm_mm_init(&mm, 0, size*count);
1760 for (n = 0; n < count; n++) {
1761 if (!expect_insert(&mm, &nodes[n],
1762 size, 0, n,
1763 bottomup)) {
1764 pr_err("bottomup insert failed, size %u step %d\n", size, n);
1765 goto out;
1766 }
1767
1768 if (!assert_one_hole(&mm, size*(n + 1), size*count))
1769 goto out;
1770 }
1771
1772 if (!assert_continuous(&mm, size))
1773 goto out;
1774
1775 drm_random_reorder(order, count, &prng);
1776 for_each_prime_number_from(n, 1, min(count, max_prime)) {
1777 for (m = 0; m < n; m++) {
1778 node = &nodes[order[(o + m) % count]];
1779 drm_mm_remove_node(node);
1780 __set_bit(node_index(node), bitmap);
1781 }
1782
1783 for (m = 0; m < n; m++) {
1784 unsigned int first;
1785
1786 node = &nodes[order[(o + m) % count]];
1787 if (!expect_insert(&mm, node,
1788 size, 0, 0,
1789 bottomup)) {
1790 pr_err("insert failed, step %d/%d\n", m, n);
1791 goto out;
1792 }
1793
1794 first = find_first_bit(bitmap, count);
1795 if (node_index(node) != first) {
1796 pr_err("node %d/%d not inserted into bottom hole, expected %d, found %d\n",
1797 m, n, first, node_index(node));
1798 goto out;
1799 }
1800 __clear_bit(first, bitmap);
1801 }
1802
1803 DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
1804
1805 o += n;
1806 }
1807
1808 drm_mm_for_each_node_safe(node, next, &mm)
1809 drm_mm_remove_node(node);
1810 DRM_MM_BUG_ON(!drm_mm_clean(&mm));
1811 cond_resched();
1812 }
1813
1814 ret = 0;
1815out:
1816 drm_mm_for_each_node_safe(node, next, &mm)
1817 drm_mm_remove_node(node);
1818 drm_mm_takedown(&mm);
1819 kfree(order);
1820err_bitmap:
1821 kfree(bitmap);
1822err_nodes:
1823 vfree(nodes);
1824err:
1825 return ret;
1826}
1827
1828static void separate_adjacent_colors(const struct drm_mm_node *node,
1829 unsigned long color,
1830 u64 *start,
1831 u64 *end)
1832{
1833 if (node->allocated && node->color != color)
1834 ++*start;
1835
1836 node = list_next_entry(node, node_list);
1837 if (node->allocated && node->color != color)
1838 --*end;
1839}
1840
1841static bool colors_abutt(const struct drm_mm_node *node)
1842{
1843 if (!drm_mm_hole_follows(node) &&
1844 list_next_entry(node, node_list)->allocated) {
1845 pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
1846 node->color, node->start, node->size,
1847 list_next_entry(node, node_list)->color,
1848 list_next_entry(node, node_list)->start,
1849 list_next_entry(node, node_list)->size);
1850 return true;
1851 }
1852
1853 return false;
1854}
1855
1856static int igt_color(void *ignored)
1857{
1858 const unsigned int count = min(4096u, max_iterations);
1859 const struct insert_mode *mode;
1860 struct drm_mm mm;
1861 struct drm_mm_node *node, *nn;
1862 unsigned int n;
1863 int ret = -EINVAL, err;
1864
1865 /* Color adjustment complicates everything. First we just check
1866 * that when we insert a node we apply any color_adjustment callback.
1867 * The callback we use should ensure that there is a gap between
1868 * any two nodes, and so after each insertion we check that those
1869 * holes are inserted and that they are preserved.
1870 */
1871
1872 drm_mm_init(&mm, 0, U64_MAX);
1873
1874 for (n = 1; n <= count; n++) {
1875 node = kzalloc(sizeof(*node), GFP_KERNEL);
1876 if (!node) {
1877 ret = -ENOMEM;
1878 goto out;
1879 }
1880
1881 if (!expect_insert(&mm, node,
1882 n, 0, n,
1883 &insert_modes[0])) {
1884 pr_err("insert failed, step %d\n", n);
1885 kfree(node);
1886 goto out;
1887 }
1888 }
1889
1890 drm_mm_for_each_node_safe(node, nn, &mm) {
1891 if (node->color != node->size) {
1892 pr_err("invalid color stored: expected %lld, found %ld\n",
1893 node->size, node->color);
1894
1895 goto out;
1896 }
1897
1898 drm_mm_remove_node(node);
1899 kfree(node);
1900 }
1901
1902 /* Now, let's start experimenting with applying a color callback */
1903 mm.color_adjust = separate_adjacent_colors;
1904 for (mode = insert_modes; mode->name; mode++) {
1905 u64 last;
1906
1907 node = kzalloc(sizeof(*node), GFP_KERNEL);
1908 if (!node) {
1909 ret = -ENOMEM;
1910 goto out;
1911 }
1912
1913 node->size = 1 + 2*count;
1914 node->color = node->size;
1915
1916 err = drm_mm_reserve_node(&mm, node);
1917 if (err) {
1918 pr_err("initial reserve failed!\n");
1919 ret = err;
1920 goto out;
1921 }
1922
1923 last = node->start + node->size;
1924
1925 for (n = 1; n <= count; n++) {
1926 int rem;
1927
1928 node = kzalloc(sizeof(*node), GFP_KERNEL);
1929 if (!node) {
1930 ret = -ENOMEM;
1931 goto out;
1932 }
1933
1934 node->start = last;
1935 node->size = n + count;
1936 node->color = node->size;
1937
1938 err = drm_mm_reserve_node(&mm, node);
1939 if (err != -ENOSPC) {
1940 pr_err("reserve %d did not report color overlap! err=%d\n",
1941 n, err);
1942 goto out;
1943 }
1944
1945 node->start += n + 1;
1946 rem = misalignment(node, n + count);
1947 node->start += n + count - rem;
1948
1949 err = drm_mm_reserve_node(&mm, node);
1950 if (err) {
1951 pr_err("reserve %d failed, err=%d\n", n, err);
1952 ret = err;
1953 goto out;
1954 }
1955
1956 last = node->start + node->size;
1957 }
1958
1959 for (n = 1; n <= count; n++) {
1960 node = kzalloc(sizeof(*node), GFP_KERNEL);
1961 if (!node) {
1962 ret = -ENOMEM;
1963 goto out;
1964 }
1965
1966 if (!expect_insert(&mm, node,
1967 n, n, n,
1968 mode)) {
1969 pr_err("%s insert failed, step %d\n",
1970 mode->name, n);
1971 kfree(node);
1972 goto out;
1973 }
1974 }
1975
1976 drm_mm_for_each_node_safe(node, nn, &mm) {
1977 u64 rem;
1978
1979 if (node->color != node->size) {
1980 pr_err("%s invalid color stored: expected %lld, found %ld\n",
1981 mode->name, node->size, node->color);
1982
1983 goto out;
1984 }
1985
1986 if (colors_abutt(node))
1987 goto out;
1988
1989 div64_u64_rem(node->start, node->size, &rem);
1990 if (rem) {
1991 pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
1992 mode->name, node->start, node->size, rem);
1993 goto out;
1994 }
1995
1996 drm_mm_remove_node(node);
1997 kfree(node);
1998 }
1999
2000 cond_resched();
2001 }
2002
2003 ret = 0;
2004out:
2005 drm_mm_for_each_node_safe(node, nn, &mm) {
2006 drm_mm_remove_node(node);
2007 kfree(node);
2008 }
2009 drm_mm_takedown(&mm);
2010 return ret;
2011}
2012
/* Exercise the eviction scanner with color adjustment: scan [range_start,
 * range_end) for a hole of size/alignment/color, evict the chosen victims,
 * insert a temporary node into the freed hole, validate its placement, then
 * restore the original layout by reinserting the evicted nodes.
 *
 * Returns 0 on success or a negative error code; on failure the evicted
 * nodes may not have been reinserted, so the caller dumps the mm state.
 */
static int evict_color(struct drm_mm *mm,
		       u64 range_start, u64 range_end,
		       struct evict_node *nodes,
		       unsigned int *order,
		       unsigned int count,
		       unsigned int size,
		       unsigned int alignment,
		       unsigned long color,
		       const struct insert_mode *mode)
{
	struct drm_mm_scan scan;
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp;
	int err;

	/* Build the eviction list by scanning nodes in the random order. */
	drm_mm_scan_init_with_range(&scan, mm,
				    size, alignment, color,
				    range_start, range_end,
				    mode->mode);
	if (!evict_nodes(&scan,
			 nodes, order, count, true,
			 &evict_list))
		return -EINVAL;

	/* The scan promised a hole: inserting into it must now succeed. */
	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
					 DRM_MM_INSERT_EVICT);
	if (err) {
		pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
		       size, alignment, color, err);
		show_scan(&scan);
		show_holes(mm, 3);
		return err;
	}

	/* Validate placement: inside the range, color gap kept, aligned. */
	if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
		pr_err("Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
		       tmp.start, tmp.size, range_start, range_end);
		err = -EINVAL;
	}

	if (colors_abutt(&tmp))
		err = -EINVAL;

	if (!assert_node(&tmp, mm, size, alignment, color)) {
		pr_err("Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
		       tmp.size, size,
		       alignment, misalignment(&tmp, alignment), tmp.start);
		err = -EINVAL;
	}

	/* Remove the probe node before restoring the evicted ones. */
	drm_mm_remove_node(&tmp);
	if (err)
		return err;

	/* Put every evicted node back at its original location. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err) {
			pr_err("Failed to reinsert node after eviction: start=%llx\n",
			       e->node.start);
			return err;
		}
	}

	cond_resched();
	return 0;
}
2081
2082static int igt_color_evict(void *ignored)
2083{
2084 DRM_RND_STATE(prng, random_seed);
2085 const unsigned int total_size = min(8192u, max_iterations);
2086 const struct insert_mode *mode;
2087 unsigned long color = 0;
2088 struct drm_mm mm;
2089 struct evict_node *nodes;
2090 struct drm_mm_node *node, *next;
2091 unsigned int *order, n;
2092 int ret, err;
2093
2094 /* Check that the drm_mm_scan also honours color adjustment when
2095 * choosing its victims to create a hole. Our color_adjust does not
2096 * allow two nodes to be placed together without an intervening hole
2097 * enlarging the set of victims that must be evicted.
2098 */
2099
2100 ret = -ENOMEM;
2101 nodes = vzalloc(total_size * sizeof(*nodes));
2102 if (!nodes)
2103 goto err;
2104
2105 order = drm_random_order(total_size, &prng);
2106 if (!order)
2107 goto err_nodes;
2108
2109 ret = -EINVAL;
2110 drm_mm_init(&mm, 0, 2*total_size - 1);
2111 mm.color_adjust = separate_adjacent_colors;
2112 for (n = 0; n < total_size; n++) {
2113 if (!expect_insert(&mm, &nodes[n].node,
2114 1, 0, color++,
2115 &insert_modes[0])) {
2116 pr_err("insert failed, step %d\n", n);
2117 goto out;
2118 }
2119 }
2120
2121 for (mode = evict_modes; mode->name; mode++) {
2122 for (n = 1; n <= total_size; n <<= 1) {
2123 drm_random_reorder(order, total_size, &prng);
2124 err = evict_color(&mm, 0, U64_MAX,
2125 nodes, order, total_size,
2126 n, 1, color++,
2127 mode);
2128 if (err) {
2129 pr_err("%s evict_color(size=%u) failed\n",
2130 mode->name, n);
2131 goto out;
2132 }
2133 }
2134
2135 for (n = 1; n < total_size; n <<= 1) {
2136 drm_random_reorder(order, total_size, &prng);
2137 err = evict_color(&mm, 0, U64_MAX,
2138 nodes, order, total_size,
2139 total_size/2, n, color++,
2140 mode);
2141 if (err) {
2142 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2143 mode->name, total_size/2, n);
2144 goto out;
2145 }
2146 }
2147
2148 for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
2149 unsigned int nsize = (total_size - n + 1) / 2;
2150
2151 DRM_MM_BUG_ON(!nsize);
2152
2153 drm_random_reorder(order, total_size, &prng);
2154 err = evict_color(&mm, 0, U64_MAX,
2155 nodes, order, total_size,
2156 nsize, n, color++,
2157 mode);
2158 if (err) {
2159 pr_err("%s evict_color(size=%u, alignment=%u) failed\n",
2160 mode->name, nsize, n);
2161 goto out;
2162 }
2163 }
2164
2165 cond_resched();
2166 }
2167
2168 ret = 0;
2169out:
2170 if (ret)
2171 show_mm(&mm);
2172 drm_mm_for_each_node_safe(node, next, &mm)
2173 drm_mm_remove_node(node);
2174 drm_mm_takedown(&mm);
2175 kfree(order);
2176err_nodes:
2177 vfree(nodes);
2178err:
2179 return ret;
2180}
2181
2182static int igt_color_evict_range(void *ignored)
2183{
2184 DRM_RND_STATE(prng, random_seed);
2185 const unsigned int total_size = 8192;
2186 const unsigned int range_size = total_size / 2;
2187 const unsigned int range_start = total_size / 4;
2188 const unsigned int range_end = range_start + range_size;
2189 const struct insert_mode *mode;
2190 unsigned long color = 0;
2191 struct drm_mm mm;
2192 struct evict_node *nodes;
2193 struct drm_mm_node *node, *next;
2194 unsigned int *order, n;
2195 int ret, err;
2196
2197 /* Like igt_color_evict(), but limited to small portion of the full
2198 * drm_mm range.
2199 */
2200
2201 ret = -ENOMEM;
2202 nodes = vzalloc(total_size * sizeof(*nodes));
2203 if (!nodes)
2204 goto err;
2205
2206 order = drm_random_order(total_size, &prng);
2207 if (!order)
2208 goto err_nodes;
2209
2210 ret = -EINVAL;
2211 drm_mm_init(&mm, 0, 2*total_size - 1);
2212 mm.color_adjust = separate_adjacent_colors;
2213 for (n = 0; n < total_size; n++) {
2214 if (!expect_insert(&mm, &nodes[n].node,
2215 1, 0, color++,
2216 &insert_modes[0])) {
2217 pr_err("insert failed, step %d\n", n);
2218 goto out;
2219 }
2220 }
2221
2222 for (mode = evict_modes; mode->name; mode++) {
2223 for (n = 1; n <= range_size; n <<= 1) {
2224 drm_random_reorder(order, range_size, &prng);
2225 err = evict_color(&mm, range_start, range_end,
2226 nodes, order, total_size,
2227 n, 1, color++,
2228 mode);
2229 if (err) {
2230 pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n",
2231 mode->name, n, range_start, range_end);
2232 goto out;
2233 }
2234 }
2235
2236 for (n = 1; n < range_size; n <<= 1) {
2237 drm_random_reorder(order, total_size, &prng);
2238 err = evict_color(&mm, range_start, range_end,
2239 nodes, order, total_size,
2240 range_size/2, n, color++,
2241 mode);
2242 if (err) {
2243 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2244 mode->name, total_size/2, n, range_start, range_end);
2245 goto out;
2246 }
2247 }
2248
2249 for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
2250 unsigned int nsize = (range_size - n + 1) / 2;
2251
2252 DRM_MM_BUG_ON(!nsize);
2253
2254 drm_random_reorder(order, total_size, &prng);
2255 err = evict_color(&mm, range_start, range_end,
2256 nodes, order, total_size,
2257 nsize, n, color++,
2258 mode);
2259 if (err) {
2260 pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
2261 mode->name, nsize, n, range_start, range_end);
2262 goto out;
2263 }
2264 }
2265
2266 cond_resched();
2267 }
2268
2269 ret = 0;
2270out:
2271 if (ret)
2272 show_mm(&mm);
2273 drm_mm_for_each_node_safe(node, next, &mm)
2274 drm_mm_remove_node(node);
2275 drm_mm_takedown(&mm);
2276 kfree(order);
2277err_nodes:
2278 vfree(nodes);
2279err:
2280 return ret;
2281}
2282
2283#include "drm_selftest.c"
2284
2285static int __init test_drm_mm_init(void)
2286{
2287 int err;
2288
2289 while (!random_seed)
2290 random_seed = get_random_int();
2291
2292 pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
2293 random_seed, max_iterations, max_prime);
2294 err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
2295
2296 return err > 0 ? 0 : err;
2297}
2298
static void __exit test_drm_mm_exit(void)
{
	/* Nothing to clean up: each selftest tears down its own state. */
}
2302
2303module_init(test_drm_mm_init);
2304module_exit(test_drm_mm_exit);
2305
2306module_param(random_seed, uint, 0400);
2307module_param(max_iterations, uint, 0400);
2308module_param(max_prime, uint, 0400);
2309
2310MODULE_AUTHOR("Intel Corporation");
2311MODULE_LICENSE("GPL");