// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"

static unsigned int random_seed;

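/* Byte size of a block of the given order, where chunk_size is the order-0 block size. */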
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1ULL << order) * chunk_size; /* 1ULL avoids a 32-bit shift overflow for high orders */
}

static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy_block *block;
	unsigned long flags;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

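	/*
	 * Use a random power-of-two page size between SZ_4K and SZ_1M, and an
	 * mm size just under SZ_8M (ps-aligned) so the mm is built from
	 * multiple roots.
	 */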
	bias_size = SZ_1M;
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
	mm_size = (SZ_8M - 1) & ~(ps - 1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_ASSERT_TRUE(test, order);
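	/* 'order' is a random permutation of the bias slots, so the ranges are visited in random order. */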

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias range, using various
	 * patterns. This should detect if allocations leak out from a given
	 * bias range, for example.
	 */

	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within */
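		/* bias_rem is ps-aligned, so the rounded-up size can never exceed it */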
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		if (size)
			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &tmp,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us a bias start/end which is not always uniform like
	 * above, and in some cases will require the allocator to jump over
	 * already allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
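	/*
	 * The bias window now spans the whole mm and everything inside it has
	 * been allocated, so even a single extra page must fail.
	 */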
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc passed with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Allocate cleared blocks in the bias range when the DRM buddy's clear
	 * avail is zero. This validates bias range allocation in scenarios like
	 * system boot, when no cleared blocks are available, and exercises the
	 * fallback path too. The resulting blocks should always be dirty.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
	size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

	KUNIT_ASSERT_FALSE_MSG(test,
			       drm_buddy_alloc_blocks(&mm, bias_start,
						      bias_end, size, ps,
						      &allocated,
						      flags),
			       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
			       bias_start, bias_end, size, ps);

	list_for_each_entry(block, &allocated, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	DRM_RND_STATE(prng, random_seed);
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty and randomly alternate between
	 * requesting dirty and non-dirty pages (not going over the limit
	 * we freed as non-dirty), putting that into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

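	/*
	 * The five pages freed as cleared above are the only clear blocks
	 * available, matching the five DRM_BUDDY_CLEAR_ALLOCATION requests
	 * made below.
	 */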
	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with reasonable page-size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls the force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, incrementing the order until we hit the max_order.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

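	/*
	 * Every free block now sits next to a buddy freed with the opposite
	 * clear state, so each doubling below must force-merge mixed pairs,
	 * and the merged result must come back dirty.
	 */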
	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non-power-of-two size. Allocate a random size,
	 * free as cleared and then call fini. This exercises the multi-root
	 * force merge during fini.
	 */
	mm_size = 12 * SZ_4K;
	size = max(round_up(prandom_u32_state(&prng) % mm_size, ps), ps);
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    size, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%u\n", size);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists; one for left, middle and
	 * right. We can then free a list to simulate fragmentation. In
	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
	 * including the try_harder path.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

	drm_buddy_free_list(&mm, &middle, 0);
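	/* Only every third page is free now, so the largest contiguous hole is a single page. */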
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right, 0);
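	/* Freeing 'right' merges neighbours into two-page holes, but still nothing three pages wide. */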
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left, 0);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a power-of-two sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one page left.
	 * Free the largest block, then whittle down again. Eventually we will
	 * have a fully 50% fragmented mm.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, mm.chunk_size);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes, 0);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a power-of-two sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one page left.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

	mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

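	/* With a U64_MAX size, the mm must clamp its largest order to DRM_BUDDY_MAX_ORDER. */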
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							mm.chunk_size, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
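	/* Draw a non-zero seed so the value logged below can reproduce a failing run. */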
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
		   random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_clear),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kunit test for drm_buddy functions");
MODULE_LICENSE("GPL");