1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5
6#include <linux/kmemleak.h>
7#include <linux/module.h>
8#include <linux/sizes.h>
9
10#include <drm/drm_buddy.h>
11
12static struct kmem_cache *slab_blocks;
13
14static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
15 struct drm_buddy_block *parent,
16 unsigned int order,
17 u64 offset)
18{
19 struct drm_buddy_block *block;
20
21 BUG_ON(order > DRM_BUDDY_MAX_ORDER);
22
23 block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
24 if (!block)
25 return NULL;
26
27 block->header = offset;
28 block->header |= order;
29 block->parent = parent;
30
31 BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
32 return block;
33}
34
/*
 * Return @block to the slab cache. @mm is unused but kept for symmetry
 * with drm_block_alloc().
 */
static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}
40
41static void list_insert_sorted(struct drm_buddy *mm,
42 struct drm_buddy_block *block)
43{
44 struct drm_buddy_block *node;
45 struct list_head *head;
46
47 head = &mm->free_list[drm_buddy_block_order(block)];
48 if (list_empty(head)) {
49 list_add(&block->link, head);
50 return;
51 }
52
53 list_for_each_entry(node, head, link)
54 if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
55 break;
56
57 __list_add(&block->link, node->link.prev, &node->link);
58}
59
/* Drop the "cleared" flag: the block's memory is no longer known to be zeroed. */
static void clear_reset(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}
64
/* Flag the block's memory as already cleared (zeroed). */
static void mark_cleared(struct drm_buddy_block *block)
{
	block->header |= DRM_BUDDY_HEADER_CLEAR;
}
69
/*
 * Move the block to the ALLOCATED state and unlink it from its free
 * list. Caller is responsible for the avail/clear_avail accounting.
 */
static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}
77
/*
 * Move the block to the FREE state and insert it, offset-sorted, into
 * the free list for its order.
 */
static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}
86
/*
 * Move the block to the SPLIT state (its children now carry the real
 * state) and unlink it from its free list.
 */
static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}
94
95static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
96{
97 return s1 <= e2 && e1 >= s2;
98}
99
100static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
101{
102 return s1 <= s2 && e1 >= e2;
103}
104
105static struct drm_buddy_block *
106__get_buddy(struct drm_buddy_block *block)
107{
108 struct drm_buddy_block *parent;
109
110 parent = block->parent;
111 if (!parent)
112 return NULL;
113
114 if (parent->left == block)
115 return parent->right;
116
117 return parent->left;
118}
119
/*
 * Free @block back into the tree, merging with its buddy at each level
 * for as long as the buddy is also free. Without @force_merge, merging
 * stops at the first clear/dirty state mismatch between the pair; with
 * @force_merge the mismatch is ignored and the buddy's cleared bytes are
 * dropped from clear_avail.
 *
 * Returns the order of the final (possibly merged) block left on the
 * free list.
 */
static unsigned int __drm_buddy_free(struct drm_buddy *mm,
				     struct drm_buddy_block *block,
				     bool force_merge)
{
	struct drm_buddy_block *parent;
	unsigned int order;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		if (!force_merge) {
			/*
			 * Check the block and its buddy clear state and exit
			 * the loop if they both have the dissimilar state.
			 */
			if (drm_buddy_block_is_clear(block) !=
			    drm_buddy_block_is_clear(buddy))
				break;

			/* Both halves cleared: the merged parent is too. */
			if (drm_buddy_block_is_clear(block))
				mark_cleared(parent);
		}

		list_del(&buddy->link);
		if (force_merge && drm_buddy_block_is_clear(buddy))
			mm->clear_avail -= drm_buddy_block_size(mm, buddy);

		/* Both children are consumed by the merge. */
		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		/* Continue merging one level up. */
		block = parent;
	}

	order = drm_buddy_block_order(block);
	mark_free(mm, block);

	return order;
}
163
/*
 * Force-merge free buddy pairs inside [start, end] until a free block of
 * at least @min_order is produced. Pairs left unmerged by the normal
 * free path differ only in clear/dirty state, which is ignored here.
 *
 * Returns 0 once a block of @min_order or larger exists, -ENOMEM when no
 * further merging is possible, -EINVAL for an out-of-range @min_order.
 */
static int __force_merge(struct drm_buddy *mm,
			 u64 start,
			 u64 end,
			 unsigned int min_order)
{
	unsigned int order;
	int i;

	if (!min_order)
		return -ENOMEM;

	if (min_order > mm->max_order)
		return -EINVAL;

	/* Scan the orders below min_order, largest first. */
	for (i = min_order - 1; i >= 0; i--) {
		struct drm_buddy_block *block, *prev;

		list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
			struct drm_buddy_block *buddy;
			u64 block_start, block_end;

			/* Root blocks have no buddy to merge with. */
			if (!block->parent)
				continue;

			block_start = drm_buddy_block_offset(block);
			block_end = block_start + drm_buddy_block_size(mm, block) - 1;

			if (!contains(start, end, block_start, block_end))
				continue;

			buddy = __get_buddy(block);
			if (!drm_buddy_block_is_free(buddy))
				continue;

			/*
			 * A free pair with matching clear state should already
			 * have been merged by the normal free path.
			 */
			WARN_ON(drm_buddy_block_is_clear(block) ==
				drm_buddy_block_is_clear(buddy));

			/*
			 * If the prev block is same as buddy, don't access the
			 * block in the next iteration as we would free the
			 * buddy block as part of the free function.
			 */
			if (prev == buddy)
				prev = list_prev_entry(prev, link);

			list_del(&block->link);
			if (drm_buddy_block_is_clear(block))
				mm->clear_avail -= drm_buddy_block_size(mm, block);

			/* Merges as far up the tree as it can go. */
			order = __drm_buddy_free(mm, block, true);
			if (order >= min_order)
				return 0;
		}
	}

	return -ENOMEM;
}
221
/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	/* Minimum supported granularity is 4K. */
	if (chunk_size < SZ_4K)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->clear_avail = 0;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/* One root per set bit of size (power-of-two decomposition below). */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		/* Peel off the largest power-of-two chunk that fits. */
		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
317
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleanup memory manager resources and the freelist
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	u64 root_size, size;
	unsigned int order;
	int i;

	size = mm->size;

	for (i = 0; i < mm->n_roots; ++i) {
		/* Re-derive this root's order the same way init did. */
		order = ilog2(size) - ilog2(mm->chunk_size);
		/*
		 * Merge any pairs left split by clear/dirty mismatches so
		 * the root is whole again before it is released.
		 */
		__force_merge(mm, 0, size, order);

		/* A busy root here means a caller leaked allocations. */
		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
350
/*
 * Split a free block into two half-order children, propagating the
 * cleared state down to them, and move the parent to the SPLIT state.
 *
 * Returns 0 on success, -ENOMEM if a child node cannot be allocated.
 */
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	/* Children inherit the cleared state; the parent no longer owns it. */
	if (drm_buddy_block_is_clear(block)) {
		mark_cleared(block->left);
		mark_cleared(block->right);
		clear_reset(block);
	}

	mark_split(block);

	return 0;
}
384
/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	/* Thin exported wrapper around the internal helper. */
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
401
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	/* Account the returned capacity before any merging happens. */
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Merge with the buddy where state allows; no forced merging. */
	__drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);
419
420static void __drm_buddy_free_list(struct drm_buddy *mm,
421 struct list_head *objects,
422 bool mark_clear,
423 bool mark_dirty)
424{
425 struct drm_buddy_block *block, *on;
426
427 WARN_ON(mark_dirty && mark_clear);
428
429 list_for_each_entry_safe(block, on, objects, link) {
430 if (mark_clear)
431 mark_cleared(block);
432 else if (mark_dirty)
433 clear_reset(block);
434 drm_buddy_free_block(mm, block);
435 cond_resched();
436 }
437 INIT_LIST_HEAD(objects);
438}
439
/* Free a list of blocks without altering their clear/dirty state. */
static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}
450
451/**
452 * drm_buddy_free_list - free blocks
453 *
454 * @mm: DRM buddy manager
455 * @objects: input list head to free blocks
456 * @flags: optional flags like DRM_BUDDY_CLEARED
457 */
458void drm_buddy_free_list(struct drm_buddy *mm,
459 struct list_head *objects,
460 unsigned int flags)
461{
462 bool mark_clear = flags & DRM_BUDDY_CLEARED;
463
464 __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
465}
466EXPORT_SYMBOL(drm_buddy_free_list);
467
468static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
469{
470 bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;
471
472 return needs_clear != drm_buddy_block_is_clear(block);
473}
474
/*
 * Depth-first search for a free block of exactly @order overlapping
 * [start, end). Blocks straddling the range boundary are split so only
 * the in-range part is considered; with @fallback set, blocks whose
 * clear state does not match @flags are accepted too.
 *
 * Returns the block, or ERR_PTR(-ENOSPC)/ERR_PTR(-ENOMEM).
 */
static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
		   u64 start, u64 end,
		   unsigned int order,
		   unsigned long flags,
		   bool fallback)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	/* Make the end inclusive for the overlaps()/contains() helpers. */
	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			/*
			 * The in-range slice must still fit a naturally
			 * aligned chunk of req_size, otherwise descending
			 * into this block is pointless.
			 */
			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (!fallback && block_incompatible(block, flags))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		/* Push left last so lower offsets are visited first. */
		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}
566
567static struct drm_buddy_block *
568__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
569 u64 start, u64 end,
570 unsigned int order,
571 unsigned long flags)
572{
573 struct drm_buddy_block *block;
574 bool fallback = false;
575
576 block = __alloc_range_bias(mm, start, end, order,
577 flags, fallback);
578 if (IS_ERR(block))
579 return __alloc_range_bias(mm, start, end, order,
580 flags, !fallback);
581
582 return block;
583}
584
/*
 * For TOPDOWN allocations: scan every order >= @order and return the
 * compatible free block with the highest offset, or NULL if none exists.
 */
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order,
	     unsigned long flags)
{
	struct drm_buddy_block *max_block = NULL, *block = NULL;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		struct drm_buddy_block *tmp_block;

		/* Lists are offset-sorted, so walk from the tail (highest). */
		list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
			if (block_incompatible(tmp_block, flags))
				continue;

			block = tmp_block;
			break;
		}

		/*
		 * NOTE: block is not reset per order, so it may still hold the
		 * previous order's candidate here; re-comparing that candidate
		 * against max_block below is harmless (offsets are equal).
		 */
		if (!block)
			continue;

		if (!max_block) {
			max_block = block;
			continue;
		}

		if (drm_buddy_block_offset(block) >
		    drm_buddy_block_offset(max_block)) {
			max_block = block;
		}
	}

	return max_block;
}
619
/*
 * Pick a free block of at least @order straight from the free lists
 * (TOPDOWN picks the highest-offset candidate), preferring blocks whose
 * clear state matches @flags and falling back to any free block. The
 * chosen block is then split down until it is exactly @order.
 *
 * Returns the block, or ERR_PTR(-ENOSPC)/ERR_PTR(-ENOMEM) on failure.
 */
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order, flags);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		/* Smallest adequate order first, compatible blocks only. */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			struct drm_buddy_block *tmp_block;

			list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
				if (block_incompatible(tmp_block, flags))
					continue;

				block = tmp_block;
				break;
			}

			if (block)
				break;
		}
	}

	if (!block) {
		/* Fallback method: accept any free block, clear state or not. */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}

		if (!block)
			return ERR_PTR(-ENOSPC);
	}

	BUG_ON(!drm_buddy_block_is_free(block));

	/* Split down, keeping the right half, until we reach @order. */
	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}
684
/*
 * Allocate every free chunk overlapping [start, start + size) from the
 * DFS stack @dfs, splitting boundary blocks as needed. On success the
 * blocks are appended to @blocks in offset order. On -ENOSPC, if
 * @total_allocated_on_err is non-NULL the partial allocation is handed
 * back to the caller (with its size reported); otherwise it is rolled
 * back.
 */
static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	/* Inclusive end for the overlaps()/contains() helpers. */
	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		/* Anything already allocated inside the range is fatal. */
		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (drm_buddy_block_is_free(block)) {
				mark_allocated(block);
				total_allocated += drm_buddy_block_size(mm, block);
				mm->avail -= drm_buddy_block_size(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				list_add_tail(&block->link, &allocated);
				continue;
			} else if (!mm->clear_avail) {
				/*
				 * Contained but split: only worth descending
				 * into when clear-state tracking may have
				 * fragmented it; otherwise bail early.
				 */
				err = -ENOSPC;
				goto err_free;
			}
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list_internal(mm, &allocated);
	}

	return err;
}
779
780static int __drm_buddy_alloc_range(struct drm_buddy *mm,
781 u64 start,
782 u64 size,
783 u64 *total_allocated_on_err,
784 struct list_head *blocks)
785{
786 LIST_HEAD(dfs);
787 int i;
788
789 for (i = 0; i < mm->n_roots; ++i)
790 list_add_tail(&mm->roots[i]->tmp_link, &dfs);
791
792 return __alloc_range(mm, &dfs, start, size,
793 blocks, total_allocated_on_err);
794}
795
/*
 * Contiguous-allocation fallback: anchor on a free block of the largest
 * power-of-two order not exceeding @size, fill rightwards (RHS) from its
 * offset, then complete the shortfall leftwards (LHS) so the result is
 * one contiguous range. Each candidate anchor block is tried in turn.
 *
 * Returns 0 on success, -ENOSPC if no anchor works, other errno on
 * failure.
 */
static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	/* Anchor order: largest power of two that fits inside size. */
	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		if (!err || err != -ENOSPC)
			return err;

		/* Remainder, rounded up to the alignment granule. */
		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list_internal(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list_internal(mm, blocks);
	}

	return -ENOSPC;
}
849
/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @start: start address to begin the trimming.
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value, drivers consume *actual* size, so remaining
 * portions are unused and can be optionally freed with this function
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 *start,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	u64 block_start, block_end;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	block_start = drm_buddy_block_offset(block);
	block_end = block_start + drm_buddy_block_size(mm, block);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	/* Nothing to trim: the block is already exactly the wanted size. */
	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	new_start = block_start;
	if (start) {
		new_start = *start;

		if (new_start < block_start)
			return -EINVAL;

		if (!IS_ALIGNED(new_start, mm->chunk_size))
			return -EINVAL;

		if (range_overflows(new_start, new_size, block_end))
			return -EINVAL;
	}

	/* Temporarily hand the whole block back to the allocator... */
	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	/* ...then re-allocate just the wanted sub-range out of it. */
	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		/* Re-allocation failed: restore the original block intact. */
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
941
942static struct drm_buddy_block *
943__drm_buddy_alloc_blocks(struct drm_buddy *mm,
944 u64 start, u64 end,
945 unsigned int order,
946 unsigned long flags)
947{
948 if (flags & DRM_BUDDY_RANGE_ALLOCATION)
949 /* Allocate traversing within the range */
950 return __drm_buddy_alloc_range_bias(mm, start, end,
951 order, flags);
952 else
953 /* Allocate from freelist */
954 return alloc_from_freelist(mm, order, flags);
955}
956
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() called on range limitations, which traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() called when *no* range restrictions
 * are enforced, which picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	/* Remember the caller's request before rounding for later trim. */
	original_size = size;
	original_min_size = min_block_size;

	/* Roundup the size to power of 2 */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		/* Never request more than the pages still outstanding. */
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		/* Walk the order down towards min_order until one fits. */
		do {
			block = __drm_buddy_alloc_blocks(mm, start,
							 end,
							 order,
							 flags);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				/* Try allocation through force merge method */
				if (mm->clear_avail &&
				    !__force_merge(mm, start, end, min_order)) {
					block = __drm_buddy_alloc_blocks(mm, start,
									 end,
									 min_order,
									 flags);
					if (!IS_ERR(block)) {
						order = min_order;
						break;
					}
				}

				/*
				 * Try contiguous block allocation through
				 * try harder method.
				 */
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
	    original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		/*
		 * Multiple blocks: only the last one carries the rounding
		 * excess, so trim just that one on a side list.
		 */
		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				(size - original_size);
		}

		drm_buddy_block_trim(mm,
				     NULL,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list_internal(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
1122
1123/**
1124 * drm_buddy_block_print - print block information
1125 *
1126 * @mm: DRM buddy manager
1127 * @block: DRM buddy block
1128 * @p: DRM printer to use
1129 */
1130void drm_buddy_block_print(struct drm_buddy *mm,
1131 struct drm_buddy_block *block,
1132 struct drm_printer *p)
1133{
1134 u64 start = drm_buddy_block_offset(block);
1135 u64 size = drm_buddy_block_size(mm, block);
1136
1137 drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
1138}
1139EXPORT_SYMBOL(drm_buddy_block_print);
1140
/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

	/* One summary line per order, largest blocks first. */
	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		/* Total free bytes held at this order. */
		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);
1175
/* Module unload: release the block slab cache. */
static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}
1180
1181static int __init drm_buddy_module_init(void)
1182{
1183 slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
1184 if (!slab_blocks)
1185 return -ENOMEM;
1186
1187 return 0;
1188}
1189
1190module_init(drm_buddy_module_init);
1191module_exit(drm_buddy_module_exit);
1192
1193MODULE_DESCRIPTION("DRM Buddy Allocator");
1194MODULE_LICENSE("Dual MIT/GPL");
1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5
6#include <linux/kmemleak.h>
7#include <linux/module.h>
8#include <linux/sizes.h>
9
10#include <drm/drm_buddy.h>
11
12static struct kmem_cache *slab_blocks;
13
/*
 * NOTE(review): duplicate of the earlier drm_block_alloc() in this file —
 * this whole trailing section looks like an older revision pasted below
 * the current one and will not compile alongside it; confirm and drop
 * one copy.
 */
/* Allocate and initialise one tree node of @order at @offset under @parent. */
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}
34
/* NOTE(review): duplicate of the earlier drm_block_free() — see note above drm_block_alloc duplicate. */
static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}
40
/*
 * NOTE(review): duplicate of the earlier list_insert_sorted() — older
 * pasted revision; will not compile alongside the first copy.
 */
/* Insert @block into its order's free list, sorted by ascending offset. */
static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	/* Falls through to a tail insert when no larger offset was found. */
	__list_add(&block->link, node->link.prev, &node->link);
}
59
/* NOTE(review): duplicate of the earlier mark_allocated() — older pasted revision. */
static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}
67
/* NOTE(review): duplicate of the earlier mark_free() — older pasted revision. */
static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}
76
/* NOTE(review): duplicate of the earlier mark_split() — older pasted revision. */
static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}
84
/*
 * NOTE(review): duplicate of the earlier drm_buddy_init() from an older
 * revision (PAGE_SIZE minimum instead of SZ_4K, no clear_avail field) —
 * will not compile alongside the first copy; confirm and drop one.
 */
/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/* One root per set bit of size (power-of-two decomposition below). */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		/* Peel off the largest power-of-two chunk that fits. */
		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
179
180/**
181 * drm_buddy_fini - tear down the memory manager
182 *
183 * @mm: DRM buddy manager to free
184 *
185 * Cleanup memory manager resources and the freelist
186 */
187void drm_buddy_fini(struct drm_buddy *mm)
188{
189 int i;
190
191 for (i = 0; i < mm->n_roots; ++i) {
192 WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
193 drm_block_free(mm, mm->roots[i]);
194 }
195
196 WARN_ON(mm->avail != mm->size);
197
198 kfree(mm->roots);
199 kfree(mm->free_list);
200}
201EXPORT_SYMBOL(drm_buddy_fini);
202
/*
 * Split a free block into two buddies of half the size. Both children are
 * allocated, marked free (entering their order's free list), and @block
 * transitions to the split state. Returns -ENOMEM if a child allocation
 * fails, leaving @block untouched.
 */
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));	/* order-0 cannot be split */

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	/* Right child starts half-way through the parent's span. */
	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}
230
231static struct drm_buddy_block *
232__get_buddy(struct drm_buddy_block *block)
233{
234 struct drm_buddy_block *parent;
235
236 parent = block->parent;
237 if (!parent)
238 return NULL;
239
240 if (parent->left == block)
241 return parent->right;
242
243 return parent->left;
244}
245
/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	/* Thin exported wrapper around the internal helper. */
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
262
/*
 * Return @block to the allocator, greedily merging with its buddy and
 * walking up the tree for as long as the buddy is also free. The final
 * (possibly merged) block is placed back on its order's free list.
 */
static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		/* Buddy still in use (or split): cannot merge further. */
		if (!drm_buddy_block_is_free(buddy))
			break;

		/* Both halves free: retire them, the parent takes over. */
		list_del(&buddy->link);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}
286
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	/* Account the space before the block may merge with its buddies. */
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);
301
/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	/*
	 * _safe iteration is required: freeing re-links block->link into
	 * the free lists, invalidating the node's position on @objects.
	 */
	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		/* Lists can be long; be nice to the scheduler. */
		cond_resched();
	}
	/* The link fields were stolen by the free lists; reset the head. */
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
319
/* True when the inclusive ranges [s1, e1] and [s2, e2] share any offset. */
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return !(e1 < s2 || e2 < s1);
}
324
/* True when the inclusive range [s2, e2] lies entirely within [s1, e1]. */
static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s2 >= s1 && e2 <= e1;
}
329
/*
 * Depth-first search for one free block of exactly @order chunks lying
 * within [start, end), splitting larger blocks on the way down as needed.
 * Returns the block on success, ERR_PTR(-ENOSPC) when nothing suitable
 * exists, or ERR_PTR(-ENOMEM) if a split fails.
 */
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
		 u64 start, u64 end,
		 unsigned int order)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	/* Work with inclusive bounds from here on. */
	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		/* Too small to ever satisfy the request; don't descend. */
		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		/*
		 * Partial overlap: descending is only worthwhile if the
		 * overlapping part can still hold a naturally-aligned
		 * block of req_size bytes.
		 */
		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		/* Push right then left so the left child is visited first. */
		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
416
417static struct drm_buddy_block *
418get_maxblock(struct drm_buddy *mm, unsigned int order)
419{
420 struct drm_buddy_block *max_block = NULL, *node;
421 unsigned int i;
422
423 for (i = order; i <= mm->max_order; ++i) {
424 if (!list_empty(&mm->free_list[i])) {
425 node = list_last_entry(&mm->free_list[i],
426 struct drm_buddy_block,
427 link);
428 if (!max_block) {
429 max_block = node;
430 continue;
431 }
432
433 if (drm_buddy_block_offset(node) >
434 drm_buddy_block_offset(max_block)) {
435 max_block = node;
436 }
437 }
438 }
439
440 return max_block;
441}
442
/*
 * Pick a free block of at least @order chunks straight from the free lists
 * (no range restrictions) and split it down to exactly @order. With
 * DRM_BUDDY_TOPDOWN_ALLOCATION the highest-offset candidate is preferred.
 * Returns ERR_PTR(-ENOSPC) when no free block is large enough, or
 * ERR_PTR(-ENOMEM) if a split fails.
 */
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		/* First-fit: smallest order with a free block wins. */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

	/* Split an oversized block down until it is exactly @order. */
	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		/* Keep descending right; the left half stays free. */
		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	/* Merge any partial splits back before reporting the failure. */
	if (tmp != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
489
/*
 * Allocate every free block lying inside [start, start + size) from the
 * DFS stack @dfs (seeded by the caller), splitting blocks that straddle
 * the range boundaries.
 *
 * On success the blocks are appended to @blocks. On -ENOSPC, when
 * @total_allocated_on_err is non-NULL, the partial allocation is kept on
 * @blocks and its size reported through the pointer; otherwise everything
 * is rolled back.
 */
static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	/* Inclusive end of the requested range. */
	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		/* Entirely outside the requested range. */
		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		/* Fully inside the range: take the whole block. */
		if (contains(start, end, block_start, block_end)) {
			if (!drm_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			total_allocated += drm_buddy_block_size(mm, block);
			mm->avail -= drm_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		/* Straddles a boundary: split and keep descending. */
		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		/* Caller wants the partial result, e.g. try-harder path. */
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list(mm, &allocated);
	}

	return err;
}
582
583static int __drm_buddy_alloc_range(struct drm_buddy *mm,
584 u64 start,
585 u64 size,
586 u64 *total_allocated_on_err,
587 struct list_head *blocks)
588{
589 LIST_HEAD(dfs);
590 int i;
591
592 for (i = 0; i < mm->n_roots; ++i)
593 list_add_tail(&mm->roots[i]->tmp_link, &dfs);
594
595 return __alloc_range(mm, &dfs, start, size,
596 blocks, total_allocated_on_err);
597}
598
/*
 * Contiguous-allocation fallback: for each free block of the largest
 * power-of-two order not exceeding @size, try to allocate @size bytes
 * starting at that block (RHS); when that comes up short, top up the
 * remainder immediately before it (LHS). Succeeds only when LHS + RHS
 * form one contiguous span. Returns 0 on success, -ENOSPC when no
 * candidate works, or another error code from the range allocator.
 */
static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	/* Anchor order: largest power-of-two at or below @size. */
	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		if (!err || err != -ENOSPC)
			return err;

		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		/*
		 * NOTE(review): lhs_offset wraps (u64 underflow) when the
		 * block sits closer to offset 0 than lhs_size; the resulting
		 * range then fails to overlap anything and we get -ENOSPC —
		 * confirm this is the intended behaviour.
		 */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list(mm, blocks);
	}

	return -ENOSPC;
}
652
/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value, drivers consume *actual* size, so remaining
 * portions are unused and can be optionally freed with this function
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	/* Already the requested size: nothing to trim. */
	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	/* Temporarily give the block back, then re-allocate just the head. */
	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		/* Roll back: restore the original full-size allocation. */
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
721
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() called on range limitations, which traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() called when *no* range restrictions
 * are enforced, which picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		/* Range exactly matches the size: take it verbatim. */
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	/* Remember the caller's values; @size may be rounded up below. */
	original_size = size;
	original_min_size = min_block_size;

	/* Roundup the size to power of 2 */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		/* Never request more than the remaining page count needs. */
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
				/* Allocate traversing within the range */
				block = alloc_range_bias(mm, start, end, order);
			else
				/* Allocate from freelist */
				block = alloc_from_freelist(mm, order, flags);

			if (!IS_ERR(block))
				break;

			/* Retry with smaller orders down to min_order. */
			if (order-- == min_order) {
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					/*
					 * Try contiguous block allocation through
					 * try harder method
					 */
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		/*
		 * Multiple blocks: only the last (highest-offset) block can
		 * carry the rounding slack, so trim just that one.
		 */
		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				(size - original_size);
		}

		/* Best effort; on failure the block simply stays untrimmed. */
		drm_buddy_block_trim(mm,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
873
874/**
875 * drm_buddy_block_print - print block information
876 *
877 * @mm: DRM buddy manager
878 * @block: DRM buddy block
879 * @p: DRM printer to use
880 */
881void drm_buddy_block_print(struct drm_buddy *mm,
882 struct drm_buddy_block *block,
883 struct drm_printer *p)
884{
885 u64 start = drm_buddy_block_offset(block);
886 u64 size = drm_buddy_block_size(mm, block);
887
888 drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
889}
890EXPORT_SYMBOL(drm_buddy_block_print);
891
892/**
893 * drm_buddy_print - print allocator state
894 *
895 * @mm: DRM buddy manager
896 * @p: DRM printer to use
897 */
898void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
899{
900 int order;
901
902 drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
903 mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
904
905 for (order = mm->max_order; order >= 0; order--) {
906 struct drm_buddy_block *block;
907 u64 count = 0, free;
908
909 list_for_each_entry(block, &mm->free_list[order], link) {
910 BUG_ON(!drm_buddy_block_is_free(block));
911 count++;
912 }
913
914 drm_printf(p, "order-%2d ", order);
915
916 free = count * (mm->chunk_size << order);
917 if (free < SZ_1M)
918 drm_printf(p, "free: %8llu KiB", free >> 10);
919 else
920 drm_printf(p, "free: %8llu MiB", free >> 20);
921
922 drm_printf(p, ", blocks: %llu\n", count);
923 }
924}
925EXPORT_SYMBOL(drm_buddy_print);
926
/* Module teardown: destroy the block slab cache. */
static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}
931
932static int __init drm_buddy_module_init(void)
933{
934 slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
935 if (!slab_blocks)
936 return -ENOMEM;
937
938 return 0;
939}
940
941module_init(drm_buddy_module_init);
942module_exit(drm_buddy_module_exit);
943
944MODULE_DESCRIPTION("DRM Buddy Allocator");
945MODULE_LICENSE("Dual MIT/GPL");