// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

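/* Slab cache backing every struct drm_buddy_block allocation below. */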
static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

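/*
 * Each free list is kept sorted by block offset, so walking a list in
 * reverse yields the highest-offset blocks first (used by top-down
 * allocation below).
 */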
static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	__list_add(&block->link, node->link.prev, &node->link);
}

static void clear_reset(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}

static void mark_cleared(struct drm_buddy_block *block)
{
	block->header |= DRM_BUDDY_HEADER_CLEAR;
}

static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}

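/* Both helpers operate on inclusive ranges: [s1, e1] against [s2, e2]. */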
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

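/*
 * Mark @block free and merge it with its buddy as far up the tree as
 * possible. Without @force_merge, a block and its buddy must have matching
 * clear states to be coalesced; with it, mismatched pairs are merged anyway
 * and the merged block is treated as dirty. Returns the order of the final
 * free block.
 */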
static unsigned int __drm_buddy_free(struct drm_buddy *mm,
				     struct drm_buddy_block *block,
				     bool force_merge)
{
	struct drm_buddy_block *parent;
	unsigned int order;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		if (!force_merge) {
			/*
			 * Stop merging if the block and its buddy have
			 * different clear states.
			 */
			if (drm_buddy_block_is_clear(block) !=
			    drm_buddy_block_is_clear(buddy))
				break;

			if (drm_buddy_block_is_clear(block))
				mark_cleared(parent);
		}

		list_del(&buddy->link);
		if (force_merge && drm_buddy_block_is_clear(buddy))
			mm->clear_avail -= drm_buddy_block_size(mm, buddy);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	order = drm_buddy_block_order(block);
	mark_free(mm, block);

	return order;
}

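/*
 * Scan the free lists below @min_order and force-merge free buddy pairs
 * inside [@start, @end] (typically pairs left unmerged because their clear
 * states differ) until a free block of at least @min_order exists.
 */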
static int __force_merge(struct drm_buddy *mm,
			 u64 start,
			 u64 end,
			 unsigned int min_order)
{
	unsigned int order;
	int i;

	if (!min_order)
		return -ENOMEM;

	if (min_order > mm->max_order)
		return -EINVAL;

	for (i = min_order - 1; i >= 0; i--) {
		struct drm_buddy_block *block, *prev;

		list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
			struct drm_buddy_block *buddy;
			u64 block_start, block_end;

			if (!block->parent)
				continue;

			block_start = drm_buddy_block_offset(block);
			block_end = block_start + drm_buddy_block_size(mm, block) - 1;

			if (!contains(start, end, block_start, block_end))
				continue;

			buddy = __get_buddy(block);
			if (!drm_buddy_block_is_free(buddy))
				continue;

			WARN_ON(drm_buddy_block_is_clear(block) ==
				drm_buddy_block_is_clear(buddy));

			/*
			 * If the previous block is the buddy, skip past it:
			 * the buddy is freed as part of __drm_buddy_free()
			 * and must not be touched on the next iteration.
			 */
			if (prev == buddy)
				prev = list_prev_entry(prev, link);

			list_del(&block->link);
			if (drm_buddy_block_is_clear(block))
				mm->clear_avail -= drm_buddy_block_size(mm, block);

			order = __drm_buddy_free(mm, block, true);
			if (order >= min_order)
				return 0;
		}
	}

	return -ENOMEM;
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < SZ_4K)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->clear_avail = 0;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);

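/*
 * Typical lifecycle (a minimal sketch; the 4 GiB size and 4 KiB chunk
 * size are illustrative, not required values):
 *
 *	struct drm_buddy mm;
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_4G, SZ_4K);
 *	if (!err) {
 *		err = drm_buddy_alloc_blocks(&mm, 0, SZ_4G, SZ_8M, SZ_4K,
 *					     &blocks, 0);
 *		if (!err)
 *			drm_buddy_free_list(&mm, &blocks, 0);
 *		drm_buddy_fini(&mm);
 *	}
 */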
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleanup memory manager resources and the freelist
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	u64 root_size, size;
	unsigned int order;
	int i;

	size = mm->size;

	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		__force_merge(mm, 0, size, order);

		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

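/*
 * Split a free block into two half-order buddies. Both children go on the
 * free list, and a cleared parent propagates its clear state to the
 * children before being marked split.
 */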
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	if (drm_buddy_block_is_clear(block)) {
		mark_cleared(block->left);
		mark_cleared(block->right);
		clear_reset(block);
	}

	mark_split(block);

	return 0;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	__drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);

static void __drm_buddy_free_list(struct drm_buddy *mm,
				  struct list_head *objects,
				  bool mark_clear,
				  bool mark_dirty)
{
	struct drm_buddy_block *block, *on;

	WARN_ON(mark_dirty && mark_clear);

	list_for_each_entry_safe(block, on, objects, link) {
		if (mark_clear)
			mark_cleared(block);
		else if (mark_dirty)
			clear_reset(block);
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 * @flags: optional flags like DRM_BUDDY_CLEARED
 */
void drm_buddy_free_list(struct drm_buddy *mm,
			 struct list_head *objects,
			 unsigned int flags)
{
	bool mark_clear = flags & DRM_BUDDY_CLEARED;

	__drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
}
EXPORT_SYMBOL(drm_buddy_free_list);

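/*
 * Example (a sketch): a driver that has zeroed the backing memory can
 * return blocks with the cleared bit set, making them eligible for later
 * DRM_BUDDY_CLEAR_ALLOCATION requests:
 *
 *	drm_buddy_free_list(&mm, &blocks, DRM_BUDDY_CLEARED);
 */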
static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
{
	bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;

	return needs_clear != drm_buddy_block_is_clear(block);
}

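/*
 * Depth-first search from the roots for a free block of exactly @order
 * inside [@start, @end), splitting larger candidates on the way down.
 * Subtrees that cannot hold a suitably aligned block of the requested
 * size are pruned.
 */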
static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
		   u64 start, u64 end,
		   unsigned int order,
		   unsigned long flags,
		   bool fallback)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (!fallback && block_incompatible(block, flags))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

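/*
 * The first pass honours the clear/dirty preference in @flags; if nothing
 * compatible is found, retry accepting any free block.
 */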
static struct drm_buddy_block *
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
			     u64 start, u64 end,
			     unsigned int order,
			     unsigned long flags)
{
	struct drm_buddy_block *block;
	bool fallback = false;

	block = __alloc_range_bias(mm, start, end, order,
				   flags, fallback);
	if (IS_ERR(block))
		return __alloc_range_bias(mm, start, end, order,
					  flags, !fallback);

	return block;
}

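/*
 * For top-down allocation: of all compatible free blocks of at least
 * @order, return the one with the highest offset.
 */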
static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order,
	     unsigned long flags)
{
	struct drm_buddy_block *max_block = NULL, *block = NULL;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		struct drm_buddy_block *tmp_block;

		list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
			if (block_incompatible(tmp_block, flags))
				continue;

			block = tmp_block;
			break;
		}

		if (!block)
			continue;

		if (!max_block) {
			max_block = block;
			continue;
		}

		if (drm_buddy_block_offset(block) >
		    drm_buddy_block_offset(max_block)) {
			max_block = block;
		}
	}

	return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order, flags);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			struct drm_buddy_block *tmp_block;

			list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
				if (block_incompatible(tmp_block, flags))
					continue;

				block = tmp_block;
				break;
			}

			if (block)
				break;
		}
	}

	if (!block) {
		/* Fallback method */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}

		if (!block)
			return ERR_PTR(-ENOSPC);
	}

	BUG_ON(!drm_buddy_block_is_free(block));

	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

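/*
 * Allocate every block needed to cover [@start, @start + @size), splitting
 * boundary blocks as required. On -ENOSPC the partial allocation is either
 * handed back through @total_allocated_on_err or released.
 */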
static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (drm_buddy_block_is_free(block)) {
				mark_allocated(block);
				total_allocated += drm_buddy_block_size(mm, block);
				mm->avail -= drm_buddy_block_size(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				list_add_tail(&block->link, &allocated);
				continue;
			} else if (!mm->clear_avail) {
				err = -ENOSPC;
				goto err_free;
			}
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list_internal(mm, &allocated);
	}

	return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   u64 *total_allocated_on_err,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size,
			     blocks, total_allocated_on_err);
}

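/*
 * Contiguous fallback: anchor on a free block of the largest power-of-two
 * order not exceeding @size, try to allocate the full range growing to the
 * right of it, and cover any shortfall by also growing to the left.
 */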
static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		if (!err || err != -ENOSPC)
			return err;

		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list_internal(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list_internal(mm, blocks);
	}

	return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @start: start address to begin the trimming
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value; drivers consume the *actual* size, so the
 * remaining portions are unused and can optionally be freed with
 * this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 *start,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	u64 block_start, block_end;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	block_start = drm_buddy_block_offset(block);
	block_end = block_start + drm_buddy_block_size(mm, block);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	new_start = block_start;
	if (start) {
		new_start = *start;

		if (new_start < block_start)
			return -EINVAL;

		if (!IS_ALIGNED(new_start, mm->chunk_size))
			return -EINVAL;

		if (range_overflows(new_start, new_size, block_end))
			return -EINVAL;
	}

	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);

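/*
 * Example (a sketch; sizes are illustrative): a 6 MiB contiguous request
 * gets rounded up to 8 MiB, so the unused tail can be released:
 *
 *	LIST_HEAD(blocks);
 *
 *	if (!drm_buddy_alloc_blocks(&mm, 0, mm.size, 6 * SZ_1M, SZ_4K,
 *				    &blocks,
 *				    DRM_BUDDY_CONTIGUOUS_ALLOCATION |
 *				    DRM_BUDDY_TRIM_DISABLE))
 *		drm_buddy_block_trim(&mm, NULL, 6 * SZ_1M, &blocks);
 */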
static struct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
			 u64 start, u64 end,
			 unsigned int order,
			 unsigned long flags)
{
	if (flags & DRM_BUDDY_RANGE_ALLOCATION)
		/* Allocate traversing within the range */
		return __drm_buddy_alloc_range_bias(mm, start, end,
						    order, flags);
	else
		/* Allocate from freelist */
		return alloc_from_freelist(mm, order, flags);
}

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * When range limitations are enforced, alloc_range_bias() traverses
 * the tree and returns the desired block.
 *
 * When *no* range restrictions are enforced, alloc_from_freelist()
 * picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	original_size = size;
	original_min_size = min_block_size;

	/* Roundup the size to power of 2 */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			block = __drm_buddy_alloc_blocks(mm, start,
							 end,
							 order,
							 flags);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				/* Try allocation through force merge method */
				if (mm->clear_avail &&
				    !__force_merge(mm, start, end, min_order)) {
					block = __drm_buddy_alloc_blocks(mm, start,
									 end,
									 min_order,
									 flags);
					if (!IS_ERR(block)) {
						order = min_order;
						break;
					}
				}

				/*
				 * Try contiguous block allocation through
				 * try harder method.
				 */
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
	    original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				(size - original_size);
		}

		drm_buddy_block_trim(mm,
				     NULL,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list_internal(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);

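/*
 * Example (a sketch): request 16 MiB of already-cleared memory from the
 * top of the address space (sizes are illustrative):
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, SZ_16M, SZ_4K,
 *				     &blocks,
 *				     DRM_BUDDY_TOPDOWN_ALLOCATION |
 *				     DRM_BUDDY_CLEAR_ALLOCATION);
 */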
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);

static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");