/* drivers/gpu/drm/drm_mm.c, as of Linux v5.9 */

/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. Drivers are free to use the
 * resource allocator from the Linux core if it suits them; the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm; allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable data
 * structures. drm_mm itself will not do any memory allocations of its own,
 * so if drivers choose not to embed nodes they still need to allocate them
 * themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding looped dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic data structures seems to be a fairly decent approach to
 * clustering allocations and avoiding too much fragmentation. This means free
 * space searches are O(num_holes). Given all the fancy features drm_mm
 * supports, something better would be fairly complex, and since gfx thrashing
 * is a fairly steep cliff it is not a real concern. Removing a node again is
 * O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore, every &drm_mm_node has a color value (which is just
 * an opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe; drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
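
/*
 * Example (editor's sketch, not part of the original file): the minimal
 * driver-side flow, assuming a made-up 1 MiB address range and eliding the
 * external locking that, as noted above, callers must provide themselves.
 */
static int example_mm_usage(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};	/* must be cleared to 0 */
	int ret;

	drm_mm_init(&mm, 0, 1024 * 1024);	/* manage [0, 1 MiB) */

	/* First-fit allocation of 4 KiB, no alignment or color. */
	ret = drm_mm_insert_node(&mm, &node, 4096);
	if (ret == 0)
		drm_mm_remove_node(&node);

	drm_mm_takedown(&mm);	/* allocator must be clean at this point */
	return ret;
}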

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	unsigned long *entries;
	unsigned int nr_entries;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		nr_entries = stack_depot_fetch(node->stack, &entries);
		stack_trace_snprint(buf, BUFSZ, entries, nr_entries, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif

#define START(node) ((node)->start)
#define LAST(node)  ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

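/*
 * Holes are kept in a size-ordered rb-tree, largest first: a bigger hole
 * sorts left, so the cached leftmost node (rb_first_cached()) is always the
 * largest hole. That makes the "is any hole big enough?" check in
 * drm_mm_insert_node_in_range() O(1).
 */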
static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

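/*
 * Best-fit descent: holes sort largest-to-smallest from left to right, so
 * whenever the current hole fits we remember it and keep walking right in
 * search of a tighter fit.
 */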
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

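/*
 * Walk the address-ordered tree towards @addr, pruning every subtree whose
 * subtree_max_hole shows it cannot contain a hole of at least @size; the
 * descent stops on the hole containing @addr, or on the closest usable node
 * visited so far.
 */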
static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */

#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

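/*
 * next_hole_high_addr() steps to the next usable hole at a lower address
 * (the in-order predecessor), for top-down searches; next_hole_low_addr()
 * steps to the next one at a higher address (the in-order successor), for
 * bottom-up searches.
 */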
DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
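
/*
 * Example (editor's sketch, not part of the original file): reserving the
 * range a firmware-programmed framebuffer already occupies, before normal
 * allocations start. fw_fb_base and fw_fb_size are hypothetical values read
 * back from the hardware.
 */
static int example_reserve_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
				 u64 fw_fb_base, u64 fw_fb_size)
{
	memset(node, 0, sizeof(*node));	/* all other fields must be 0 */
	node->start = fw_fb_base;
	node->size = fw_fb_size;

	return drm_mm_reserve_node(mm, node);	/* -ENOSPC if range is taken */
}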

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

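	/*
	 * For power-of-two alignments the remainder can be computed with a
	 * cheap mask; otherwise the loop below falls back to a 64-bit
	 * division per candidate hole.
	 */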
	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
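
/*
 * Example (editor's sketch, not part of the original file): a 64 KiB,
 * 4 KiB-aligned block placed top-down in the upper half of a hypothetical
 * 1 MiB managed range.
 */
static int example_insert_topdown(struct drm_mm *mm, struct drm_mm_node *node)
{
	return drm_mm_insert_node_in_range(mm, node,
					   0x10000, 0x1000,	/* 64 KiB, 4 KiB-aligned */
					   0,			/* color */
					   0x80000, 0x100000,	/* upper half of 1 MiB */
					   DRM_MM_INSERT_HIGH);
}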

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need to
 * be cleared again before it can be re-inserted into this or any other drm_mm
 * allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful for when drivers embed the drm_mm_node structure and hence
 * can't move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not the most
 * efficient to simply start selecting objects from the tail of an LRU until
 * there's a suitable hole: Especially for big objects or nodes that otherwise
 * have special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So, like the free stack which needs to be walked before
 * a scan operation even begins, this is linear in the number of objects. It
 * doesn't seem to hurt too badly. A sketch of the full protocol follows below.
 */
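
/*
 * Example (editor's sketch, not part of the original file): the scan
 * protocol end-to-end. 'struct example_obj' with its 'lru' list head is a
 * hypothetical stand-in for driver state; freeing the evicted objects'
 * backing storage is elided.
 */
struct example_obj {
	struct drm_mm_node node;
	struct list_head lru;
};

static int example_evict_for(struct drm_mm *mm, struct list_head *lru,
			     u64 size)
{
	struct drm_mm_scan scan;
	struct example_obj *obj, *next;
	bool found = false;
	LIST_HEAD(roster);

	drm_mm_scan_init(&scan, mm, size, 0, 0, DRM_MM_INSERT_EVICT);

	/* Add candidates, oldest first, until a suitable hole appears. */
	list_for_each_entry_safe(obj, next, lru, lru) {
		list_move(&obj->lru, &roster);	/* prepend: records order */
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/*
	 * Because list_move() prepends, walking the roster forwards visits
	 * the nodes in exactly the reverse order of addition, as required.
	 */
	list_for_each_entry_safe(obj, next, &roster, lru) {
		bool evict = drm_mm_scan_remove_block(&scan, &obj->node);

		list_move_tail(&obj->lru, lru);	/* back onto the LRU */
		if (evict)
			drm_mm_remove_node(&obj->node);
	}

	/* With a color_adjust callback, also reap drm_mm_scan_color_evict(). */
	return found ? 0 : -ENOSPC;
}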

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the desired
 * hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the desired
 * hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
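
/*
 * Example (editor's sketch, not part of the original file): dumping the
 * allocator state through a drm_printer from <drm/drm_print.h> (assumed to
 * be available here); 'dev' is a hypothetical struct drm_device pointer.
 */
static void example_dump_mm(struct drm_device *dev, const struct drm_mm *mm)
{
	struct drm_printer p = drm_info_printer(dev->dev);

	drm_mm_print(mm, &p);
}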
v6.2
   1/**************************************************************************
   2 *
   3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
   4 * Copyright 2016 Intel Corporation
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 *
  28 **************************************************************************/
  29
  30/*
  31 * Generic simple memory manager implementation. Intended to be used as a base
  32 * class implementation for more advanced memory managers.
  33 *
  34 * Note that the algorithm used is quite simple and there might be substantial
  35 * performance gains if a smarter free list is implemented. Currently it is
  36 * just an unordered stack of free regions. This could easily be improved if
  37 * an RB-tree is used instead. At least if we expect heavy fragmentation.
  38 *
  39 * Aligned allocations can also see improvement.
  40 *
  41 * Authors:
  42 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
  43 */
  44
  45#include <linux/export.h>
  46#include <linux/interval_tree_generic.h>
  47#include <linux/seq_file.h>
  48#include <linux/slab.h>
  49#include <linux/stacktrace.h>
  50
  51#include <drm/drm_mm.h>
  52
  53/**
  54 * DOC: Overview
  55 *
  56 * drm_mm provides a simple range allocator. The drivers are free to use the
  57 * resource allocator from the linux core if it suits them, the upside of drm_mm
  58 * is that it's in the DRM core. Which means that it's easier to extend for
  59 * some of the crazier special purpose needs of gpus.
  60 *
  61 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
  62 * Drivers are free to embed either of them into their own suitable
  63 * datastructures. drm_mm itself will not do any memory allocations of its own,
  64 * so if drivers choose not to embed nodes they need to still allocate them
  65 * themselves.
  66 *
  67 * The range allocator also supports reservation of preallocated blocks. This is
  68 * useful for taking over initial mode setting configurations from the firmware,
  69 * where an object needs to be created which exactly matches the firmware's
  70 * scanout target. As long as the range is still free it can be inserted anytime
  71 * after the allocator is initialized, which helps with avoiding looped
  72 * dependencies in the driver load sequence.
  73 *
  74 * drm_mm maintains a stack of most recently freed holes, which of all
  75 * simplistic datastructures seems to be a fairly decent approach to clustering
  76 * allocations and avoiding too much fragmentation. This means free space
  77 * searches are O(num_holes). Given that all the fancy features drm_mm supports
  78 * something better would be fairly complex and since gfx thrashing is a fairly
  79 * steep cliff not a real concern. Removing a node again is O(1).
  80 *
  81 * drm_mm supports a few features: Alignment and range restrictions can be
  82 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
  83 * opaque unsigned long) which in conjunction with a driver callback can be used
  84 * to implement sophisticated placement restrictions. The i915 DRM driver uses
  85 * this to implement guard pages between incompatible caching domains in the
  86 * graphics TT.
  87 *
  88 * Two behaviors are supported for searching and allocating: bottom-up and
  89 * top-down. The default is bottom-up. Top-down allocation can be used if the
  90 * memory area has different restrictions, or just to reduce fragmentation.
  91 *
  92 * Finally iteration helpers to walk all nodes and all holes are provided as are
  93 * some basic allocator dumpers for debugging.
  94 *
  95 * Note that this range allocator is not thread-safe, drivers need to protect
  96 * modifications with their own locking. The idea behind this is that for a full
  97 * memory manager additional data needs to be protected anyway, hence internal
  98 * locking would be fully redundant.
  99 */
 100
 101#ifdef CONFIG_DRM_DEBUG_MM
 102#include <linux/stackdepot.h>
 103
 104#define STACKDEPTH 32
 105#define BUFSZ 4096
 106
 107static noinline void save_stack(struct drm_mm_node *node)
 108{
 109	unsigned long entries[STACKDEPTH];
 110	unsigned int n;
 111
 112	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
 113
 114	/* May be called under spinlock, so avoid sleeping */
 115	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
 116}
 117
 118static void show_leaks(struct drm_mm *mm)
 119{
 120	struct drm_mm_node *node;
 
 
 121	char *buf;
 122
 123	buf = kmalloc(BUFSZ, GFP_KERNEL);
 124	if (!buf)
 125		return;
 126
 127	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
 128		if (!node->stack) {
 129			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
 130				  node->start, node->size);
 131			continue;
 132		}
 133
 134		stack_depot_snprint(node->stack, buf, BUFSZ, 0);
 
 135		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
 136			  node->start, node->size, buf);
 137	}
 138
 139	kfree(buf);
 140}
 141
 142#undef STACKDEPTH
 143#undef BUFSZ
 144#else
 145static void save_stack(struct drm_mm_node *node) { }
 146static void show_leaks(struct drm_mm *mm) { }
 147#endif
 148
 149#define START(node) ((node)->start)
 150#define LAST(node)  ((node)->start + (node)->size - 1)
 151
 152INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
 153		     u64, __subtree_last,
 154		     START, LAST, static inline, drm_mm_interval_tree)
 155
 156struct drm_mm_node *
 157__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
 158{
 159	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
 160					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
 161}
 162EXPORT_SYMBOL(__drm_mm_interval_first);
 163
 164static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
 165					  struct drm_mm_node *node)
 166{
 167	struct drm_mm *mm = hole_node->mm;
 168	struct rb_node **link, *rb;
 169	struct drm_mm_node *parent;
 170	bool leftmost;
 171
 172	node->__subtree_last = LAST(node);
 173
 174	if (drm_mm_node_allocated(hole_node)) {
 175		rb = &hole_node->rb;
 176		while (rb) {
 177			parent = rb_entry(rb, struct drm_mm_node, rb);
 178			if (parent->__subtree_last >= node->__subtree_last)
 179				break;
 180
 181			parent->__subtree_last = node->__subtree_last;
 182			rb = rb_parent(rb);
 183		}
 184
 185		rb = &hole_node->rb;
 186		link = &hole_node->rb.rb_right;
 187		leftmost = false;
 188	} else {
 189		rb = NULL;
 190		link = &mm->interval_tree.rb_root.rb_node;
 191		leftmost = true;
 192	}
 193
 194	while (*link) {
 195		rb = *link;
 196		parent = rb_entry(rb, struct drm_mm_node, rb);
 197		if (parent->__subtree_last < node->__subtree_last)
 198			parent->__subtree_last = node->__subtree_last;
 199		if (node->start < parent->start) {
 200			link = &parent->rb.rb_left;
 201		} else {
 202			link = &parent->rb.rb_right;
 203			leftmost = false;
 204		}
 205	}
 206
 207	rb_link_node(&node->rb, rb, link);
 208	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
 209				   &drm_mm_interval_tree_augment);
 210}
 211
 212#define HOLE_SIZE(NODE) ((NODE)->hole_size)
 213#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
 214
 215static u64 rb_to_hole_size(struct rb_node *rb)
 216{
 217	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
 218}
 219
 220static void insert_hole_size(struct rb_root_cached *root,
 221			     struct drm_mm_node *node)
 222{
 223	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
 224	u64 x = node->hole_size;
 225	bool first = true;
 226
 227	while (*link) {
 228		rb = *link;
 229		if (x > rb_to_hole_size(rb)) {
 230			link = &rb->rb_left;
 231		} else {
 232			link = &rb->rb_right;
 233			first = false;
 234		}
 235	}
 236
 237	rb_link_node(&node->rb_hole_size, rb, link);
 238	rb_insert_color_cached(&node->rb_hole_size, root, first);
 239}
 240
 241RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
 242			 struct drm_mm_node, rb_hole_addr,
 243			 u64, subtree_max_hole, HOLE_SIZE)
 244
 245static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
 246{
 247	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
 248	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
 249	struct drm_mm_node *parent;
 250
 251	while (*link) {
 252		rb_parent = *link;
 253		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
 254		if (parent->subtree_max_hole < subtree_max_hole)
 255			parent->subtree_max_hole = subtree_max_hole;
 256		if (start < HOLE_ADDR(parent))
 257			link = &parent->rb_hole_addr.rb_left;
 258		else
 259			link = &parent->rb_hole_addr.rb_right;
 260	}
 261
 262	rb_link_node(&node->rb_hole_addr, rb_parent, link);
 263	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
 264}
 265
 266static void add_hole(struct drm_mm_node *node)
 267{
 268	struct drm_mm *mm = node->mm;
 269
 270	node->hole_size =
 271		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
 272	node->subtree_max_hole = node->hole_size;
 273	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 274
 275	insert_hole_size(&mm->holes_size, node);
 276	insert_hole_addr(&mm->holes_addr, node);
 277
 278	list_add(&node->hole_stack, &mm->hole_stack);
 279}
 280
 281static void rm_hole(struct drm_mm_node *node)
 282{
 283	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
 284
 285	list_del(&node->hole_stack);
 286	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
 287	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
 288			   &augment_callbacks);
 289	node->hole_size = 0;
 290	node->subtree_max_hole = 0;
 291
 292	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
 293}
 294
 295static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
 296{
 297	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
 298}
 299
 300static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
 301{
 302	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
 303}
 304
 305static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
 306{
 307	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
 308	struct drm_mm_node *best = NULL;
 309
 310	do {
 311		struct drm_mm_node *node =
 312			rb_entry(rb, struct drm_mm_node, rb_hole_size);
 313
 314		if (size <= node->hole_size) {
 315			best = node;
 316			rb = rb->rb_right;
 317		} else {
 318			rb = rb->rb_left;
 319		}
 320	} while (rb);
 321
 322	return best;
 323}
 324
 325static bool usable_hole_addr(struct rb_node *rb, u64 size)
 326{
 327	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
 328}
 329
 330static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
 331{
 332	struct rb_node *rb = mm->holes_addr.rb_node;
 333	struct drm_mm_node *node = NULL;
 334
 335	while (rb) {
 336		u64 hole_start;
 337
 338		if (!usable_hole_addr(rb, size))
 339			break;
 340
 341		node = rb_hole_addr_to_node(rb);
 342		hole_start = __drm_mm_hole_node_start(node);
 343
 344		if (addr < hole_start)
 345			rb = node->rb_hole_addr.rb_left;
 346		else if (addr > hole_start + node->hole_size)
 347			rb = node->rb_hole_addr.rb_right;
 348		else
 349			break;
 350	}
 351
 352	return node;
 353}
 354
 355static struct drm_mm_node *
 356first_hole(struct drm_mm *mm,
 357	   u64 start, u64 end, u64 size,
 358	   enum drm_mm_insert_mode mode)
 359{
 360	switch (mode) {
 361	default:
 362	case DRM_MM_INSERT_BEST:
 363		return best_hole(mm, size);
 364
 365	case DRM_MM_INSERT_LOW:
 366		return find_hole_addr(mm, start, size);
 367
 368	case DRM_MM_INSERT_HIGH:
 369		return find_hole_addr(mm, end, size);
 370
 371	case DRM_MM_INSERT_EVICT:
 372		return list_first_entry_or_null(&mm->hole_stack,
 373						struct drm_mm_node,
 374						hole_stack);
 375	}
 376}
 377
 378/**
 379 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 380 * @name: name of function to declare
 381 * @first: first rb member to traverse (either rb_left or rb_right).
 382 * @last: last rb member to traverse (either rb_right or rb_left).
 383 *
 384 * This macro declares a function to return the next hole of the addr rb tree.
 385 * While traversing the tree we take the searched size into account and only
 386 * visit branches with potential big enough holes.
 387 */
 388
 389#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
 390static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
 391{									\
 392	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
 393									\
 394	if (!entry || RB_EMPTY_NODE(node))				\
 395		return NULL;						\
 396									\
 397	if (usable_hole_addr(node->first, size)) {			\
 398		node = node->first;					\
 399		while (usable_hole_addr(node->last, size))		\
 400			node = node->last;				\
 401		return rb_hole_addr_to_node(node);			\
 402	}								\
 403									\
 404	while ((parent = rb_parent(node)) && node == parent->first)	\
 405		node = parent;						\
 406									\
 407	return rb_hole_addr_to_node(parent);				\
 408}
 409
 410DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
 411DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)
 412
 413static struct drm_mm_node *
 414next_hole(struct drm_mm *mm,
 415	  struct drm_mm_node *node,
 416	  u64 size,
 417	  enum drm_mm_insert_mode mode)
 418{
 419	switch (mode) {
 420	default:
 421	case DRM_MM_INSERT_BEST:
 422		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
 423
 424	case DRM_MM_INSERT_LOW:
 425		return next_hole_low_addr(node, size);
 426
 427	case DRM_MM_INSERT_HIGH:
 428		return next_hole_high_addr(node, size);
 429
 430	case DRM_MM_INSERT_EVICT:
 431		node = list_next_entry(node, hole_stack);
 432		return &node->hole_stack == &mm->hole_stack ? NULL : node;
 433	}
 434}
 435
 436/**
 437 * drm_mm_reserve_node - insert an pre-initialized node
 438 * @mm: drm_mm allocator to insert @node into
 439 * @node: drm_mm_node to insert
 440 *
 441 * This functions inserts an already set-up &drm_mm_node into the allocator,
 442 * meaning that start, size and color must be set by the caller. All other
 443 * fields must be cleared to 0. This is useful to initialize the allocator with
 444 * preallocated objects which must be set-up before the range allocator can be
 445 * set-up, e.g. when taking over a firmware framebuffer.
 446 *
 447 * Returns:
 448 * 0 on success, -ENOSPC if there's no hole where @node is.
 449 */
 450int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 451{
 452	struct drm_mm_node *hole;
 453	u64 hole_start, hole_end;
 454	u64 adj_start, adj_end;
 455	u64 end;
 456
 457	end = node->start + node->size;
 458	if (unlikely(end <= node->start))
 459		return -ENOSPC;
 460
 461	/* Find the relevant hole to add our node to */
 462	hole = find_hole_addr(mm, node->start, 0);
 463	if (!hole)
 464		return -ENOSPC;
 465
 466	adj_start = hole_start = __drm_mm_hole_node_start(hole);
 467	adj_end = hole_end = hole_start + hole->hole_size;
 468
 469	if (mm->color_adjust)
 470		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
 471
 472	if (adj_start > node->start || adj_end < end)
 473		return -ENOSPC;
 474
 475	node->mm = mm;
 476
 477	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 478	list_add(&node->node_list, &hole->node_list);
 479	drm_mm_interval_tree_add_node(hole, node);
 480	node->hole_size = 0;
 481
 482	rm_hole(hole);
 483	if (node->start > hole_start)
 484		add_hole(hole);
 485	if (end < hole_end)
 486		add_hole(node);
 487
 488	save_stack(node);
 489	return 0;
 490}
 491EXPORT_SYMBOL(drm_mm_reserve_node);
 492
 493static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
 494{
 495	return rb ? rb_to_hole_size(rb) : 0;
 496}
 497
 498/**
 499 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 500 * @mm: drm_mm to allocate from
 501 * @node: preallocate node to insert
 502 * @size: size of the allocation
 503 * @alignment: alignment of the allocation
 504 * @color: opaque tag value to use for this node
 505 * @range_start: start of the allowed range for this node
 506 * @range_end: end of the allowed range for this node
 507 * @mode: fine-tune the allocation search and placement
 508 *
 509 * The preallocated @node must be cleared to 0.
 510 *
 511 * Returns:
 512 * 0 on success, -ENOSPC if there's no suitable hole.
 513 */
 514int drm_mm_insert_node_in_range(struct drm_mm * const mm,
 515				struct drm_mm_node * const node,
 516				u64 size, u64 alignment,
 517				unsigned long color,
 518				u64 range_start, u64 range_end,
 519				enum drm_mm_insert_mode mode)
 520{
 521	struct drm_mm_node *hole;
 522	u64 remainder_mask;
 523	bool once;
 524
 525	DRM_MM_BUG_ON(range_start > range_end);
 526
 527	if (unlikely(size == 0 || range_end - range_start < size))
 528		return -ENOSPC;
 529
 530	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
 531		return -ENOSPC;
 532
 533	if (alignment <= 1)
 534		alignment = 0;
 535
 536	once = mode & DRM_MM_INSERT_ONCE;
 537	mode &= ~DRM_MM_INSERT_ONCE;
 538
 539	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 540	for (hole = first_hole(mm, range_start, range_end, size, mode);
 541	     hole;
 542	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
 543		u64 hole_start = __drm_mm_hole_node_start(hole);
 544		u64 hole_end = hole_start + hole->hole_size;
 545		u64 adj_start, adj_end;
 546		u64 col_start, col_end;
 547
 548		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
 549			break;
 550
 551		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
 552			break;
 553
 554		col_start = hole_start;
 555		col_end = hole_end;
 556		if (mm->color_adjust)
 557			mm->color_adjust(hole, color, &col_start, &col_end);
 558
 559		adj_start = max(col_start, range_start);
 560		adj_end = min(col_end, range_end);
 561
 562		if (adj_end <= adj_start || adj_end - adj_start < size)
 563			continue;
 564
 565		if (mode == DRM_MM_INSERT_HIGH)
 566			adj_start = adj_end - size;
 567
 568		if (alignment) {
 569			u64 rem;
 570
 571			if (likely(remainder_mask))
 572				rem = adj_start & remainder_mask;
 573			else
 574				div64_u64_rem(adj_start, alignment, &rem);
 575			if (rem) {
 576				adj_start -= rem;
 577				if (mode != DRM_MM_INSERT_HIGH)
 578					adj_start += alignment;
 579
 580				if (adj_start < max(col_start, range_start) ||
 581				    min(col_end, range_end) - adj_start < size)
 582					continue;
 583
 584				if (adj_end <= adj_start ||
 585				    adj_end - adj_start < size)
 586					continue;
 587			}
 588		}
 589
 590		node->mm = mm;
 591		node->size = size;
 592		node->start = adj_start;
 593		node->color = color;
 594		node->hole_size = 0;
 595
 596		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 597		list_add(&node->node_list, &hole->node_list);
 598		drm_mm_interval_tree_add_node(hole, node);
 599
 600		rm_hole(hole);
 601		if (adj_start > hole_start)
 602			add_hole(hole);
 603		if (adj_start + size < hole_end)
 604			add_hole(node);
 605
 606		save_stack(node);
 607		return 0;
 608	}
 609
 610	return -ENOSPC;
 611}
 612EXPORT_SYMBOL(drm_mm_insert_node_in_range);
 613
 614static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
 615{
 616	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 617}
 618
 619/**
 620 * drm_mm_remove_node - Remove a memory node from the allocator.
 621 * @node: drm_mm_node to remove
 622 *
 623 * This just removes a node from its drm_mm allocator. The node does not need to
 624 * be cleared again before it can be re-inserted into this or any other drm_mm
 625 * allocator. It is a bug to call this function on a unallocated node.
 626 */
 627void drm_mm_remove_node(struct drm_mm_node *node)
 628{
 629	struct drm_mm *mm = node->mm;
 630	struct drm_mm_node *prev_node;
 631
 632	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
 633	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
 634
 635	prev_node = list_prev_entry(node, node_list);
 636
 637	if (drm_mm_hole_follows(node))
 638		rm_hole(node);
 639
 640	drm_mm_interval_tree_remove(node, &mm->interval_tree);
 641	list_del(&node->node_list);
 642
 643	if (drm_mm_hole_follows(prev_node))
 644		rm_hole(prev_node);
 645	add_hole(prev_node);
 646
 647	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
 648}
 649EXPORT_SYMBOL(drm_mm_remove_node);
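
     /*
      * A removed node keeps its size and may be re-inserted without being
      * cleared first; a minimal sketch, assuming "mm" and "other_mm" are both
      * initialized allocators:
      *
      *	drm_mm_remove_node(&node);
      *	err = drm_mm_insert_node(&other_mm, &node, node.size);
      */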
 650
 651/**
 652 * drm_mm_replace_node - move an allocation from @old to @new
 653 * @old: drm_mm_node to remove from the allocator
 654 * @new: drm_mm_node which should inherit @old's allocation
 655 *
  656 * This is useful when drivers embed the drm_mm_node structure and hence
 657 * can't move allocations by reassigning pointers. It's a combination of remove
 658 * and insert with the guarantee that the allocation start will match.
 659 */
 660void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 661{
 662	struct drm_mm *mm = old->mm;
 663
 664	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));
 665
 666	*new = *old;
 667
 668	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
 669	list_replace(&old->node_list, &new->node_list);
 670	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
 671
 672	if (drm_mm_hole_follows(old)) {
 673		list_replace(&old->hole_stack, &new->hole_stack);
 674		rb_replace_node_cached(&old->rb_hole_size,
 675				       &new->rb_hole_size,
 676				       &mm->holes_size);
 677		rb_replace_node(&old->rb_hole_addr,
 678				&new->rb_hole_addr,
 679				&mm->holes_addr);
 680	}
 681
 682	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
 683}
 684EXPORT_SYMBOL(drm_mm_replace_node);
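
     /*
      * A minimal sketch of the intended use, assuming a hypothetical driver
      * object "struct my_bo" that embeds its drm_mm_node:
      *
      *	struct my_bo {
      *		struct drm_mm_node node;
      *		...
      *	};
      *
      *	drm_mm_replace_node(&old_bo->node, &new_bo->node);
      *	// new_bo->node now owns the allocation; old_bo->node reads as freed
      */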
 685
 686/**
 687 * DOC: lru scan roster
 688 *
  689 * Very often GPUs need contiguous allocations for a given object. When
  690 * evicting objects to make space for a new one it is therefore not the most
  691 * efficient to simply select objects from the tail of an LRU until there is
  692 * a suitable hole: especially for big objects, or nodes that otherwise have
  693 * special allocation constraints, there is a good chance we would evict lots
  694 * of (smaller) objects unnecessarily.
 695 *
 696 * The DRM range allocator supports this use-case through the scanning
 697 * interfaces. First a scan operation needs to be initialized with
  698 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
  699 * objects to the roster, typically by walking an LRU list, though the exact
  700 * traversal order is up to the driver. Eviction candidates are added using
 701 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 702 * further evictable objects. Eviction roster metadata is tracked in &struct
 703 * drm_mm_scan.
 704 *
  705 * The driver must then walk through all objects again, in exactly the
  706 * reverse order, to restore the allocator state. Note that while the
  707 * allocator is in scan mode no other operations are allowed.
 708 *
 709 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 710 * reported true) in the scan, and any overlapping nodes after color adjustment
 711 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
  712 * since freeing a node is also O(1) the overall complexity is
  713 * O(scanned_objects). So, like the hole stack that needs to be walked before
  714 * a scan operation even begins, this is linear in the number of objects; in
  715 * practice it does not seem to hurt too badly. A sketch of the flow follows.
 716 */
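
     /*
      * A minimal sketch of the roster protocol described above. The LRU list,
      * the "struct my_obj" container and my_evict() are hypothetical driver
      * constructs; only the drm_mm_scan_*() calls are real API:
      *
      *	struct drm_mm_scan scan;
      *	struct my_obj *obj, *next;
      *	struct drm_mm_node *node;
      *	LIST_HEAD(evict);
      *
      *	drm_mm_scan_init(&scan, &mm, size, align, 0, DRM_MM_INSERT_LOW);
      *	list_for_each_entry(obj, &lru, lru_link) {
      *		list_add(&obj->scan_link, &evict);	// head insertion ...
      *		if (drm_mm_scan_add_block(&scan, &obj->node))
      *			break;
      *	}
      *
      *	// ... so a forward walk visits the blocks in reverse order of
      *	// addition, as drm_mm_scan_remove_block() requires. Objects not
      *	// chosen for eviction are dropped from the local list.
      *	list_for_each_entry_safe(obj, next, &evict, scan_link) {
      *		if (!drm_mm_scan_remove_block(&scan, &obj->node))
      *			list_del(&obj->scan_link);
      *	}
      *
      *	// On success, evict the survivors, then any nodes overlapping the
      *	// hole after color adjustment.
      *	list_for_each_entry_safe(obj, next, &evict, scan_link)
      *		my_evict(obj);				// frees obj->node
      *	while ((node = drm_mm_scan_color_evict(&scan)))
      *		my_evict(container_of(node, struct my_obj, node));
      */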
 717
 718/**
 719 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 720 * @scan: scan state
 721 * @mm: drm_mm to scan
 722 * @size: size of the allocation
 723 * @alignment: alignment of the allocation
 724 * @color: opaque tag value to use for the allocation
 725 * @start: start of the allowed range for the allocation
 726 * @end: end of the allowed range for the allocation
 727 * @mode: fine-tune the allocation search and placement
 728 *
 729 * This simply sets up the scanning routines with the parameters for the desired
 730 * hole.
 731 *
 732 * Warning:
  733 * As long as the scan list is non-empty, no operations other than
  734 * adding/removing nodes to/from the scan list are allowed.
 735 */
 736void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
 737				 struct drm_mm *mm,
 738				 u64 size,
 739				 u64 alignment,
 740				 unsigned long color,
 741				 u64 start,
 742				 u64 end,
 743				 enum drm_mm_insert_mode mode)
 744{
 745	DRM_MM_BUG_ON(start >= end);
 746	DRM_MM_BUG_ON(!size || size > end - start);
 747	DRM_MM_BUG_ON(mm->scan_active);
 748
 749	scan->mm = mm;
 750
 751	if (alignment <= 1)
 752		alignment = 0;
 753
 754	scan->color = color;
 755	scan->alignment = alignment;
 756	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
 757	scan->size = size;
 758	scan->mode = mode;
 759
 760	DRM_MM_BUG_ON(end <= start);
 761	scan->range_start = start;
 762	scan->range_end = end;
 763
 764	scan->hit_start = U64_MAX;
 765	scan->hit_end = 0;
 766}
 767EXPORT_SYMBOL(drm_mm_scan_init_with_range);
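
     /*
      * For example (a sketch; the 256 MiB limit is made up), a scan restricted
      * to a CPU-visible aperture at the bottom of the managed range:
      *
      *	drm_mm_scan_init_with_range(&scan, &mm, size, align, 0,
      *				    0, SZ_256M, DRM_MM_INSERT_LOW);
      */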
 768
 769/**
 770 * drm_mm_scan_add_block - add a node to the scan list
 771 * @scan: the active drm_mm scanner
 772 * @node: drm_mm_node to add
 773 *
  774 * Add a node to the scan list as an eviction candidate that might be freed
  775 * to make space for the desired hole.
 776 *
 777 * Returns:
 778 * True if a hole has been found, false otherwise.
 779 */
 780bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
 781			   struct drm_mm_node *node)
 782{
 783	struct drm_mm *mm = scan->mm;
 784	struct drm_mm_node *hole;
 785	u64 hole_start, hole_end;
 786	u64 col_start, col_end;
 787	u64 adj_start, adj_end;
 788
 789	DRM_MM_BUG_ON(node->mm != mm);
 790	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
 791	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
 792	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 793	mm->scan_active++;
 794
 795	/* Remove this block from the node_list so that we enlarge the hole
 796	 * (distance between the end of our previous node and the start of
  797	 * our next), without poisoning the link so that we can restore it
 798	 * later in drm_mm_scan_remove_block().
 799	 */
 800	hole = list_prev_entry(node, node_list);
 801	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
 802	__list_del_entry(&node->node_list);
 803
 804	hole_start = __drm_mm_hole_node_start(hole);
 805	hole_end = __drm_mm_hole_node_end(hole);
 806
 807	col_start = hole_start;
 808	col_end = hole_end;
 809	if (mm->color_adjust)
 810		mm->color_adjust(hole, scan->color, &col_start, &col_end);
 811
 812	adj_start = max(col_start, scan->range_start);
 813	adj_end = min(col_end, scan->range_end);
 814	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
 815		return false;
 816
 817	if (scan->mode == DRM_MM_INSERT_HIGH)
 818		adj_start = adj_end - scan->size;
 819
 820	if (scan->alignment) {
 821		u64 rem;
 822
 823		if (likely(scan->remainder_mask))
 824			rem = adj_start & scan->remainder_mask;
 825		else
 826			div64_u64_rem(adj_start, scan->alignment, &rem);
 827		if (rem) {
 828			adj_start -= rem;
 829			if (scan->mode != DRM_MM_INSERT_HIGH)
 830				adj_start += scan->alignment;
 831			if (adj_start < max(col_start, scan->range_start) ||
 832			    min(col_end, scan->range_end) - adj_start < scan->size)
 833				return false;
 834
 835			if (adj_end <= adj_start ||
 836			    adj_end - adj_start < scan->size)
 837				return false;
 838		}
 839	}
 840
 841	scan->hit_start = adj_start;
 842	scan->hit_end = adj_start + scan->size;
 843
 844	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
 845	DRM_MM_BUG_ON(scan->hit_start < hole_start);
 846	DRM_MM_BUG_ON(scan->hit_end > hole_end);
 847
 848	return true;
 849}
 850EXPORT_SYMBOL(drm_mm_scan_add_block);
 851
 852/**
 853 * drm_mm_scan_remove_block - remove a node from the scan list
 854 * @scan: the active drm_mm scanner
 855 * @node: drm_mm_node to remove
 856 *
  857 * Nodes **must** be removed from the scan list in exactly the reverse of the
  858 * order in which they were added (e.g. using list_add() as they are added and
  859 * then list_for_each() over that eviction list to remove), otherwise the
  860 * internal state of the memory manager will be corrupted.
 861 *
  862 * When the scan list is empty, the selected memory nodes can be freed. An
  863 * immediately following drm_mm_insert_node_in_range() using the
  864 * DRM_MM_INSERT_EVICT mode will then return the just freed block (because
  865 * it is at the top of the hole_stack list).
 866 *
 867 * Returns:
 868 * True if this block should be evicted, false otherwise. Will always
 869 * return false when no hole has been found.
 870 */
 871bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
 872			      struct drm_mm_node *node)
 873{
 874	struct drm_mm_node *prev_node;
 875
 876	DRM_MM_BUG_ON(node->mm != scan->mm);
 877	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
 878	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
 879
 880	DRM_MM_BUG_ON(!node->mm->scan_active);
 881	node->mm->scan_active--;
 882
 883	/* During drm_mm_scan_add_block() we decoupled this node leaving
 884	 * its pointers intact. Now that the caller is walking back along
 885	 * the eviction list we can restore this block into its rightful
 886	 * place on the full node_list. To confirm that the caller is walking
 887	 * backwards correctly we check that prev_node->next == node->next,
 888	 * i.e. both believe the same node should be on the other side of the
 889	 * hole.
 890	 */
 891	prev_node = list_prev_entry(node, node_list);
 892	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
 893		      list_next_entry(node, node_list));
 894	list_add(&node->node_list, &prev_node->node_list);
 895
 896	return (node->start + node->size > scan->hit_start &&
 897		node->start < scan->hit_end);
 898}
 899EXPORT_SYMBOL(drm_mm_scan_remove_block);
 900
 901/**
 902 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 903 * @scan: drm_mm scan with target hole
 904 *
 905 * After completing an eviction scan and removing the selected nodes, we may
 906 * need to remove a few more nodes from either side of the target hole if
 907 * mm.color_adjust is being used.
 908 *
 909 * Returns:
 910 * A node to evict, or NULL if there are no overlapping nodes.
 911 */
 912struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
 913{
 914	struct drm_mm *mm = scan->mm;
 915	struct drm_mm_node *hole;
 916	u64 hole_start, hole_end;
 917
 918	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
 919
 920	if (!mm->color_adjust)
 921		return NULL;
 922
 923	/*
 924	 * The hole found during scanning should ideally be the first element
 925	 * in the hole_stack list, but due to side-effects in the driver it
 926	 * may not be.
 927	 */
 928	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
 929		hole_start = __drm_mm_hole_node_start(hole);
 930		hole_end = hole_start + hole->hole_size;
 931
 932		if (hole_start <= scan->hit_start &&
 933		    hole_end >= scan->hit_end)
 934			break;
 935	}
 936
 937	/* We should only be called after we found the hole previously */
 938	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
 939	if (unlikely(&hole->hole_stack == &mm->hole_stack))
 940		return NULL;
 941
 942	DRM_MM_BUG_ON(hole_start > scan->hit_start);
 943	DRM_MM_BUG_ON(hole_end < scan->hit_end);
 944
 945	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
 946	if (hole_start > scan->hit_start)
 947		return hole;
 948	if (hole_end < scan->hit_end)
 949		return list_next_entry(hole, node_list);
 950
 951	return NULL;
 952}
 953EXPORT_SYMBOL(drm_mm_scan_color_evict);
 954
 955/**
 956 * drm_mm_init - initialize a drm-mm allocator
 957 * @mm: the drm_mm structure to initialize
 958 * @start: start of the range managed by @mm
  959 * @size: size of the range managed by @mm
 960 *
 961 * Note that @mm must be cleared to 0 before calling this function.
 962 */
 963void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
 964{
 965	DRM_MM_BUG_ON(start + size <= start);
 966
 967	mm->color_adjust = NULL;
 968
 969	INIT_LIST_HEAD(&mm->hole_stack);
 970	mm->interval_tree = RB_ROOT_CACHED;
 971	mm->holes_size = RB_ROOT_CACHED;
 972	mm->holes_addr = RB_ROOT;
 973
 974	/* Clever trick to avoid a special case in the free hole tracking. */
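     	/*
     	 * The head node is placed at start + size with size = -size, so it
     	 * wraps around (modulo 2^64): the hole that follows it spans exactly
     	 * [start, start + size), i.e. the whole range starts out free.
     	 */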
 975	INIT_LIST_HEAD(&mm->head_node.node_list);
 976	mm->head_node.flags = 0;
 977	mm->head_node.mm = mm;
 978	mm->head_node.start = start + size;
 979	mm->head_node.size = -size;
 980	add_hole(&mm->head_node);
 981
 982	mm->scan_active = 0;
 983
 984#ifdef CONFIG_DRM_DEBUG_MM
 985	stack_depot_init();
 986#endif
 987}
 988EXPORT_SYMBOL(drm_mm_init);
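
     /*
      * A minimal sketch of the init/takedown lifecycle, assuming a
      * hypothetical driver structure allocated with kzalloc() (which
      * satisfies the cleared-to-0 requirement above):
      *
      *	struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
      *
      *	if (!dev)
      *		return -ENOMEM;
      *	drm_mm_init(&dev->vram_mm, 0, vram_size);
      *	...
      *	drm_mm_takedown(&dev->vram_mm);	// every node removed beforehand
      */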
 989
 990/**
 991 * drm_mm_takedown - clean up a drm_mm allocator
 992 * @mm: drm_mm allocator to clean up
 993 *
 994 * Note that it is a bug to call this function on an allocator which is not
 995 * clean.
 996 */
 997void drm_mm_takedown(struct drm_mm *mm)
 998{
 999	if (WARN(!drm_mm_clean(mm),
1000		 "Memory manager not clean during takedown.\n"))
1001		show_leaks(mm);
1002}
1003EXPORT_SYMBOL(drm_mm_takedown);
1004
1005static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
1006{
1007	u64 start, size;
1008
1009	size = entry->hole_size;
1010	if (size) {
1011		start = drm_mm_hole_node_start(entry);
1012		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
1013			   start, start + size, size);
1014	}
1015
1016	return size;
1017}
1018/**
1019 * drm_mm_print - print allocator state
1020 * @mm: drm_mm allocator to print
1021 * @p: DRM printer to use
1022 */
1023void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
1024{
1025	const struct drm_mm_node *entry;
1026	u64 total_used = 0, total_free = 0, total = 0;
1027
1028	total_free += drm_mm_dump_hole(p, &mm->head_node);
1029
1030	drm_mm_for_each_node(entry, mm) {
1031		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
1032			   entry->start + entry->size, entry->size);
1033		total_used += entry->size;
1034		total_free += drm_mm_dump_hole(p, entry);
1035	}
1036	total = total_free + total_used;
1037
1038	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
1039		   total_used, total_free);
1040}
1041EXPORT_SYMBOL(drm_mm_print);
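
     /*
      * A typical caller is a debugfs show function; a minimal sketch, where
      * "m" is the seq_file passed to the handler:
      *
      *	struct drm_printer p = drm_seq_file_printer(m);
      *
      *	drm_mm_print(&mm, &p);
      */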