v4.6: drivers/gpu/drm/drm_mm.c
  1/**************************************************************************
  2 *
  3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 *
 27 **************************************************************************/
 28
 29/*
 30 * Generic simple memory manager implementation. Intended to be used as a base
 31 * class implementation for more advanced memory managers.
 32 *
 33 * Note that the algorithm used is quite simple and there might be substantial
 34 * performance gains if a smarter free list is implemented. Currently it is just an
 35 * unordered stack of free regions. This could easily be improved if an RB-tree
 36 * is used instead, at least if we expect heavy fragmentation.
 37 *
 38 * Aligned allocations can also see improvement.
 39 *
 40 * Authors:
 41 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 42 */
 43
 44#include <drm/drmP.h>
 45#include <drm/drm_mm.h>
 46#include <linux/slab.h>
 47#include <linux/seq_file.h>
 48#include <linux/export.h>
 49
 50/**
 51 * DOC: Overview
 52 *
 53 * drm_mm provides a simple range allocator. The drivers are free to use the
 54 * resource allocator from the linux core if it suits them, the upside of drm_mm
 55 * is that it's in the DRM core, which means that it's easier to extend for
 56 * some of the crazier special-purpose needs of GPUs.
 57 *
 58 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 59 * Drivers are free to embed either of them into their own suitable
 60 * datastructures. drm_mm itself will not do any allocations of its own, so if
 61 * drivers choose not to embed nodes they need to still allocate them
 62 * themselves.
 63 *
 64 * The range allocator also supports reservation of preallocated blocks. This is
 65 * useful for taking over initial mode setting configurations from the firmware,
 66 * where an object needs to be created which exactly matches the firmware's
 67 * scanout target. As long as the range is still free it can be inserted anytime
 68 * after the allocator is initialized, which helps with avoiding looped
 69 * dependencies in the driver load sequence.
 70 *
 71 * drm_mm maintains a stack of most recently freed holes, which of all
 72 * simplistic datastructures seems to be a fairly decent approach to clustering
 73 * allocations and avoiding too much fragmentation. This means free space
 74 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 75 * something better would be fairly complex, and since gfx thrashing is a fairly
 76 * steep cliff, not a real concern. Removing a node again is O(1).
 77 *
 78 * drm_mm supports a few features: Alignment and range restrictions can be
 79 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 80 * opaque unsigned long) which in conjunction with a driver callback can be used
 81 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 82 * this to implement guard pages between incompatible caching domains in the
 83 * graphics TT.
 84 *
 85 * Two behaviors are supported for searching and allocating: bottom-up and top-down.
 86 * The default is bottom-up. Top-down allocation can be used if the memory area
 87 * has different restrictions, or just to reduce fragmentation.
 88 *
 89 * Finally iteration helpers to walk all nodes and all holes are provided as are
 90 * some basic allocator dumpers for debugging.
 91 */
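A minimal usage sketch of the v4.6 API documented above. The my_vram wrapper and its function names are hypothetical; only the drm_mm_* calls and flags come from this file. Both the drm_mm and every drm_mm_node must be cleared to 0 before use, and the caller provides its own locking.

#include <drm/drm_mm.h>

struct my_vram {
	struct drm_mm mm;	/* zeroed, then drm_mm_init(&mm, start, size) */
};

static int my_vram_alloc(struct my_vram *vram, struct drm_mm_node *node,
			 u64 size)
{
	/* First-fit, bottom-up search with no alignment and color 0;
	 * returns -ENOSPC when no hole is large enough. */
	return drm_mm_insert_node_generic(&vram->mm, node, size, 0, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void my_vram_free(struct drm_mm_node *node)
{
	/* O(1); the node may be re-inserted without clearing it again. */
	drm_mm_remove_node(node);
}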
 92
 93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 94						u64 size,
 95						unsigned alignment,
 96						unsigned long color,
 97						enum drm_mm_search_flags flags);
 98static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 99						u64 size,
100						unsigned alignment,
101						unsigned long color,
102						u64 start,
103						u64 end,
104						enum drm_mm_search_flags flags);
105
106static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
107				 struct drm_mm_node *node,
108				 u64 size, unsigned alignment,
109				 unsigned long color,
110				 enum drm_mm_allocator_flags flags)
111{
112	struct drm_mm *mm = hole_node->mm;
113	u64 hole_start = drm_mm_hole_node_start(hole_node);
114	u64 hole_end = drm_mm_hole_node_end(hole_node);
115	u64 adj_start = hole_start;
116	u64 adj_end = hole_end;
117
118	BUG_ON(node->allocated);
119
120	if (mm->color_adjust)
121		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
122
123	if (flags & DRM_MM_CREATE_TOP)
124		adj_start = adj_end - size;
125
126	if (alignment) {
127		u64 tmp = adj_start;
128		unsigned rem;
129
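		/* do_div() divides tmp in place (tmp becomes the quotient)
		 * and returns the remainder, so rem == adj_start % alignment
		 * without a 64-bit division on 32-bit machines. */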
130		rem = do_div(tmp, alignment);
131		if (rem) {
132			if (flags & DRM_MM_CREATE_TOP)
133				adj_start -= rem;
134			else
135				adj_start += alignment - rem;
136		}
137	}
138
139	BUG_ON(adj_start < hole_start);
140	BUG_ON(adj_end > hole_end);
141
142	if (adj_start == hole_start) {
143		hole_node->hole_follows = 0;
144		list_del(&hole_node->hole_stack);
145	}
146
147	node->start = adj_start;
148	node->size = size;
149	node->mm = mm;
150	node->color = color;
151	node->allocated = 1;
152
153	INIT_LIST_HEAD(&node->hole_stack);
154	list_add(&node->node_list, &hole_node->node_list);
155
156	BUG_ON(node->start + node->size > adj_end);
157
158	node->hole_follows = 0;
159	if (__drm_mm_hole_node_start(node) < hole_end) {
160		list_add(&node->hole_stack, &mm->hole_stack);
161		node->hole_follows = 1;
162	}
163}
164
165/**
166 * drm_mm_reserve_node - insert a pre-initialized node
167 * @mm: drm_mm allocator to insert @node into
168 * @node: drm_mm_node to insert
169 *
170 * This function inserts an already set-up drm_mm_node into the allocator,
171 * meaning that start, size and color must be set by the caller. This is useful
172 * to initialize the allocator with preallocated objects which must be set-up
173 * before the range allocator can be set-up, e.g. when taking over a firmware
174 * framebuffer.
175 *
176 * Returns:
177 * 0 on success, -ENOSPC if there's no hole where @node is.
178 */
179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
180{
181	struct drm_mm_node *hole;
182	u64 end = node->start + node->size;
183	u64 hole_start;
184	u64 hole_end;
185
186	BUG_ON(node == NULL);
187
188	/* Find the relevant hole to add our node to */
189	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
190		if (hole_start > node->start || hole_end < end)
191			continue;
192
193		node->mm = mm;
194		node->allocated = 1;
195
196		INIT_LIST_HEAD(&node->hole_stack);
197		list_add(&node->node_list, &hole->node_list);
198
199		if (node->start == hole_start) {
200			hole->hole_follows = 0;
201			list_del_init(&hole->hole_stack);
202		}
203
204		node->hole_follows = 0;
205		if (end != hole_end) {
206			list_add(&node->hole_stack, &mm->hole_stack);
207			node->hole_follows = 1;
208		}
209
210		return 0;
211	}
212
213	return -ENOSPC;
214}
215EXPORT_SYMBOL(drm_mm_reserve_node);
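A sketch of the take-over case described in the kernel-doc above: reserving the firmware's scanout buffer before anything else can claim that range. The function name and the fb_base/fb_size parameters are hypothetical; a real driver reads them from the inherited display configuration.

static int my_reserve_firmware_fb(struct drm_mm *mm,
				  struct drm_mm_node *node, /* zeroed */
				  u64 fb_base, u64 fb_size)
{
	/* drm_mm_reserve_node() expects start, size and color pre-set. */
	node->start = fb_base;
	node->size = fb_size;
	node->color = 0;

	return drm_mm_reserve_node(mm, node); /* -ENOSPC if range is taken */
}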
216
217/**
218 * drm_mm_insert_node_generic - search for space and insert @node
219 * @mm: drm_mm to allocate from
220 * @node: preallocate node to insert
221 * @size: size of the allocation
222 * @alignment: alignment of the allocation
223 * @color: opaque tag value to use for this node
224 * @sflags: flags to fine-tune the allocation search
225 * @aflags: flags to fine-tune the allocation behavior
226 *
227 * The preallocated node must be cleared to 0.
228 *
229 * Returns:
230 * 0 on success, -ENOSPC if there's no suitable hole.
231 */
232int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
233			       u64 size, unsigned alignment,
234			       unsigned long color,
235			       enum drm_mm_search_flags sflags,
236			       enum drm_mm_allocator_flags aflags)
237{
238	struct drm_mm_node *hole_node;
239
240	hole_node = drm_mm_search_free_generic(mm, size, alignment,
241					       color, sflags);
242	if (!hole_node)
243		return -ENOSPC;
244
245	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
246	return 0;
247}
248EXPORT_SYMBOL(drm_mm_insert_node_generic);
249
250static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
251				       struct drm_mm_node *node,
252				       u64 size, unsigned alignment,
253				       unsigned long color,
254				       u64 start, u64 end,
255				       enum drm_mm_allocator_flags flags)
256{
257	struct drm_mm *mm = hole_node->mm;
258	u64 hole_start = drm_mm_hole_node_start(hole_node);
259	u64 hole_end = drm_mm_hole_node_end(hole_node);
260	u64 adj_start = hole_start;
261	u64 adj_end = hole_end;
262
263	BUG_ON(!hole_node->hole_follows || node->allocated);
264
265	if (adj_start < start)
266		adj_start = start;
267	if (adj_end > end)
268		adj_end = end;
269
270	if (mm->color_adjust)
271		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
272
273	if (flags & DRM_MM_CREATE_TOP)
274		adj_start = adj_end - size;
275
276	if (alignment) {
277		u64 tmp = adj_start;
278		unsigned rem;
279
280		rem = do_div(tmp, alignment);
281		if (rem) {
282			if (flags & DRM_MM_CREATE_TOP)
283				adj_start -= rem;
284			else
285				adj_start += alignment - rem;
286		}
287	}
288
289	if (adj_start == hole_start) {
290		hole_node->hole_follows = 0;
291		list_del(&hole_node->hole_stack);
292	}
293
294	node->start = adj_start;
295	node->size = size;
296	node->mm = mm;
297	node->color = color;
298	node->allocated = 1;
299
300	INIT_LIST_HEAD(&node->hole_stack);
301	list_add(&node->node_list, &hole_node->node_list);
302
303	BUG_ON(node->start < start);
304	BUG_ON(node->start < adj_start);
305	BUG_ON(node->start + node->size > adj_end);
306	BUG_ON(node->start + node->size > end);
307
308	node->hole_follows = 0;
309	if (__drm_mm_hole_node_start(node) < hole_end) {
310		list_add(&node->hole_stack, &mm->hole_stack);
311		node->hole_follows = 1;
312	}
313}
314
315/**
316 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
317 * @mm: drm_mm to allocate from
318 * @node: preallocate node to insert
319 * @size: size of the allocation
320 * @alignment: alignment of the allocation
321 * @color: opaque tag value to use for this node
322 * @start: start of the allowed range for this node
323 * @end: end of the allowed range for this node
324 * @sflags: flags to fine-tune the allocation search
325 * @aflags: flags to fine-tune the allocation behavior
326 *
327 * The preallocated node must be cleared to 0.
328 *
329 * Returns:
330 * 0 on success, -ENOSPC if there's no suitable hole.
331 */
332int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
333					u64 size, unsigned alignment,
334					unsigned long color,
335					u64 start, u64 end,
336					enum drm_mm_search_flags sflags,
337					enum drm_mm_allocator_flags aflags)
338{
339	struct drm_mm_node *hole_node;
340
341	hole_node = drm_mm_search_free_in_range_generic(mm,
342							size, alignment, color,
343							start, end, sflags);
344	if (!hole_node)
345		return -ENOSPC;
346
347	drm_mm_insert_helper_range(hole_node, node,
348				   size, alignment, color,
349				   start, end, aflags);
350	return 0;
351}
352EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
353
354/**
355 * drm_mm_remove_node - Remove a memory node from the allocator.
356 * @node: drm_mm_node to remove
357 *
358 * This just removes a node from its drm_mm allocator. The node does not need to
359 * be cleared again before it can be re-inserted into this or any other drm_mm
360 * allocator. It is a bug to call this function on an unallocated node.
361 */
362void drm_mm_remove_node(struct drm_mm_node *node)
363{
364	struct drm_mm *mm = node->mm;
365	struct drm_mm_node *prev_node;
366
367	if (WARN_ON(!node->allocated))
368		return;
369
370	BUG_ON(node->scanned_block || node->scanned_prev_free
371				   || node->scanned_next_free);
372
373	prev_node =
374	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
375
376	if (node->hole_follows) {
377		BUG_ON(__drm_mm_hole_node_start(node) ==
378		       __drm_mm_hole_node_end(node));
379		list_del(&node->hole_stack);
380	} else
381		BUG_ON(__drm_mm_hole_node_start(node) !=
382		       __drm_mm_hole_node_end(node));
383
384
385	if (!prev_node->hole_follows) {
386		prev_node->hole_follows = 1;
387		list_add(&prev_node->hole_stack, &mm->hole_stack);
388	} else
389		list_move(&prev_node->hole_stack, &mm->hole_stack);
390
391	list_del(&node->node_list);
392	node->allocated = 0;
393}
394EXPORT_SYMBOL(drm_mm_remove_node);
395
396static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment)
397{
398	if (end - start < size)
399		return 0;
400
401	if (alignment) {
402		u64 tmp = start;
403		unsigned rem;
404
405		rem = do_div(tmp, alignment);
406		if (rem)
407			start += alignment - rem;
408	}
409
410	return end >= start + size;
411}
412
413static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
414						      u64 size,
415						      unsigned alignment,
416						      unsigned long color,
417						      enum drm_mm_search_flags flags)
418{
419	struct drm_mm_node *entry;
420	struct drm_mm_node *best;
421	u64 adj_start;
422	u64 adj_end;
423	u64 best_size;
424
425	BUG_ON(mm->scanned_blocks);
426
427	best = NULL;
428	best_size = ~0UL;
429
430	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
431			       flags & DRM_MM_SEARCH_BELOW) {
432		u64 hole_size = adj_end - adj_start;
433
434		if (mm->color_adjust) {
435			mm->color_adjust(entry, color, &adj_start, &adj_end);
436			if (adj_end <= adj_start)
437				continue;
438		}
439
440		if (!check_free_hole(adj_start, adj_end, size, alignment))
441			continue;
442
443		if (!(flags & DRM_MM_SEARCH_BEST))
444			return entry;
445
446		if (hole_size < best_size) {
447			best = entry;
448			best_size = hole_size;
449		}
450	}
451
452	return best;
453}
454
455static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
456							u64 size,
457							unsigned alignment,
458							unsigned long color,
459							u64 start,
460							u64 end,
461							enum drm_mm_search_flags flags)
462{
463	struct drm_mm_node *entry;
464	struct drm_mm_node *best;
465	u64 adj_start;
466	u64 adj_end;
467	u64 best_size;
468
469	BUG_ON(mm->scanned_blocks);
470
471	best = NULL;
472	best_size = ~0UL;
473
474	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
475			       flags & DRM_MM_SEARCH_BELOW) {
476		u64 hole_size = adj_end - adj_start;
477
478		if (adj_start < start)
479			adj_start = start;
480		if (adj_end > end)
481			adj_end = end;
482
483		if (mm->color_adjust) {
484			mm->color_adjust(entry, color, &adj_start, &adj_end);
485			if (adj_end <= adj_start)
486				continue;
487		}
488
489		if (!check_free_hole(adj_start, adj_end, size, alignment))
490			continue;
491
492		if (!(flags & DRM_MM_SEARCH_BEST))
493			return entry;
494
495		if (hole_size < best_size) {
496			best = entry;
497			best_size = hole_size;
498		}
499	}
500
501	return best;
502}
503
504/**
505 * drm_mm_replace_node - move an allocation from @old to @new
506 * @old: drm_mm_node to remove from the allocator
507 * @new: drm_mm_node which should inherit @old's allocation
508 *
509 * This is useful for when drivers embed the drm_mm_node structure and hence
510 * can't move allocations by reassigning pointers. It's a combination of remove
511 * and insert with the guarantee that the allocation start will match.
512 */
513void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
514{
515	list_replace(&old->node_list, &new->node_list);
516	list_replace(&old->hole_stack, &new->hole_stack);
517	new->hole_follows = old->hole_follows;
518	new->mm = old->mm;
519	new->start = old->start;
520	new->size = old->size;
521	new->color = old->color;
522
523	old->allocated = 0;
524	new->allocated = 1;
525}
526EXPORT_SYMBOL(drm_mm_replace_node);
527
528/**
529 * DOC: lru scan roster
530 *
531 * Very often GPUs need to have continuous allocations for a given object. When
532 * evicting objects to make space for a new one it is therefore not most
533 * efficient when we simply start to select all objects from the tail of an LRU
534 * until there's a suitable hole: Especially for big objects or nodes that
535 * otherwise have special allocation constraints there's a good chance we evict
536 * lots of (smaller) objects unnecessarily.
537 *
538 * The DRM range allocator supports this use-case through the scanning
539 * interfaces. First a scan operation needs to be initialized with
540 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
541 * objects to the roster (probably by walking an LRU list, but this can be
542 * freely implemented) until a suitable hole is found or there's no further
543 * evictable object.
544 *
545 * Then the driver must walk through all objects again in exactly the reverse
546 * order to restore the allocator state. Note that while the allocator is used
547 * in the scan mode no other operation is allowed.
548 *
549 * Finally the driver evicts all objects selected in the scan. Adding and
550 * removing an object is O(1), and since freeing a node is also O(1) the overall
551 * complexity is O(scanned_objects). So like the free stack which needs to be
552 * walked before a scan operation even begins this is linear in the number of
553 * objects. It doesn't seem to hurt too badly.
554 */
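A sketch of the roster flow just described, using the v4.6 entry points; struct my_obj, its list links and my_unbind() are hypothetical driver state. Prepending with list_add() makes the forward walk over scan_list visit nodes in reverse order of addition, which is what drm_mm_scan_remove_block() requires.

struct my_obj {
	struct drm_mm_node node;
	struct list_head lru_link;	/* on the driver's LRU */
	struct list_head scan_link;	/* transient, during one scan */
};

static int my_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
			     u64 size, unsigned alignment)
{
	struct my_obj *obj, *next;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Phase 1: feed candidates until the scanner reports a hole. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* Phase 2: every added block must be removed again, hit or miss;
	 * keep the ones the scanner selected for eviction. */
	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&obj->node) && found)
			list_move(&obj->scan_link, &evict_list);
		else
			list_del(&obj->scan_link);
	}

	/* Phase 3: scan list is empty, normal operations are legal again. */
	list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		my_unbind(obj);		/* hypothetical: frees obj->node */
	}

	return found ? 0 : -ENOSPC;
}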
555
556/**
557 * drm_mm_init_scan - initialize lru scanning
558 * @mm: drm_mm to scan
559 * @size: size of the allocation
560 * @alignment: alignment of the allocation
561 * @color: opaque tag value to use for the allocation
562 *
563 * This simply sets up the scanning routines with the parameters for the desired
564 * hole. Note that there's no need to specify allocation flags, since they only
565 * change the place a node is allocated from within a suitable hole.
566 *
567 * Warning:
568 * As long as the scan list is non-empty, no other operations than
569 * adding/removing nodes to/from the scan list are allowed.
570 */
571void drm_mm_init_scan(struct drm_mm *mm,
572		      u64 size,
573		      unsigned alignment,
574		      unsigned long color)
575{
576	mm->scan_color = color;
577	mm->scan_alignment = alignment;
578	mm->scan_size = size;
579	mm->scanned_blocks = 0;
580	mm->scan_hit_start = 0;
581	mm->scan_hit_end = 0;
582	mm->scan_check_range = 0;
583	mm->prev_scanned_node = NULL;
584}
585EXPORT_SYMBOL(drm_mm_init_scan);
586
587/**
588 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
589 * @mm: drm_mm to scan
590 * @size: size of the allocation
591 * @alignment: alignment of the allocation
592 * @color: opaque tag value to use for the allocation
593 * @start: start of the allowed range for the allocation
594 * @end: end of the allowed range for the allocation
595 *
596 * This simply sets up the scanning routines with the parameters for the desired
597 * hole. Note that there's no need to specify allocation flags, since they only
598 * change the place a node is allocated from within a suitable hole.
599 *
600 * Warning:
601 * As long as the scan list is non-empty, no other operations than
602 * adding/removing nodes to/from the scan list are allowed.
603 */
604void drm_mm_init_scan_with_range(struct drm_mm *mm,
605				 u64 size,
606				 unsigned alignment,
607				 unsigned long color,
608				 u64 start,
609				 u64 end)
610{
611	mm->scan_color = color;
612	mm->scan_alignment = alignment;
613	mm->scan_size = size;
614	mm->scanned_blocks = 0;
615	mm->scan_hit_start = 0;
616	mm->scan_hit_end = 0;
617	mm->scan_start = start;
618	mm->scan_end = end;
619	mm->scan_check_range = 1;
620	mm->prev_scanned_node = NULL;
621}
622EXPORT_SYMBOL(drm_mm_init_scan_with_range);
623
624/**
625 * drm_mm_scan_add_block - add a node to the scan list
626 * @node: drm_mm_node to add
627 *
628 * Add a node to the scan list that might be freed to make space for the desired
629 * hole.
630 *
631 * Returns:
632 * True if a hole has been found, false otherwise.
633 */
634bool drm_mm_scan_add_block(struct drm_mm_node *node)
635{
636	struct drm_mm *mm = node->mm;
637	struct drm_mm_node *prev_node;
638	u64 hole_start, hole_end;
639	u64 adj_start, adj_end;
640
641	mm->scanned_blocks++;
642
643	BUG_ON(node->scanned_block);
644	node->scanned_block = 1;
645
646	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
647			       node_list);
648
649	node->scanned_preceeds_hole = prev_node->hole_follows;
650	prev_node->hole_follows = 1;
651	list_del(&node->node_list);
652	node->node_list.prev = &prev_node->node_list;
653	node->node_list.next = &mm->prev_scanned_node->node_list;
654	mm->prev_scanned_node = node;
655
656	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
657	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
658
659	if (mm->scan_check_range) {
660		if (adj_start < mm->scan_start)
661			adj_start = mm->scan_start;
662		if (adj_end > mm->scan_end)
663			adj_end = mm->scan_end;
664	}
665
666	if (mm->color_adjust)
667		mm->color_adjust(prev_node, mm->scan_color,
668				 &adj_start, &adj_end);
669
670	if (check_free_hole(adj_start, adj_end,
671			    mm->scan_size, mm->scan_alignment)) {
672		mm->scan_hit_start = hole_start;
673		mm->scan_hit_end = hole_end;
674		return true;
675	}
676
677	return false;
678}
679EXPORT_SYMBOL(drm_mm_scan_add_block);
680
681/**
682 * drm_mm_scan_remove_block - remove a node from the scan list
683 * @node: drm_mm_node to remove
684 *
685 * Nodes _must_ be removed from the scan list in exactly the reverse order as they
686 * have been added, otherwise the internal state of the memory manager will be
687 * corrupted.
688 *
689 * When the scan list is empty, the selected memory nodes can be freed. An
690 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
691 * return the just freed block (because it's at the top of the free_stack list).
692 *
693 * Returns:
694 * True if this block should be evicted, false otherwise. Will always
695 * return false when no hole has been found.
696 */
697bool drm_mm_scan_remove_block(struct drm_mm_node *node)
698{
699	struct drm_mm *mm = node->mm;
700	struct drm_mm_node *prev_node;
701
702	mm->scanned_blocks--;
703
704	BUG_ON(!node->scanned_block);
705	node->scanned_block = 0;
706
707	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
708			       node_list);
709
710	prev_node->hole_follows = node->scanned_preceeds_hole;
711	list_add(&node->node_list, &prev_node->node_list);
712
713	 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
714		 node->start < mm->scan_hit_end);
715}
716EXPORT_SYMBOL(drm_mm_scan_remove_block);
717
718/**
719 * drm_mm_clean - checks whether an allocator is clean
720 * @mm: drm_mm allocator to check
721 *
722 * Returns:
723 * True if the allocator is completely free, false if there's still a node
724 * allocated in it.
725 */
726bool drm_mm_clean(struct drm_mm * mm)
727{
728	struct list_head *head = &mm->head_node.node_list;
729
730	return (head->next->next == head);
731}
732EXPORT_SYMBOL(drm_mm_clean);
733
734/**
735 * drm_mm_init - initialize a drm-mm allocator
736 * @mm: the drm_mm structure to initialize
737 * @start: start of the range managed by @mm
738 * @size: size of the range managed by @mm
739 *
740 * Note that @mm must be cleared to 0 before calling this function.
741 */
742void drm_mm_init(struct drm_mm * mm, u64 start, u64 size)
743{
744	INIT_LIST_HEAD(&mm->hole_stack);
745	mm->scanned_blocks = 0;
746
747	/* Clever trick to avoid a special case in the free hole tracking. */
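	/* head_node.size below is start - (start + size), i.e. -size modulo
	 * 2^64, so hole_start = head_node.start + head_node.size wraps back
	 * to start: the whole managed range [start, start + size) becomes
	 * the single hole "following" the sentinel head_node. */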
748	INIT_LIST_HEAD(&mm->head_node.node_list);
749	INIT_LIST_HEAD(&mm->head_node.hole_stack);
750	mm->head_node.hole_follows = 1;
751	mm->head_node.scanned_block = 0;
752	mm->head_node.scanned_prev_free = 0;
753	mm->head_node.scanned_next_free = 0;
754	mm->head_node.mm = mm;
755	mm->head_node.start = start + size;
756	mm->head_node.size = start - mm->head_node.start;
757	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
758
759	mm->color_adjust = NULL;
760}
761EXPORT_SYMBOL(drm_mm_init);
762
763/**
764 * drm_mm_takedown - clean up a drm_mm allocator
765 * @mm: drm_mm allocator to clean up
766 *
767 * Note that it is a bug to call this function on an allocator which is not
768 * clean.
769 */
770void drm_mm_takedown(struct drm_mm * mm)
771{
772	WARN(!list_empty(&mm->head_node.node_list),
773	     "Memory manager not clean during takedown.\n");
774}
775EXPORT_SYMBOL(drm_mm_takedown);
776
777static u64 drm_mm_debug_hole(struct drm_mm_node *entry,
778				     const char *prefix)
779{
780	u64 hole_start, hole_end, hole_size;
781
782	if (entry->hole_follows) {
783		hole_start = drm_mm_hole_node_start(entry);
784		hole_end = drm_mm_hole_node_end(entry);
785		hole_size = hole_end - hole_start;
786		pr_debug("%s %#llx-%#llx: %llu: free\n", prefix, hole_start,
787			 hole_end, hole_size);
788		return hole_size;
789	}
790
791	return 0;
792}
793
794/**
795 * drm_mm_debug_table - dump allocator state to dmesg
796 * @mm: drm_mm allocator to dump
797 * @prefix: prefix to use for dumping to dmesg
798 */
799void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
800{
801	struct drm_mm_node *entry;
802	u64 total_used = 0, total_free = 0, total = 0;
803
804	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
805
806	drm_mm_for_each_node(entry, mm) {
807		pr_debug("%s %#llx-%#llx: %llu: used\n", prefix, entry->start,
808			 entry->start + entry->size, entry->size);
809		total_used += entry->size;
810		total_free += drm_mm_debug_hole(entry, prefix);
811	}
812	total = total_free + total_used;
813
814	pr_debug("%s total: %llu, used %llu free %llu\n", prefix, total,
815		 total_used, total_free);
816}
817EXPORT_SYMBOL(drm_mm_debug_table);
818
819#if defined(CONFIG_DEBUG_FS)
820static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
821{
822	u64 hole_start, hole_end, hole_size;
823
824	if (entry->hole_follows) {
825		hole_start = drm_mm_hole_node_start(entry);
826		hole_end = drm_mm_hole_node_end(entry);
827		hole_size = hole_end - hole_start;
828		seq_printf(m, "%#018llx-%#018llx: %llu: free\n", hole_start,
829			   hole_end, hole_size);
830		return hole_size;
831	}
832
833	return 0;
834}
835
836/**
837 * drm_mm_dump_table - dump allocator state to a seq_file
838 * @m: seq_file to dump to
839 * @mm: drm_mm allocator to dump
840 */
841int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
842{
843	struct drm_mm_node *entry;
844	u64 total_used = 0, total_free = 0, total = 0;
845
846	total_free += drm_mm_dump_hole(m, &mm->head_node);
847
848	drm_mm_for_each_node(entry, mm) {
849		seq_printf(m, "%#018llx-%#018llx: %llu: used\n", entry->start,
850			   entry->start + entry->size, entry->size);
851		total_used += entry->size;
852		total_free += drm_mm_dump_hole(m, entry);
853	}
854	total = total_free + total_used;
855
856	seq_printf(m, "total: %llu, used %llu free %llu\n", total,
857		   total_used, total_free);
858	return 0;
859}
860EXPORT_SYMBOL(drm_mm_dump_table);
861#endif
v4.17: drivers/gpu/drm/drm_mm.c
  1/**************************************************************************
  2 *
  3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
  4 * Copyright 2016 Intel Corporation
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 *
 28 **************************************************************************/
 29
 30/*
 31 * Generic simple memory manager implementation. Intended to be used as a base
 32 * class implementation for more advanced memory managers.
 33 *
 34 * Note that the algorithm used is quite simple and there might be substantial
 35 * performance gains if a smarter free list is implemented. Currently it is
 36 * just an unordered stack of free regions. This could easily be improved if
 37 * an RB-tree is used instead, at least if we expect heavy fragmentation.
 38 *
 39 * Aligned allocations can also see improvement.
 40 *
 41 * Authors:
 42 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 43 */
 44
 45#include <drm/drmP.h>
 46#include <drm/drm_mm.h>
 47#include <linux/slab.h>
 48#include <linux/seq_file.h>
 49#include <linux/export.h>
 50#include <linux/interval_tree_generic.h>
 51
 52/**
 53 * DOC: Overview
 54 *
 55 * drm_mm provides a simple range allocator. The drivers are free to use the
 56 * resource allocator from the linux core if it suits them, the upside of drm_mm
 57 * is that it's in the DRM core, which means that it's easier to extend for
 58 * some of the crazier special-purpose needs of GPUs.
 59 *
 60 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 61 * Drivers are free to embed either of them into their own suitable
 62 * datastructures. drm_mm itself will not do any memory allocations of its own,
 63 * so if drivers choose not to embed nodes they need to still allocate them
 64 * themselves.
 65 *
 66 * The range allocator also supports reservation of preallocated blocks. This is
 67 * useful for taking over initial mode setting configurations from the firmware,
 68 * where an object needs to be created which exactly matches the firmware's
 69 * scanout target. As long as the range is still free it can be inserted anytime
 70 * after the allocator is initialized, which helps with avoiding looped
 71 * dependencies in the driver load sequence.
 72 *
 73 * drm_mm maintains a stack of most recently freed holes, which of all
 74 * simplistic datastructures seems to be a fairly decent approach to clustering
 75 * allocations and avoiding too much fragmentation. This means free space
 76 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 77 * something better would be fairly complex, and since gfx thrashing is a fairly
 78 * steep cliff, not a real concern. Removing a node again is O(1).
 79 *
 80 * drm_mm supports a few features: Alignment and range restrictions can be
 81 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 82 * opaque unsigned long) which in conjunction with a driver callback can be used
 83 * to implement sophisticated placement restrictions. The i915 DRM driver uses
 84 * this to implement guard pages between incompatible caching domains in the
 85 * graphics TT.
 86 *
 87 * Two behaviors are supported for searching and allocating: bottom-up and
 88 * top-down. The default is bottom-up. Top-down allocation can be used if the
 89 * memory area has different restrictions, or just to reduce fragmentation.
 90 *
 91 * Finally iteration helpers to walk all nodes and all holes are provided as are
 92 * some basic allocator dumpers for debugging.
 93 *
 94 * Note that this range allocator is not thread-safe; drivers need to protect
 95 * modifications with their own locking. The idea behind this is that for a full
 96 * memory manager additional data needs to be protected anyway, hence internal
 97 * locking would be fully redundant.
 98 */
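A minimal sketch against the v4.17 API below; my_mm and my_node are the caller's zeroed structures, everything else is from this file. The enum drm_mm_insert_mode replaces the old search/create flag pairs: DRM_MM_INSERT_BEST walks the size-sorted hole rbtree for the smallest fitting hole, DRM_MM_INSERT_LOW and DRM_MM_INSERT_HIGH allocate bottom-up and top-down via the address-sorted rbtree, and DRM_MM_INSERT_EVICT reuses the most recently freed hole.

static int my_alloc_topdown(struct drm_mm *my_mm,
			    struct drm_mm_node *my_node,
			    u64 size, u64 alignment)
{
	return drm_mm_insert_node_in_range(my_mm, my_node,
					   size, alignment,
					   0,		/* color */
					   0, U64_MAX,	/* full range */
					   DRM_MM_INSERT_HIGH);
}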
 99
100#ifdef CONFIG_DRM_DEBUG_MM
101#include <linux/stackdepot.h>
102
103#define STACKDEPTH 32
104#define BUFSZ 4096
105
106static noinline void save_stack(struct drm_mm_node *node)
107{
108	unsigned long entries[STACKDEPTH];
109	struct stack_trace trace = {
110		.entries = entries,
111		.max_entries = STACKDEPTH,
112		.skip = 1
113	};
114
115	save_stack_trace(&trace);
116	if (trace.nr_entries != 0 &&
117	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
118		trace.nr_entries--;
119
120	/* May be called under spinlock, so avoid sleeping */
121	node->stack = depot_save_stack(&trace, GFP_NOWAIT);
122}
123
124static void show_leaks(struct drm_mm *mm)
125{
126	struct drm_mm_node *node;
127	unsigned long entries[STACKDEPTH];
128	char *buf;
129
130	buf = kmalloc(BUFSZ, GFP_KERNEL);
131	if (!buf)
132		return;
133
134	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
135		struct stack_trace trace = {
136			.entries = entries,
137			.max_entries = STACKDEPTH
138		};
139
140		if (!node->stack) {
141			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
142				  node->start, node->size);
143			continue;
144		}
145
146		depot_fetch_stack(node->stack, &trace);
147		snprint_stack_trace(buf, BUFSZ, &trace, 0);
148		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
149			  node->start, node->size, buf);
150	}
151
152	kfree(buf);
153}
154
155#undef STACKDEPTH
156#undef BUFSZ
157#else
158static void save_stack(struct drm_mm_node *node) { }
159static void show_leaks(struct drm_mm *mm) { }
160#endif
161
162#define START(node) ((node)->start)
163#define LAST(node)  ((node)->start + (node)->size - 1)
164
165INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
166		     u64, __subtree_last,
167		     START, LAST, static inline, drm_mm_interval_tree)
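/* INTERVAL_TREE_DEFINE() expands into the augmented-rbtree helpers used
 * below, among them drm_mm_interval_tree_iter_first(),
 * drm_mm_interval_tree_remove() and the drm_mm_interval_tree_augment
 * callbacks, keyed on the closed interval [START(node), LAST(node)] with
 * __subtree_last as the augmented value. */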
168
169struct drm_mm_node *
170__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
171{
172	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
173					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
174}
175EXPORT_SYMBOL(__drm_mm_interval_first);
176
177static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
178					  struct drm_mm_node *node)
179{
180	struct drm_mm *mm = hole_node->mm;
181	struct rb_node **link, *rb;
182	struct drm_mm_node *parent;
183	bool leftmost;
184
185	node->__subtree_last = LAST(node);
186
187	if (hole_node->allocated) {
188		rb = &hole_node->rb;
189		while (rb) {
190			parent = rb_entry(rb, struct drm_mm_node, rb);
191			if (parent->__subtree_last >= node->__subtree_last)
192				break;
193
194			parent->__subtree_last = node->__subtree_last;
195			rb = rb_parent(rb);
196		}
197
198		rb = &hole_node->rb;
199		link = &hole_node->rb.rb_right;
200		leftmost = false;
201	} else {
202		rb = NULL;
203		link = &mm->interval_tree.rb_root.rb_node;
204		leftmost = true;
205	}
206
207	while (*link) {
208		rb = *link;
209		parent = rb_entry(rb, struct drm_mm_node, rb);
210		if (parent->__subtree_last < node->__subtree_last)
211			parent->__subtree_last = node->__subtree_last;
212		if (node->start < parent->start) {
213			link = &parent->rb.rb_left;
214		} else {
215			link = &parent->rb.rb_right;
216			leftmost = false;
217		}
218	}
219
220	rb_link_node(&node->rb, rb, link);
221	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
222				   &drm_mm_interval_tree_augment);
223}
224
225#define RB_INSERT(root, member, expr) do { \
226	struct rb_node **link = &root.rb_node, *rb = NULL; \
227	u64 x = expr(node); \
228	while (*link) { \
229		rb = *link; \
230		if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
231			link = &rb->rb_left; \
232		else \
233			link = &rb->rb_right; \
234	} \
235	rb_link_node(&node->member, rb, link); \
236	rb_insert_color(&node->member, &root); \
237} while (0)
238
239#define HOLE_SIZE(NODE) ((NODE)->hole_size)
240#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
241
242static void add_hole(struct drm_mm_node *node)
243{
244	struct drm_mm *mm = node->mm;
245
246	node->hole_size =
247		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
248	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
249
250	RB_INSERT(mm->holes_size, rb_hole_size, HOLE_SIZE);
251	RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
252
253	list_add(&node->hole_stack, &mm->hole_stack);
254}
255
256static void rm_hole(struct drm_mm_node *node)
257{
258	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
259
260	list_del(&node->hole_stack);
261	rb_erase(&node->rb_hole_size, &node->mm->holes_size);
262	rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
263	node->hole_size = 0;
264
265	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
266}
267
268static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
269{
270	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
271}
272
273static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
274{
275	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
276}
277
278static inline u64 rb_hole_size(struct rb_node *rb)
279{
280	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
281}
282
283static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
284{
285	struct rb_node *best = NULL;
286	struct rb_node **link = &mm->holes_size.rb_node;
287
288	while (*link) {
289		struct rb_node *rb = *link;
290
291		if (size <= rb_hole_size(rb)) {
292			link = &rb->rb_left;
293			best = rb;
294		} else {
295			link = &rb->rb_right;
296		}
297	}
298
299	return rb_hole_size_to_node(best);
300}
301
302static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
303{
304	struct drm_mm_node *node = NULL;
305	struct rb_node **link = &mm->holes_addr.rb_node;
306
307	while (*link) {
308		u64 hole_start;
309
310		node = rb_hole_addr_to_node(*link);
311		hole_start = __drm_mm_hole_node_start(node);
312
313		if (addr < hole_start)
314			link = &node->rb_hole_addr.rb_left;
315		else if (addr > hole_start + node->hole_size)
316			link = &node->rb_hole_addr.rb_right;
317		else
318			break;
319	}
320
321	return node;
322}
323
324static struct drm_mm_node *
325first_hole(struct drm_mm *mm,
326	   u64 start, u64 end, u64 size,
327	   enum drm_mm_insert_mode mode)
328{
329	if (RB_EMPTY_ROOT(&mm->holes_size))
330		return NULL;
331
332	switch (mode) {
333	default:
334	case DRM_MM_INSERT_BEST:
335		return best_hole(mm, size);
336
337	case DRM_MM_INSERT_LOW:
338		return find_hole(mm, start);
339
340	case DRM_MM_INSERT_HIGH:
341		return find_hole(mm, end);
342
343	case DRM_MM_INSERT_EVICT:
344		return list_first_entry_or_null(&mm->hole_stack,
345						struct drm_mm_node,
346						hole_stack);
347	}
348}
349
350static struct drm_mm_node *
351next_hole(struct drm_mm *mm,
352	  struct drm_mm_node *node,
353	  enum drm_mm_insert_mode mode)
354{
355	switch (mode) {
356	default:
357	case DRM_MM_INSERT_BEST:
358		return rb_hole_size_to_node(rb_next(&node->rb_hole_size));
359
360	case DRM_MM_INSERT_LOW:
361		return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
362
363	case DRM_MM_INSERT_HIGH:
364		return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
365
366	case DRM_MM_INSERT_EVICT:
367		node = list_next_entry(node, hole_stack);
368		return &node->hole_stack == &mm->hole_stack ? NULL : node;
369	}
370}
371
372/**
373 * drm_mm_reserve_node - insert a pre-initialized node
374 * @mm: drm_mm allocator to insert @node into
375 * @node: drm_mm_node to insert
376 *
377 * This function inserts an already set-up &drm_mm_node into the allocator,
378 * meaning that start, size and color must be set by the caller. All other
379 * fields must be cleared to 0. This is useful to initialize the allocator with
380 * preallocated objects which must be set-up before the range allocator can be
381 * set-up, e.g. when taking over a firmware framebuffer.
382 *
383 * Returns:
384 * 0 on success, -ENOSPC if there's no hole where @node is.
385 */
386int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
387{
388	u64 end = node->start + node->size;
389	struct drm_mm_node *hole;
390	u64 hole_start, hole_end;
391	u64 adj_start, adj_end;
392
393	end = node->start + node->size;
394	if (unlikely(end <= node->start))
395		return -ENOSPC;
396
397	/* Find the relevant hole to add our node to */
398	hole = find_hole(mm, node->start);
399	if (!hole)
400		return -ENOSPC;
401
402	adj_start = hole_start = __drm_mm_hole_node_start(hole);
403	adj_end = hole_end = hole_start + hole->hole_size;
404
405	if (mm->color_adjust)
406		mm->color_adjust(hole, node->color, &adj_start, &adj_end);
407
408	if (adj_start > node->start || adj_end < end)
409		return -ENOSPC;
410
411	node->mm = mm;
412
413	list_add(&node->node_list, &hole->node_list);
414	drm_mm_interval_tree_add_node(hole, node);
415	node->allocated = true;
416	node->hole_size = 0;
417
418	rm_hole(hole);
419	if (node->start > hole_start)
420		add_hole(hole);
421	if (end < hole_end)
422		add_hole(node);
423
424	save_stack(node);
425	return 0;
426}
427EXPORT_SYMBOL(drm_mm_reserve_node);
428
429/**
430 * drm_mm_insert_node_in_range - ranged search for space and insert @node
431 * @mm: drm_mm to allocate from
432 * @node: preallocate node to insert
433 * @size: size of the allocation
434 * @alignment: alignment of the allocation
435 * @color: opaque tag value to use for this node
436 * @range_start: start of the allowed range for this node
437 * @range_end: end of the allowed range for this node
438 * @mode: fine-tune the allocation search and placement
439 *
440 * The preallocated @node must be cleared to 0.
441 *
442 * Returns:
443 * 0 on success, -ENOSPC if there's no suitable hole.
444 */
445int drm_mm_insert_node_in_range(struct drm_mm * const mm,
446				struct drm_mm_node * const node,
447				u64 size, u64 alignment,
448				unsigned long color,
449				u64 range_start, u64 range_end,
450				enum drm_mm_insert_mode mode)
451{
452	struct drm_mm_node *hole;
453	u64 remainder_mask;
454
455	DRM_MM_BUG_ON(range_start >= range_end);
456
457	if (unlikely(size == 0 || range_end - range_start < size))
458		return -ENOSPC;
459
460	if (alignment <= 1)
461		alignment = 0;
462
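	/* For power-of-two alignments, masking with (alignment - 1) is
	 * equivalent to adj_start % alignment; only other alignments pay
	 * for a div64_u64_rem() inside the search loop below. */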
463	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
464	for (hole = first_hole(mm, range_start, range_end, size, mode); hole;
465	     hole = next_hole(mm, hole, mode)) {
466		u64 hole_start = __drm_mm_hole_node_start(hole);
467		u64 hole_end = hole_start + hole->hole_size;
468		u64 adj_start, adj_end;
469		u64 col_start, col_end;
470
471		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
472			break;
473
474		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
475			break;
476
477		col_start = hole_start;
478		col_end = hole_end;
479		if (mm->color_adjust)
480			mm->color_adjust(hole, color, &col_start, &col_end);
481
482		adj_start = max(col_start, range_start);
483		adj_end = min(col_end, range_end);
484
485		if (adj_end <= adj_start || adj_end - adj_start < size)
486			continue;
487
488		if (mode == DRM_MM_INSERT_HIGH)
489			adj_start = adj_end - size;
490
491		if (alignment) {
492			u64 rem;
493
494			if (likely(remainder_mask))
495				rem = adj_start & remainder_mask;
496			else
497				div64_u64_rem(adj_start, alignment, &rem);
498			if (rem) {
499				adj_start -= rem;
500				if (mode != DRM_MM_INSERT_HIGH)
501					adj_start += alignment;
502
503				if (adj_start < max(col_start, range_start) ||
504				    min(col_end, range_end) - adj_start < size)
505					continue;
506
507				if (adj_end <= adj_start ||
508				    adj_end - adj_start < size)
509					continue;
510			}
511		}
512
513		node->mm = mm;
514		node->size = size;
515		node->start = adj_start;
516		node->color = color;
517		node->hole_size = 0;
518
519		list_add(&node->node_list, &hole->node_list);
520		drm_mm_interval_tree_add_node(hole, node);
521		node->allocated = true;
522
523		rm_hole(hole);
524		if (adj_start > hole_start)
525			add_hole(hole);
526		if (adj_start + size < hole_end)
527			add_hole(node);
528
529		save_stack(node);
530		return 0;
531	}
532
533	return -ENOSPC;
534}
535EXPORT_SYMBOL(drm_mm_insert_node_in_range);
536
537/**
538 * drm_mm_remove_node - Remove a memory node from the allocator.
539 * @node: drm_mm_node to remove
540 *
541 * This just removes a node from its drm_mm allocator. The node does not need to
542 * be cleared again before it can be re-inserted into this or any other drm_mm
543 * allocator. It is a bug to call this function on an unallocated node.
544 */
545void drm_mm_remove_node(struct drm_mm_node *node)
546{
547	struct drm_mm *mm = node->mm;
548	struct drm_mm_node *prev_node;
549
550	DRM_MM_BUG_ON(!node->allocated);
551	DRM_MM_BUG_ON(node->scanned_block);
552
553	prev_node = list_prev_entry(node, node_list);
554
555	if (drm_mm_hole_follows(node))
556		rm_hole(node);
557
558	drm_mm_interval_tree_remove(node, &mm->interval_tree);
559	list_del(&node->node_list);
560	node->allocated = false;
561
562	if (drm_mm_hole_follows(prev_node))
563		rm_hole(prev_node);
564	add_hole(prev_node);
565}
566EXPORT_SYMBOL(drm_mm_remove_node);
567
568/**
569 * drm_mm_replace_node - move an allocation from @old to @new
570 * @old: drm_mm_node to remove from the allocator
571 * @new: drm_mm_node which should inherit @old's allocation
572 *
573 * This is useful for when drivers embed the drm_mm_node structure and hence
574 * can't move allocations by reassigning pointers. It's a combination of remove
575 * and insert with the guarantee that the allocation start will match.
576 */
577void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
578{
579	struct drm_mm *mm = old->mm;
580
581	DRM_MM_BUG_ON(!old->allocated);
582
583	*new = *old;
584
585	list_replace(&old->node_list, &new->node_list);
586	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
587
588	if (drm_mm_hole_follows(old)) {
589		list_replace(&old->hole_stack, &new->hole_stack);
590		rb_replace_node(&old->rb_hole_size,
591				&new->rb_hole_size,
592				&mm->holes_size);
593		rb_replace_node(&old->rb_hole_addr,
594				&new->rb_hole_addr,
595				&mm->holes_addr);
596	}
597
598	old->allocated = false;
599	new->allocated = true;
600}
601EXPORT_SYMBOL(drm_mm_replace_node);
602
603/**
604 * DOC: lru scan roster
605 *
606 * Very often GPUs need to have continuous allocations for a given object. When
607 * evicting objects to make space for a new one it is therefore not most
608 * efficient when we simply start to select all objects from the tail of an LRU
609 * until there's a suitable hole: Especially for big objects or nodes that
610 * otherwise have special allocation constraints there's a good chance we evict
611 * lots of (smaller) objects unnecessarily.
612 *
613 * The DRM range allocator supports this use-case through the scanning
614 * interfaces. First a scan operation needs to be initialized with
615 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
616 * objects to the roster, probably by walking an LRU list, but this can be
617 * freely implemented. Eviction candidates are added using
618 * drm_mm_scan_add_block() until a suitable hole is found or there are no
619 * further evictable objects. Eviction roster metadata is tracked in &struct
620 * drm_mm_scan.
621 *
622 * The driver must walk through all objects again in exactly the reverse
623 * order to restore the allocator state. Note that while the allocator is used
624 * in the scan mode no other operation is allowed.
625 *
626 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
627 * reported true) in the scan, and any overlapping nodes after color adjustment
628 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
629 * since freeing a node is also O(1) the overall complexity is
630 * O(scanned_objects). So like the free stack which needs to be walked before a
631 * scan operation even begins this is linear in the number of objects. It
632 * doesn't seem to hurt too badly.
633 */
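The same eviction flow with the v4.17 interface, where the roster state lives in a caller-provided struct drm_mm_scan; my_obj, the LRU list and my_unbind() are hypothetical as in the v4.6 sketch. drm_mm_scan_init() is the full-range convenience wrapper around drm_mm_scan_init_with_range() from drm_mm.h.

struct my_obj {
	struct drm_mm_node node;
	struct list_head lru_link, scan_link;
};

static int my_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
			     u64 size, u64 alignment)
{
	struct drm_mm_scan scan;
	struct my_obj *obj, *next;
	struct drm_mm_node *node;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, alignment, 0, DRM_MM_INSERT_BEST);

	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &scan_list);	/* reversed on unwind */
		if (drm_mm_scan_add_block(&scan, &obj->node)) {
			found = true;
			break;
		}
	}

	/* Unwind in reverse order, keeping the blocks selected for eviction. */
	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(&scan, &obj->node) && found)
			list_move(&obj->scan_link, &evict_list);
		else
			list_del(&obj->scan_link);
	}

	if (!found)
		return -ENOSPC;

	list_for_each_entry_safe(obj, next, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		my_unbind(obj);			/* hypothetical */
	}

	/* With color_adjust in use, neighbours overlapping the hole may
	 * still need to go (see drm_mm_scan_color_evict() below). */
	while ((node = drm_mm_scan_color_evict(&scan)))
		my_unbind(container_of(node, struct my_obj, node));

	return 0;
}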
634
635/**
636 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
637 * @scan: scan state
638 * @mm: drm_mm to scan
639 * @size: size of the allocation
640 * @alignment: alignment of the allocation
641 * @color: opaque tag value to use for the allocation
642 * @start: start of the allowed range for the allocation
643 * @end: end of the allowed range for the allocation
644 * @mode: fine-tune the allocation search and placement
645 *
646 * This simply sets up the scanning routines with the parameters for the desired
647 * hole.
648 *
649 * Warning:
650 * As long as the scan list is non-empty, no other operations than
651 * adding/removing nodes to/from the scan list are allowed.
652 */
653void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
654				 struct drm_mm *mm,
655				 u64 size,
656				 u64 alignment,
657				 unsigned long color,
658				 u64 start,
659				 u64 end,
660				 enum drm_mm_insert_mode mode)
661{
662	DRM_MM_BUG_ON(start >= end);
663	DRM_MM_BUG_ON(!size || size > end - start);
664	DRM_MM_BUG_ON(mm->scan_active);
665
666	scan->mm = mm;
667
668	if (alignment <= 1)
669		alignment = 0;
670
671	scan->color = color;
672	scan->alignment = alignment;
673	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
674	scan->size = size;
675	scan->mode = mode;
676
677	DRM_MM_BUG_ON(end <= start);
678	scan->range_start = start;
679	scan->range_end = end;
680
681	scan->hit_start = U64_MAX;
682	scan->hit_end = 0;
683}
684EXPORT_SYMBOL(drm_mm_scan_init_with_range);
685
686/**
687 * drm_mm_scan_add_block - add a node to the scan list
688 * @scan: the active drm_mm scanner
689 * @node: drm_mm_node to add
690 *
691 * Add a node to the scan list that might be freed to make space for the desired
692 * hole.
693 *
694 * Returns:
695 * True if a hole has been found, false otherwise.
696 */
697bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
698			   struct drm_mm_node *node)
699{
700	struct drm_mm *mm = scan->mm;
701	struct drm_mm_node *hole;
702	u64 hole_start, hole_end;
703	u64 col_start, col_end;
704	u64 adj_start, adj_end;
705
706	DRM_MM_BUG_ON(node->mm != mm);
707	DRM_MM_BUG_ON(!node->allocated);
708	DRM_MM_BUG_ON(node->scanned_block);
709	node->scanned_block = true;
710	mm->scan_active++;
711
712	/* Remove this block from the node_list so that we enlarge the hole
713	 * (distance between the end of our previous node and the start of
714	 * our next), without poisoning the link so that we can restore it
715	 * later in drm_mm_scan_remove_block().
716	 */
717	hole = list_prev_entry(node, node_list);
718	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
719	__list_del_entry(&node->node_list);
720
721	hole_start = __drm_mm_hole_node_start(hole);
722	hole_end = __drm_mm_hole_node_end(hole);
723
724	col_start = hole_start;
725	col_end = hole_end;
726	if (mm->color_adjust)
727		mm->color_adjust(hole, scan->color, &col_start, &col_end);
728
729	adj_start = max(col_start, scan->range_start);
730	adj_end = min(col_end, scan->range_end);
731	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
732		return false;
733
734	if (scan->mode == DRM_MM_INSERT_HIGH)
735		adj_start = adj_end - scan->size;
736
737	if (scan->alignment) {
738		u64 rem;
739
740		if (likely(scan->remainder_mask))
741			rem = adj_start & scan->remainder_mask;
742		else
743			div64_u64_rem(adj_start, scan->alignment, &rem);
744		if (rem) {
745			adj_start -= rem;
746			if (scan->mode != DRM_MM_INSERT_HIGH)
747				adj_start += scan->alignment;
748			if (adj_start < max(col_start, scan->range_start) ||
749			    min(col_end, scan->range_end) - adj_start < scan->size)
750				return false;
751
752			if (adj_end <= adj_start ||
753			    adj_end - adj_start < scan->size)
754				return false;
755		}
756	}
757
758	scan->hit_start = adj_start;
759	scan->hit_end = adj_start + scan->size;
760
761	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
762	DRM_MM_BUG_ON(scan->hit_start < hole_start);
763	DRM_MM_BUG_ON(scan->hit_end > hole_end);
764
765	return true;
766}
767EXPORT_SYMBOL(drm_mm_scan_add_block);
768
769/**
770 * drm_mm_scan_remove_block - remove a node from the scan list
771 * @scan: the active drm_mm scanner
772 * @node: drm_mm_node to remove
773 *
774 * Nodes **must** be removed in exactly the reverse order from the scan list as
775 * they have been added (e.g. using list_add() as they are added and then
776 * list_for_each() over that eviction list to remove), otherwise the internal
777 * state of the memory manager will be corrupted.
778 *
779 * When the scan list is empty, the selected memory nodes can be freed. An
780 * immediately following drm_mm_insert_node_in_range_generic() or one of the
781 * simpler versions of that function with !DRM_MM_SEARCH_BEST will then return
782 * the just freed block (because it's at the top of the free_stack list).
783 *
784 * Returns:
785 * True if this block should be evicted, false otherwise. Will always
786 * return false when no hole has been found.
787 */
788bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
789			      struct drm_mm_node *node)
790{
791	struct drm_mm_node *prev_node;
792
793	DRM_MM_BUG_ON(node->mm != scan->mm);
794	DRM_MM_BUG_ON(!node->scanned_block);
795	node->scanned_block = false;
796
797	DRM_MM_BUG_ON(!node->mm->scan_active);
798	node->mm->scan_active--;
799
800	/* During drm_mm_scan_add_block() we decoupled this node leaving
801	 * its pointers intact. Now that the caller is walking back along
802	 * the eviction list we can restore this block into its rightful
803	 * place on the full node_list. To confirm that the caller is walking
804	 * backwards correctly we check that prev_node->next == node->next,
805	 * i.e. both believe the same node should be on the other side of the
806	 * hole.
807	 */
808	prev_node = list_prev_entry(node, node_list);
809	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
810		      list_next_entry(node, node_list));
811	list_add(&node->node_list, &prev_node->node_list);
812
813	return (node->start + node->size > scan->hit_start &&
814		node->start < scan->hit_end);
815}
816EXPORT_SYMBOL(drm_mm_scan_remove_block);
817
818/**
819 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
820 * @scan: drm_mm scan with target hole
821 *
822 * After completing an eviction scan and removing the selected nodes, we may
823 * need to remove a few more nodes from either side of the target hole if
824 * mm.color_adjust is being used.
825 *
826 * Returns:
827 * A node to evict, or NULL if there are no overlapping nodes.
828 */
829struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
830{
831	struct drm_mm *mm = scan->mm;
832	struct drm_mm_node *hole;
833	u64 hole_start, hole_end;
834
835	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
836
837	if (!mm->color_adjust)
838		return NULL;
839
840	/*
841	 * The hole found during scanning should ideally be the first element
842	 * in the hole_stack list, but due to side-effects in the driver it
843	 * may not be.
844	 */
845	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
846		hole_start = __drm_mm_hole_node_start(hole);
847		hole_end = hole_start + hole->hole_size;
848
849		if (hole_start <= scan->hit_start &&
850		    hole_end >= scan->hit_end)
851			break;
852	}
853
854	/* We should only be called after we found the hole previously */
855	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
856	if (unlikely(&hole->hole_stack == &mm->hole_stack))
857		return NULL;
858
859	DRM_MM_BUG_ON(hole_start > scan->hit_start);
860	DRM_MM_BUG_ON(hole_end < scan->hit_end);
861
862	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
863	if (hole_start > scan->hit_start)
864		return hole;
865	if (hole_end < scan->hit_end)
866		return list_next_entry(hole, node_list);
867
868	return NULL;
869}
870EXPORT_SYMBOL(drm_mm_scan_color_evict);
871
872/**
873 * drm_mm_init - initialize a drm-mm allocator
874 * @mm: the drm_mm structure to initialize
875 * @start: start of the range managed by @mm
876 * @size: size of the range managed by @mm
877 *
878 * Note that @mm must be cleared to 0 before calling this function.
879 */
880void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
881{
882	DRM_MM_BUG_ON(start + size <= start);
883
884	mm->color_adjust = NULL;
885
886	INIT_LIST_HEAD(&mm->hole_stack);
887	mm->interval_tree = RB_ROOT_CACHED;
888	mm->holes_size = RB_ROOT;
889	mm->holes_addr = RB_ROOT;
890
891	/* Clever trick to avoid a special case in the free hole tracking. */
892	INIT_LIST_HEAD(&mm->head_node.node_list);
893	mm->head_node.allocated = false;
894	mm->head_node.mm = mm;
895	mm->head_node.start = start + size;
896	mm->head_node.size = -size;
897	add_hole(&mm->head_node);
898
899	mm->scan_active = 0;
900}
901EXPORT_SYMBOL(drm_mm_init);
902
903/**
904 * drm_mm_takedown - clean up a drm_mm allocator
905 * @mm: drm_mm allocator to clean up
906 *
907 * Note that it is a bug to call this function on an allocator which is not
908 * clean.
909 */
910void drm_mm_takedown(struct drm_mm *mm)
911{
912	if (WARN(!drm_mm_clean(mm),
913		 "Memory manager not clean during takedown.\n"))
914		show_leaks(mm);
915}
916EXPORT_SYMBOL(drm_mm_takedown);
917
918static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
919{
920	u64 start, size;
921
922	size = entry->hole_size;
923	if (size) {
924		start = drm_mm_hole_node_start(entry);
925		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
926			   start, start + size, size);
927	}
928
929	return size;
930}
931/**
932 * drm_mm_print - print allocator state
933 * @mm: drm_mm allocator to print
934 * @p: DRM printer to use
935 */
936void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
937{
938	const struct drm_mm_node *entry;
939	u64 total_used = 0, total_free = 0, total = 0;
940
941	total_free += drm_mm_dump_hole(p, &mm->head_node);
942
943	drm_mm_for_each_node(entry, mm) {
944		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
945			   entry->start + entry->size, entry->size);
946		total_used += entry->size;
947		total_free += drm_mm_dump_hole(p, entry);
948	}
949	total = total_free + total_used;
950
951	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
952		   total_used, total_free);
953}
954EXPORT_SYMBOL(drm_mm_print);