  1/**************************************************************************
  2 *
  3 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 *
 27 **************************************************************************/
 28
 29/*
 30 * Generic simple memory manager implementation. Intended to be used as a base
 31 * class implementation for more advanced memory managers.
 32 *
 33 * Note that the algorithm used is quite simple and there might be substantial
 34 * performance gains if a smarter free list is implemented. Currently it is just an
 35 * unordered stack of free regions. This could easily be improved by using an
 36 * RB-tree instead, at least if we expect heavy fragmentation.
 37 *
 38 * Aligned allocations can also see improvement.
 39 *
 40 * Authors:
 41 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 42 */
 43
 44#include <drm/drmP.h>
 45#include <drm/drm_mm.h>
 46#include <linux/slab.h>
 47#include <linux/seq_file.h>
 48#include <linux/export.h>
 49
 50/**
 51 * DOC: Overview
 52 *
 53 * drm_mm provides a simple range allocator. Drivers are free to use the
 54 * resource allocator from the Linux core instead if it suits them; the upside
 55 * of drm_mm is that it's in the DRM core, which means that it's easier to
 56 * extend for some of the crazier special-purpose needs of GPUs.
 57 *
 58 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 59 * Drivers are free to embed either of them into their own suitable
 60 * datastructures. drm_mm itself will not do any allocations of its own, so if
 61 * drivers choose not to embed nodes they need to still allocate them
 62 * themselves.
 63 *
 64 * The range allocator also supports reservation of preallocated blocks. This is
 65 * useful for taking over initial mode setting configurations from the firmware,
 66 * where an object needs to be created which exactly matches the firmware's
 67 * scanout target. As long as the range is still free it can be inserted anytime
 68 * after the allocator is initialized, which helps with avoiding looped
 69 * dependencies in the driver load sequence.
 70 *
 71 * drm_mm maintains a stack of most recently freed holes, which of all
 72 * simplistic datastructures seems to be a fairly decent approach to clustering
 73 * allocations and avoiding too much fragmentation. This means free space
 74 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 75 * something better would be fairly complex, and since gfx thrashing is a
 76 * fairly steep cliff it is not a real concern. Removing a node again is O(1).
 77 *
 78 * drm_mm supports a few features: Alignment and range restrictions can be
 79 * supplied. Furthermore, every &drm_mm_node has a color value (which is just
 80 * an opaque unsigned long) which in conjunction with a driver callback can be
 81 * used to implement sophisticated placement restrictions. The i915 DRM driver
 82 * uses this to implement guard pages between incompatible caching domains in the
 83 * graphics TT.
 84 *
 85 * Two behaviors are supported for searching and allocating: bottom-up and top-down.
 86 * The default is bottom-up. Top-down allocation can be used if the memory area
 87 * has different restrictions, or just to reduce fragmentation.
 88 *
 89 * Finally, iteration helpers to walk all nodes and all holes are provided, as
 90 * are some basic allocator dumpers for debugging.
 91 */
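/*
 * A minimal usage sketch of the two core entry points, assuming a
 * hypothetical driver that embeds struct drm_mm_node in its own buffer
 * object. my_bo, my_bo_pin() and my_bo_unpin() are illustrative
 * assumptions; only the drm_mm calls are real.
 */
#if 0	/* illustrative only, not compiled */
struct my_bo {
	struct drm_mm_node vram_node;	/* embedded: no separate allocation */
	unsigned long size;
};

static int my_bo_pin(struct drm_mm *vram_mm, struct my_bo *bo)
{
	/* the node must be cleared to 0 before insertion */
	memset(&bo->vram_node, 0, sizeof(bo->vram_node));
	return drm_mm_insert_node_generic(vram_mm, &bo->vram_node,
					  bo->size, 0, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}

static void my_bo_unpin(struct my_bo *bo)
{
	drm_mm_remove_node(&bo->vram_node);
}
#endif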
 92
 93static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
 94						unsigned long size,
 95						unsigned alignment,
 96						unsigned long color,
 97						enum drm_mm_search_flags flags);
 98static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
 99						unsigned long size,
100						unsigned alignment,
101						unsigned long color,
102						unsigned long start,
103						unsigned long end,
104						enum drm_mm_search_flags flags);
105
106static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
107				 struct drm_mm_node *node,
108				 unsigned long size, unsigned alignment,
109				 unsigned long color,
110				 enum drm_mm_allocator_flags flags)
111{
112	struct drm_mm *mm = hole_node->mm;
113	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
114	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
115	unsigned long adj_start = hole_start;
116	unsigned long adj_end = hole_end;
117
118	BUG_ON(node->allocated);
119
120	if (mm->color_adjust)
121		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
122
123	if (flags & DRM_MM_CREATE_TOP)
124		adj_start = adj_end - size;
125
126	if (alignment) {
127		unsigned tmp = adj_start % alignment;
128		if (tmp) {
129			if (flags & DRM_MM_CREATE_TOP)
130				adj_start -= tmp;
131			else
132				adj_start += alignment - tmp;
133		}
134	}
135
136	BUG_ON(adj_start < hole_start);
137	BUG_ON(adj_end > hole_end);
138
139	if (adj_start == hole_start) {
140		hole_node->hole_follows = 0;
141		list_del(&hole_node->hole_stack);
142	}
143
144	node->start = adj_start;
145	node->size = size;
146	node->mm = mm;
147	node->color = color;
148	node->allocated = 1;
149
150	INIT_LIST_HEAD(&node->hole_stack);
151	list_add(&node->node_list, &hole_node->node_list);
152
153	BUG_ON(node->start + node->size > adj_end);
154
155	node->hole_follows = 0;
156	if (__drm_mm_hole_node_start(node) < hole_end) {
157		list_add(&node->hole_stack, &mm->hole_stack);
158		node->hole_follows = 1;
159	}
160}
161
162/**
163 * drm_mm_reserve_node - insert a pre-initialized node
164 * @mm: drm_mm allocator to insert @node into
165 * @node: drm_mm_node to insert
166 *
167 * This function inserts an already set-up drm_mm_node into the allocator,
168 * meaning that start, size and color must be set by the caller. This is useful
169 * to initialize the allocator with preallocated objects which must be set-up
170 * before the range allocator can be set-up, e.g. when taking over a firmware
171 * framebuffer.
172 *
173 * Returns:
174 * 0 on success, -ENOSPC if there's no hole where @node is.
175 */
176int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
177{
178	struct drm_mm_node *hole;
179	unsigned long end = node->start + node->size;
180	unsigned long hole_start;
181	unsigned long hole_end;
182
183	BUG_ON(node == NULL);
184
185	/* Find the relevant hole to add our node to */
186	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
187		if (hole_start > node->start || hole_end < end)
188			continue;
189
190		node->mm = mm;
191		node->allocated = 1;
192
193		INIT_LIST_HEAD(&node->hole_stack);
194		list_add(&node->node_list, &hole->node_list);
195
196		if (node->start == hole_start) {
197			hole->hole_follows = 0;
198			list_del_init(&hole->hole_stack);
199		}
200
201		node->hole_follows = 0;
202		if (end != hole_end) {
203			list_add(&node->hole_stack, &mm->hole_stack);
204			node->hole_follows = 1;
205		}
206
207		return 0;
208	}
209
210	return -ENOSPC;
211}
212EXPORT_SYMBOL(drm_mm_reserve_node);
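/*
 * Sketch of the firmware-framebuffer takeover described above. fb_start and
 * fb_size are assumptions standing in for the inherited scanout
 * configuration; only drm_mm_reserve_node() is real.
 */
#if 0	/* illustrative only, not compiled */
static int my_takeover_fw_fb(struct drm_mm *mm, struct drm_mm_node *node,
			     unsigned long fb_start, unsigned long fb_size)
{
	/* start, size and color must be set up before reserving */
	node->start = fb_start;
	node->size = fb_size;
	node->color = 0;

	return drm_mm_reserve_node(mm, node);	/* -ENOSPC if range is taken */
}
#endif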
213
214/**
215 * drm_mm_insert_node_generic - search for space and insert @node
216 * @mm: drm_mm to allocate from
217 * @node: preallocated node to insert
218 * @size: size of the allocation
219 * @alignment: alignment of the allocation
220 * @color: opaque tag value to use for this node
221 * @sflags: flags to fine-tune the allocation search
222 * @aflags: flags to fine-tune the allocation behavior
223 *
224 * The preallocated node must be cleared to 0.
225 *
226 * Returns:
227 * 0 on success, -ENOSPC if there's no suitable hole.
228 */
229int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
230			       unsigned long size, unsigned alignment,
231			       unsigned long color,
232			       enum drm_mm_search_flags sflags,
233			       enum drm_mm_allocator_flags aflags)
234{
235	struct drm_mm_node *hole_node;
236
237	hole_node = drm_mm_search_free_generic(mm, size, alignment,
238					       color, sflags);
239	if (!hole_node)
240		return -ENOSPC;
241
242	drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
243	return 0;
244}
245EXPORT_SYMBOL(drm_mm_insert_node_generic);
246
247static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
248				       struct drm_mm_node *node,
249				       unsigned long size, unsigned alignment,
250				       unsigned long color,
251				       unsigned long start, unsigned long end,
252				       enum drm_mm_allocator_flags flags)
253{
254	struct drm_mm *mm = hole_node->mm;
255	unsigned long hole_start = drm_mm_hole_node_start(hole_node);
256	unsigned long hole_end = drm_mm_hole_node_end(hole_node);
257	unsigned long adj_start = hole_start;
258	unsigned long adj_end = hole_end;
259
260	BUG_ON(!hole_node->hole_follows || node->allocated);
261
262	if (adj_start < start)
263		adj_start = start;
264	if (adj_end > end)
265		adj_end = end;
266
267	if (flags & DRM_MM_CREATE_TOP)
268		adj_start = adj_end - size;
269
270	if (mm->color_adjust)
271		mm->color_adjust(hole_node, color, &adj_start, &adj_end);
272
273	if (alignment) {
274		unsigned tmp = adj_start % alignment;
275		if (tmp) {
276			if (flags & DRM_MM_CREATE_TOP)
277				adj_start -= tmp;
278			else
279				adj_start += alignment - tmp;
280		}
281	}
282
283	if (adj_start == hole_start) {
284		hole_node->hole_follows = 0;
285		list_del(&hole_node->hole_stack);
286	}
287
288	node->start = adj_start;
289	node->size = size;
290	node->mm = mm;
291	node->color = color;
292	node->allocated = 1;
293
294	INIT_LIST_HEAD(&node->hole_stack);
295	list_add(&node->node_list, &hole_node->node_list);
296
297	BUG_ON(node->start < start);
298	BUG_ON(node->start < adj_start);
299	BUG_ON(node->start + node->size > adj_end);
300	BUG_ON(node->start + node->size > end);
301
302	node->hole_follows = 0;
303	if (__drm_mm_hole_node_start(node) < hole_end) {
304		list_add(&node->hole_stack, &mm->hole_stack);
305		node->hole_follows = 1;
306	}
307}
308
309/**
310 * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node
311 * @mm: drm_mm to allocate from
312 * @node: preallocated node to insert
313 * @size: size of the allocation
314 * @alignment: alignment of the allocation
315 * @color: opaque tag value to use for this node
316 * @start: start of the allowed range for this node
317 * @end: end of the allowed range for this node
318 * @sflags: flags to fine-tune the allocation search
319 * @aflags: flags to fine-tune the allocation behavior
320 *
321 * The preallocated node must be cleared to 0.
322 *
323 * Returns:
324 * 0 on success, -ENOSPC if there's no suitable hole.
325 */
326int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
327					unsigned long size, unsigned alignment,
328					unsigned long color,
329					unsigned long start, unsigned long end,
330					enum drm_mm_search_flags sflags,
331					enum drm_mm_allocator_flags aflags)
332{
333	struct drm_mm_node *hole_node;
334
335	hole_node = drm_mm_search_free_in_range_generic(mm,
336							size, alignment, color,
337							start, end, sflags);
338	if (!hole_node)
339		return -ENOSPC;
340
341	drm_mm_insert_helper_range(hole_node, node,
342				   size, alignment, color,
343				   start, end, aflags);
344	return 0;
345}
346EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
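/*
 * Sketch of a range-restricted, top-down allocation, e.g. placing a buffer
 * in the upper half of a VRAM aperture. The half-way split and
 * my_alloc_high() are illustrative assumptions.
 */
#if 0	/* illustrative only, not compiled */
static int my_alloc_high(struct drm_mm *mm, struct drm_mm_node *node,
			 unsigned long size, unsigned long vram_size)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size,
						   0 /* alignment */,
						   0 /* color */,
						   vram_size / 2, vram_size,
						   DRM_MM_SEARCH_BELOW,
						   DRM_MM_CREATE_TOP);
}
#endif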
347
348/**
349 * drm_mm_remove_node - Remove a memory node from the allocator.
350 * @node: drm_mm_node to remove
351 *
352 * This just removes a node from its drm_mm allocator. The node does not need to
353 * be cleared again before it can be re-inserted into this or any other drm_mm
354 * allocator. It is a bug to call this function on an unallocated node.
355 */
356void drm_mm_remove_node(struct drm_mm_node *node)
357{
358	struct drm_mm *mm = node->mm;
359	struct drm_mm_node *prev_node;
360
361	if (WARN_ON(!node->allocated))
362		return;
363
364	BUG_ON(node->scanned_block || node->scanned_prev_free
365				   || node->scanned_next_free);
366
367	prev_node =
368	    list_entry(node->node_list.prev, struct drm_mm_node, node_list);
369
370	if (node->hole_follows) {
371		BUG_ON(__drm_mm_hole_node_start(node) ==
372		       __drm_mm_hole_node_end(node));
373		list_del(&node->hole_stack);
374	} else
375		BUG_ON(__drm_mm_hole_node_start(node) !=
376		       __drm_mm_hole_node_end(node));
377
378
379	if (!prev_node->hole_follows) {
380		prev_node->hole_follows = 1;
381		list_add(&prev_node->hole_stack, &mm->hole_stack);
382	} else
383		list_move(&prev_node->hole_stack, &mm->hole_stack);
384
385	list_del(&node->node_list);
386	node->allocated = 0;
387}
388EXPORT_SYMBOL(drm_mm_remove_node);
389
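/*
 * Worked example for check_free_hole() below (illustrative numbers): a hole
 * [0x1003, 0x3000) probed for size 0x1000 with alignment 0x1000 has its
 * start rounded up to 0x2000; 0x2000 + 0x1000 <= 0x3000, so the hole fits
 * and the function returns 1.
 */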
390static int check_free_hole(unsigned long start, unsigned long end,
391			   unsigned long size, unsigned alignment)
392{
393	if (end - start < size)
394		return 0;
395
396	if (alignment) {
397		unsigned tmp = start % alignment;
398		if (tmp)
399			start += alignment - tmp;
400	}
401
402	return end >= start + size;
403}
404
405static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
406						      unsigned long size,
407						      unsigned alignment,
408						      unsigned long color,
409						      enum drm_mm_search_flags flags)
410{
411	struct drm_mm_node *entry;
412	struct drm_mm_node *best;
413	unsigned long adj_start;
414	unsigned long adj_end;
415	unsigned long best_size;
416
417	BUG_ON(mm->scanned_blocks);
418
419	best = NULL;
420	best_size = ~0UL;
421
422	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
423			       flags & DRM_MM_SEARCH_BELOW) {
424		unsigned long hole_size = adj_end - adj_start;
425
426		if (mm->color_adjust) {
427			mm->color_adjust(entry, color, &adj_start, &adj_end);
428			if (adj_end <= adj_start)
429				continue;
430		}
431
432		if (!check_free_hole(adj_start, adj_end, size, alignment))
433			continue;
434
435		if (!(flags & DRM_MM_SEARCH_BEST))
436			return entry;
437
438		if (hole_size < best_size) {
439			best = entry;
440			best_size = hole_size;
441		}
442	}
443
444	return best;
445}
446
447static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
448							unsigned long size,
449							unsigned alignment,
450							unsigned long color,
451							unsigned long start,
452							unsigned long end,
453							enum drm_mm_search_flags flags)
454{
455	struct drm_mm_node *entry;
456	struct drm_mm_node *best;
457	unsigned long adj_start;
458	unsigned long adj_end;
459	unsigned long best_size;
460
461	BUG_ON(mm->scanned_blocks);
462
463	best = NULL;
464	best_size = ~0UL;
465
466	__drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
467			       flags & DRM_MM_SEARCH_BELOW) {
468		unsigned long hole_size = adj_end - adj_start;
469
470		if (adj_start < start)
471			adj_start = start;
472		if (adj_end > end)
473			adj_end = end;
474
475		if (mm->color_adjust) {
476			mm->color_adjust(entry, color, &adj_start, &adj_end);
477			if (adj_end <= adj_start)
478				continue;
479		}
480
481		if (!check_free_hole(adj_start, adj_end, size, alignment))
482			continue;
483
484		if (!(flags & DRM_MM_SEARCH_BEST))
485			return entry;
486
487		if (hole_size < best_size) {
488			best = entry;
489			best_size = hole_size;
490		}
491	}
492
493	return best;
494}
495
496/**
497 * drm_mm_replace_node - move an allocation from @old to @new
498 * @old: drm_mm_node to remove from the allocator
499 * @new: drm_mm_node which should inherit @old's allocation
500 *
501 * This is useful for when drivers embed the drm_mm_node structure and hence
502 * can't move allocations by reassigning pointers. It's a combination of remove
503 * and insert with the guarantee that the allocation start will match.
504 */
505void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
506{
507	list_replace(&old->node_list, &new->node_list);
508	list_replace(&old->hole_stack, &new->hole_stack);
509	new->hole_follows = old->hole_follows;
510	new->mm = old->mm;
511	new->start = old->start;
512	new->size = old->size;
513	new->color = old->color;
514
515	old->allocated = 0;
516	new->allocated = 1;
517}
518EXPORT_SYMBOL(drm_mm_replace_node);
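/*
 * Sketch reusing struct my_bo from the overview example: handing an
 * allocation over from one embedded node to another, e.g. when the driver
 * swaps wrapper objects but wants to keep the mapped range.
 */
#if 0	/* illustrative only, not compiled */
static void my_bo_transfer(struct my_bo *from, struct my_bo *to)
{
	/* @to inherits start/size/color; @from becomes unallocated */
	drm_mm_replace_node(&from->vram_node, &to->vram_node);
}
#endif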
519
520/**
521 * DOC: lru scan roster
522 *
523 * Very often GPUs need to have contiguous allocations for a given object. When
524 * evicting objects to make space for a new one it is therefore not the most
525 * efficient to simply start selecting objects from the tail of an LRU
526 * until there's a suitable hole: especially for big objects or nodes that
527 * otherwise have special allocation constraints there's a good chance we evict
528 * lots of (smaller) objects unnecessarily.
529 *
530 * The DRM range allocator supports this use-case through the scanning
531 * interfaces. First a scan operation needs to be initialized with
532 * drm_mm_init_scan() or drm_mm_init_scan_with_range(). Then the driver adds
533 * objects to the roster (probably by walking an LRU list, but this can be
534 * freely implemented) until a suitable hole is found or there's no further
535 * evictable object.
536 *
537 * Then the driver must walk through all objects again in exactly the reverse
538 * order to restore the allocator state. Note that while the allocator is used
539 * in scan mode, no other operation is allowed.
540 *
541 * Finally the driver evicts all objects selected in the scan. Adding and
542 * removing an object is O(1), and since freeing a node is also O(1) the overall
543 * complexity is O(scanned_objects). So, like the free stack which needs to be
544 * walked before a scan operation even begins, this is linear in the number of
545 * objects. It doesn't seem to hurt badly.
546 */
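/*
 * A sketch of the scan flow described above, loosely modeled on how i915
 * drives this interface. struct my_obj, its lru/scan_link members and
 * my_evict() are illustrative assumptions; only the drm_mm_* and list
 * helpers are real.
 */
#if 0	/* illustrative only, not compiled */
struct my_obj {
	struct list_head lru;		/* driver LRU of bound objects */
	struct list_head scan_link;	/* temporary roster membership */
	struct drm_mm_node node;
};

static int my_evict_for_hole(struct drm_mm *mm, struct list_head *lru,
			     unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *tmp;
	LIST_HEAD(roster);
	LIST_HEAD(evict_list);
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0);

	/* Feed blocks to the roster in LRU order until a hole turns up.
	 * list_add() prepends, so @roster ends up in reverse insertion
	 * order, which is exactly what the unwind below needs. */
	list_for_each_entry(obj, lru, lru) {
		list_add(&obj->scan_link, &roster);
		if (drm_mm_scan_add_block(&obj->node)) {
			found = true;
			break;
		}
	}

	/* Unwind in reverse order; collect the blocks that form the hole. */
	while (!list_empty(&roster)) {
		obj = list_first_entry(&roster, struct my_obj, scan_link);
		list_del(&obj->scan_link);
		if (drm_mm_scan_remove_block(&obj->node))
			list_add(&obj->scan_link, &evict_list);
	}

	/* The scan list is empty again, normal operations are allowed. */
	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
		list_del(&obj->scan_link);
		my_evict(obj);	/* hypothetical: unbinds and frees obj->node */
	}

	return found ? 0 : -ENOSPC;
}
#endif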
547
548/**
549 * drm_mm_init_scan - initialize lru scanning
550 * @mm: drm_mm to scan
551 * @size: size of the allocation
552 * @alignment: alignment of the allocation
553 * @color: opaque tag value to use for the allocation
554 *
555 * This simply sets up the scanning routines with the parameters for the desired
556 * hole. Note that there's no need to specify allocation flags, since they only
557 * change the place a node is allocated from within a suitable hole.
558 *
559 * Warning:
560 * As long as the scan list is non-empty, no other operations than
561 * adding/removing nodes to/from the scan list are allowed.
562 */
563void drm_mm_init_scan(struct drm_mm *mm,
564		      unsigned long size,
565		      unsigned alignment,
566		      unsigned long color)
567{
568	mm->scan_color = color;
569	mm->scan_alignment = alignment;
570	mm->scan_size = size;
571	mm->scanned_blocks = 0;
572	mm->scan_hit_start = 0;
573	mm->scan_hit_end = 0;
574	mm->scan_check_range = 0;
575	mm->prev_scanned_node = NULL;
576}
577EXPORT_SYMBOL(drm_mm_init_scan);
578
579/**
580 * drm_mm_init_scan_with_range - initialize range-restricted lru scanning
581 * @mm: drm_mm to scan
582 * @size: size of the allocation
583 * @alignment: alignment of the allocation
584 * @color: opaque tag value to use for the allocation
585 * @start: start of the allowed range for the allocation
586 * @end: end of the allowed range for the allocation
587 *
588 * This simply sets up the scanning routines with the parameters for the desired
589 * hole. Note that there's no need to specify allocation flags, since they only
590 * change the place a node is allocated from within a suitable hole.
591 *
592 * Warning:
593 * As long as the scan list is non-empty, no other operations than
594 * adding/removing nodes to/from the scan list are allowed.
595 */
596void drm_mm_init_scan_with_range(struct drm_mm *mm,
597				 unsigned long size,
598				 unsigned alignment,
599				 unsigned long color,
600				 unsigned long start,
601				 unsigned long end)
602{
603	mm->scan_color = color;
604	mm->scan_alignment = alignment;
605	mm->scan_size = size;
606	mm->scanned_blocks = 0;
607	mm->scan_hit_start = 0;
608	mm->scan_hit_end = 0;
609	mm->scan_start = start;
610	mm->scan_end = end;
611	mm->scan_check_range = 1;
612	mm->prev_scanned_node = NULL;
613}
614EXPORT_SYMBOL(drm_mm_init_scan_with_range);
615
616/**
617 * drm_mm_scan_add_block - add a node to the scan list
618 * @node: drm_mm_node to add
619 *
620 * Add a node to the scan list that might be freed to make space for the desired
621 * hole.
622 *
623 * Returns:
624 * True if a hole has been found, false otherwise.
625 */
626bool drm_mm_scan_add_block(struct drm_mm_node *node)
627{
628	struct drm_mm *mm = node->mm;
629	struct drm_mm_node *prev_node;
630	unsigned long hole_start, hole_end;
631	unsigned long adj_start, adj_end;
632
633	mm->scanned_blocks++;
634
635	BUG_ON(node->scanned_block);
636	node->scanned_block = 1;
637
638	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
639			       node_list);
640
641	node->scanned_preceeds_hole = prev_node->hole_follows;
642	prev_node->hole_follows = 1;
643	list_del(&node->node_list);
644	node->node_list.prev = &prev_node->node_list;
645	node->node_list.next = &mm->prev_scanned_node->node_list;
646	mm->prev_scanned_node = node;
647
648	adj_start = hole_start = drm_mm_hole_node_start(prev_node);
649	adj_end = hole_end = drm_mm_hole_node_end(prev_node);
650
651	if (mm->scan_check_range) {
652		if (adj_start < mm->scan_start)
653			adj_start = mm->scan_start;
654		if (adj_end > mm->scan_end)
655			adj_end = mm->scan_end;
656	}
657
658	if (mm->color_adjust)
659		mm->color_adjust(prev_node, mm->scan_color,
660				 &adj_start, &adj_end);
661
662	if (check_free_hole(adj_start, adj_end,
663			    mm->scan_size, mm->scan_alignment)) {
664		mm->scan_hit_start = hole_start;
665		mm->scan_hit_end = hole_end;
666		return true;
667	}
668
669	return false;
670}
671EXPORT_SYMBOL(drm_mm_scan_add_block);
672
673/**
674 * drm_mm_scan_remove_block - remove a node from the scan list
675 * @node: drm_mm_node to remove
676 *
677 * Nodes _must_ be removed from the scan list in exactly the reverse order in
678 * which they were added, otherwise the internal state of the memory manager
679 * will be corrupted.
680 *
681 * When the scan list is empty, the selected memory nodes can be freed. An
682 * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
683 * return the just freed block (because it's at the top of the free_stack list).
684 *
685 * Returns:
686 * True if this block should be evicted, false otherwise. Will always
687 * return false when no hole has been found.
688 */
689bool drm_mm_scan_remove_block(struct drm_mm_node *node)
690{
691	struct drm_mm *mm = node->mm;
692	struct drm_mm_node *prev_node;
693
694	mm->scanned_blocks--;
695
696	BUG_ON(!node->scanned_block);
697	node->scanned_block = 0;
698
699	prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
700			       node_list);
701
702	prev_node->hole_follows = node->scanned_preceeds_hole;
703	list_add(&node->node_list, &prev_node->node_list);
704
705	return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
706		node->start < mm->scan_hit_end);
707}
708EXPORT_SYMBOL(drm_mm_scan_remove_block);
709
710/**
711 * drm_mm_clean - checks whether an allocator is clean
712 * @mm: drm_mm allocator to check
713 *
714 * Returns:
715 * True if the allocator is completely free, false if there's still a node
716 * allocated in it.
717 */
718bool drm_mm_clean(struct drm_mm *mm)
719{
720	struct list_head *head = &mm->head_node.node_list;
721
722	return (head->next->next == head);
723}
724EXPORT_SYMBOL(drm_mm_clean);
725
726/**
727 * drm_mm_init - initialize a drm-mm allocator
728 * @mm: the drm_mm structure to initialize
729 * @start: start of the range managed by @mm
730 * @size: size of the range managed by @mm
731 *
732 * Note that @mm must be cleared to 0 before calling this function.
733 */
734void drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size)
735{
736	INIT_LIST_HEAD(&mm->hole_stack);
737	mm->scanned_blocks = 0;
738
739	/* Clever trick to avoid a special case in the free hole tracking. */
740	INIT_LIST_HEAD(&mm->head_node.node_list);
741	INIT_LIST_HEAD(&mm->head_node.hole_stack);
742	mm->head_node.hole_follows = 1;
743	mm->head_node.scanned_block = 0;
744	mm->head_node.scanned_prev_free = 0;
745	mm->head_node.scanned_next_free = 0;
746	mm->head_node.mm = mm;
747	mm->head_node.start = start + size;
748	mm->head_node.size = start - mm->head_node.start;
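	/* head_node.size wraps negative, so the hole following head_node
	 * starts at head_node.start + head_node.size == start; and since the
	 * circular node_list makes head_node its own successor, the hole ends
	 * at head_node.start == start + size, covering the whole range. */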
749	list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
750
751	mm->color_adjust = NULL;
752}
753EXPORT_SYMBOL(drm_mm_init);
754
755/**
756 * drm_mm_takedown - clean up a drm_mm allocator
757 * @mm: drm_mm allocator to clean up
758 *
759 * Note that it is a bug to call this function on an allocator which is not
760 * clean.
761 */
762void drm_mm_takedown(struct drm_mm *mm)
763{
764	WARN(!list_empty(&mm->head_node.node_list),
765	     "Memory manager not clean during takedown.\n");
766}
767EXPORT_SYMBOL(drm_mm_takedown);
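/*
 * Sketch of the allocator lifetime, assuming a 256 MiB aperture starting at
 * 0; the numbers and my_mm_* names are illustrative. Note that the drm_mm
 * must be cleared to 0 before drm_mm_init(), e.g. by living in static
 * storage or a kzalloc'ed device structure.
 */
#if 0	/* illustrative only, not compiled */
static struct drm_mm my_mm;	/* static storage is already zeroed */

static void my_mm_setup(void)
{
	drm_mm_init(&my_mm, 0, 256UL << 20);	/* manages [0, 256 MiB) */
}

static void my_mm_teardown(void)
{
	/* it is a bug to call this while nodes are still allocated */
	drm_mm_takedown(&my_mm);
}
#endif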
768
769static unsigned long drm_mm_debug_hole(struct drm_mm_node *entry,
770				       const char *prefix)
771{
772	unsigned long hole_start, hole_end, hole_size;
773
774	if (entry->hole_follows) {
775		hole_start = drm_mm_hole_node_start(entry);
776		hole_end = drm_mm_hole_node_end(entry);
777		hole_size = hole_end - hole_start;
778		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
779			prefix, hole_start, hole_end,
780			hole_size);
781		return hole_size;
782	}
783
784	return 0;
785}
786
787/**
788 * drm_mm_debug_table - dump allocator state to dmesg
789 * @mm: drm_mm allocator to dump
790 * @prefix: prefix to use for dumping to dmesg
791 */
792void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
793{
794	struct drm_mm_node *entry;
795	unsigned long total_used = 0, total_free = 0, total = 0;
796
797	total_free += drm_mm_debug_hole(&mm->head_node, prefix);
798
799	drm_mm_for_each_node(entry, mm) {
800		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
801			prefix, entry->start, entry->start + entry->size,
802			entry->size);
803		total_used += entry->size;
804		total_free += drm_mm_debug_hole(entry, prefix);
805	}
806	total = total_free + total_used;
807
808	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
809		total_used, total_free);
810}
811EXPORT_SYMBOL(drm_mm_debug_table);
812
813#if defined(CONFIG_DEBUG_FS)
814static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
815{
816	unsigned long hole_start, hole_end, hole_size;
817
818	if (entry->hole_follows) {
819		hole_start = drm_mm_hole_node_start(entry);
820		hole_end = drm_mm_hole_node_end(entry);
821		hole_size = hole_end - hole_start;
822		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
823				hole_start, hole_end, hole_size);
824		return hole_size;
825	}
826
827	return 0;
828}
829
830/**
831 * drm_mm_dump_table - dump allocator state to a seq_file
832 * @m: seq_file to dump to
833 * @mm: drm_mm allocator to dump
834 */
835int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
836{
837	struct drm_mm_node *entry;
838	unsigned long total_used = 0, total_free = 0, total = 0;
839
840	total_free += drm_mm_dump_hole(m, &mm->head_node);
841
842	drm_mm_for_each_node(entry, mm) {
843		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
844				entry->start, entry->start + entry->size,
845				entry->size);
846		total_used += entry->size;
847		total_free += drm_mm_dump_hole(m, entry);
848	}
849	total = total_free + total_used;
850
851	seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
852	return 0;
853}
854EXPORT_SYMBOL(drm_mm_dump_table);
855#endif