v3.1
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/list.h>
#include <linux/spinlock.h>	/* spinlock_t, used by struct drm_mm below */
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long start;
	unsigned long size;
	struct drm_mm *mm;
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	struct list_head unused_nodes;
	int num_unused;
	spinlock_t unused_lock;
	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_size;
	unsigned long scan_hit_start;
	unsigned scan_hit_size;
	unsigned scanned_blocks;
	unsigned long scan_start;
	unsigned long scan_end;
	struct drm_mm_node *prev_scanned_node;
};

static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
	return node->allocated;
}

static inline bool drm_mm_initialized(struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)
#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
	for (entry = (mm)->prev_scanned_node, \
		n = entry ? list_entry(entry->node_list.next, \
			struct drm_mm_node, node_list) : NULL; \
	     entry != NULL; entry = n, \
		n = entry ? list_entry(entry->node_list.next, \
			struct drm_mm_node, node_list) : NULL)
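
/*
 * Usage sketch (editor's example, not part of the original header):
 * dumping every allocated node with drm_mm_for_each_node(). The function
 * name and the printk format are illustrative only.
 */
static inline void example_dump_nodes(struct drm_mm *mm)
{
	struct drm_mm_node *entry;

	drm_mm_for_each_node(entry, mm)
		printk(KERN_DEBUG "node [0x%08lx + 0x%08lx]\n",
		       entry->start, entry->size);
}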

/*
 * Basic range manager support (drm_mm.c)
 */
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
						    unsigned long size,
						    unsigned alignment,
						    int atomic);
extern struct drm_mm_node *drm_mm_get_block_range_generic(
						struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic);
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
						   unsigned long size,
						   unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
							  unsigned long size,
							  unsigned alignment)
{
	return drm_mm_get_block_generic(parent, size, alignment, 1);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	return drm_mm_get_block_range_generic(parent, size, alignment,
						start, end, 0);
}
static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
						struct drm_mm_node *parent,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end)
{
	return drm_mm_get_block_range_generic(parent, size, alignment,
						start, end, 1);
}
extern int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
			      unsigned long size, unsigned alignment);
extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
				       struct drm_mm_node *node,
				       unsigned long size, unsigned alignment,
				       unsigned long start, unsigned long end);
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
					      unsigned long size,
					      unsigned alignment,
					      int best_match);
extern struct drm_mm_node *drm_mm_search_free_in_range(
						const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match);
extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
		       unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
extern int drm_mm_pre_get(struct drm_mm *mm);

static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
	return block->mm;
}
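
/*
 * Usage sketch (editor's example, not part of the original header): the
 * two-step allocation pattern of this API generation. drm_mm_search_free()
 * only locates a suitable hole; drm_mm_get_block() then carves a block out
 * of it. example_alloc() is a hypothetical helper with minimal error
 * handling.
 */
static inline struct drm_mm_node *example_alloc(struct drm_mm *mm,
						unsigned long size,
						unsigned alignment)
{
	struct drm_mm_node *hole;

	hole = drm_mm_search_free(mm, size, alignment, 0 /* first fit */);
	if (!hole)
		return NULL;	/* no hole large enough */

	/* Hand the block back later with drm_mm_put_block(). */
	return drm_mm_get_block(hole, size, alignment);
}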

void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
		      unsigned alignment);
void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
				 unsigned alignment,
				 unsigned long start,
				 unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);

extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

#endif
v4.10.11
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/
/*
 * Authors:
 * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */

#ifndef _DRM_MM_H_
#define _DRM_MM_H_

/*
 * Generic range manager structs
 */
#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>
#endif

enum drm_mm_search_flags {
	DRM_MM_SEARCH_DEFAULT =		0,
	DRM_MM_SEARCH_BEST =		1 << 0,
	DRM_MM_SEARCH_BELOW =		1 << 1,
};

enum drm_mm_allocator_flags {
	DRM_MM_CREATE_DEFAULT =		0,
	DRM_MM_CREATE_TOP =		1 << 0,
};

#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP

struct drm_mm_node {
	struct list_head node_list;
	struct list_head hole_stack;
	struct rb_node rb;
	unsigned hole_follows : 1;
	unsigned scanned_block : 1;
	unsigned scanned_prev_free : 1;
	unsigned scanned_next_free : 1;
	unsigned scanned_preceeds_hole : 1;
	unsigned allocated : 1;
	unsigned long color;
	u64 start;
	u64 size;
	u64 __subtree_last;
	struct drm_mm *mm;
#ifdef CONFIG_DRM_DEBUG_MM
	depot_stack_handle_t stack;
#endif
};

struct drm_mm {
	/* List of all memory nodes that immediately precede a free hole. */
	struct list_head hole_stack;
	/* head_node.node_list is the list of all memory nodes, ordered
	 * according to the (increasing) start address of the memory node. */
	struct drm_mm_node head_node;
	/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
	struct rb_root interval_tree;

	unsigned int scan_check_range : 1;
	unsigned scan_alignment;
	unsigned long scan_color;
	u64 scan_size;
	u64 scan_hit_start;
	u64 scan_hit_end;
	unsigned scanned_blocks;
	u64 scan_start;
	u64 scan_end;
	struct drm_mm_node *prev_scanned_node;

	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
			     u64 *start, u64 *end);
};

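/*
 * Usage sketch (editor's example, not part of the original header): a
 * hypothetical color_adjust callback. The allocator invokes it with the
 * node preceding a hole and lets it shrink the usable [*start, *end) range;
 * this one keeps a page-sized guard between nodes of differing colors.
 * EXAMPLE_GUARD and the policy itself are made up for illustration.
 */
#define EXAMPLE_GUARD 4096

static inline void example_color_adjust(struct drm_mm_node *node,
					unsigned long color,
					u64 *start, u64 *end)
{
	if (node->allocated && node->color != color)
		*start += EXAMPLE_GUARD;

	/* the node on the far side of the hole */
	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		*end -= EXAMPLE_GUARD;
}
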
/**
 * drm_mm_node_allocated - checks whether a node is allocated
 * @node: drm_mm_node to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @node is allocated.
 */
static inline bool drm_mm_node_allocated(struct drm_mm_node *node)
{
	return node->allocated;
}

/**
 * drm_mm_initialized - checks whether an allocator is initialized
 * @mm: drm_mm to check
 *
 * Drivers should use this helper for proper encapsulation of drm_mm
 * internals.
 *
 * Returns:
 * True if the @mm is initialized.
 */
static inline bool drm_mm_initialized(struct drm_mm *mm)
{
	return mm->hole_stack.next;
}

static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	return hole_node->start + hole_node->size;
}

/**
 * drm_mm_hole_node_start - computes the start of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * Start of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
	BUG_ON(!hole_node->hole_follows);
	return __drm_mm_hole_node_start(hole_node);
}

static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return list_next_entry(hole_node, node_list)->start;
}

/**
 * drm_mm_hole_node_end - computes the end of the hole following @node
 * @hole_node: drm_mm_node which implicitly tracks the following hole
 *
 * This is useful for driver-specific debug dumpers. Otherwise drivers should
 * not inspect holes themselves. Drivers must check first whether a hole indeed
 * follows by looking at node->hole_follows.
 *
 * Returns:
 * End of the subsequent hole.
 */
static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
	return __drm_mm_hole_node_end(hole_node);
}

/**
 * drm_mm_for_each_node - iterator to walk over all allocated nodes
 * @entry: drm_mm_node structure to assign to in each iteration step
 * @mm: drm_mm allocator to walk
 *
 * This iterator walks over all nodes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements.
 */
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
						&(mm)->head_node.node_list, \
						node_list)

#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
	     &entry->hole_stack != &(mm)->hole_stack ? \
	     hole_start = drm_mm_hole_node_start(entry), \
	     hole_end = drm_mm_hole_node_end(entry), \
	     1 : 0; \
	     entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))

/**
 * drm_mm_for_each_hole - iterator to walk over all holes
 * @entry: drm_mm_node used internally to track progress
 * @mm: drm_mm allocator to walk
 * @hole_start: u64 variable to assign the hole start to on each iteration
 * @hole_end: u64 variable to assign the hole end to on each iteration
 *
 * This iterator walks over all holes in the range allocator. It is implemented
 * with list_for_each(), so it is not safe against removal of elements. @entry
 * is used internally and will not reflect a real drm_mm_node for the very
 * first hole. Hence users of this iterator may not access it.
 *
 * Implementation Note:
 * We need to inline list_for_each_entry in order to be able to set hole_start
 * and hole_end on each iteration while keeping the macro sane.
 *
 * The __drm_mm_for_each_hole version is similar, but with added support for
 * going backwards.
 */
#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
	__drm_mm_for_each_hole(entry, mm, hole_start, hole_end, 0)

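/*
 * Usage sketch (editor's example, not part of the original header): summing
 * the free space by walking all holes. @entry is only the iterator cursor
 * and, as noted above, must not be dereferenced as a real node.
 * example_total_free() is a hypothetical helper.
 */
static inline u64 example_total_free(struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	u64 hole_start, hole_end, free = 0;

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
		free += hole_end - hole_start;

	return free;
}
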
/*
 * Basic range manager support (drm_mm.c)
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

int drm_mm_insert_node_generic(struct drm_mm *mm,
			       struct drm_mm_node *node,
			       u64 size,
			       unsigned alignment,
			       unsigned long color,
			       enum drm_mm_search_flags sflags,
			       enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_generic() with @color set
 * to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node(struct drm_mm *mm,
				     struct drm_mm_node *node,
				     u64 size,
				     unsigned alignment,
				     enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
					  DRM_MM_CREATE_DEFAULT);
}

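/*
 * Usage sketch (editor's example, not part of the original header): a node
 * embedded in a driver object, cleared to 0 as required above, then
 * inserted with a first-fit search. struct example_buffer and
 * example_insert() are hypothetical.
 */
struct example_buffer {
	struct drm_mm_node node;
	/* driver-private payload would follow */
};

static inline int example_insert(struct drm_mm *mm,
				 struct example_buffer *buf, u64 size)
{
	memset(&buf->node, 0, sizeof(buf->node));

	/* Bottom-up, first-fit. Passing DRM_MM_TOPDOWN to
	 * drm_mm_insert_node_generic() would allocate from the top instead. */
	return drm_mm_insert_node(mm, &buf->node, size, 0,
				  DRM_MM_SEARCH_DEFAULT);
}
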
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
					struct drm_mm_node *node,
					u64 size,
					unsigned alignment,
					unsigned long color,
					u64 start,
					u64 end,
					enum drm_mm_search_flags sflags,
					enum drm_mm_allocator_flags aflags);
/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @start: start of the allowed range for this node
 * @end: end of the allowed range for this node
 * @flags: flags to fine-tune the allocation
 *
 * This is a simplified version of drm_mm_insert_node_in_range_generic() with
 * @color set to 0.
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
					      struct drm_mm_node *node,
					      u64 size,
					      unsigned alignment,
					      u64 start,
					      u64 end,
					      enum drm_mm_search_flags flags)
{
	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
						   0, start, end, flags,
						   DRM_MM_CREATE_DEFAULT);
}

void drm_mm_remove_node(struct drm_mm_node *node);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
void drm_mm_init(struct drm_mm *mm,
		 u64 start,
		 u64 size);
void drm_mm_takedown(struct drm_mm *mm);
bool drm_mm_clean(struct drm_mm *mm);

struct drm_mm_node *
__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last);

/**
 * drm_mm_for_each_node_in_range - iterator to walk over a range of
 * allocated nodes
 * @node__: drm_mm_node structure to assign to in each iteration step
 * @mm__: drm_mm allocator to walk
 * @start__: starting offset, the first node will overlap this
 * @end__: ending offset, the last node will start before this (but may overlap)
 *
 * This iterator walks over all nodes in the range allocator that lie
 * between @start and @end. It is implemented similarly to list_for_each(),
 * but using the internal interval tree to accelerate the search for the
 * starting node, and so not safe against removal of elements. It assumes
 * that @end is within (or is the upper limit of) the drm_mm allocator.
 */
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
	     node__ && node__->start < (end__);				\
	     node__ = list_next_entry(node__, node_list))

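/*
 * Usage sketch (editor's example, not part of the original header): counting
 * the nodes that overlap a given range via the interval-tree accelerated
 * iterator above. example_count_in_range() is a hypothetical helper.
 */
static inline unsigned int example_count_in_range(struct drm_mm *mm,
						  u64 start, u64 end)
{
	struct drm_mm_node *node;
	unsigned int count = 0;

	drm_mm_for_each_node_in_range(node, mm, start, end)
		count++;

	return count;
}
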
void drm_mm_init_scan(struct drm_mm *mm,
		      u64 size,
		      unsigned alignment,
		      unsigned long color);
void drm_mm_init_scan_with_range(struct drm_mm *mm,
				 u64 size,
				 unsigned alignment,
				 unsigned long color,
				 u64 start,
				 u64 end);
bool drm_mm_scan_add_block(struct drm_mm_node *node);
bool drm_mm_scan_remove_block(struct drm_mm_node *node);

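/*
 * Usage sketch (editor's example, not part of the original header): the
 * eviction-scan protocol. Candidate blocks are speculatively added until
 * the scanner reports that a suitable hole can be formed; every scanned
 * block must then be removed again, in reverse order of addition, and only
 * the blocks for which drm_mm_scan_remove_block() returns true actually
 * need to be evicted. struct example_scan_buffer and the list handling are
 * hypothetical.
 */
struct example_scan_buffer {
	struct drm_mm_node node;
	struct list_head link;
	bool evict;
};

static inline bool example_scan(struct drm_mm *mm, struct list_head *buffers,
				u64 size, unsigned alignment)
{
	struct example_scan_buffer *buf;
	bool found = false;

	drm_mm_init_scan(mm, size, alignment, 0 /* color */);

	list_for_each_entry(buf, buffers, link) {
		if (drm_mm_scan_add_block(&buf->node)) {
			found = true;
			break;
		}
	}

	/* Walking the list backwards undoes the additions in reverse. */
	list_for_each_entry_reverse(buf, buffers, link) {
		if (!buf->node.scanned_block)
			continue;
		buf->evict = drm_mm_scan_remove_block(&buf->node);
	}

	return found;
}
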
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
#ifdef CONFIG_DEBUG_FS
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
#endif

#endif