v4.6
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, sizes them appropriately and avoids
 * confusing mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given
 * offset, please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */

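/*
 * A minimal sketch of the page/byte convention described above (hypothetical
 * driver code, not part of this file): "my_obj" embeds an offset node, and
 * only drm_vma_node_offset_addr() converts the page-based offset to bytes.
 */
struct my_obj {
	struct kref ref;
	struct drm_vma_offset_node vma_node;
	unsigned long num_pages;
};

static unsigned long my_obj_page_offset(struct my_obj *obj)
{
	return drm_vma_node_start(&obj->vma_node);		/* in pages */
}

static u64 my_obj_mmap_offset(struct my_obj *obj)
{
	return drm_vma_node_offset_addr(&obj->vma_node);	/* in bytes */
}
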
/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are
 * left to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to stay referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

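/*
 * A hedged usage sketch: embed the manager in a driver-private device struct
 * ("my_dev" is an assumption) and initialize it over the file-offset range
 * reserved for mmap. The DRM_FILE_PAGE_OFFSET_* constants from drmP.h are
 * used for illustration, mirroring what drm_gem_init() does.
 */
struct my_dev {
	struct drm_vma_offset_manager vma_manager;
};

static void my_dev_init_mmap_range(struct my_dev *dev)
{
	drm_vma_offset_manager_init(&dev->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
}
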
/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Before the lookup, the vma offset manager lookup lock must be acquired with
 * drm_vma_offset_lock_lookup(). This can then be used to implement weakly
 * referenced lookups using kref_get_unless_zero().
 *
 * Example:
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

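/*
 * A compilable version of the kref example above, assuming the hypothetical
 * "my_obj" from the earlier sketch: take a weak-to-strong reference only
 * while holding the lookup lock, so a dying object is never resurrected.
 */
static struct my_obj *my_obj_lookup(struct drm_vma_offset_manager *mgr,
				    unsigned long start, unsigned long pages)
{
	struct drm_vma_offset_node *node;
	struct my_obj *obj = NULL;

	drm_vma_offset_lock_lookup(mgr);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	if (node) {
		obj = container_of(node, struct my_obj, vma_node);
		if (!kref_get_unless_zero(&obj->ref))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(mgr);

	return obj;
}
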
/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove(); no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

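/*
 * Sketch (hypothetical my_dev/my_obj from the sketches above): allocate the
 * user-visible offset lazily, e.g. when user-space first asks for one.
 */
static int my_obj_create_mmap_offset(struct my_dev *dev, struct my_obj *obj)
{
	/* idempotent: returns 0 if an offset is already allocated */
	return drm_vma_offset_add(&dev->vma_manager, &obj->vma_node,
				  obj->num_pages);
}
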
/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

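/*
 * Sketch: the matching teardown in the object's free path. Calling this on a
 * node that never had an offset is harmless.
 */
static void my_obj_free_mmap_offset(struct my_dev *dev, struct my_obj *obj)
{
	drm_vma_offset_remove(&dev->vma_manager, &obj->vma_node);
}
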
/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call drm_vma_node_allow() on a
 * node that is currently not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out of memory)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

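/*
 * Sketch: grant access when an open-file acquires a handle to the object.
 * Each successful call must eventually be paired with one
 * drm_vma_node_revoke().
 */
static int my_obj_handle_open(struct my_obj *obj, struct file *filp)
{
	return drm_vma_node_allow(&obj->vma_node, filp);	/* may be -ENOMEM */
}
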
/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

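/*
 * Sketch: the matching teardown for the handle-open sketch above. Note that
 * revoking access does not tear down existing mappings; that is left to the
 * caller if required.
 */
static void my_obj_handle_close(struct my_obj *obj, struct file *filp)
{
	drm_vma_node_revoke(&obj->vma_node, filp);
}
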
/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node to check whether @filp is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
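
/*
 * Sketch: a typical consumer in a driver's mmap() path, rejecting open-files
 * that were never granted access via drm_vma_node_allow().
 */
static int my_obj_mmap(struct my_obj *obj, struct file *filp,
		       struct vm_area_struct *vma)
{
	if (!drm_vma_node_is_allowed(&obj->vma_node, filp))
		return -EACCES;

	/* ... set up vma->vm_ops and map the object's pages here ... */
	return 0;
}
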
v3.15
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied@linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, sizes them appropriately and avoids
 * confusing mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is highly
 * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
 * speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear. Please use VM_NONLINEAR in that case and
 * implement your own offset managers.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is given
 * in number of pages, not number of bytes. That means, object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given
 * offset, please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are
 * left to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to stay referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	mgr->vm_addr_space_rb = RB_ROOT;
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	/* take the lock to protect against buggy drivers */
	write_lock(&mgr->vm_lock);
	drm_mm_takedown(&mgr->vm_addr_space_mm);
	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
						  unsigned long start,
						  unsigned long pages)
{
	struct drm_vma_offset_node *node;

	read_lock(&mgr->vm_lock);
	node = drm_vma_offset_lookup_locked(mgr, start, pages);
	read_unlock(&mgr->vm_lock);

	return node;
}
EXPORT_SYMBOL(drm_vma_offset_lookup);

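/*
 * Sketch (hypothetical helper): the wrapper above takes the read-lock itself,
 * so a one-off query needs no external locking, but the returned node may be
 * destroyed at any time unless the caller holds its own reference.
 */
static bool my_offset_in_use(struct drm_vma_offset_manager *mgr,
			     unsigned long start, unsigned long pages)
{
	return drm_vma_offset_lookup(mgr, start, pages) != NULL;
}
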
/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Same as drm_vma_offset_lookup() but requires the caller to lock offset
 * lookup manually. See drm_vma_offset_lock_lookup() for an example.
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_vma_offset_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_rb.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
		offset = node->vm_node.start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->vm_node.start + best->vm_node.size;
		if (offset < start + pages)
			best = NULL;
	}

	return best;
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);

/* internal helper to link @node into the rb-tree */
static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
				   struct drm_vma_offset_node *node)
{
	struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_node *iter_node;

	while (likely(*iter)) {
		parent = *iter;
		iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);

		if (node->vm_node.start < iter_node->vm_node.start)
			iter = &(*iter)->rb_left;
		else if (node->vm_node.start > iter_node->vm_node.start)
			iter = &(*iter)->rb_right;
		else
			BUG();
	}

	rb_link_node(&node->vm_rb, parent, iter);
	rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
}

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove(); no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret;

	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		ret = 0;
		goto out_unlock;
	}

	ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
				 pages, 0, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto out_unlock;

	_drm_vma_offset_add_rb(mgr, node);

out_unlock:
	write_unlock(&mgr->vm_lock);
	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @filp: Open file to add
 *
 * Add @filp to the list of allowed open-files for this node. If @filp is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call drm_vma_node_allow() on a
 * node that is currently not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out of memory)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used. */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (filp == entry->vm_filp) {
			entry->vm_count++;
			goto unlock;
		} else if (filp > entry->vm_filp) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_filp = filp;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @filp: Open file to remove
 *
 * Decrement the ref-count of @filp in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @filp from the list. You must call
 * this once for every drm_vma_node_allow() on @filp.
 *
 * This is locked against concurrent access internally.
 *
 * If @filp is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (filp > entry->vm_filp) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @filp: Open-file to check for
 *
 * Search the list in @node to check whether @filp is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @filp is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct file *filp)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (filp == entry->vm_filp)
			break;
		else if (filp > entry->vm_filp)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);