/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2009-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 * Dave Airlie <airlied@redhat.com>
 * Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


#define pr_fmt(fmt) "[TTM] " fmt

#include "ttm_object.h"
#include "vmwgfx_drv.h"

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/hashtable.h>

MODULE_IMPORT_NS("DMA_BUF");

#define VMW_TTM_OBJECT_REF_HT_ORDER 10

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash table.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash table of ref objects for fast lookup of
 * a ref object given a base object handle.
 *
 * @refcount: reference/usage count
 */
struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	DECLARE_HASHTABLE(ref_hash, VMW_TTM_OBJECT_REF_HT_ORDER);
	struct kref refcount;
};

/*
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the idr.
 *
 * @ops: dma_buf ops used for the dma-bufs exported from this device.
 *
 * @dmabuf_release: The driver-provided dma_buf release callback, wrapped
 * by ttm_prime_dmabuf_release().
 *
 * @idr: idr translating handles to base objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	struct idr idr;
};

/*
 * struct ttm_ref_object
 *
 * @rcu_head: RCU head used when freeing the ref object.
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @tfile: The ttm_object_file holding this reference.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with the handle of the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct vmwgfx_hash_item hash;
	struct list_head head;
	struct kref kref;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static int ttm_tfile_find_ref_rcu(struct ttm_object_file *tfile,
				  uint64_t key,
				  struct vmwgfx_hash_item **p_hash)
{
	struct vmwgfx_hash_item *hash;

	hash_for_each_possible_rcu(tfile->ref_hash, hash, head, key) {
		if (hash->key == key) {
			*p_hash = hash;
			return 0;
		}
	}
	return -EINVAL;
}

static int ttm_tfile_find_ref(struct ttm_object_file *tfile,
			      uint64_t key,
			      struct vmwgfx_hash_item **p_hash)
{
	struct vmwgfx_hash_item *hash;

	hash_for_each_possible(tfile->ref_hash, hash, head, key) {
		if (hash->key == key) {
			*p_hash = hash;
			return 0;
		}
	}
	return -EINVAL;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	idr_preload(GFP_KERNEL);
	spin_lock(&tdev->object_lock);
	ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
	spin_unlock(&tdev->object_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;

	base->handle = ret;
	ret = ttm_ref_object_add(tfile, base, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);
	return ret;
}
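
/*
 * A minimal usage sketch, not part of the driver: callers typically embed a
 * struct ttm_base_object in their own object, initialize it to obtain a
 * handle, and free the object from the refcount_release callback.
 * "struct my_obj" and my_obj_release() are hypothetical names used only for
 * illustration; "type" stands for whatever enum ttm_object_type value the
 * driver assigns to this object class.
 *
 *	struct my_obj {
 *		struct ttm_base_object base;
 *		// driver payload
 *	};
 *
 *	static void my_obj_release(struct ttm_base_object **p_base)
 *	{
 *		struct my_obj *obj =
 *			container_of(*p_base, struct my_obj, base);
 *
 *		*p_base = NULL;
 *		ttm_base_object_kfree(obj, base);
 *	}
 *
 *	ret = ttm_base_object_init(tfile, &obj->base, false, type,
 *				   &my_obj_release);
 *	if (ret == 0)
 *		handle = obj->base.handle;
 */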

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	idr_remove(&tdev->idr, base->handle);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint64_t key)
{
	struct ttm_base_object *base = NULL;
	struct vmwgfx_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = ttm_tfile_find_ref(tfile, key, &hash);

	if (likely(ret == 0)) {
		base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	spin_unlock(&tfile->lock);

	return base;
}
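
/*
 * Lookup sketch, illustration only, reusing the hypothetical struct my_obj
 * from the sketch above: an ioctl handler resolves a user-space handle into
 * the driver object and drops the lookup reference when it is done with it.
 *
 *	struct ttm_base_object *base;
 *	struct my_obj *obj;
 *
 *	base = ttm_base_object_lookup(tfile, handle);
 *	if (unlikely(base == NULL))
 *		return -EINVAL;
 *
 *	obj = container_of(base, struct my_obj, base);
 *	// ... use obj ...
 *	ttm_base_object_unref(&base);
 */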

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint64_t key)
{
	struct ttm_base_object *base;

	rcu_read_lock();
	base = idr_find(&tdev->idr, key);

	if (base && !kref_get_unless_zero(&base->refcount))
		base = NULL;
	rcu_read_unlock();

	return base;
}

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       bool *existed,
		       bool require_existed)
{
	struct ttm_ref_object *ref;
	struct vmwgfx_hash_item *hash;
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = ttm_tfile_find_ref_rcu(tfile, base->handle, &hash);

		if (ret == 0) {
			ref = hlist_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL))
			return -ENOMEM;

		ref->hash.key = base->handle;
		ref->obj = base;
		ref->tfile = tfile;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		hash_add_rcu(tfile->ref_hash, &ref->hash.head, ref->hash.key);
		ret = 0;

		list_add_tail(&ref->head, &tfile->ref_list);
		kref_get(&base->refcount);
		spin_unlock(&tfile->lock);
		if (existed != NULL)
			*existed = false;
	}

	return ret;
}
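
/*
 * Semantics sketch, illustration only: a file may reference the same base
 * object many times, but only the first call creates a ref object; later
 * calls just bump its refcount. With require_existed == true the call never
 * creates a new reference, so it doubles as a check that the file already
 * holds one.
 *
 *	bool existed;
 *
 *	ret = ttm_ref_object_add(tfile, base, &existed, false);
 *	if (ret == 0 && !existed) {
 *		// First reference from this file; a new ref object was
 *		// created and ttm_ref_object_base_unref() will drop it.
 *	}
 */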

static void __releases(tfile->lock) __acquires(tfile->lock)
ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_object_file *tfile = ref->tfile;

	hash_del_rcu(&ref->hash.head);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	ttm_base_object_unref(&ref->obj);
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key)
{
	struct ttm_ref_object *ref;
	struct vmwgfx_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = ttm_tfile_find_ref(tfile, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = hlist_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);

	ttm_object_file_unref(&tfile);
}

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	hash_init(tfile->ref_hash);

	return tfile;
}

struct ttm_object_device *
ttm_object_device_init(const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);

	if (unlikely(tdev == NULL))
		return NULL;

	spin_lock_init(&tdev->object_lock);

	/*
	 * Our base is at VMWGFX_NUM_MOB + 1 because we want to create
	 * a separate namespace for GEM handles (which are
	 * 1..VMWGFX_NUM_MOB) and the surface handles. Some ioctls
	 * can take either handle as an argument so we want to
	 * easily be able to tell whether the handle refers to a
	 * GEM buffer or a surface.
	 */
	idr_init_base(&tdev->idr, VMWGFX_NUM_MOB + 1);
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	return tdev;
}

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	WARN_ON_ONCE(!idr_is_empty(&tdev->idr));
	idr_destroy(&tdev->idr);

	kfree(tdev);
}

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma_buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return file_ref_get(&dmabuf->file->f_ref);
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		/* Not exported by us: drop the reference taken by dma_buf_get(). */
		dma_buf_put(dma_buf);
		return -ENOSYS;
	}

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->handle;
	ret = ttm_ref_object_add(tfile, base, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf
		 */

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else {
		dma_buf_put(dma_buf);
	}

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
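
/*
 * Export sketch, illustration only: a prime handle_to_fd style ioctl
 * implementation simply forwards to this helper. The dma_buf is created
 * lazily on the first export and reused, via get_dma_buf_unless_doomed(),
 * for as long as it stays alive. DRM_CLOEXEC and DRM_RDWR are the usual
 * flags passed through from user space.
 *
 *	int fd;
 *
 *	ret = ttm_prime_handle_to_fd(tfile, args->handle,
 *				     DRM_CLOEXEC | DRM_RDWR, &fd);
 *	if (ret == 0)
 *		args->fd = fd;
 */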

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **))
{
	bool shareable = !!(type == VMW_RES_SURFACE);

	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release);
}
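
/*
 * Prime object sketch, illustration only: a shareable driver object embeds a
 * struct ttm_prime_object instead of a bare base object so it can later be
 * exported as a dma-buf. "struct my_surface" and my_surface_release() are
 * hypothetical names.
 *
 *	struct my_surface {
 *		struct ttm_prime_object prime;
 *		// driver payload
 *	};
 *
 *	ret = ttm_prime_object_init(tfile, size, &surf->prime,
 *				    VMW_RES_SURFACE, &my_surface_release);
 *
 * The handle then lives in surf->prime.base.handle, and
 * ttm_prime_handle_to_fd() / ttm_prime_fd_to_handle() operate on it.
 */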