/**************************************************************************
 *
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	rwlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	rwlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	write_lock(&tdev->object_lock);
	kref_init(&base->refcount);
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					&base->hash,
					(unsigned long)base, 31, 0, 0);
	write_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	write_lock(&tdev->object_lock);
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	write_unlock(&tdev->object_lock);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
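
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * embeds a struct ttm_base_object in its own object and provides a
 * release callback for when the last reference is gone. The identifiers
 * my_obj and my_obj_release() are hypothetical.
 *
 *	struct my_obj {
 *		struct ttm_base_object base;
 *	};
 *
 *	static void my_obj_release(struct ttm_base_object **p_base)
 *	{
 *		struct my_obj *obj =
 *			container_of(*p_base, struct my_obj, base);
 *
 *		*p_base = NULL;
 *		kfree(obj);
 *	}
 *
 *	ret = ttm_base_object_init(tfile, &obj->base, true,
 *				   ttm_driver_type0, &my_obj_release, NULL);
 *
 * On success the remaining reference is held by the TTM_REF_USAGE ref
 * object of @tfile, so the caller must not unreference the object; its
 * user-visible handle is obj->base.hash.key.
 */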

/*
 * Called with tdev->object_lock held and the last reference gone. The
 * lock is dropped around the driver's release callback and reacquired
 * before returning to the kref_put() in ttm_base_object_unref().
 */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	write_unlock(&tdev->object_lock);
	if (base->refcount_release) {
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	write_lock(&tdev->object_lock);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	write_lock(&tdev->object_lock);
	kref_put(&base->refcount, ttm_release_base);
	write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	read_lock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		kref_get(&base->refcount);
	}
	read_unlock(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	if (tfile != base->tfile && !base->shareable) {
		printk(KERN_ERR TTM_PFX
		       "Attempted access of non-shareable object.\n");
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
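
/*
 * Lookup sketch (illustrative, not part of the original file): an ioctl
 * handler resolves a user-space handle to a base object and drops the
 * acquired reference when done:
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_lookup(tfile, handle);
 *	if (unlikely(base == NULL))
 *		return -EINVAL;
 *
 *	(use the object, optionally checking base->object_type)
 *
 *	ttm_base_object_unref(&base);
 */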

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		read_lock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			kref_get(&ref->kref);
			read_unlock(&tfile->lock);
			break;
		}

		read_unlock(&tfile->lock);
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		write_lock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			write_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		write_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
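
/*
 * Reference sketch (illustrative, not part of the original file): when
 * user-space asks to open the same object twice, the second call finds
 * the existing ref object and only raises its refcount:
 *
 *	bool existed;
 *	int ret;
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
 *
 * If @existed comes back true, the handle was already open and no new
 * ref object was created. Each successful call must eventually be
 * balanced by ttm_ref_object_base_unref(tfile, base->hash.key,
 * TTM_REF_USAGE).
 */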

static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	write_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		write_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	write_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	rwlock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
						 *mem_glob,
						 unsigned int hash_order)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	rwlock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);

	if (likely(ret == 0))
		return tdev;

	kfree(tdev);
	return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);
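
/*
 * Setup sketch (illustrative, not part of the original file): a driver
 * creates one ttm_object_device at load time and one ttm_object_file
 * per opened file. The hash order 10 and the names glob, tdev and
 * tfile are arbitrary.
 *
 *	struct ttm_object_device *tdev;
 *	struct ttm_object_file *tfile;
 *
 *	tdev = ttm_object_device_init(glob, 10);
 *	...
 *	tfile = ttm_object_file_init(tdev, 10);	(per open file)
 *	...
 *	ttm_object_file_release(&tfile);	(on file close)
 *	ttm_object_device_release(&tdev);	(at unload)
 */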

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	write_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	write_unlock(&tdev->object_lock);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * @mem_glob: The memory accounting context used for ref objects and
 * exported dma_bufs.
 *
 * @ops: Copy of the driver's dma_buf ops with the release callback
 * interposed.
 *
 * @dmabuf_release: The driver's original dma_buf release callback.
 *
 * @dma_buf_size: Accounted size of an exported dma_buf and its file.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	size_t dma_buf_size;
};

/**
 * struct ttm_ref_object
 *
 * @rcu_head: Needed to free the ref object after an RCU grace period.
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	spin_lock(&tdev->object_lock);
	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
					    &base->hash,
					    (unsigned long)base, 31, 0, 0);
	spin_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}
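
/*
 * Release sketch (illustrative, not part of the original file): because
 * lookups run under rcu_read_lock(), a driver's refcount_release must
 * defer the actual free past an RCU grace period, for example with the
 * ttm_base_object_kfree() helper named in the comment above. The type
 * struct my_obj is hypothetical.
 *
 *	static void my_obj_release(struct ttm_base_object **p_base)
 *	{
 *		struct my_obj *obj =
 *			container_of(*p_base, struct my_obj, base);
 *
 *		*p_base = NULL;
 *		ttm_base_object_kfree(obj, base);
 *	}
 */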

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tdev->object_hash;
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);

/**
 * ttm_ref_object_exists - Check whether a caller has a valid ref object
 * on (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
			   struct ttm_base_object *base)
{
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	struct drm_hash_item *hash;
	struct ttm_ref_object *ref;

	rcu_read_lock();
	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
		goto out_false;

	/*
	 * Verify that the ref object is really pointing to our base object.
	 * Our base object could actually be dead, and the ref object pointing
	 * to another base object with the same handle.
	 */
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	if (unlikely(base != ref->obj))
		goto out_false;

	/*
	 * Verify that the ref->obj pointer was actually valid!
	 */
	rmb();
	if (unlikely(atomic_read(&ref->kref.refcount) == 0))
		goto out_false;

	rcu_read_unlock();
	return true;

 out_false:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(ttm_ref_object_exists);
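
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that has looked up a base object by some means other than the
 * caller's own handle can use this as a permission check:
 *
 *	if (!ttm_ref_object_exists(tfile, base))
 *		return -EPERM;
 */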

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed,
		       bool require_existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
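
/*
 * Sketch of the @require_existed semantics (illustrative, not part of
 * the original file): a caller that must not implicitly open an object
 * on user-space's behalf passes true:
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, true);
 *
 * This fails with -EPERM unless @tfile already holds a ref object of
 * that type, while object-creation paths such as ttm_base_object_init()
 * pass false to set up the initial TTM_REF_USAGE reference.
 */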

static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	spin_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	/*
	 * Interpose our own release callback in a copy of the driver's
	 * dma_buf ops, saving the original so ttm_prime_dmabuf_release()
	 * can still call it.
	 */
	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	spin_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	spin_unlock(&tdev->object_lock);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma_buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		dma_buf_put(dma_buf);
		return -ENOSYS;
	}

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->hash.key;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
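
/*
 * Import sketch (illustrative, not part of the original file): a
 * driver's prime fd-to-handle hook reduces to a thin wrapper around
 * this function. struct drm_prime_handle is the standard DRM ioctl
 * argument; obtaining the tfile is driver-specific.
 *
 *	struct drm_prime_handle *args = data;
 *
 *	return ttm_prime_fd_to_handle(tfile, args->fd, &args->handle);
 */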

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   false, true);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
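
/*
 * Export sketch (illustrative, not part of the original file): the
 * matching handle-to-fd hook forwards the user-supplied flags, which
 * end up in dma_buf_fd() and the exported dma_buf:
 *
 *	struct drm_prime_handle *args = data;
 *
 *	return ttm_prime_handle_to_fd(tfile, args->handle, args->flags,
 *				      &args->fd);
 */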

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime, bool shareable,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **),
			  void (*ref_obj_release) (struct ttm_base_object *,
						   enum ttm_ref_type ref_type))
{
	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release,
				    ref_obj_release);
}
EXPORT_SYMBOL(ttm_prime_object_init);
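
/*
 * Init sketch (illustrative, not part of the original file): a driver
 * that wants its objects to be exportable embeds a struct
 * ttm_prime_object instead of a bare base object. struct my_buf and
 * my_buf_release() are hypothetical.
 *
 *	struct my_buf {
 *		struct ttm_prime_object prime;
 *	};
 *
 *	ret = ttm_prime_object_init(tfile, size, &buf->prime, true,
 *				    ttm_buffer_type, &my_buf_release, NULL);
 *
 * The object's user-visible handle is then buf->prime.base.hash.key,
 * and a base object looked up with that handle converts back with
 * container_of(base, struct my_buf, prime.base).
 */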