/**************************************************************************
 *
 * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * While no substantial code is shared, the prime code is inspired by
 * drm_prime.c, with
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 *      Rob Clark <rob.clark@linaro.org>
 */
/** @file ttm_ref_object.c
 *
 * Base- and reference object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */


/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/atomic.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	spinlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	spinlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
	struct dma_buf_ops ops;
	void (*dmabuf_release)(struct dma_buf *dma_buf);
	size_t dma_buf_size;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct rcu_head rcu_head;
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	enum ttm_ref_type ref_type;
	struct ttm_base_object *obj;
	struct ttm_object_file *tfile;
};

static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}


static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}


int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	kref_init(&base->refcount);
	spin_lock(&tdev->object_lock);
	ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
					    &base->hash,
					    (unsigned long)base, 31, 0, 0);
	spin_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
	if (unlikely(ret != 0))
		goto out_err1;

	ttm_base_object_unref(&base);

	return 0;
out_err1:
	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
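
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * embeds struct ttm_base_object in its own per-object structure and calls
 * ttm_base_object_init() on it. The structure, the release handler and
 * the choice of ttm_driver_type0 as object type are assumptions made for
 * this example only.
 *
 *	struct my_user_object {
 *		struct my_resource res;
 *		struct ttm_base_object base;
 *		struct rcu_head rcu;
 *	};
 *
 *	static int my_user_object_init(struct ttm_object_file *tfile,
 *				       struct my_user_object *uobj)
 *	{
 *		return ttm_base_object_init(tfile, &uobj->base, true,
 *					    ttm_driver_type0,
 *					    &my_user_object_release, NULL);
 *	}
 *
 * On success, the handle handed back to user-space is uobj->base.hash.key,
 * and the call has already attached a TTM_REF_USAGE reference for @tfile.
 */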

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	spin_lock(&tdev->object_lock);
	(void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
	spin_unlock(&tdev->object_lock);

	/*
	 * Note: We don't use synchronize_rcu() here because it's far
	 * too slow. It's up to the user to free the object using
	 * call_rcu() or ttm_base_object_kfree().
	 */

	ttm_object_file_unref(&base->tfile);
	if (base->refcount_release)
		base->refcount_release(&base);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;

	*p_base = NULL;

	kref_put(&base->refcount, ttm_release_base);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
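
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for resolving a user-space handle in an ioctl. A successful lookup
 * returns a referenced base object, so it must always be balanced by
 * ttm_base_object_unref(). The object-type check and error codes are
 * assumptions for the example.
 *
 *	struct ttm_base_object *base;
 *
 *	base = ttm_base_object_lookup(tfile, arg->handle);
 *	if (unlikely(base == NULL))
 *		return -ESRCH;
 *
 *	if (unlikely(base->object_type != ttm_driver_type0)) {
 *		ttm_base_object_unref(&base);
 *		return -EINVAL;
 *	}
 *
 *	uobj = container_of(base, struct my_user_object, base);
 *	... operate on uobj ...
 *	ttm_base_object_unref(&base);
 */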

struct ttm_base_object *
ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
{
	struct ttm_base_object *base = NULL;
	struct drm_hash_item *hash;
	struct drm_open_hash *ht = &tdev->object_hash;
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		if (!kref_get_unless_zero(&base->refcount))
			base = NULL;
	}
	rcu_read_unlock();

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);

/**
 * ttm_ref_object_exists - Check whether a caller holds a valid ref object
 * on (has opened) a base object.
 *
 * @tfile: Pointer to a struct ttm_object_file identifying the caller.
 * @base: Pointer to a struct base object.
 *
 * Checks whether the caller identified by @tfile has put a valid USAGE
 * reference object on the base object identified by @base.
 */
bool ttm_ref_object_exists(struct ttm_object_file *tfile,
			   struct ttm_base_object *base)
{
	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
	struct drm_hash_item *hash;
	struct ttm_ref_object *ref;

	rcu_read_lock();
	if (unlikely(drm_ht_find_item_rcu(ht, base->hash.key, &hash) != 0))
		goto out_false;

	/*
	 * Verify that the ref object is really pointing to our base object.
	 * Our base object could actually be dead, and the ref object pointing
	 * to another base object with the same handle.
	 */
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	if (unlikely(base != ref->obj))
		goto out_false;

	/*
	 * Verify that the ref->obj pointer was actually valid!
	 */
	rmb();
	if (unlikely(kref_read(&ref->kref) == 0))
		goto out_false;

	rcu_read_unlock();
	return true;

 out_false:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(ttm_ref_object_exists);

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed,
		       bool require_existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret = -EINVAL;

	if (base->tfile != tfile && !base->shareable)
		return -EPERM;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		rcu_read_lock();
		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			if (kref_get_unless_zero(&ref->kref)) {
				rcu_read_unlock();
				break;
			}
		}

		rcu_read_unlock();
		if (require_existed)
			return -EPERM;

		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   &ctx);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		spin_lock(&tfile->lock);
		ret = drm_ht_insert_item_rcu(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			spin_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		spin_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
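
/*
 * Illustrative sketch (not part of the original file): an "open by name"
 * style ioctl would look the object up in the device-wide hash and then
 * attach a USAGE reference for the calling file. Here tdev is the
 * driver's struct ttm_object_device and the other non-ttm names are
 * assumptions for the example.
 *
 *	struct ttm_base_object *base;
 *	bool existed;
 *	int ret;
 *
 *	base = ttm_base_object_lookup_for_ref(tdev, arg->name);
 *	if (unlikely(base == NULL))
 *		return -ESRCH;
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed, false);
 *	ttm_base_object_unref(&base);
 *
 * The reference is dropped again with ttm_ref_object_base_unref(tfile,
 * handle, TTM_REF_USAGE); anything still referenced at file close is
 * cleaned up by ttm_object_file_release().
 */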

static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
	list_del(&ref->head);
	spin_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree_rcu(ref, rcu_head);
	spin_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	spin_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		spin_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	spin_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	spin_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	spin_unlock(&tfile->lock);
	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	spin_lock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);

struct ttm_object_device *
ttm_object_device_init(struct ttm_mem_global *mem_glob,
		       unsigned int hash_order,
		       const struct dma_buf_ops *ops)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	spin_lock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);
	if (ret != 0)
		goto out_no_object_hash;

	tdev->ops = *ops;
	tdev->dmabuf_release = tdev->ops.release;
	tdev->ops.release = ttm_prime_dmabuf_release;
	tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
		ttm_round_pot(sizeof(struct file));
	return tdev;

out_no_object_hash:
	kfree(tdev);
	return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);
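
/*
 * Illustrative sketch (not part of the original file): a driver normally
 * creates one ttm_object_device at load time and one ttm_object_file per
 * open file, releasing them again in the corresponding teardown paths.
 * The hash orders, mem_glob pointer and my_prime_dmabuf_ops table are
 * assumptions for the example only.
 *
 *	At driver load:
 *		dev_priv->tdev = ttm_object_device_init(mem_glob, 12,
 *							&my_prime_dmabuf_ops);
 *
 *	In the drm open hook:
 *		file_priv->tfile = ttm_object_file_init(dev_priv->tdev, 10);
 *
 *	In the corresponding release hooks:
 *		ttm_object_file_release(&file_priv->tfile);
 *		ttm_object_device_release(&dev_priv->tdev);
 */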

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	drm_ht_remove(&tdev->object_hash);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);

/**
 * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
 *
 * @dmabuf: Non-refcounted pointer to a struct dma-buf.
 *
 * Obtain a file reference from a lookup structure that doesn't refcount
 * the file, but synchronizes with its release method to make sure it has
 * not been freed yet. See for example kref_get_unless_zero documentation.
 * Returns true if refcounting succeeds, false otherwise.
 *
 * Nobody really wants this as a public API yet, so let it mature here
 * for some time...
 */
static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
{
	return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
}

/**
 * ttm_prime_refcount_release - refcount release method for a prime object.
 *
 * @p_base: Pointer to ttm_base_object pointer.
 *
 * This is a wrapper that calls the refcount_release function of the
 * underlying object. At the same time it cleans up the prime object.
 * This function is called when all references to the base object we
 * derive from are gone.
 */
static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_prime_object *prime;

	*p_base = NULL;
	prime = container_of(base, struct ttm_prime_object, base);
	BUG_ON(prime->dma_buf != NULL);
	mutex_destroy(&prime->mutex);
	if (prime->refcount_release)
		prime->refcount_release(&base);
}

/**
 * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
 *
 * @dma_buf: The struct dma_buf being released.
 *
 * This function first calls the dma_buf release method the driver
 * provides. Then it cleans up our dma_buf pointer used for lookup,
 * and finally releases the reference the dma_buf has on our base
 * object.
 */
static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
{
	struct ttm_prime_object *prime =
		(struct ttm_prime_object *) dma_buf->priv;
	struct ttm_base_object *base = &prime->base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	if (tdev->dmabuf_release)
		tdev->dmabuf_release(dma_buf);
	mutex_lock(&prime->mutex);
	if (prime->dma_buf == dma_buf)
		prime->dma_buf = NULL;
	mutex_unlock(&prime->mutex);
	ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
	ttm_base_object_unref(&base);
}

/**
 * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @fd: The prime / dmabuf fd.
 * @handle: The returned handle.
 *
 * This function returns a handle to an object that previously exported
 * a dma-buf. Note that we don't handle imports yet, because we simply
 * have no consumers of that implementation.
 */
int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
			   int fd, u32 *handle)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	struct ttm_base_object *base;
	int ret;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &tdev->ops) {
		dma_buf_put(dma_buf);
		return -ENOSYS;
	}

	prime = (struct ttm_prime_object *) dma_buf->priv;
	base = &prime->base;
	*handle = base->hash.key;
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);

	dma_buf_put(dma_buf);

	return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);

/**
 * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
 *
 * @tfile: Struct ttm_object_file identifying the caller.
 * @handle: Handle to the object we're exporting from.
 * @flags: flags for dma-buf creation. We just pass them on.
 * @prime_fd: The returned file descriptor.
 *
 */
int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
			   uint32_t handle, uint32_t flags,
			   int *prime_fd)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct dma_buf *dma_buf;
	struct ttm_prime_object *prime;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL ||
		     base->object_type != ttm_prime_type)) {
		ret = -ENOENT;
		goto out_unref;
	}

	prime = container_of(base, struct ttm_prime_object, base);
	if (unlikely(!base->shareable)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = mutex_lock_interruptible(&prime->mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_unref;
	}

	dma_buf = prime->dma_buf;
	if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false
		};
		exp_info.ops = &tdev->ops;
		exp_info.size = prime->size;
		exp_info.flags = flags;
		exp_info.priv = prime;

		/*
		 * Need to create a new dma_buf, with memory accounting.
		 */
		ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
					   &ctx);
		if (unlikely(ret != 0)) {
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		dma_buf = dma_buf_export(&exp_info);
		if (IS_ERR(dma_buf)) {
			ret = PTR_ERR(dma_buf);
			ttm_mem_global_free(tdev->mem_glob,
					    tdev->dma_buf_size);
			mutex_unlock(&prime->mutex);
			goto out_unref;
		}

		/*
		 * dma_buf has taken the base object reference
		 */
		base = NULL;
		prime->dma_buf = dma_buf;
	}
	mutex_unlock(&prime->mutex);

	ret = dma_buf_fd(dma_buf, flags);
	if (ret >= 0) {
		*prime_fd = ret;
		ret = 0;
	} else
		dma_buf_put(dma_buf);

out_unref:
	if (base)
		ttm_base_object_unref(&base);
	return ret;
}
EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
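
/*
 * Illustrative sketch (not part of the original file): in a driver's
 * PRIME ioctls, the two helpers above map directly onto the export and
 * import directions. Everything except the ttm_prime_* calls is a
 * hypothetical name.
 *
 *	Export (handle -> fd):
 *		ret = ttm_prime_handle_to_fd(tfile, args->handle,
 *					     args->flags, &args->fd);
 *
 *	Import of a previously exported buffer (fd -> handle):
 *		ret = ttm_prime_fd_to_handle(tfile, args->fd, &args->handle);
 */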

/**
 * ttm_prime_object_init - Initialize a ttm_prime_object
 *
 * @tfile: struct ttm_object_file identifying the caller
 * @size: The size of the dma_bufs we export.
 * @prime: The object to be initialized.
 * @shareable: See ttm_base_object_init
 * @type: See ttm_base_object_init
 * @refcount_release: See ttm_base_object_init
 * @ref_obj_release: See ttm_base_object_init
 *
 * Initializes an object which is compatible with the drm_prime model
 * for data sharing between processes and devices.
 */
int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
			  struct ttm_prime_object *prime, bool shareable,
			  enum ttm_object_type type,
			  void (*refcount_release) (struct ttm_base_object **),
			  void (*ref_obj_release) (struct ttm_base_object *,
						   enum ttm_ref_type ref_type))
{
	mutex_init(&prime->mutex);
	prime->size = PAGE_ALIGN(size);
	prime->real_type = type;
	prime->dma_buf = NULL;
	prime->refcount_release = refcount_release;
	return ttm_base_object_init(tfile, &prime->base, shareable,
				    ttm_prime_type,
				    ttm_prime_refcount_release,
				    ref_obj_release);
}
EXPORT_SYMBOL(ttm_prime_object_init);
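
/*
 * Illustrative sketch (not part of the original file): a shareable driver
 * buffer object would embed struct ttm_prime_object (instead of a bare
 * ttm_base_object) so that it can be exported as a dma-buf. The struct
 * and the choice of driver type value are assumptions for the example.
 *
 *	struct my_user_buffer {
 *		struct my_buffer buf;
 *		struct ttm_prime_object prime;
 *		struct rcu_head rcu;
 *	};
 *
 *	ret = ttm_prime_object_init(tfile, size, &ubuf->prime,
 *				    true, ttm_buffer_type,
 *				    &my_user_buffer_release, NULL);
 *
 * The user-visible handle is ubuf->prime.base.hash.key; lookups still go
 * through ttm_base_object_lookup(), and container_of() on the embedded
 * prime.base recovers the driver object.
 */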