// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to the linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in the creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

#if IS_ENABLED(CONFIG_DEBUG_FS)
static DEFINE_MUTEX(debugfs_list_mutex);
static LIST_HEAD(debugfs_list);

static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&debugfs_list_mutex);
	list_add(&dmabuf->list_node, &debugfs_list);
	mutex_unlock(&debugfs_list_mutex);
}

static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&debugfs_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&debugfs_list_mutex);
}
#else
static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf)
{
}

static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf)
{
}
#endif

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	if (!is_dma_buf_file(file))
		return -EINVAL;

	__dma_buf_debugfs_list_del(file->private_data);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 *   most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using dma_buf_export_sync_file(). A minimal
 * userspace polling sketch follows below.
 */
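
/*
 * Minimal userspace sketch of the polling described above (illustrative only;
 * "dmabuf_fd" is a hypothetical file descriptor obtained from an exporter):
 *
 * .. code-block:: c
 *
 *	struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };
 *
 *	// Blocks until the most recent write/exclusive fence has signaled.
 *	poll(&pfd, 1, -1);
 *
 *	// Use .events = POLLOUT instead to wait for all attached fences.
 */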

static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
 * It supports changing the name of the dma-buf if the same piece of memory is
 * used for multiple purposes between different devices.
 *
 * @dmabuf: [in]	dmabuf buffer that will be renamed.
 * @buf:    [in]	A piece of userspace memory that contains the name of
 *			the dma-buf.
 *
 * Returns 0 on success, or a negative error code from strndup_user() on
 * failure.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif
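
/*
 * Illustrative userspace sketch of the two ioctls above (assumptions: a valid
 * "dmabuf_fd" and, for the import case, a valid sync_file fd "sync_fd";
 * error handling elided):
 *
 * .. code-block:: c
 *
 *	struct dma_buf_export_sync_file exp = { .flags = DMA_BUF_SYNC_RW };
 *	struct dma_buf_import_sync_file imp;
 *
 *	// Snapshot the buffer's current fences into a new sync_file fd (exp.fd).
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp);
 *
 *	// Insert the fences from an existing sync_file into the buffer.
 *	imp.flags = DMA_BUF_SYNC_WRITE;
 *	imp.fd = sync_fd;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp);
 */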

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique and thus
	 * not suitable for use as the dentry name by dmabuf stats.
	 * Override ->i_ino with a unique, dmabuffs-specific value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A minimal exporter-side sketch follows below.
 */
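
/*
 * Hedged sketch of step 1 above, from the exporter's point of view. The
 * "my_buffer" structure and "my_dma_buf_ops" are hypothetical stand-ins for an
 * exporter's own buffer type and &dma_buf_ops implementation:
 *
 * .. code-block:: c
 *
 *	static struct dma_buf *my_export(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dma_buf_ops;
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_RDWR | O_CLOEXEC;
 *		exp_info.priv = buf;
 *
 *		return dma_buf_export(&exp_info);
 *	}
 *
 * A file descriptor for userspace is then created with
 * dma_buf_fd(dmabuf, O_CLOEXEC).
 */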

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On failure,
 * e.g. missing ops or an error allocating the struct dma_buf, a negative
 * error is returned, wrapped in a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_debugfs_list_add(dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");

/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach,
				      enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exports and importers,
 * all dma-buf API users must follow the common dma-buf locking convention.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_pin()
 * - dma_buf_unpin()
 * - dma_buf_map_attachment()
 * - dma_buf_unmap_attachment()
 * - dma_buf_vmap()
 * - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_attach()
 * - dma_buf_dynamic_attach()
 * - dma_buf_detach()
 * - dma_buf_export()
 * - dma_buf_fd()
 * - dma_buf_get()
 * - dma_buf_put()
 * - dma_buf_mmap()
 * - dma_buf_begin_cpu_access()
 * - dma_buf_end_cpu_access()
 * - dma_buf_map_attachment_unlocked()
 * - dma_buf_unmap_attachment_unlocked()
 * - dma_buf_vmap_unlocked()
 * - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with an unlocked dma-buf
 *    reservation and the exporter can take the lock:
 *
 * - &dma_buf_ops.attach()
 * - &dma_buf_ops.detach()
 * - &dma_buf_ops.release()
 * - &dma_buf_ops.begin_cpu_access()
 * - &dma_buf_ops.end_cpu_access()
 * - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with a locked dma-buf
 *    reservation and the exporter can't take the lock:
 *
 * - &dma_buf_ops.pin()
 * - &dma_buf_ops.unpin()
 * - &dma_buf_ops.map_dma_buf()
 * - &dma_buf_ops.unmap_dma_buf()
 * - &dma_buf_ops.vmap()
 * - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_move_notify()
 *
 * A short importer-side illustration of rule 1 follows below.
 */
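
/*
 * Hedged sketch of an importer following importer rule 1 above, i.e. mapping
 * with the reservation lock held (error handling elided; "attach" is an
 * existing &dma_buf_attachment):
 *
 * .. code-block:: c
 *
 *	struct sg_table *sgt;
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	// ... program the device with the addresses in sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_resv_unlock(dmabuf->resv);
 *
 * The *_unlocked() variants take and drop the reservation lock internally and
 * must be used when the caller does not already hold it.
 */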

/**
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
		       void *importer_priv)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;
	if (importer_ops)
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);

	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	 */
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;

		dma_resv_lock(attach->dmabuf->resv, NULL);
		if (dma_buf_is_dynamic(attach->dmabuf)) {
			ret = dmabuf->ops->pin(attach);
			if (ret)
				goto err_unlock;
		}

		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
		if (!sgt)
			sgt = ERR_PTR(-ENOMEM);
		if (IS_ERR(sgt)) {
			ret = PTR_ERR(sgt);
			goto err_unpin;
		}
		dma_resv_unlock(attach->dmabuf->resv);
		attach->sgt = sgt;
		attach->dir = DMA_BIDIRECTIONAL;
	}

	return attach;

err_attach:
	kfree(attach);
	return ERR_PTR(ret);

err_unpin:
	if (dma_buf_is_dynamic(attach->dmabuf))
		dmabuf->ops->unpin(attach);

err_unlock:
	dma_resv_unlock(attach->dmabuf->resv);

	dma_buf_detach(dmabuf, attach);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");

/**
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
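
/*
 * Hedged sketch of a static (non-dynamic) importer's full life cycle, using
 * the unlocked helpers (error handling elided; "fd" and "dev" come from the
 * importing driver):
 *
 * .. code-block:: c
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *	// ... DMA to/from the buffer ...
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */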

static void __unmap_dma_buf(struct dma_buf_attachment *attach,
			    struct sg_table *sg_table,
			    enum dma_data_direction direction)
{
	/* uses XOR, hence this unmangles */
	mangle_sg_table(sg_table);

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 *
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);

	if (attach->sgt) {

		__unmap_dma_buf(attach, attach->sgt, attach->dir);

		if (dma_buf_is_dynamic(attach->dmabuf))
			dmabuf->ops->unpin(attach);
	}
	list_del(&attach->node);

	dma_resv_unlock(dmabuf->resv);

	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	kfree(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");

/**
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 *
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 *
 * Buffers must be unpinned by calling dma_buf_unpin().
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	int ret = 0;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");

/**
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 *
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
 */
void dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;

	WARN_ON(!dma_buf_attachment_is_dynamic(attach));

	dma_resv_assert_held(dmabuf->resv);

	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 *
 * Important: Dynamic importers must wait for the exclusive fence of the struct
 * dma_resv attached to the DMA-BUF first.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	int r;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	if (dma_buf_is_dynamic(attach->dmabuf)) {
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = attach->dmabuf->ops->pin(attach);
			if (r)
				return ERR_PTR(r);
		}
	}

	sg_table = __map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		attach->dmabuf->ops->unpin(attach);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		u64 addr;
		int len;
		int i;

		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
			}
		}
	}
#endif /* CONFIG_DMA_API_DEBUG */
	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");

/**
 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_map_attachment().
 */
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
				enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sg_table = dma_buf_map_attachment(attach, direction);
	dma_resv_unlock(attach->dmabuf->resv);

	return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_assert_held(attach->dmabuf->resv);

	if (attach->sgt == sg_table)
		return;

	__unmap_dma_buf(attach, sg_table, direction);

	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");

/**
 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the
 * buffer; might deallocate the scatterlist associated. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * Unlocked variant of dma_buf_unmap_attachment().
 */
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
				       struct sg_table *sg_table,
				       enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	dma_buf_unmap_attachment(attach, sg_table, direction);
	dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");

/**
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 *
 * @dmabuf:	[in]	buffer which is moving
 *
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
 */
void dma_buf_move_notify(struct dma_buf *dmabuf)
{
	struct dma_buf_attachment *attach;

	dma_resv_assert_held(dmabuf->resv);

	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *
 *   Interfaces:
 *
 *   .. code-block:: c
 *
 *     void *dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *     void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this, and mmap support was needed for DMA buffer
 *   file descriptors to replace ION buffers.
 *
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers' vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface:
 *
 *   .. code-block:: c
 *
 *     int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object. A short userspace sketch of the
 *   mmap plus DMA_BUF_IOCTL_SYNC sequence follows below.
 */
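
/*
 * Hedged userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC bracketing
 * described above ("dmabuf_fd" and "size" are assumed to come from the
 * exporting subsystem; error handling and -EAGAIN/-EINTR restart elided):
 *
 * .. code-block:: c
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU reads and writes through ptr ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, size);
 */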

static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	might_lock(&dmabuf->resv->lock.base);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
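
/*
 * Hedged kernel-side sketch of bracketing CPU access with the two helpers
 * above (the access in the middle is illustrative; error handling elided):
 *
 * .. code-block:: c
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	// ... CPU reads of the buffer contents, e.g. through a vmap ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */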

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function to
 * set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;

	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly in kernel space for high-use objects.
 *
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	struct iosys_map ptr;
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_assert_held(dmabuf->resv);

	if (!dmabuf->ops->vmap)
		return -EINVAL;

	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
		return 0;
	}

	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));

	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
		return ret;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

	*map = dmabuf->vmap_ptr;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");

/**
 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 *
 * Unlocked version of dma_buf_vmap()
 *
 * Returns 0 on success, or a negative errno code otherwise.
 */
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	int ret;

	iosys_map_clear(map);

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	dma_resv_lock(dmabuf->resv, NULL);
	ret = dma_buf_vmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
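
/*
 * Hedged sketch of a driver using the vmap interface via the unlocked
 * helpers (the memset of a test pattern is purely illustrative; error
 * handling elided):
 *
 * .. code-block:: c
 *
 *	struct iosys_map map;
 *
 *	dma_buf_vmap_unlocked(dmabuf, &map);
 *	// The whole buffer is now linearly mapped; write a test pattern.
 *	iosys_map_memset(&map, 0, 0xaa, dmabuf->size);
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 */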

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_assert_held(dmabuf->resv);

	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));

	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		iosys_map_clear(&dmabuf->vmap_ptr);
	}
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");

/**
 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
 */
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
	if (WARN_ON(!dmabuf))
		return;

	dma_resv_lock(dmabuf->resv, NULL);
	dma_buf_vunmap(dmabuf, map);
	dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");

#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;
	int ret;

	ret = mutex_lock_interruptible(&debugfs_list_mutex);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
		   "size", "flags", "mode", "count", "ino");

	list_for_each_entry(buf_obj, &debugfs_list, list_node) {

		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		if (ret)
			goto error_unlock;

		spin_lock(&buf_obj->name_lock);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->size,
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   buf_obj->exp_name,
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "<none>");
		spin_unlock(&buf_obj->name_lock);

		dma_resv_describe(buf_obj->resv, s);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}
		dma_resv_unlock(buf_obj->resv);

		seq_printf(s, "Total %d devices attached\n\n",
			   attach_count);

		count++;
		size += buf_obj->size;
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&debugfs_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&debugfs_list_mutex);
	return ret;
}

DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	int ret;

	ret = dma_buf_init_sysfs_statistics();
	if (ret)
		return ret;

	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);

	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
	dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);
1/*
2 * Framework for buffer objects that can be shared across devices/subsystems.
3 *
4 * Copyright(C) 2011 Linaro Limited. All rights reserved.
5 * Author: Sumit Semwal <sumit.semwal@ti.com>
6 *
7 * Many thanks to linaro-mm-sig list, and specially
8 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
9 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
10 * refining of this idea.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License version 2 as published by
14 * the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program. If not, see <http://www.gnu.org/licenses/>.
23 */
24
25#include <linux/fs.h>
26#include <linux/slab.h>
27#include <linux/dma-buf.h>
28#include <linux/dma-fence.h>
29#include <linux/anon_inodes.h>
30#include <linux/export.h>
31#include <linux/debugfs.h>
32#include <linux/module.h>
33#include <linux/seq_file.h>
34#include <linux/poll.h>
35#include <linux/reservation.h>
36#include <linux/mm.h>
37
38#include <uapi/linux/dma-buf.h>
39
40static inline int is_dma_buf_file(struct file *);
41
42struct dma_buf_list {
43 struct list_head head;
44 struct mutex lock;
45};
46
47static struct dma_buf_list db_list;
48
49static int dma_buf_release(struct inode *inode, struct file *file)
50{
51 struct dma_buf *dmabuf;
52
53 if (!is_dma_buf_file(file))
54 return -EINVAL;
55
56 dmabuf = file->private_data;
57
58 BUG_ON(dmabuf->vmapping_counter);
59
60 /*
61 * Any fences that a dma-buf poll can wait on should be signaled
62 * before releasing dma-buf. This is the responsibility of each
63 * driver that uses the reservation objects.
64 *
65 * If you hit this BUG() it means someone dropped their ref to the
66 * dma-buf while still having pending operation to the buffer.
67 */
68 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
69
70 dmabuf->ops->release(dmabuf);
71
72 mutex_lock(&db_list.lock);
73 list_del(&dmabuf->list_node);
74 mutex_unlock(&db_list.lock);
75
76 if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
77 reservation_object_fini(dmabuf->resv);
78
79 module_put(dmabuf->owner);
80 kfree(dmabuf);
81 return 0;
82}
83
84static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
85{
86 struct dma_buf *dmabuf;
87
88 if (!is_dma_buf_file(file))
89 return -EINVAL;
90
91 dmabuf = file->private_data;
92
93 /* check for overflowing the buffer's size */
94 if (vma->vm_pgoff + vma_pages(vma) >
95 dmabuf->size >> PAGE_SHIFT)
96 return -EINVAL;
97
98 return dmabuf->ops->mmap(dmabuf, vma);
99}
100
101static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
102{
103 struct dma_buf *dmabuf;
104 loff_t base;
105
106 if (!is_dma_buf_file(file))
107 return -EBADF;
108
109 dmabuf = file->private_data;
110
111 /* only support discovering the end of the buffer,
112 but also allow SEEK_SET to maintain the idiomatic
113 SEEK_END(0), SEEK_CUR(0) pattern */
114 if (whence == SEEK_END)
115 base = dmabuf->size;
116 else if (whence == SEEK_SET)
117 base = 0;
118 else
119 return -EINVAL;
120
121 if (offset != 0)
122 return -EINVAL;
123
124 return base + offset;
125}
126
127/**
128 * DOC: fence polling
129 *
130 * To support cross-device and cross-driver synchronization of buffer access
131 * implicit fences (represented internally in the kernel with &struct fence) can
132 * be attached to a &dma_buf. The glue for that and a few related things are
133 * provided in the &reservation_object structure.
134 *
135 * Userspace can query the state of these implicitly tracked fences using poll()
136 * and related system calls:
137 *
138 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
139 * most recent write or exclusive fence.
140 *
141 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
142 * all attached fences, shared and exclusive ones.
143 *
144 * Note that this only signals the completion of the respective fences, i.e. the
145 * DMA transfers are complete. Cache flushing and any other necessary
146 * preparations before CPU access can begin still need to happen.
147 */
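
/*
 * Illustrative sketch (userspace, not part of this file): waiting for all
 * implicit fences of a dma-buf by polling for EPOLLOUT, as described above.
 * "dmabuf_fd" is an assumption for the example only.
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,
 *		.events = POLLOUT,	// wait for all attached fences
 *	};
 *
 *	if (poll(&pfd, 1, -1) < 0)
 *		err(1, "poll failed");
 *	// fences have signaled; cache maintenance may still be required
 */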
148
149static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
150{
151 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
152 unsigned long flags;
153
154 spin_lock_irqsave(&dcb->poll->lock, flags);
155 wake_up_locked_poll(dcb->poll, dcb->active);
156 dcb->active = 0;
157 spin_unlock_irqrestore(&dcb->poll->lock, flags);
158}
159
160static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
161{
162 struct dma_buf *dmabuf;
163 struct reservation_object *resv;
164 struct reservation_object_list *fobj;
165 struct dma_fence *fence_excl;
166 __poll_t events;
167 unsigned shared_count, seq;
168
169 dmabuf = file->private_data;
170 if (!dmabuf || !dmabuf->resv)
171 return EPOLLERR;
172
173 resv = dmabuf->resv;
174
175 poll_wait(file, &dmabuf->poll, poll);
176
177 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
178 if (!events)
179 return 0;
180
181retry:
182 seq = read_seqcount_begin(&resv->seq);
183 rcu_read_lock();
184
185 fobj = rcu_dereference(resv->fence);
186 if (fobj)
187 shared_count = fobj->shared_count;
188 else
189 shared_count = 0;
190 fence_excl = rcu_dereference(resv->fence_excl);
191 if (read_seqcount_retry(&resv->seq, seq)) {
192 rcu_read_unlock();
193 goto retry;
194 }
195
196 if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
197 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
198 __poll_t pevents = EPOLLIN;
199
200 if (shared_count == 0)
201 pevents |= EPOLLOUT;
202
203 spin_lock_irq(&dmabuf->poll.lock);
204 if (dcb->active) {
205 dcb->active |= pevents;
206 events &= ~pevents;
207 } else
208 dcb->active = pevents;
209 spin_unlock_irq(&dmabuf->poll.lock);
210
211 if (events & pevents) {
212 if (!dma_fence_get_rcu(fence_excl)) {
213 /* force a recheck */
214 events &= ~pevents;
215 dma_buf_poll_cb(NULL, &dcb->cb);
216 } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
217 dma_buf_poll_cb)) {
218 events &= ~pevents;
219 dma_fence_put(fence_excl);
220 } else {
221 /*
222 * No callback queued, wake up any additional
223 * waiters.
224 */
225 dma_fence_put(fence_excl);
226 dma_buf_poll_cb(NULL, &dcb->cb);
227 }
228 }
229 }
230
231 if ((events & EPOLLOUT) && shared_count > 0) {
232 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
233 int i;
234
235 /* Only queue a new callback if no event has fired yet */
236 spin_lock_irq(&dmabuf->poll.lock);
237 if (dcb->active)
238 events &= ~EPOLLOUT;
239 else
240 dcb->active = EPOLLOUT;
241 spin_unlock_irq(&dmabuf->poll.lock);
242
243 if (!(events & EPOLLOUT))
244 goto out;
245
246 for (i = 0; i < shared_count; ++i) {
247 struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
248
249 if (!dma_fence_get_rcu(fence)) {
250 /*
251 * fence refcount dropped to zero, this means
252 * that fobj has been freed
253 *
254 * call dma_buf_poll_cb and force a recheck!
255 */
256 events &= ~EPOLLOUT;
257 dma_buf_poll_cb(NULL, &dcb->cb);
258 break;
259 }
260 if (!dma_fence_add_callback(fence, &dcb->cb,
261 dma_buf_poll_cb)) {
262 dma_fence_put(fence);
263 events &= ~EPOLLOUT;
264 break;
265 }
266 dma_fence_put(fence);
267 }
268
269 /* No callback queued, wake up any additional waiters. */
270 if (i == shared_count)
271 dma_buf_poll_cb(NULL, &dcb->cb);
272 }
273
274out:
275 rcu_read_unlock();
276 return events;
277}
278
279static long dma_buf_ioctl(struct file *file,
280 unsigned int cmd, unsigned long arg)
281{
282 struct dma_buf *dmabuf;
283 struct dma_buf_sync sync;
284 enum dma_data_direction direction;
285 int ret;
286
287 dmabuf = file->private_data;
288
289 switch (cmd) {
290 case DMA_BUF_IOCTL_SYNC:
291 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
292 return -EFAULT;
293
294 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
295 return -EINVAL;
296
297 switch (sync.flags & DMA_BUF_SYNC_RW) {
298 case DMA_BUF_SYNC_READ:
299 direction = DMA_FROM_DEVICE;
300 break;
301 case DMA_BUF_SYNC_WRITE:
302 direction = DMA_TO_DEVICE;
303 break;
304 case DMA_BUF_SYNC_RW:
305 direction = DMA_BIDIRECTIONAL;
306 break;
307 default:
308 return -EINVAL;
309 }
310
311 if (sync.flags & DMA_BUF_SYNC_END)
312 ret = dma_buf_end_cpu_access(dmabuf, direction);
313 else
314 ret = dma_buf_begin_cpu_access(dmabuf, direction);
315
316 return ret;
317 default:
318 return -ENOTTY;
319 }
320}
321
322static const struct file_operations dma_buf_fops = {
323 .release = dma_buf_release,
324 .mmap = dma_buf_mmap_internal,
325 .llseek = dma_buf_llseek,
326 .poll = dma_buf_poll,
327 .unlocked_ioctl = dma_buf_ioctl,
328#ifdef CONFIG_COMPAT
329 .compat_ioctl = dma_buf_ioctl,
330#endif
331};
332
333/*
334 * is_dma_buf_file - Check if struct file* is associated with dma_buf
335 */
336static inline int is_dma_buf_file(struct file *file)
337{
338 return file->f_op == &dma_buf_fops;
339}
340
341/**
342 * DOC: dma buf device access
343 *
344 * For device DMA access to a shared DMA buffer the usual sequence of operations
345 * is fairly simple:
346 *
347 * 1. The exporter defines its exporter instance using
348 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
349 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
350 * as a file descriptor by calling dma_buf_fd().
351 *
352 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
353 * to share with: first the file descriptor is converted to a &dma_buf using
354 * dma_buf_get(). Then the buffer is attached to the device using
355 * dma_buf_attach().
356 *
357 * Up to this stage the exporter is still free to migrate or reallocate the
358 * backing storage.
359 *
360 * 3. Once the buffer is attached to all devices userspace can initiate DMA
361 * access to the shared buffer. In the kernel this is done by calling
362 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
363 *
364 * 4. Once a driver is done with a shared buffer it needs to call
365 * dma_buf_detach() (after cleaning up any mappings) and then release the
366 * reference acquired with dma_buf_get by calling dma_buf_put().
367 *
368 * For the detailed semantics exporters are expected to implement see
369 * &dma_buf_ops.
370 */
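
/*
 * Illustrative sketch of the importer side of the sequence above (assumptions
 * for the example only: "dev" is the importing struct device and "fd" the file
 * descriptor received from userspace; error handling is abbreviated):
 *
 *	struct dma_buf *dmabuf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	dmabuf = dma_buf_get(fd);				// step 2
 *	attach = dma_buf_attach(dmabuf, dev);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);// step 3
 *
 *	// ... program the device using the returned sg_table ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);// step 4
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */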
371
372/**
373 * dma_buf_export - Creates a new dma_buf, and associates an anon file
374 * with this buffer, so it can be exported.
375 * Also connects the allocator specific data and ops to the buffer.
376 * Additionally, provides a name string for the exporter; useful in debugging.
377 *
378 * @exp_info: [in] holds all the export related information provided
379 * by the exporter. see &struct dma_buf_export_info
380 * for further details.
381 *
382 * Returns, on success, a newly created dma_buf object, which wraps the
383 * supplied private data and operations for dma_buf_ops. On missing ops or
384 * failure to allocate the struct dma_buf, returns an ERR_PTR() encoded negative error.
385 *
386 * For most cases the easiest way to create @exp_info is through the
387 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
388 */
389struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
390{
391 struct dma_buf *dmabuf;
392 struct reservation_object *resv = exp_info->resv;
393 struct file *file;
394 size_t alloc_size = sizeof(struct dma_buf);
395 int ret;
396
397 if (!exp_info->resv)
398 alloc_size += sizeof(struct reservation_object);
399 else
400 /* prevent &dma_buf[1] == dma_buf->resv */
401 alloc_size += 1;
402
403 if (WARN_ON(!exp_info->priv
404 || !exp_info->ops
405 || !exp_info->ops->map_dma_buf
406 || !exp_info->ops->unmap_dma_buf
407 || !exp_info->ops->release
408 || !exp_info->ops->map_atomic
409 || !exp_info->ops->map
410 || !exp_info->ops->mmap)) {
411 return ERR_PTR(-EINVAL);
412 }
413
414 if (!try_module_get(exp_info->owner))
415 return ERR_PTR(-ENOENT);
416
417 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
418 if (!dmabuf) {
419 ret = -ENOMEM;
420 goto err_module;
421 }
422
423 dmabuf->priv = exp_info->priv;
424 dmabuf->ops = exp_info->ops;
425 dmabuf->size = exp_info->size;
426 dmabuf->exp_name = exp_info->exp_name;
427 dmabuf->owner = exp_info->owner;
428 init_waitqueue_head(&dmabuf->poll);
429 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
430 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
431
432 if (!resv) {
433 resv = (struct reservation_object *)&dmabuf[1];
434 reservation_object_init(resv);
435 }
436 dmabuf->resv = resv;
437
438 file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
439 exp_info->flags);
440 if (IS_ERR(file)) {
441 ret = PTR_ERR(file);
442 goto err_dmabuf;
443 }
444
445 file->f_mode |= FMODE_LSEEK;
446 dmabuf->file = file;
447
448 mutex_init(&dmabuf->lock);
449 INIT_LIST_HEAD(&dmabuf->attachments);
450
451 mutex_lock(&db_list.lock);
452 list_add(&dmabuf->list_node, &db_list.head);
453 mutex_unlock(&db_list.lock);
454
455 return dmabuf;
456
457err_dmabuf:
458 kfree(dmabuf);
459err_module:
460 module_put(exp_info->owner);
461 return ERR_PTR(ret);
462}
463EXPORT_SYMBOL_GPL(dma_buf_export);
464
465/**
466 * dma_buf_fd - returns a file descriptor for the given dma_buf
467 * @dmabuf: [in] pointer to dma_buf for which fd is required.
468 * @flags: [in] flags to give to fd
469 *
470 * On success, returns the new file descriptor. Otherwise, returns a negative error code.
471 */
472int dma_buf_fd(struct dma_buf *dmabuf, int flags)
473{
474 int fd;
475
476 if (!dmabuf || !dmabuf->file)
477 return -EINVAL;
478
479 fd = get_unused_fd_flags(flags);
480 if (fd < 0)
481 return fd;
482
483 fd_install(fd, dmabuf->file);
484
485 return fd;
486}
487EXPORT_SYMBOL_GPL(dma_buf_fd);
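
/*
 * Illustrative exporter-side sketch ("my_ops" and "my_buf" are made up for the
 * example; only the dma-buf calls are real):
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_ops;
 *	exp_info.size = my_buf->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_buf;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);	// drop the only reference on failure
 *	return fd;
 */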
488
489/**
490 * dma_buf_get - returns the dma_buf structure related to an fd
491 * @fd: [in] fd associated with the dma_buf to be returned
492 *
493 * On success, returns the dma_buf structure associated with an fd; uses
494 * the file's refcounting done by fget() to increase the refcount. Returns an
495 * ERR_PTR() otherwise.
496 */
497struct dma_buf *dma_buf_get(int fd)
498{
499 struct file *file;
500
501 file = fget(fd);
502
503 if (!file)
504 return ERR_PTR(-EBADF);
505
506 if (!is_dma_buf_file(file)) {
507 fput(file);
508 return ERR_PTR(-EINVAL);
509 }
510
511 return file->private_data;
512}
513EXPORT_SYMBOL_GPL(dma_buf_get);
514
515/**
516 * dma_buf_put - decreases refcount of the buffer
517 * @dmabuf: [in] buffer to reduce refcount of
518 *
519 * Uses file's refcounting done implicitly by fput().
520 *
521 * If, as a result of this call, the refcount becomes 0, the 'release' file
522 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
523 * in turn, and frees the memory allocated for dmabuf when exported.
524 */
525void dma_buf_put(struct dma_buf *dmabuf)
526{
527 if (WARN_ON(!dmabuf || !dmabuf->file))
528 return;
529
530 fput(dmabuf->file);
531}
532EXPORT_SYMBOL_GPL(dma_buf_put);
533
534/**
535 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
536 * calls attach() of dma_buf_ops to allow device-specific attach functionality
537 * @dmabuf: [in] buffer to attach device to.
538 * @dev: [in] device to be attached.
539 *
540 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
541 * must be cleaned up by calling dma_buf_detach().
542 *
543 * Returns:
544 *
545 * A pointer to newly created &dma_buf_attachment on success, or a negative
546 * error code wrapped into a pointer on failure.
547 *
548 * Note that this can fail if the backing storage of @dmabuf is in a place not
549 * accessible to @dev, and cannot be moved to a more suitable place. This is
550 * indicated with the error code -EBUSY.
551 */
552struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
553 struct device *dev)
554{
555 struct dma_buf_attachment *attach;
556 int ret;
557
558 if (WARN_ON(!dmabuf || !dev))
559 return ERR_PTR(-EINVAL);
560
561 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
562 if (!attach)
563 return ERR_PTR(-ENOMEM);
564
565 attach->dev = dev;
566 attach->dmabuf = dmabuf;
567
568 mutex_lock(&dmabuf->lock);
569
570 if (dmabuf->ops->attach) {
571 ret = dmabuf->ops->attach(dmabuf, dev, attach);
572 if (ret)
573 goto err_attach;
574 }
575 list_add(&attach->node, &dmabuf->attachments);
576
577 mutex_unlock(&dmabuf->lock);
578 return attach;
579
580err_attach:
581 kfree(attach);
582 mutex_unlock(&dmabuf->lock);
583 return ERR_PTR(ret);
584}
585EXPORT_SYMBOL_GPL(dma_buf_attach);
586
587/**
588 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
589 * optionally calls detach() of dma_buf_ops for device-specific detach
590 * @dmabuf: [in] buffer to detach from.
591 * @attach: [in] attachment to be detached; is free'd after this call.
592 *
593 * Clean up a device attachment obtained by calling dma_buf_attach().
594 */
595void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
596{
597 if (WARN_ON(!dmabuf || !attach))
598 return;
599
600 mutex_lock(&dmabuf->lock);
601 list_del(&attach->node);
602 if (dmabuf->ops->detach)
603 dmabuf->ops->detach(dmabuf, attach);
604
605 mutex_unlock(&dmabuf->lock);
606 kfree(attach);
607}
608EXPORT_SYMBOL_GPL(dma_buf_detach);
609
610/**
611 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
612 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
613 * dma_buf_ops.
614 * @attach: [in] attachment whose scatterlist is to be returned
615 * @direction: [in] direction of DMA transfer
616 *
617 * Returns an sg_table containing the scatterlist of the backing storage, or an
618 * ERR_PTR on error. May return -EINTR if it is interrupted by a signal.
619 *
620 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
621 * the underlying backing storage is pinned for as long as a mapping exists,
622 * therefore users/importers should not hold onto a mapping for undue amounts of
623 * time.
624 */
625struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
626 enum dma_data_direction direction)
627{
628 struct sg_table *sg_table;
629
630 might_sleep();
631
632 if (WARN_ON(!attach || !attach->dmabuf))
633 return ERR_PTR(-EINVAL);
634
635 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
636 if (!sg_table)
637 sg_table = ERR_PTR(-ENOMEM);
638
639 return sg_table;
640}
641EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
642
643/**
644 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
645 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
646 * dma_buf_ops.
647 * @attach: [in] attachment to unmap buffer from
648 * @sg_table: [in] scatterlist info of the buffer to unmap
649 * @direction: [in] direction of DMA transfer
650 *
651 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
652 */
653void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
654 struct sg_table *sg_table,
655 enum dma_data_direction direction)
656{
657 might_sleep();
658
659 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
660 return;
661
662 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
663 direction);
664}
665EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
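
/*
 * Illustrative sketch (assumption: "attach" was obtained with
 * dma_buf_attach()); since dma_buf_map_attachment() may return -EINTR, the
 * caller can simply restart the call:
 *
 *	struct sg_table *sgt;
 *
 *	do {
 *		sgt = dma_buf_map_attachment(attach, DMA_FROM_DEVICE);
 *	} while (sgt == ERR_PTR(-EINTR));
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	// ... DMA to/from the buffer via the sg_table ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_FROM_DEVICE);
 */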
666
667/**
668 * DOC: cpu access
669 *
670 * There are multiple reasons for supporting CPU access to a dma buffer object:
671 *
672 * - Fallback operations in the kernel, for example when a device is connected
673 * over USB and the kernel needs to shuffle the data around first before
674 * sending it away. Cache coherency is handled by bracketing any
675 * transactions with calls to dma_buf_begin_cpu_access() and
676 * dma_buf_end_cpu_access().
677 *
678 * To support dma_buf objects residing in highmem cpu access is page-based
679 * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
680 * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
681 * returns a pointer in kernel virtual address space. Afterwards the chunk
682 * needs to be unmapped again. There is no limit on how often a given chunk
683 * can be mapped and unmapped, i.e. the importer does not need to call
684 * begin_cpu_access again before mapping the same chunk again.
685 *
686 * Interfaces::
687 * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
688 * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
689 *
690 * There are also atomic variants of these interfaces. Like for kmap they
691 * facilitate non-blocking fast-paths. Neither the importer nor the exporter
692 * (in the callback) is allowed to block when using these.
693 *
694 * Interfaces::
695 * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
696 * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
697 *
698 * For importers all the restrictions of using kmap apply, like the limited
699 * supply of kmap_atomic slots. Hence an importer shall only hold onto at
700 * max 2 atomic dma_buf kmaps at the same time (in any given process context).
701 *
702 * dma_buf kmap calls outside of the range specified in begin_cpu_access are
703 * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
704 * the partial chunks at the beginning and end but may return stale or bogus
705 * data outside of the range (in these partial chunks).
706 *
707 * Note that these calls need to always succeed. The exporter needs to
708 * complete any preparations that might fail in begin_cpu_access.
709 *
710 * For some cases the overhead of kmap can be too high, so a vmap interface
711 * is provided. This interface should be used very carefully, as vmalloc
712 * space is a limited resource on many architectures.
713 *
714 * Interfaces::
715 * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
716 * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
717 *
718 * The vmap call can fail if there is no vmap support in the exporter, or if
719 * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
720 * that the dma-buf layer keeps a reference count for all vmap access and
721 * calls down into the exporter's vmap function only when no vmapping exists,
722 * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
723 * provided by taking the dma_buf->lock mutex.
724 *
725 * - For full compatibility on the importer side with existing userspace
726 * interfaces, which might already support mmap'ing buffers. This is needed in
727 * many processing pipelines (e.g. feeding a software rendered image into a
728 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
729 * framework already supported this and for DMA buffer file descriptors to
730 * replace ION buffers mmap support was needed.
731 *
732 * There are no special interfaces, userspace simply calls mmap on the dma-buf
733 * fd. But like for CPU access there's a need to bracket the actual access,
734 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
735 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
736 * be restarted.
737 *
738 * Some systems might need some sort of cache coherency management e.g. when
739 * CPU and GPU domains are being accessed through dma-buf at the same time.
740 * To circumvent this problem there are begin/end coherency markers that
741 * forward directly to the existing dma-buf device drivers' vfunc hooks.
742 * Userspace can make use of those markers through the DMA_BUF_IOCTL_SYNC
743 * ioctl. The sequence would be used like the following:
744 *
745 * - mmap dma-buf fd
746 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
747 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
748 * want (with the new data being consumed by say the GPU or the scanout
749 * device)
750 * - munmap once you don't need the buffer any more
751 *
752 * For correctness and optimal performance, it is always required to use
753 * SYNC_START and SYNC_END before and after, respectively, when accessing the
754 * mapped address. Userspace cannot rely on coherent access, even when there
755 * are systems where it just works without calling these ioctls.
756 *
757 * - And as a CPU fallback in userspace processing pipelines.
758 *
759 * Similar to the motivation for kernel cpu access it is again important that
760 * the userspace code of a given importing subsystem can use the same
761 * interfaces with an imported dma-buf buffer object as with a native buffer
762 * object. This is especially important for drm where the userspace part of
763 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
764 * use a different way to mmap a buffer would be rather invasive.
765 *
766 * The assumption in the current dma-buf interfaces is that redirecting the
767 * initial mmap is all that's needed. A survey of some of the existing
768 * subsystems shows that no driver seems to do any nefarious thing like
769 * syncing up with outstanding asynchronous processing on the device or
770 * allocating special resources at fault time. So hopefully this is good
771 * enough, since adding interfaces to intercept pagefaults and allow pte
772 * shootdowns would increase the complexity quite a bit.
773 *
774 * Interface::
775 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
776 * unsigned long);
777 *
778 * If the importing subsystem simply provides a special-purpose mmap call to
779 * set up a mapping in userspace, calling do_mmap with dma_buf->file will
780 * equally achieve that for a dma-buf object.
781 */
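
/*
 * Illustrative userspace sketch of the SYNC_START/SYNC_END bracketing
 * described above (not part of this file; "dmabuf_fd" and "len" are
 * assumptions for the example only):
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *p;
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	while (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == -1 &&
 *	       (errno == EAGAIN || errno == EINTR))
 *		;	// restart as documented above
 *
 *	// ... CPU reads/writes through p ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(p, len);
 */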
782
783static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
784 enum dma_data_direction direction)
785{
786 bool write = (direction == DMA_BIDIRECTIONAL ||
787 direction == DMA_TO_DEVICE);
788 struct reservation_object *resv = dmabuf->resv;
789 long ret;
790
791 /* Wait on any implicit rendering fences */
792 ret = reservation_object_wait_timeout_rcu(resv, write, true,
793 MAX_SCHEDULE_TIMEOUT);
794 if (ret < 0)
795 return ret;
796
797 return 0;
798}
799
800/**
801 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
802 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
803 * preparations. Coherency is only guaranteed for the specified access
804 * direction.
805 * @dmabuf: [in] buffer to prepare cpu access for.
806 * @direction: [in] direction of access.
807 *
808 * After the cpu access is complete the caller should call
809 * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
810 * it guaranteed to be coherent with other DMA access.
811 *
812 * Can return negative error values, returns 0 on success.
813 */
814int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
815 enum dma_data_direction direction)
816{
817 int ret = 0;
818
819 if (WARN_ON(!dmabuf))
820 return -EINVAL;
821
822 if (dmabuf->ops->begin_cpu_access)
823 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
824
825 /* Ensure that all fences are waited upon - but we first allow
826 * the native handler the chance to do so more efficiently if it
827	 * chooses. A double invocation here will be a reasonably cheap no-op.
828 */
829 if (ret == 0)
830 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
831
832 return ret;
833}
834EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
835
836/**
837 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
838 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
839 * actions. Coherency is only guaranteed for the specified access
840 * direction.
841 * @dmabuf: [in] buffer to complete cpu access for.
842 * @direction: [in] direction of access.
843 *
844 * This terminates CPU access started with dma_buf_begin_cpu_access().
845 *
846 * Can return negative error values, returns 0 on success.
847 */
848int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
849 enum dma_data_direction direction)
850{
851 int ret = 0;
852
853 WARN_ON(!dmabuf);
854
855 if (dmabuf->ops->end_cpu_access)
856 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
857
858 return ret;
859}
860EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
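
/*
 * Illustrative sketch of kernel CPU access bracketing (assumption: "dmabuf" is
 * held by the caller; only whole-buffer access is shown):
 *
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	// ... read the buffer contents, e.g. via dma_buf_kmap() per page ...
 *
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */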
861
862/**
863 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
864 * space. The same restrictions as for kmap_atomic and friends apply.
865 * @dmabuf: [in] buffer to map page from.
866 * @page_num: [in] page in PAGE_SIZE units to map.
867 *
868 * This call must always succeed, any necessary preparations that might fail
869 * need to be done in begin_cpu_access.
870 */
871void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
872{
873 WARN_ON(!dmabuf);
874
875 return dmabuf->ops->map_atomic(dmabuf, page_num);
876}
877EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
878
879/**
880 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
881 * @dmabuf: [in] buffer to unmap page from.
882 * @page_num: [in] page in PAGE_SIZE units to unmap.
883 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
884 *
885 * This call must always succeed.
886 */
887void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
888 void *vaddr)
889{
890 WARN_ON(!dmabuf);
891
892 if (dmabuf->ops->unmap_atomic)
893 dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
894}
895EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
896
897/**
898 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
899 * same restrictions as for kmap and friends apply.
900 * @dmabuf: [in] buffer to map page from.
901 * @page_num: [in] page in PAGE_SIZE units to map.
902 *
903 * This call must always succeed, any necessary preparations that might fail
904 * need to be done in begin_cpu_access.
905 */
906void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
907{
908 WARN_ON(!dmabuf);
909
910 return dmabuf->ops->map(dmabuf, page_num);
911}
912EXPORT_SYMBOL_GPL(dma_buf_kmap);
913
914/**
915 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
916 * @dmabuf: [in] buffer to unmap page from.
917 * @page_num: [in] page in PAGE_SIZE units to unmap.
918 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
919 *
920 * This call must always succeed.
921 */
922void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
923 void *vaddr)
924{
925 WARN_ON(!dmabuf);
926
927 if (dmabuf->ops->unmap)
928 dmabuf->ops->unmap(dmabuf, page_num, vaddr);
929}
930EXPORT_SYMBOL_GPL(dma_buf_kunmap);
931
932
933/**
934 * dma_buf_mmap - Set up a userspace mmap with the given vma
935 * @dmabuf: [in] buffer that should back the vma
936 * @vma: [in] vma for the mmap
937 * @pgoff: [in] offset in pages where this mmap should start within the
938 * dma-buf buffer.
939 *
940 * This function adjusts the passed in vma so that it points at the file of the
941 * dma_buf operation. It also adjusts the starting pgoff and does bounds
942 * checking on the size of the vma. Then it calls the exporter's mmap function to
943 * set up the mapping.
944 *
945 * Can return negative error values, returns 0 on success.
946 */
947int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
948 unsigned long pgoff)
949{
950 struct file *oldfile;
951 int ret;
952
953 if (WARN_ON(!dmabuf || !vma))
954 return -EINVAL;
955
956 /* check for offset overflow */
957 if (pgoff + vma_pages(vma) < pgoff)
958 return -EOVERFLOW;
959
960 /* check for overflowing the buffer's size */
961 if (pgoff + vma_pages(vma) >
962 dmabuf->size >> PAGE_SHIFT)
963 return -EINVAL;
964
965 /* readjust the vma */
966 get_file(dmabuf->file);
967 oldfile = vma->vm_file;
968 vma->vm_file = dmabuf->file;
969 vma->vm_pgoff = pgoff;
970
971 ret = dmabuf->ops->mmap(dmabuf, vma);
972 if (ret) {
973 /* restore old parameters on failure */
974 vma->vm_file = oldfile;
975 fput(dmabuf->file);
976 } else {
977 if (oldfile)
978 fput(oldfile);
979 }
980 return ret;
981
982}
983EXPORT_SYMBOL_GPL(dma_buf_mmap);
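
/*
 * Illustrative sketch ("my_object" and "my_obj_from_vma()" are made-up names
 * for the example): an importer's own mmap handler can simply redirect the
 * mapping to the dma-buf.
 *
 *	static int my_importer_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_object *obj = my_obj_from_vma(file, vma);
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */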
984
985/**
986 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
987 * address space. Same restrictions as for vmap and friends apply.
988 * @dmabuf: [in] buffer to vmap
989 *
990 * This call may fail due to lack of virtual mapping address space.
991 * These calls are optional in drivers. The intended use for them
992 * is to map objects linearly in kernel space for frequently used objects.
993 * Please attempt to use kmap/kunmap before thinking about these interfaces.
994 *
995 * Returns NULL on error.
996 */
997void *dma_buf_vmap(struct dma_buf *dmabuf)
998{
999 void *ptr;
1000
1001 if (WARN_ON(!dmabuf))
1002 return NULL;
1003
1004 if (!dmabuf->ops->vmap)
1005 return NULL;
1006
1007 mutex_lock(&dmabuf->lock);
1008 if (dmabuf->vmapping_counter) {
1009 dmabuf->vmapping_counter++;
1010 BUG_ON(!dmabuf->vmap_ptr);
1011 ptr = dmabuf->vmap_ptr;
1012 goto out_unlock;
1013 }
1014
1015 BUG_ON(dmabuf->vmap_ptr);
1016
1017 ptr = dmabuf->ops->vmap(dmabuf);
1018 if (WARN_ON_ONCE(IS_ERR(ptr)))
1019 ptr = NULL;
1020 if (!ptr)
1021 goto out_unlock;
1022
1023 dmabuf->vmap_ptr = ptr;
1024 dmabuf->vmapping_counter = 1;
1025
1026out_unlock:
1027 mutex_unlock(&dmabuf->lock);
1028 return ptr;
1029}
1030EXPORT_SYMBOL_GPL(dma_buf_vmap);
1031
1032/**
1033 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1034 * @dmabuf: [in] buffer to vunmap
1035 * @vaddr: [in] vmap to vunmap
1036 */
1037void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1038{
1039 if (WARN_ON(!dmabuf))
1040 return;
1041
1042 BUG_ON(!dmabuf->vmap_ptr);
1043 BUG_ON(dmabuf->vmapping_counter == 0);
1044 BUG_ON(dmabuf->vmap_ptr != vaddr);
1045
1046 mutex_lock(&dmabuf->lock);
1047 if (--dmabuf->vmapping_counter == 0) {
1048 if (dmabuf->ops->vunmap)
1049 dmabuf->ops->vunmap(dmabuf, vaddr);
1050 dmabuf->vmap_ptr = NULL;
1051 }
1052 mutex_unlock(&dmabuf->lock);
1053}
1054EXPORT_SYMBOL_GPL(dma_buf_vunmap);
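
/*
 * Illustrative sketch of dma_buf_vmap() with the kmap fallback recommended in
 * the documentation above (assumptions: "dmabuf" is held by the caller; CPU
 * access bracketing with begin/end_cpu_access is omitted for brevity):
 *
 *	void *vaddr;
 *
 *	vaddr = dma_buf_vmap(dmabuf);
 *	if (vaddr) {
 *		// ... access the whole buffer linearly ...
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	} else {
 *		unsigned long i;
 *
 *		for (i = 0; i < dmabuf->size >> PAGE_SHIFT; i++) {
 *			void *p = dma_buf_kmap(dmabuf, i);
 *
 *			// ... access one PAGE_SIZE chunk at a time ...
 *			dma_buf_kunmap(dmabuf, i, p);
 *		}
 *	}
 */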
1055
1056#ifdef CONFIG_DEBUG_FS
1057static int dma_buf_debug_show(struct seq_file *s, void *unused)
1058{
1059 int ret;
1060 struct dma_buf *buf_obj;
1061 struct dma_buf_attachment *attach_obj;
1062 struct reservation_object *robj;
1063 struct reservation_object_list *fobj;
1064 struct dma_fence *fence;
1065 unsigned seq;
1066 int count = 0, attach_count, shared_count, i;
1067 size_t size = 0;
1068
1069 ret = mutex_lock_interruptible(&db_list.lock);
1070
1071 if (ret)
1072 return ret;
1073
1074 seq_puts(s, "\nDma-buf Objects:\n");
1075 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\n",
1076 "size", "flags", "mode", "count");
1077
1078 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1079 ret = mutex_lock_interruptible(&buf_obj->lock);
1080
1081 if (ret) {
1082 seq_puts(s,
1083 "\tERROR locking buffer object: skipping\n");
1084 continue;
1085 }
1086
1087 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
1088 buf_obj->size,
1089 buf_obj->file->f_flags, buf_obj->file->f_mode,
1090 file_count(buf_obj->file),
1091 buf_obj->exp_name);
1092
1093 robj = buf_obj->resv;
1094 while (true) {
1095 seq = read_seqcount_begin(&robj->seq);
1096 rcu_read_lock();
1097 fobj = rcu_dereference(robj->fence);
1098 shared_count = fobj ? fobj->shared_count : 0;
1099 fence = rcu_dereference(robj->fence_excl);
1100 if (!read_seqcount_retry(&robj->seq, seq))
1101 break;
1102 rcu_read_unlock();
1103 }
1104
1105 if (fence)
1106 seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1107 fence->ops->get_driver_name(fence),
1108 fence->ops->get_timeline_name(fence),
1109 dma_fence_is_signaled(fence) ? "" : "un");
1110 for (i = 0; i < shared_count; i++) {
1111 fence = rcu_dereference(fobj->shared[i]);
1112 if (!dma_fence_get_rcu(fence))
1113 continue;
1114 seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1115 fence->ops->get_driver_name(fence),
1116 fence->ops->get_timeline_name(fence),
1117 dma_fence_is_signaled(fence) ? "" : "un");
1118 }
1119 rcu_read_unlock();
1120
1121 seq_puts(s, "\tAttached Devices:\n");
1122 attach_count = 0;
1123
1124 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1125 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1126 attach_count++;
1127 }
1128
1129 seq_printf(s, "Total %d devices attached\n\n",
1130 attach_count);
1131
1132 count++;
1133 size += buf_obj->size;
1134 mutex_unlock(&buf_obj->lock);
1135 }
1136
1137 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1138
1139 mutex_unlock(&db_list.lock);
1140 return 0;
1141}
1142
1143static int dma_buf_debug_open(struct inode *inode, struct file *file)
1144{
1145 return single_open(file, dma_buf_debug_show, NULL);
1146}
1147
1148static const struct file_operations dma_buf_debug_fops = {
1149 .open = dma_buf_debug_open,
1150 .read = seq_read,
1151 .llseek = seq_lseek,
1152 .release = single_release,
1153};
1154
1155static struct dentry *dma_buf_debugfs_dir;
1156
1157static int dma_buf_init_debugfs(void)
1158{
1159 struct dentry *d;
1160 int err = 0;
1161
1162 d = debugfs_create_dir("dma_buf", NULL);
1163 if (IS_ERR(d))
1164 return PTR_ERR(d);
1165
1166 dma_buf_debugfs_dir = d;
1167
1168 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1169 NULL, &dma_buf_debug_fops);
1170 if (IS_ERR(d)) {
1171 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1172 debugfs_remove_recursive(dma_buf_debugfs_dir);
1173 dma_buf_debugfs_dir = NULL;
1174 err = PTR_ERR(d);
1175 }
1176
1177 return err;
1178}
1179
1180static void dma_buf_uninit_debugfs(void)
1181{
1182 debugfs_remove_recursive(dma_buf_debugfs_dir);
1183}
1184#else
1185static inline int dma_buf_init_debugfs(void)
1186{
1187 return 0;
1188}
1189static inline void dma_buf_uninit_debugfs(void)
1190{
1191}
1192#endif
1193
1194static int __init dma_buf_init(void)
1195{
1196 mutex_init(&db_list.lock);
1197 INIT_LIST_HEAD(&db_list.head);
1198 dma_buf_init_debugfs();
1199 return 0;
1200}
1201subsys_initcall(dma_buf_init);
1202
1203static void __exit dma_buf_deinit(void)
1204{
1205 dma_buf_uninit_debugfs();
1206}
1207__exitcall(dma_buf_deinit);