1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 * Author: Sumit Semwal <sumit.semwal@ti.com>
7 *
8 * Many thanks to linaro-mm-sig list, and specially
9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 * refining of this idea.
12 */
13
14#include <linux/fs.h>
15#include <linux/slab.h>
16#include <linux/dma-buf.h>
17#include <linux/dma-fence.h>
18#include <linux/dma-fence-unwrap.h>
19#include <linux/anon_inodes.h>
20#include <linux/export.h>
21#include <linux/debugfs.h>
22#include <linux/module.h>
23#include <linux/seq_file.h>
24#include <linux/sync_file.h>
25#include <linux/poll.h>
26#include <linux/dma-resv.h>
27#include <linux/mm.h>
28#include <linux/mount.h>
29#include <linux/pseudo_fs.h>
30
31#include <uapi/linux/dma-buf.h>
32#include <uapi/linux/magic.h>
33
34#include "dma-buf-sysfs-stats.h"
35
36static inline int is_dma_buf_file(struct file *);
37
38struct dma_buf_list {
39 struct list_head head;
40 struct mutex lock;
41};
42
43static struct dma_buf_list db_list;
44
45static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
46{
47 struct dma_buf *dmabuf;
48 char name[DMA_BUF_NAME_LEN];
49 ssize_t ret = 0;
50
51 dmabuf = dentry->d_fsdata;
52 spin_lock(&dmabuf->name_lock);
53 if (dmabuf->name)
54 ret = strscpy(name, dmabuf->name, sizeof(name));
55 spin_unlock(&dmabuf->name_lock);
56
57 return dynamic_dname(buffer, buflen, "/%s:%s",
58 dentry->d_name.name, ret > 0 ? name : "");
59}
60
61static void dma_buf_release(struct dentry *dentry)
62{
63 struct dma_buf *dmabuf;
64
65 dmabuf = dentry->d_fsdata;
66 if (unlikely(!dmabuf))
67 return;
68
69 BUG_ON(dmabuf->vmapping_counter);
70
71 /*
72 * If you hit this BUG() it could mean:
73 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
74 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
75 */
76 BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
77
78 dma_buf_stats_teardown(dmabuf);
79 dmabuf->ops->release(dmabuf);
80
81 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
82 dma_resv_fini(dmabuf->resv);
83
84 WARN_ON(!list_empty(&dmabuf->attachments));
85 module_put(dmabuf->owner);
86 kfree(dmabuf->name);
87 kfree(dmabuf);
88}
89
90static int dma_buf_file_release(struct inode *inode, struct file *file)
91{
92 struct dma_buf *dmabuf;
93
94 if (!is_dma_buf_file(file))
95 return -EINVAL;
96
97 dmabuf = file->private_data;
98 if (dmabuf) {
99 mutex_lock(&db_list.lock);
100 list_del(&dmabuf->list_node);
101 mutex_unlock(&db_list.lock);
102 }
103
104 return 0;
105}
106
107static const struct dentry_operations dma_buf_dentry_ops = {
108 .d_dname = dmabuffs_dname,
109 .d_release = dma_buf_release,
110};
111
112static struct vfsmount *dma_buf_mnt;
113
114static int dma_buf_fs_init_context(struct fs_context *fc)
115{
116 struct pseudo_fs_context *ctx;
117
118 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
119 if (!ctx)
120 return -ENOMEM;
121 ctx->dops = &dma_buf_dentry_ops;
122 return 0;
123}
124
125static struct file_system_type dma_buf_fs_type = {
126 .name = "dmabuf",
127 .init_fs_context = dma_buf_fs_init_context,
128 .kill_sb = kill_anon_super,
129};
130
131static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
132{
133 struct dma_buf *dmabuf;
134
135 if (!is_dma_buf_file(file))
136 return -EINVAL;
137
138 dmabuf = file->private_data;
139
140 /* check if buffer supports mmap */
141 if (!dmabuf->ops->mmap)
142 return -EINVAL;
143
144 /* check for overflowing the buffer's size */
145 if (vma->vm_pgoff + vma_pages(vma) >
146 dmabuf->size >> PAGE_SHIFT)
147 return -EINVAL;
148
149 return dmabuf->ops->mmap(dmabuf, vma);
150}
151
152static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
153{
154 struct dma_buf *dmabuf;
155 loff_t base;
156
157 if (!is_dma_buf_file(file))
158 return -EBADF;
159
160 dmabuf = file->private_data;
161
162 /* only support discovering the end of the buffer,
163 but also allow SEEK_SET to maintain the idiomatic
164 SEEK_END(0), SEEK_CUR(0) pattern */
165 if (whence == SEEK_END)
166 base = dmabuf->size;
167 else if (whence == SEEK_SET)
168 base = 0;
169 else
170 return -EINVAL;
171
172 if (offset != 0)
173 return -EINVAL;
174
175 return base + offset;
176}
177
178/**
179 * DOC: implicit fence polling
180 *
181 * To support cross-device and cross-driver synchronization of buffer access
182 * implicit fences (represented internally in the kernel with &struct dma_fence)
183 * can be attached to a &dma_buf. The glue for that and a few related things are
184 * provided in the &dma_resv structure.
185 *
186 * Userspace can query the state of these implicitly tracked fences using poll()
187 * and related system calls:
188 *
189 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
190 * most recent write or exclusive fence.
191 *
192 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
193 * all attached fences, shared and exclusive ones.
194 *
195 * Note that this only signals the completion of the respective fences, i.e. the
196 * DMA transfers are complete. Cache flushing and any other necessary
197 * preparations before CPU access can begin still need to happen.
198 *
199 * As an alternative to poll(), the set of fences on a DMA buffer can be
200 * exported as a &sync_file using &dma_buf_sync_file_export.
201 */
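
/*
 * A hedged userspace sketch of the polling interface described above. It
 * assumes "dmabuf_fd" is a valid dma-buf file descriptor obtained from an
 * exporting driver; the helper name and timeout are illustrative only.
 *
 *	#include <poll.h>
 *	#include <stdbool.h>
 *
 *	// Wait until the most recent write/exclusive fence signals (POLLIN)
 *	// or until all attached fences signal (POLLOUT).
 *	static bool dmabuf_wait(int dmabuf_fd, short events, int timeout_ms)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = events,	// POLLIN and/or POLLOUT
 *		};
 *
 *		return poll(&pfd, 1, timeout_ms) == 1 &&
 *		       (pfd.revents & events);
 *	}
 *
 * Remember that a signaled fence only means the DMA is finished; cache
 * maintenance via DMA_BUF_IOCTL_SYNC may still be needed before CPU access.
 */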
202
203static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
204{
205 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
206 struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
207 unsigned long flags;
208
209 spin_lock_irqsave(&dcb->poll->lock, flags);
210 wake_up_locked_poll(dcb->poll, dcb->active);
211 dcb->active = 0;
212 spin_unlock_irqrestore(&dcb->poll->lock, flags);
213 dma_fence_put(fence);
214 /* Paired with get_file in dma_buf_poll */
215 fput(dmabuf->file);
216}
217
218static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
219 struct dma_buf_poll_cb_t *dcb)
220{
221 struct dma_resv_iter cursor;
222 struct dma_fence *fence;
223 int r;
224
225 dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
226 fence) {
227 dma_fence_get(fence);
228 r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
229 if (!r)
230 return true;
231 dma_fence_put(fence);
232 }
233
234 return false;
235}
236
237static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
238{
239 struct dma_buf *dmabuf;
240 struct dma_resv *resv;
241 __poll_t events;
242
243 dmabuf = file->private_data;
244 if (!dmabuf || !dmabuf->resv)
245 return EPOLLERR;
246
247 resv = dmabuf->resv;
248
249 poll_wait(file, &dmabuf->poll, poll);
250
251 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
252 if (!events)
253 return 0;
254
255 dma_resv_lock(resv, NULL);
256
257 if (events & EPOLLOUT) {
258 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
259
260 /* Check that callback isn't busy */
261 spin_lock_irq(&dmabuf->poll.lock);
262 if (dcb->active)
263 events &= ~EPOLLOUT;
264 else
265 dcb->active = EPOLLOUT;
266 spin_unlock_irq(&dmabuf->poll.lock);
267
268 if (events & EPOLLOUT) {
269 /* Paired with fput in dma_buf_poll_cb */
270 get_file(dmabuf->file);
271
272 if (!dma_buf_poll_add_cb(resv, true, dcb))
273 /* No callback queued, wake up any other waiters */
274 dma_buf_poll_cb(NULL, &dcb->cb);
275 else
276 events &= ~EPOLLOUT;
277 }
278 }
279
280 if (events & EPOLLIN) {
281 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
282
283 /* Check that callback isn't busy */
284 spin_lock_irq(&dmabuf->poll.lock);
285 if (dcb->active)
286 events &= ~EPOLLIN;
287 else
288 dcb->active = EPOLLIN;
289 spin_unlock_irq(&dmabuf->poll.lock);
290
291 if (events & EPOLLIN) {
292 /* Paired with fput in dma_buf_poll_cb */
293 get_file(dmabuf->file);
294
295 if (!dma_buf_poll_add_cb(resv, false, dcb))
296 /* No callback queued, wake up any other waiters */
297 dma_buf_poll_cb(NULL, &dcb->cb);
298 else
299 events &= ~EPOLLIN;
300 }
301 }
302
303 dma_resv_unlock(resv);
304 return events;
305}
306
307/**
308 * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
309 * It could support changing the name of the dma-buf if the same
310 * piece of memory is used for multiple purposes by different devices.
311 *
312 * @dmabuf: [in] dmabuf buffer that will be renamed.
313 * @buf: [in] A piece of userspace memory that contains the name of
314 * the dma-buf.
315 *
316 * Returns 0 on success, or a negative error code if copying the name
317 * from userspace fails.
318 *
319 */
320static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
321{
322 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
323
324 if (IS_ERR(name))
325 return PTR_ERR(name);
326
327 spin_lock(&dmabuf->name_lock);
328 kfree(dmabuf->name);
329 dmabuf->name = name;
330 spin_unlock(&dmabuf->name_lock);
331
332 return 0;
333}
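
/*
 * Userspace reaches this handler through the DMA_BUF_SET_NAME ioctl from
 * <linux/dma-buf.h>. A hedged sketch (the fd and the name are examples):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int name_dmabuf(int dmabuf_fd, const char *name)
 *	{
 *		// The kernel keeps at most DMA_BUF_NAME_LEN bytes of the
 *		// string; the name then shows up in fdinfo and debugfs.
 *		return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *	}
 */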
334
335#if IS_ENABLED(CONFIG_SYNC_FILE)
336static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
337 void __user *user_data)
338{
339 struct dma_buf_export_sync_file arg;
340 enum dma_resv_usage usage;
341 struct dma_fence *fence = NULL;
342 struct sync_file *sync_file;
343 int fd, ret;
344
345 if (copy_from_user(&arg, user_data, sizeof(arg)))
346 return -EFAULT;
347
348 if (arg.flags & ~DMA_BUF_SYNC_RW)
349 return -EINVAL;
350
351 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
352 return -EINVAL;
353
354 fd = get_unused_fd_flags(O_CLOEXEC);
355 if (fd < 0)
356 return fd;
357
358 usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
359 ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
360 if (ret)
361 goto err_put_fd;
362
363 if (!fence)
364 fence = dma_fence_get_stub();
365
366 sync_file = sync_file_create(fence);
367
368 dma_fence_put(fence);
369
370 if (!sync_file) {
371 ret = -ENOMEM;
372 goto err_put_fd;
373 }
374
375 arg.fd = fd;
376 if (copy_to_user(user_data, &arg, sizeof(arg))) {
377 ret = -EFAULT;
378 goto err_put_file;
379 }
380
381 fd_install(fd, sync_file->file);
382
383 return 0;
384
385err_put_file:
386 fput(sync_file->file);
387err_put_fd:
388 put_unused_fd(fd);
389 return ret;
390}
391
392static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
393 const void __user *user_data)
394{
395 struct dma_buf_import_sync_file arg;
396 struct dma_fence *fence, *f;
397 enum dma_resv_usage usage;
398 struct dma_fence_unwrap iter;
399 unsigned int num_fences;
400 int ret = 0;
401
402 if (copy_from_user(&arg, user_data, sizeof(arg)))
403 return -EFAULT;
404
405 if (arg.flags & ~DMA_BUF_SYNC_RW)
406 return -EINVAL;
407
408 if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
409 return -EINVAL;
410
411 fence = sync_file_get_fence(arg.fd);
412 if (!fence)
413 return -EINVAL;
414
415 usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
416 DMA_RESV_USAGE_READ;
417
418 num_fences = 0;
419 dma_fence_unwrap_for_each(f, &iter, fence)
420 ++num_fences;
421
422 if (num_fences > 0) {
423 dma_resv_lock(dmabuf->resv, NULL);
424
425 ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
426 if (!ret) {
427 dma_fence_unwrap_for_each(f, &iter, fence)
428 dma_resv_add_fence(dmabuf->resv, f, usage);
429 }
430
431 dma_resv_unlock(dmabuf->resv);
432 }
433
434 dma_fence_put(fence);
435
436 return ret;
437}
438#endif
439
440static long dma_buf_ioctl(struct file *file,
441 unsigned int cmd, unsigned long arg)
442{
443 struct dma_buf *dmabuf;
444 struct dma_buf_sync sync;
445 enum dma_data_direction direction;
446 int ret;
447
448 dmabuf = file->private_data;
449
450 switch (cmd) {
451 case DMA_BUF_IOCTL_SYNC:
452 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
453 return -EFAULT;
454
455 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
456 return -EINVAL;
457
458 switch (sync.flags & DMA_BUF_SYNC_RW) {
459 case DMA_BUF_SYNC_READ:
460 direction = DMA_FROM_DEVICE;
461 break;
462 case DMA_BUF_SYNC_WRITE:
463 direction = DMA_TO_DEVICE;
464 break;
465 case DMA_BUF_SYNC_RW:
466 direction = DMA_BIDIRECTIONAL;
467 break;
468 default:
469 return -EINVAL;
470 }
471
472 if (sync.flags & DMA_BUF_SYNC_END)
473 ret = dma_buf_end_cpu_access(dmabuf, direction);
474 else
475 ret = dma_buf_begin_cpu_access(dmabuf, direction);
476
477 return ret;
478
479 case DMA_BUF_SET_NAME_A:
480 case DMA_BUF_SET_NAME_B:
481 return dma_buf_set_name(dmabuf, (const char __user *)arg);
482
483#if IS_ENABLED(CONFIG_SYNC_FILE)
484 case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
485 return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
486 case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
487 return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
488#endif
489
490 default:
491 return -ENOTTY;
492 }
493}
494
495static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
496{
497 struct dma_buf *dmabuf = file->private_data;
498
499 seq_printf(m, "size:\t%zu\n", dmabuf->size);
500 /* Don't count the temporary reference taken inside procfs seq_show */
501 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
502 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
503 spin_lock(&dmabuf->name_lock);
504 if (dmabuf->name)
505 seq_printf(m, "name:\t%s\n", dmabuf->name);
506 spin_unlock(&dmabuf->name_lock);
507}
508
509static const struct file_operations dma_buf_fops = {
510 .release = dma_buf_file_release,
511 .mmap = dma_buf_mmap_internal,
512 .llseek = dma_buf_llseek,
513 .poll = dma_buf_poll,
514 .unlocked_ioctl = dma_buf_ioctl,
515 .compat_ioctl = compat_ptr_ioctl,
516 .show_fdinfo = dma_buf_show_fdinfo,
517};
518
519/*
520 * is_dma_buf_file - Check if struct file* is associated with dma_buf
521 */
522static inline int is_dma_buf_file(struct file *file)
523{
524 return file->f_op == &dma_buf_fops;
525}
526
527static struct file *dma_buf_getfile(size_t size, int flags)
528{
529 static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
530 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
531 struct file *file;
532
533 if (IS_ERR(inode))
534 return ERR_CAST(inode);
535
536 inode->i_size = size;
537 inode_set_bytes(inode, size);
538
539 /*
540 * The ->i_ino acquired from get_next_ino() is not unique thus
541 * not suitable for using it as dentry name by dmabuf stats.
542 * Override ->i_ino with the unique and dmabuffs specific
543 * value.
544 */
545 inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
546 flags &= O_ACCMODE | O_NONBLOCK;
547 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
548 flags, &dma_buf_fops);
549 if (IS_ERR(file))
550 goto err_alloc_file;
551
552 return file;
553
554err_alloc_file:
555 iput(inode);
556 return file;
557}
558
559/**
560 * DOC: dma buf device access
561 *
562 * For device DMA access to a shared DMA buffer the usual sequence of operations
563 * is fairly simple:
564 *
565 * 1. The exporter defines its exporter instance using
566 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
567 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
568 * as a file descriptor by calling dma_buf_fd().
569 *
570 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
571 * to share with: First the file descriptor is converted to a &dma_buf using
572 * dma_buf_get(). Then the buffer is attached to the device using
573 * dma_buf_attach().
574 *
575 * Up to this stage the exporter is still free to migrate or reallocate the
576 * backing storage.
577 *
578 * 3. Once the buffer is attached to all devices userspace can initiate DMA
579 * access to the shared buffer. In the kernel this is done by calling
580 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
581 *
582 * 4. Once a driver is done with a shared buffer it needs to call
583 * dma_buf_detach() (after cleaning up any mappings) and then release the
584 * reference acquired with dma_buf_get() by calling dma_buf_put().
585 *
586 * For the detailed semantics exporters are expected to implement see
587 * &dma_buf_ops.
588 */
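
/*
 * A hedged importer-side sketch of the sequence above. "my_dev", the helper
 * name and the error handling are illustrative; a real driver keeps the
 * attachment and sg_table around for as long as it needs the mapping.
 *
 *	struct sg_table *import_buffer(struct device *my_dev, int fd,
 *				       struct dma_buf **dmabuf_out,
 *				       struct dma_buf_attachment **attach_out)
 *	{
 *		struct dma_buf *dmabuf;
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *
 *		dmabuf = dma_buf_get(fd);		// step 2: fd -> dma_buf
 *		if (IS_ERR(dmabuf))
 *			return ERR_CAST(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, my_dev);
 *		if (IS_ERR(attach)) {
 *			dma_buf_put(dmabuf);
 *			return ERR_CAST(attach);
 *		}
 *
 *		// step 3: map for device DMA; the unlocked helper takes the
 *		// reservation lock internally.
 *		sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *		if (IS_ERR(sgt)) {
 *			dma_buf_detach(dmabuf, attach);
 *			dma_buf_put(dmabuf);
 *			return sgt;
 *		}
 *
 *		*dmabuf_out = dmabuf;
 *		*attach_out = attach;
 *		return sgt;
 *	}
 *
 * Teardown (step 4) is the mirror image: dma_buf_unmap_attachment_unlocked(),
 * dma_buf_detach() and finally dma_buf_put().
 */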
589
590/**
591 * dma_buf_export - Creates a new dma_buf, and associates an anon file
592 * with this buffer, so it can be exported.
593 * Also connects the allocator-specific data and ops to the buffer.
594 * Additionally, provides a name string for the exporter; useful in debugging.
595 *
596 * @exp_info: [in] holds all the export related information provided
597 * by the exporter. see &struct dma_buf_export_info
598 * for further details.
599 *
600 * Returns, on success, a newly created struct dma_buf object, which wraps the
601 * supplied private data and operations for struct dma_buf_ops. On missing ops
602 * or on an error allocating the struct dma_buf, it returns a negative error
603 * wrapped into a pointer.
604 *
605 * For most cases the easiest way to create @exp_info is through the
606 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
607 */
608struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
609{
610 struct dma_buf *dmabuf;
611 struct dma_resv *resv = exp_info->resv;
612 struct file *file;
613 size_t alloc_size = sizeof(struct dma_buf);
614 int ret;
615
616 if (WARN_ON(!exp_info->priv || !exp_info->ops
617 || !exp_info->ops->map_dma_buf
618 || !exp_info->ops->unmap_dma_buf
619 || !exp_info->ops->release))
620 return ERR_PTR(-EINVAL);
621
622 if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
623 (exp_info->ops->pin || exp_info->ops->unpin)))
624 return ERR_PTR(-EINVAL);
625
626 if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
627 return ERR_PTR(-EINVAL);
628
629 if (!try_module_get(exp_info->owner))
630 return ERR_PTR(-ENOENT);
631
632 file = dma_buf_getfile(exp_info->size, exp_info->flags);
633 if (IS_ERR(file)) {
634 ret = PTR_ERR(file);
635 goto err_module;
636 }
637
638 if (!exp_info->resv)
639 alloc_size += sizeof(struct dma_resv);
640 else
641 /* prevent &dma_buf[1] == dma_buf->resv */
642 alloc_size += 1;
643 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
644 if (!dmabuf) {
645 ret = -ENOMEM;
646 goto err_file;
647 }
648
649 dmabuf->priv = exp_info->priv;
650 dmabuf->ops = exp_info->ops;
651 dmabuf->size = exp_info->size;
652 dmabuf->exp_name = exp_info->exp_name;
653 dmabuf->owner = exp_info->owner;
654 spin_lock_init(&dmabuf->name_lock);
655 init_waitqueue_head(&dmabuf->poll);
656 dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
657 dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
658 INIT_LIST_HEAD(&dmabuf->attachments);
659
660 if (!resv) {
661 dmabuf->resv = (struct dma_resv *)&dmabuf[1];
662 dma_resv_init(dmabuf->resv);
663 } else {
664 dmabuf->resv = resv;
665 }
666
667 ret = dma_buf_stats_setup(dmabuf, file);
668 if (ret)
669 goto err_dmabuf;
670
671 file->private_data = dmabuf;
672 file->f_path.dentry->d_fsdata = dmabuf;
673 dmabuf->file = file;
674
675 mutex_lock(&db_list.lock);
676 list_add(&dmabuf->list_node, &db_list.head);
677 mutex_unlock(&db_list.lock);
678
679 return dmabuf;
680
681err_dmabuf:
682 if (!resv)
683 dma_resv_fini(dmabuf->resv);
684 kfree(dmabuf);
685err_file:
686 fput(file);
687err_module:
688 module_put(exp_info->owner);
689 return ERR_PTR(ret);
690}
691EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
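
/*
 * A hedged exporter-side sketch. "my_buffer_dma_buf_ops" and "my_buf" are
 * hypothetical; only the DEFINE_DMA_BUF_EXPORT_INFO()/dma_buf_export()/
 * dma_buf_fd() calls come from this file.
 *
 *	static int my_export(struct my_buffer *my_buf, size_t size)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *
 *		exp_info.ops = &my_buffer_dma_buf_ops;	// must provide map/unmap/release
 *		exp_info.size = size;
 *		exp_info.flags = O_RDWR | O_CLOEXEC;
 *		exp_info.priv = my_buf;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		// Hand the buffer to userspace as a file descriptor.
 *		return dma_buf_fd(dmabuf, O_CLOEXEC);
 *	}
 */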
692
693/**
694 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
695 * @dmabuf: [in] pointer to dma_buf for which fd is required.
696 * @flags: [in] flags to give to fd
697 *
698 * On success, returns an associated 'fd'. Else, returns error.
699 */
700int dma_buf_fd(struct dma_buf *dmabuf, int flags)
701{
702 int fd;
703
704 if (!dmabuf || !dmabuf->file)
705 return -EINVAL;
706
707 fd = get_unused_fd_flags(flags);
708 if (fd < 0)
709 return fd;
710
711 fd_install(fd, dmabuf->file);
712
713 return fd;
714}
715EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
716
717/**
718 * dma_buf_get - returns the struct dma_buf related to an fd
719 * @fd: [in] fd associated with the struct dma_buf to be returned
720 *
721 * On success, returns the struct dma_buf associated with an fd; uses
722 * file's refcounting done by fget to increase refcount. returns ERR_PTR
723 * otherwise.
724 */
725struct dma_buf *dma_buf_get(int fd)
726{
727 struct file *file;
728
729 file = fget(fd);
730
731 if (!file)
732 return ERR_PTR(-EBADF);
733
734 if (!is_dma_buf_file(file)) {
735 fput(file);
736 return ERR_PTR(-EINVAL);
737 }
738
739 return file->private_data;
740}
741EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
742
743/**
744 * dma_buf_put - decreases refcount of the buffer
745 * @dmabuf: [in] buffer to reduce refcount of
746 *
747 * Uses file's refcounting done implicitly by fput().
748 *
749 * If, as a result of this call, the refcount becomes 0, the 'release' file
750 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
751 * in turn, and frees the memory allocated for dmabuf when exported.
752 */
753void dma_buf_put(struct dma_buf *dmabuf)
754{
755 if (WARN_ON(!dmabuf || !dmabuf->file))
756 return;
757
758 fput(dmabuf->file);
759}
760EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
761
762static void mangle_sg_table(struct sg_table *sg_table)
763{
764#ifdef CONFIG_DMABUF_DEBUG
765 int i;
766 struct scatterlist *sg;
767
768 /* To catch abuse of the underlying struct page by importers mix
769 * up the bits, but take care to preserve the low SG_ bits to
770 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
771 * before passing the sgt back to the exporter. */
772 for_each_sgtable_sg(sg_table, sg, i)
773 sg->page_link ^= ~0xffUL;
774#endif
775
776}
777static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
778 enum dma_data_direction direction)
779{
780 struct sg_table *sg_table;
781 signed long ret;
782
783 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
784 if (IS_ERR_OR_NULL(sg_table))
785 return sg_table;
786
787 if (!dma_buf_attachment_is_dynamic(attach)) {
788 ret = dma_resv_wait_timeout(attach->dmabuf->resv,
789 DMA_RESV_USAGE_KERNEL, true,
790 MAX_SCHEDULE_TIMEOUT);
791 if (ret < 0) {
792 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
793 direction);
794 return ERR_PTR(ret);
795 }
796 }
797
798 mangle_sg_table(sg_table);
799 return sg_table;
800}
801
802/**
803 * DOC: locking convention
804 *
805 * In order to avoid deadlock situations between dma-buf exports and importers,
806 * all dma-buf API users must follow the common dma-buf locking convention.
807 *
808 * Convention for importers
809 *
810 * 1. Importers must hold the dma-buf reservation lock when calling these
811 * functions:
812 *
813 * - dma_buf_pin()
814 * - dma_buf_unpin()
815 * - dma_buf_map_attachment()
816 * - dma_buf_unmap_attachment()
817 * - dma_buf_vmap()
818 * - dma_buf_vunmap()
819 *
820 * 2. Importers must not hold the dma-buf reservation lock when calling these
821 * functions:
822 *
823 * - dma_buf_attach()
824 * - dma_buf_dynamic_attach()
825 * - dma_buf_detach()
826 * - dma_buf_export()
827 * - dma_buf_fd()
828 * - dma_buf_get()
829 * - dma_buf_put()
830 * - dma_buf_mmap()
831 * - dma_buf_begin_cpu_access()
832 * - dma_buf_end_cpu_access()
833 * - dma_buf_map_attachment_unlocked()
834 * - dma_buf_unmap_attachment_unlocked()
835 * - dma_buf_vmap_unlocked()
836 * - dma_buf_vunmap_unlocked()
837 *
838 * Convention for exporters
839 *
840 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
841 * reservation and exporter can take the lock:
842 *
843 * - &dma_buf_ops.attach()
844 * - &dma_buf_ops.detach()
845 * - &dma_buf_ops.release()
846 * - &dma_buf_ops.begin_cpu_access()
847 * - &dma_buf_ops.end_cpu_access()
848 * - &dma_buf_ops.mmap()
849 *
850 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
851 * reservation and exporter can't take the lock:
852 *
853 * - &dma_buf_ops.pin()
854 * - &dma_buf_ops.unpin()
855 * - &dma_buf_ops.map_dma_buf()
856 * - &dma_buf_ops.unmap_dma_buf()
857 * - &dma_buf_ops.vmap()
858 * - &dma_buf_ops.vunmap()
859 *
860 * 3. Exporters must hold the dma-buf reservation lock when calling these
861 * functions:
862 *
863 * - dma_buf_move_notify()
864 */
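
/*
 * A hedged illustration of the importer rules above: either take the
 * reservation lock yourself around the locked variants, or use the
 * *_unlocked() wrappers which do it for you. "attach" is assumed to be a
 * valid attachment.
 *
 *	// Variant 1: caller manages the lock (importer rule 1).
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	dma_resv_unlock(attach->dmabuf->resv);
 *
 *	// Variant 2: let the wrapper lock and unlock (importer rule 2).
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
 */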
865
866/**
867 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
868 * @dmabuf: [in] buffer to attach device to.
869 * @dev: [in] device to be attached.
870 * @importer_ops: [in] importer operations for the attachment
871 * @importer_priv: [in] importer private pointer for the attachment
872 *
873 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
874 * must be cleaned up by calling dma_buf_detach().
875 *
876 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
877 * functionality.
878 *
879 * Returns:
880 *
881 * A pointer to newly created &dma_buf_attachment on success, or a negative
882 * error code wrapped into a pointer on failure.
883 *
884 * Note that this can fail if the backing storage of @dmabuf is in a place not
885 * accessible to @dev, and cannot be moved to a more suitable place. This is
886 * indicated with the error code -EBUSY.
887 */
888struct dma_buf_attachment *
889dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
890 const struct dma_buf_attach_ops *importer_ops,
891 void *importer_priv)
892{
893 struct dma_buf_attachment *attach;
894 int ret;
895
896 if (WARN_ON(!dmabuf || !dev))
897 return ERR_PTR(-EINVAL);
898
899 if (WARN_ON(importer_ops && !importer_ops->move_notify))
900 return ERR_PTR(-EINVAL);
901
902 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
903 if (!attach)
904 return ERR_PTR(-ENOMEM);
905
906 attach->dev = dev;
907 attach->dmabuf = dmabuf;
908 if (importer_ops)
909 attach->peer2peer = importer_ops->allow_peer2peer;
910 attach->importer_ops = importer_ops;
911 attach->importer_priv = importer_priv;
912
913 if (dmabuf->ops->attach) {
914 ret = dmabuf->ops->attach(dmabuf, attach);
915 if (ret)
916 goto err_attach;
917 }
918 dma_resv_lock(dmabuf->resv, NULL);
919 list_add(&attach->node, &dmabuf->attachments);
920 dma_resv_unlock(dmabuf->resv);
921
922 /* When either the importer or the exporter can't handle dynamic
923 * mappings we cache the mapping here to avoid issues with the
924 * reservation object lock.
925 */
926 if (dma_buf_attachment_is_dynamic(attach) !=
927 dma_buf_is_dynamic(dmabuf)) {
928 struct sg_table *sgt;
929
930 dma_resv_lock(attach->dmabuf->resv, NULL);
931 if (dma_buf_is_dynamic(attach->dmabuf)) {
932 ret = dmabuf->ops->pin(attach);
933 if (ret)
934 goto err_unlock;
935 }
936
937 sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
938 if (!sgt)
939 sgt = ERR_PTR(-ENOMEM);
940 if (IS_ERR(sgt)) {
941 ret = PTR_ERR(sgt);
942 goto err_unpin;
943 }
944 dma_resv_unlock(attach->dmabuf->resv);
945 attach->sgt = sgt;
946 attach->dir = DMA_BIDIRECTIONAL;
947 }
948
949 return attach;
950
951err_attach:
952 kfree(attach);
953 return ERR_PTR(ret);
954
955err_unpin:
956 if (dma_buf_is_dynamic(attach->dmabuf))
957 dmabuf->ops->unpin(attach);
958
959err_unlock:
960 dma_resv_unlock(attach->dmabuf->resv);
961
962 dma_buf_detach(dmabuf, attach);
963 return ERR_PTR(ret);
964}
965EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
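
/*
 * A hedged sketch of a dynamic importer. The ops structure and the
 * "my_move_notify" callback are hypothetical; the point is that a dynamic
 * attachment supplies &dma_buf_attach_ops.move_notify and remaps after a
 * move.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// Called with the reservation lock held: invalidate any
 *		// cached mapping here and remap lazily on next use.
 *	}
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
 *					my_importer_priv);
 */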
966
967/**
968 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
969 * @dmabuf: [in] buffer to attach device to.
970 * @dev: [in] device to be attached.
971 *
972 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
973 * mapping.
974 */
975struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
976 struct device *dev)
977{
978 return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
979}
980EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
981
982static void __unmap_dma_buf(struct dma_buf_attachment *attach,
983 struct sg_table *sg_table,
984 enum dma_data_direction direction)
985{
986 /* uses XOR, hence this unmangles */
987 mangle_sg_table(sg_table);
988
989 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
990}
991
992/**
993 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
994 * @dmabuf: [in] buffer to detach from.
995 * @attach: [in] attachment to be detached; is free'd after this call.
996 *
997 * Clean up a device attachment obtained by calling dma_buf_attach().
998 *
999 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1000 */
1001void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1002{
1003 if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1004 return;
1005
1006 dma_resv_lock(dmabuf->resv, NULL);
1007
1008 if (attach->sgt) {
1009
1010 __unmap_dma_buf(attach, attach->sgt, attach->dir);
1011
1012 if (dma_buf_is_dynamic(attach->dmabuf))
1013 dmabuf->ops->unpin(attach);
1014 }
1015 list_del(&attach->node);
1016
1017 dma_resv_unlock(dmabuf->resv);
1018
1019 if (dmabuf->ops->detach)
1020 dmabuf->ops->detach(dmabuf, attach);
1021
1022 kfree(attach);
1023}
1024EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1025
1026/**
1027 * dma_buf_pin - Lock down the DMA-buf
1028 * @attach: [in] attachment which should be pinned
1029 *
1030 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1031 * call this, and only for limited use cases like scanout and not for temporary
1032 * pin operations. It is not permitted to allow userspace to pin arbitrary
1033 * amounts of buffers through this interface.
1034 *
1035 * Buffers must be unpinned by calling dma_buf_unpin().
1036 *
1037 * Returns:
1038 * 0 on success, negative error code on failure.
1039 */
1040int dma_buf_pin(struct dma_buf_attachment *attach)
1041{
1042 struct dma_buf *dmabuf = attach->dmabuf;
1043 int ret = 0;
1044
1045 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1046
1047 dma_resv_assert_held(dmabuf->resv);
1048
1049 if (dmabuf->ops->pin)
1050 ret = dmabuf->ops->pin(attach);
1051
1052 return ret;
1053}
1054EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1055
1056/**
1057 * dma_buf_unpin - Unpin a DMA-buf
1058 * @attach: [in] attachment which should be unpinned
1059 *
1060 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1061 * any mapping of @attach again and inform the importer through
1062 * &dma_buf_attach_ops.move_notify.
1063 */
1064void dma_buf_unpin(struct dma_buf_attachment *attach)
1065{
1066 struct dma_buf *dmabuf = attach->dmabuf;
1067
1068 WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1069
1070 dma_resv_assert_held(dmabuf->resv);
1071
1072 if (dmabuf->ops->unpin)
1073 dmabuf->ops->unpin(attach);
1074}
1075EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1076
1077/**
1078 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1079 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1080 * dma_buf_ops.
1081 * @attach: [in] attachment whose scatterlist is to be returned
1082 * @direction: [in] direction of DMA transfer
1083 *
1084 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1085 * on error. May return -EINTR if it is interrupted by a signal.
1086 *
1087 * On success, the DMA addresses and lengths in the returned scatterlist are
1088 * PAGE_SIZE aligned.
1089 *
1090 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1091 * the underlying backing storage is pinned for as long as a mapping exists,
1092 * therefore users/importers should not hold onto a mapping for undue amounts of
1093 * time.
1094 *
1095 * Important: Dynamic importers must wait for the exclusive fence of the struct
1096 * dma_resv attached to the DMA-BUF first.
1097 */
1098struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1099 enum dma_data_direction direction)
1100{
1101 struct sg_table *sg_table;
1102 int r;
1103
1104 might_sleep();
1105
1106 if (WARN_ON(!attach || !attach->dmabuf))
1107 return ERR_PTR(-EINVAL);
1108
1109 dma_resv_assert_held(attach->dmabuf->resv);
1110
1111 if (attach->sgt) {
1112 /*
1113 * Two mappings with different directions for the same
1114 * attachment are not allowed.
1115 */
1116 if (attach->dir != direction &&
1117 attach->dir != DMA_BIDIRECTIONAL)
1118 return ERR_PTR(-EBUSY);
1119
1120 return attach->sgt;
1121 }
1122
1123 if (dma_buf_is_dynamic(attach->dmabuf)) {
1124 if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1125 r = attach->dmabuf->ops->pin(attach);
1126 if (r)
1127 return ERR_PTR(r);
1128 }
1129 }
1130
1131 sg_table = __map_dma_buf(attach, direction);
1132 if (!sg_table)
1133 sg_table = ERR_PTR(-ENOMEM);
1134
1135 if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1136 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1137 attach->dmabuf->ops->unpin(attach);
1138
1139 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1140 attach->sgt = sg_table;
1141 attach->dir = direction;
1142 }
1143
1144#ifdef CONFIG_DMA_API_DEBUG
1145 if (!IS_ERR(sg_table)) {
1146 struct scatterlist *sg;
1147 u64 addr;
1148 int len;
1149 int i;
1150
1151 for_each_sgtable_dma_sg(sg_table, sg, i) {
1152 addr = sg_dma_address(sg);
1153 len = sg_dma_len(sg);
1154 if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1155 pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1156 __func__, addr, len);
1157 }
1158 }
1159 }
1160#endif /* CONFIG_DMA_API_DEBUG */
1161 return sg_table;
1162}
1163EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
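
/*
 * For dynamic importers, a hedged sketch of the wait mentioned in the note
 * above: after mapping, wait for the kernel fences (the same usage level the
 * static path in __map_dma_buf() waits for) before starting DMA.
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	ret = dma_resv_wait_timeout(attach->dmabuf->resv,
 *				    DMA_RESV_USAGE_KERNEL, true,
 *				    MAX_SCHEDULE_TIMEOUT);
 */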
1164
1165/**
1166 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1167 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1168 * dma_buf_ops.
1169 * @attach: [in] attachment whose scatterlist is to be returned
1170 * @direction: [in] direction of DMA transfer
1171 *
1172 * Unlocked variant of dma_buf_map_attachment().
1173 */
1174struct sg_table *
1175dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1176 enum dma_data_direction direction)
1177{
1178 struct sg_table *sg_table;
1179
1180 might_sleep();
1181
1182 if (WARN_ON(!attach || !attach->dmabuf))
1183 return ERR_PTR(-EINVAL);
1184
1185 dma_resv_lock(attach->dmabuf->resv, NULL);
1186 sg_table = dma_buf_map_attachment(attach, direction);
1187 dma_resv_unlock(attach->dmabuf->resv);
1188
1189 return sg_table;
1190}
1191EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1192
1193/**
1194 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1195 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1196 * dma_buf_ops.
1197 * @attach: [in] attachment to unmap buffer from
1198 * @sg_table: [in] scatterlist info of the buffer to unmap
1199 * @direction: [in] direction of DMA transfer
1200 *
1201 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1202 */
1203void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1204 struct sg_table *sg_table,
1205 enum dma_data_direction direction)
1206{
1207 might_sleep();
1208
1209 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1210 return;
1211
1212 dma_resv_assert_held(attach->dmabuf->resv);
1213
1214 if (attach->sgt == sg_table)
1215 return;
1216
1217 __unmap_dma_buf(attach, sg_table, direction);
1218
1219 if (dma_buf_is_dynamic(attach->dmabuf) &&
1220 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1221 dma_buf_unpin(attach);
1222}
1223EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1224
1225/**
1226 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1227 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1228 * dma_buf_ops.
1229 * @attach: [in] attachment to unmap buffer from
1230 * @sg_table: [in] scatterlist info of the buffer to unmap
1231 * @direction: [in] direction of DMA transfer
1232 *
1233 * Unlocked variant of dma_buf_unmap_attachment().
1234 */
1235void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1236 struct sg_table *sg_table,
1237 enum dma_data_direction direction)
1238{
1239 might_sleep();
1240
1241 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1242 return;
1243
1244 dma_resv_lock(attach->dmabuf->resv, NULL);
1245 dma_buf_unmap_attachment(attach, sg_table, direction);
1246 dma_resv_unlock(attach->dmabuf->resv);
1247}
1248EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1249
1250/**
1251 * dma_buf_move_notify - notify attachments that DMA-buf is moving
1252 *
1253 * @dmabuf: [in] buffer which is moving
1254 *
1255 * Informs all attachments that they need to destroy and recreate all their
1256 * mappings.
1257 */
1258void dma_buf_move_notify(struct dma_buf *dmabuf)
1259{
1260 struct dma_buf_attachment *attach;
1261
1262 dma_resv_assert_held(dmabuf->resv);
1263
1264 list_for_each_entry(attach, &dmabuf->attachments, node)
1265 if (attach->importer_ops)
1266 attach->importer_ops->move_notify(attach);
1267}
1268EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1269
1270/**
1271 * DOC: cpu access
1272 *
1273 * There are multiple reasons for supporting CPU access to a dma buffer object:
1274 *
1275 * - Fallback operations in the kernel, for example when a device is connected
1276 * over USB and the kernel needs to shuffle the data around first before
1277 * sending it away. Cache coherency is handled by bracketing any transactions
1278 * with calls to dma_buf_begin_cpu_access() and
1279 * dma_buf_end_cpu_access().
1280 *
1281 * Since most kernel-internal dma-buf accesses need the entire buffer, a
1282 * vmap interface is introduced. Note that on very old 32-bit architectures
1283 * vmalloc space might be limited and result in vmap calls failing.
1284 *
1285 * Interfaces::
1286 *
1287 * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1288 * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1289 *
1290 * The vmap call can fail if there is no vmap support in the exporter, or if
1291 * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1292 * count for all vmap access and calls down into the exporter's vmap function
1293 * only when no vmapping exists, and only unmaps it once. Protection against
1294 * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
1295 *
1296 * - For full compatibility on the importer side with existing userspace
1297 * interfaces, which might already support mmap'ing buffers. This is needed in
1298 * many processing pipelines (e.g. feeding a software rendered image into a
1299 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
1300 * framework already supported this and for DMA buffer file descriptors to
1301 * replace ION buffers mmap support was needed.
1302 *
1303 * There are no special interfaces; userspace simply calls mmap on the dma-buf
1304 * fd. But like for CPU access there's a need to bracket the actual access,
1305 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1306 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1307 * be restarted.
1308 *
1309 * Some systems might need some sort of cache coherency management e.g. when
1310 * CPU and GPU domains are being accessed through dma-buf at the same time.
1311 * To circumvent this problem there are begin/end coherency markers, that
1312 * forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1313 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
1314 * sequence would be used like the following:
1315 *
1316 * - mmap dma-buf fd
1317 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1318 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1319 * want (with the new data being consumed by say the GPU or the scanout
1320 * device)
1321 * - munmap once you don't need the buffer any more
1322 *
1323 * For correctness and optimal performance, it is always required to use
1324 * SYNC_START and SYNC_END before and after, respectively, when accessing the
1325 * mapped address. Userspace cannot rely on coherent access, even when there
1326 * are systems where it just works without calling these ioctls.
1327 *
1328 * - And as a CPU fallback in userspace processing pipelines.
1329 *
1330 * Similar to the motivation for kernel cpu access it is again important that
1331 * the userspace code of a given importing subsystem can use the same
1332 * interfaces with an imported dma-buf buffer object as with a native buffer
1333 * object. This is especially important for drm where the userspace part of
1334 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
1335 * use a different way to mmap a buffer would be rather invasive.
1336 *
1337 * The assumption in the current dma-buf interfaces is that redirecting the
1338 * initial mmap is all that's needed. A survey of some of the existing
1339 * subsystems shows that no driver seems to do any nefarious thing like
1340 * syncing up with outstanding asynchronous processing on the device or
1341 * allocating special resources at fault time. So hopefully this is good
1342 * enough, since adding interfaces to intercept pagefaults and allow pte
1343 * shootdowns would increase the complexity quite a bit.
1344 *
1345 * Interface::
1346 *
1347 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1348 * unsigned long);
1349 *
1350 * If the importing subsystem simply provides a special-purpose mmap call to
1351 * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1352 * equally achieve that for a dma-buf object.
1353 */
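
/*
 * A hedged userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC bracketing
 * described above; "dmabuf_fd" and "size" are assumed to be valid.
 *
 *	#include <sys/mman.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <linux/dma-buf.h>
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	struct dma_buf_sync sync = {
 *		.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE,
 *	};
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// restart on -EINTR/-EAGAIN
 *
 *	memset(ptr, 0, size);				// CPU access
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(ptr, size);
 */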
1354
1355static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1356 enum dma_data_direction direction)
1357{
1358 bool write = (direction == DMA_BIDIRECTIONAL ||
1359 direction == DMA_TO_DEVICE);
1360 struct dma_resv *resv = dmabuf->resv;
1361 long ret;
1362
1363 /* Wait on any implicit rendering fences */
1364 ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1365 true, MAX_SCHEDULE_TIMEOUT);
1366 if (ret < 0)
1367 return ret;
1368
1369 return 0;
1370}
1371
1372/**
1373 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1374 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1375 * preparations. Coherency is only guaranteed in the specified range for the
1376 * specified access direction.
1377 * @dmabuf: [in] buffer to prepare cpu access for.
1378 * @direction: [in] direction of access.
1379 *
1380 * After the cpu access is complete the caller should call
1381 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1382 * it guaranteed to be coherent with other DMA access.
1383 *
1384 * This function will also wait for any DMA transactions tracked through
1385 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1386 * synchronization this function will only ensure cache coherency, callers must
1387 * ensure synchronization with such DMA transactions on their own.
1388 *
1389 * Can return negative error values, returns 0 on success.
1390 */
1391int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1392 enum dma_data_direction direction)
1393{
1394 int ret = 0;
1395
1396 if (WARN_ON(!dmabuf))
1397 return -EINVAL;
1398
1399 might_lock(&dmabuf->resv->lock.base);
1400
1401 if (dmabuf->ops->begin_cpu_access)
1402 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1403
1404 /* Ensure that all fences are waited upon - but we first allow
1405 * the native handler the chance to do so more efficiently if it
1406 * chooses. A double invocation here will be a reasonably cheap no-op.
1407 */
1408 if (ret == 0)
1409 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1410
1411 return ret;
1412}
1413EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1414
1415/**
1416 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1417 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1418 * actions. Coherency is only guaranteed in the specified range for the
1419 * specified access direction.
1420 * @dmabuf: [in] buffer to complete cpu access for.
1421 * @direction: [in] direction of access.
1422 *
1423 * This terminates CPU access started with dma_buf_begin_cpu_access().
1424 *
1425 * Can return negative error values, returns 0 on success.
1426 */
1427int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1428 enum dma_data_direction direction)
1429{
1430 int ret = 0;
1431
1432 WARN_ON(!dmabuf);
1433
1434 might_lock(&dmabuf->resv->lock.base);
1435
1436 if (dmabuf->ops->end_cpu_access)
1437 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1438
1439 return ret;
1440}
1441EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1442
1443
1444/**
1445 * dma_buf_mmap - Set up a userspace mmap with the given vma
1446 * @dmabuf: [in] buffer that should back the vma
1447 * @vma: [in] vma for the mmap
1448 * @pgoff: [in] offset in pages where this mmap should start within the
1449 * dma-buf buffer.
1450 *
1451 * This function adjusts the passed in vma so that it points at the file of the
1452 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1453 * checking on the size of the vma. Then it calls the exporter's mmap function to
1454 * set up the mapping.
1455 *
1456 * Can return negative error values, returns 0 on success.
1457 */
1458int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1459 unsigned long pgoff)
1460{
1461 if (WARN_ON(!dmabuf || !vma))
1462 return -EINVAL;
1463
1464 /* check if buffer supports mmap */
1465 if (!dmabuf->ops->mmap)
1466 return -EINVAL;
1467
1468 /* check for offset overflow */
1469 if (pgoff + vma_pages(vma) < pgoff)
1470 return -EOVERFLOW;
1471
1472 /* check for overflowing the buffer's size */
1473 if (pgoff + vma_pages(vma) >
1474 dmabuf->size >> PAGE_SHIFT)
1475 return -EINVAL;
1476
1477 /* readjust the vma */
1478 vma_set_file(vma, dmabuf->file);
1479 vma->vm_pgoff = pgoff;
1480
1481 return dmabuf->ops->mmap(dmabuf, vma);
1482}
1483EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
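
/*
 * A hedged sketch of an importing driver forwarding its own mmap handler to
 * dma_buf_mmap(); "struct my_obj" and the private_data lookup are
 * hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		// Map the whole imported buffer starting at page 0.
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */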
1484
1485/**
1486 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1487 * address space. Same restrictions as for vmap and friends apply.
1488 * @dmabuf: [in] buffer to vmap
1489 * @map: [out] returns the vmap pointer
1490 *
1491 * This call may fail due to lack of virtual mapping address space.
1492 * These calls are optional in drivers. The intended use for them
1493 * is for mapping objects linear in kernel space for high use objects.
1494 *
1495 * To ensure coherency users must call dma_buf_begin_cpu_access() and
1496 * dma_buf_end_cpu_access() around any cpu access performed through this
1497 * mapping.
1498 *
1499 * Returns 0 on success, or a negative errno code otherwise.
1500 */
1501int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1502{
1503 struct iosys_map ptr;
1504 int ret;
1505
1506 iosys_map_clear(map);
1507
1508 if (WARN_ON(!dmabuf))
1509 return -EINVAL;
1510
1511 dma_resv_assert_held(dmabuf->resv);
1512
1513 if (!dmabuf->ops->vmap)
1514 return -EINVAL;
1515
1516 if (dmabuf->vmapping_counter) {
1517 dmabuf->vmapping_counter++;
1518 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1519 *map = dmabuf->vmap_ptr;
1520 return 0;
1521 }
1522
1523 BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1524
1525 ret = dmabuf->ops->vmap(dmabuf, &ptr);
1526 if (WARN_ON_ONCE(ret))
1527 return ret;
1528
1529 dmabuf->vmap_ptr = ptr;
1530 dmabuf->vmapping_counter = 1;
1531
1532 *map = dmabuf->vmap_ptr;
1533
1534 return 0;
1535}
1536EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1537
1538/**
1539 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1540 * address space. Same restrictions as for vmap and friends apply.
1541 * @dmabuf: [in] buffer to vmap
1542 * @map: [out] returns the vmap pointer
1543 *
1544 * Unlocked version of dma_buf_vmap()
1545 *
1546 * Returns 0 on success, or a negative errno code otherwise.
1547 */
1548int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1549{
1550 int ret;
1551
1552 iosys_map_clear(map);
1553
1554 if (WARN_ON(!dmabuf))
1555 return -EINVAL;
1556
1557 dma_resv_lock(dmabuf->resv, NULL);
1558 ret = dma_buf_vmap(dmabuf, map);
1559 dma_resv_unlock(dmabuf->resv);
1560
1561 return ret;
1562}
1563EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
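
/*
 * A hedged kernel-side sketch combining dma_buf_vmap_unlocked() with the CPU
 * access bracketing documented above; "dmabuf", "data" and "len" are assumed
 * valid and len is assumed to fit the buffer.
 *
 *	struct iosys_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (!ret) {
 *		memcpy(data, map.vaddr, len);	// assumes a non-I/O mapping
 *		ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	}
 *
 *	dma_buf_vunmap_unlocked(dmabuf, &map);
 *	return ret;
 */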
1564
1565/**
1566 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1567 * @dmabuf: [in] buffer to vunmap
1568 * @map: [in] vmap pointer to vunmap
1569 */
1570void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1571{
1572 if (WARN_ON(!dmabuf))
1573 return;
1574
1575 dma_resv_assert_held(dmabuf->resv);
1576
1577 BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1578 BUG_ON(dmabuf->vmapping_counter == 0);
1579 BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1580
1581 if (--dmabuf->vmapping_counter == 0) {
1582 if (dmabuf->ops->vunmap)
1583 dmabuf->ops->vunmap(dmabuf, map);
1584 iosys_map_clear(&dmabuf->vmap_ptr);
1585 }
1586}
1587EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1588
1589/**
1590 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1591 * @dmabuf: [in] buffer to vunmap
1592 * @map: [in] vmap pointer to vunmap
1593 */
1594void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1595{
1596 if (WARN_ON(!dmabuf))
1597 return;
1598
1599 dma_resv_lock(dmabuf->resv, NULL);
1600 dma_buf_vunmap(dmabuf, map);
1601 dma_resv_unlock(dmabuf->resv);
1602}
1603EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
1604
1605#ifdef CONFIG_DEBUG_FS
1606static int dma_buf_debug_show(struct seq_file *s, void *unused)
1607{
1608 struct dma_buf *buf_obj;
1609 struct dma_buf_attachment *attach_obj;
1610 int count = 0, attach_count;
1611 size_t size = 0;
1612 int ret;
1613
1614 ret = mutex_lock_interruptible(&db_list.lock);
1615
1616 if (ret)
1617 return ret;
1618
1619 seq_puts(s, "\nDma-buf Objects:\n");
1620 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1621 "size", "flags", "mode", "count", "ino");
1622
1623 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1624
1625 ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1626 if (ret)
1627 goto error_unlock;
1628
1629
1630 spin_lock(&buf_obj->name_lock);
1631 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1632 buf_obj->size,
1633 buf_obj->file->f_flags, buf_obj->file->f_mode,
1634 file_count(buf_obj->file),
1635 buf_obj->exp_name,
1636 file_inode(buf_obj->file)->i_ino,
1637 buf_obj->name ?: "<none>");
1638 spin_unlock(&buf_obj->name_lock);
1639
1640 dma_resv_describe(buf_obj->resv, s);
1641
1642 seq_puts(s, "\tAttached Devices:\n");
1643 attach_count = 0;
1644
1645 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1646 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1647 attach_count++;
1648 }
1649 dma_resv_unlock(buf_obj->resv);
1650
1651 seq_printf(s, "Total %d devices attached\n\n",
1652 attach_count);
1653
1654 count++;
1655 size += buf_obj->size;
1656 }
1657
1658 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1659
1660 mutex_unlock(&db_list.lock);
1661 return 0;
1662
1663error_unlock:
1664 mutex_unlock(&db_list.lock);
1665 return ret;
1666}
1667
1668DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1669
1670static struct dentry *dma_buf_debugfs_dir;
1671
1672static int dma_buf_init_debugfs(void)
1673{
1674 struct dentry *d;
1675 int err = 0;
1676
1677 d = debugfs_create_dir("dma_buf", NULL);
1678 if (IS_ERR(d))
1679 return PTR_ERR(d);
1680
1681 dma_buf_debugfs_dir = d;
1682
1683 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1684 NULL, &dma_buf_debug_fops);
1685 if (IS_ERR(d)) {
1686 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1687 debugfs_remove_recursive(dma_buf_debugfs_dir);
1688 dma_buf_debugfs_dir = NULL;
1689 err = PTR_ERR(d);
1690 }
1691
1692 return err;
1693}
1694
1695static void dma_buf_uninit_debugfs(void)
1696{
1697 debugfs_remove_recursive(dma_buf_debugfs_dir);
1698}
1699#else
1700static inline int dma_buf_init_debugfs(void)
1701{
1702 return 0;
1703}
1704static inline void dma_buf_uninit_debugfs(void)
1705{
1706}
1707#endif
1708
1709static int __init dma_buf_init(void)
1710{
1711 int ret;
1712
1713 ret = dma_buf_init_sysfs_statistics();
1714 if (ret)
1715 return ret;
1716
1717 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1718 if (IS_ERR(dma_buf_mnt))
1719 return PTR_ERR(dma_buf_mnt);
1720
1721 mutex_init(&db_list.lock);
1722 INIT_LIST_HEAD(&db_list.head);
1723 dma_buf_init_debugfs();
1724 return 0;
1725}
1726subsys_initcall(dma_buf_init);
1727
1728static void __exit dma_buf_deinit(void)
1729{
1730 dma_buf_uninit_debugfs();
1731 kern_unmount(dma_buf_mnt);
1732 dma_buf_uninit_sysfs_statistics();
1733}
1734__exitcall(dma_buf_deinit);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Framework for buffer objects that can be shared across devices/subsystems.
4 *
5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
6 * Author: Sumit Semwal <sumit.semwal@ti.com>
7 *
8 * Many thanks to linaro-mm-sig list, and specially
9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
11 * refining of this idea.
12 */
13
14#include <linux/fs.h>
15#include <linux/slab.h>
16#include <linux/dma-buf.h>
17#include <linux/dma-fence.h>
18#include <linux/anon_inodes.h>
19#include <linux/export.h>
20#include <linux/debugfs.h>
21#include <linux/module.h>
22#include <linux/seq_file.h>
23#include <linux/poll.h>
24#include <linux/dma-resv.h>
25#include <linux/mm.h>
26#include <linux/mount.h>
27#include <linux/pseudo_fs.h>
28
29#include <uapi/linux/dma-buf.h>
30#include <uapi/linux/magic.h>
31
32static inline int is_dma_buf_file(struct file *);
33
34struct dma_buf_list {
35 struct list_head head;
36 struct mutex lock;
37};
38
39static struct dma_buf_list db_list;
40
41static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
42{
43 struct dma_buf *dmabuf;
44 char name[DMA_BUF_NAME_LEN];
45 size_t ret = 0;
46
47 dmabuf = dentry->d_fsdata;
48 mutex_lock(&dmabuf->lock);
49 if (dmabuf->name)
50 ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
51 mutex_unlock(&dmabuf->lock);
52
53 return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
54 dentry->d_name.name, ret > 0 ? name : "");
55}
56
57static const struct dentry_operations dma_buf_dentry_ops = {
58 .d_dname = dmabuffs_dname,
59};
60
61static struct vfsmount *dma_buf_mnt;
62
63static int dma_buf_fs_init_context(struct fs_context *fc)
64{
65 struct pseudo_fs_context *ctx;
66
67 ctx = init_pseudo(fc, DMA_BUF_MAGIC);
68 if (!ctx)
69 return -ENOMEM;
70 ctx->dops = &dma_buf_dentry_ops;
71 return 0;
72}
73
74static struct file_system_type dma_buf_fs_type = {
75 .name = "dmabuf",
76 .init_fs_context = dma_buf_fs_init_context,
77 .kill_sb = kill_anon_super,
78};
79
80static int dma_buf_release(struct inode *inode, struct file *file)
81{
82 struct dma_buf *dmabuf;
83
84 if (!is_dma_buf_file(file))
85 return -EINVAL;
86
87 dmabuf = file->private_data;
88
89 BUG_ON(dmabuf->vmapping_counter);
90
91 /*
92 * Any fences that a dma-buf poll can wait on should be signaled
93 * before releasing dma-buf. This is the responsibility of each
94 * driver that uses the reservation objects.
95 *
96 * If you hit this BUG() it means someone dropped their ref to the
97 * dma-buf while still having pending operation to the buffer.
98 */
99 BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
100
101 dmabuf->ops->release(dmabuf);
102
103 mutex_lock(&db_list.lock);
104 list_del(&dmabuf->list_node);
105 mutex_unlock(&db_list.lock);
106
107 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
108 dma_resv_fini(dmabuf->resv);
109
110 module_put(dmabuf->owner);
111 kfree(dmabuf);
112 return 0;
113}
114
115static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
116{
117 struct dma_buf *dmabuf;
118
119 if (!is_dma_buf_file(file))
120 return -EINVAL;
121
122 dmabuf = file->private_data;
123
124 /* check if buffer supports mmap */
125 if (!dmabuf->ops->mmap)
126 return -EINVAL;
127
128 /* check for overflowing the buffer's size */
129 if (vma->vm_pgoff + vma_pages(vma) >
130 dmabuf->size >> PAGE_SHIFT)
131 return -EINVAL;
132
133 return dmabuf->ops->mmap(dmabuf, vma);
134}
135
136static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
137{
138 struct dma_buf *dmabuf;
139 loff_t base;
140
141 if (!is_dma_buf_file(file))
142 return -EBADF;
143
144 dmabuf = file->private_data;
145
146 /* only support discovering the end of the buffer,
147 but also allow SEEK_SET to maintain the idiomatic
148 SEEK_END(0), SEEK_CUR(0) pattern */
149 if (whence == SEEK_END)
150 base = dmabuf->size;
151 else if (whence == SEEK_SET)
152 base = 0;
153 else
154 return -EINVAL;
155
156 if (offset != 0)
157 return -EINVAL;
158
159 return base + offset;
160}
161
162/**
163 * DOC: fence polling
164 *
165 * To support cross-device and cross-driver synchronization of buffer access
166 * implicit fences (represented internally in the kernel with &struct fence) can
167 * be attached to a &dma_buf. The glue for that and a few related things are
168 * provided in the &dma_resv structure.
169 *
170 * Userspace can query the state of these implicitly tracked fences using poll()
171 * and related system calls:
172 *
173 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
174 * most recent write or exclusive fence.
175 *
176 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
177 * all attached fences, shared and exclusive ones.
178 *
179 * Note that this only signals the completion of the respective fences, i.e. the
180 * DMA transfers are complete. Cache flushing and any other necessary
181 * preparations before CPU access can begin still need to happen.
182 */
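
/*
 * Illustrative userspace sketch (not part of this file): waiting for the
 * implicit fences described above with poll(). POLLIN waits for the most
 * recent write/exclusive fence, POLLOUT for all attached fences. "dmabuf_fd"
 * is a hypothetical dma-buf file descriptor.
 *
 *	#include <poll.h>
 *
 *	static int wait_for_write_fence(int dmabuf_fd, int timeout_ms)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = POLLIN,	// POLLOUT would wait for all fences
 *		};
 *
 *		return poll(&pfd, 1, timeout_ms);	// > 0: fence(s) signalled
 *	}
 */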
183
184static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
185{
186 struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
187 unsigned long flags;
188
189 spin_lock_irqsave(&dcb->poll->lock, flags);
190 wake_up_locked_poll(dcb->poll, dcb->active);
191 dcb->active = 0;
192 spin_unlock_irqrestore(&dcb->poll->lock, flags);
193}
194
195static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
196{
197 struct dma_buf *dmabuf;
198 struct dma_resv *resv;
199 struct dma_resv_list *fobj;
200 struct dma_fence *fence_excl;
201 __poll_t events;
202 unsigned shared_count, seq;
203
204 dmabuf = file->private_data;
205 if (!dmabuf || !dmabuf->resv)
206 return EPOLLERR;
207
208 resv = dmabuf->resv;
209
210 poll_wait(file, &dmabuf->poll, poll);
211
212 events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
213 if (!events)
214 return 0;
215
216retry:
217 seq = read_seqcount_begin(&resv->seq);
218 rcu_read_lock();
219
220 fobj = rcu_dereference(resv->fence);
221 if (fobj)
222 shared_count = fobj->shared_count;
223 else
224 shared_count = 0;
225 fence_excl = rcu_dereference(resv->fence_excl);
226 if (read_seqcount_retry(&resv->seq, seq)) {
227 rcu_read_unlock();
228 goto retry;
229 }
230
231 if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
232 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
233 __poll_t pevents = EPOLLIN;
234
235 if (shared_count == 0)
236 pevents |= EPOLLOUT;
237
238 spin_lock_irq(&dmabuf->poll.lock);
239 if (dcb->active) {
240 dcb->active |= pevents;
241 events &= ~pevents;
242 } else
243 dcb->active = pevents;
244 spin_unlock_irq(&dmabuf->poll.lock);
245
246 if (events & pevents) {
247 if (!dma_fence_get_rcu(fence_excl)) {
248 /* force a recheck */
249 events &= ~pevents;
250 dma_buf_poll_cb(NULL, &dcb->cb);
251 } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
252 dma_buf_poll_cb)) {
253 events &= ~pevents;
254 dma_fence_put(fence_excl);
255 } else {
256 /*
257 * No callback queued, wake up any additional
258 * waiters.
259 */
260 dma_fence_put(fence_excl);
261 dma_buf_poll_cb(NULL, &dcb->cb);
262 }
263 }
264 }
265
266 if ((events & EPOLLOUT) && shared_count > 0) {
267 struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
268 int i;
269
270 /* Only queue a new callback if no event has fired yet */
271 spin_lock_irq(&dmabuf->poll.lock);
272 if (dcb->active)
273 events &= ~EPOLLOUT;
274 else
275 dcb->active = EPOLLOUT;
276 spin_unlock_irq(&dmabuf->poll.lock);
277
278 if (!(events & EPOLLOUT))
279 goto out;
280
281 for (i = 0; i < shared_count; ++i) {
282 struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
283
284 if (!dma_fence_get_rcu(fence)) {
285 /*
286 * fence refcount dropped to zero, this means
287 * that fobj has been freed
288 *
289 * call dma_buf_poll_cb and force a recheck!
290 */
291 events &= ~EPOLLOUT;
292 dma_buf_poll_cb(NULL, &dcb->cb);
293 break;
294 }
295 if (!dma_fence_add_callback(fence, &dcb->cb,
296 dma_buf_poll_cb)) {
297 dma_fence_put(fence);
298 events &= ~EPOLLOUT;
299 break;
300 }
301 dma_fence_put(fence);
302 }
303
304 /* No callback queued, wake up any additional waiters. */
305 if (i == shared_count)
306 dma_buf_poll_cb(NULL, &dcb->cb);
307 }
308
309out:
310 rcu_read_unlock();
311 return events;
312}
313
314/**
315 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
316 * The name of the dma-buf buffer can only be set when the dma-buf is not
317 * attached to any devices. It could theoretically support changing the
318 * name of the dma-buf if the same piece of memory is used for multiple
319 * purposes between different devices.
320 *
321 * @dmabuf: [in] dmabuf buffer that will be renamed.
322 * @buf: [in] A piece of userspace memory that contains the name of
323 * the dma-buf.
324 *
325 * Returns 0 on success. If the dma-buf buffer is already attached to
326 * devices, return -EBUSY.
327 *
328 */
329static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
330{
331 char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
332 long ret = 0;
333
334 if (IS_ERR(name))
335 return PTR_ERR(name);
336
337 mutex_lock(&dmabuf->lock);
338 if (!list_empty(&dmabuf->attachments)) {
339 ret = -EBUSY;
340 kfree(name);
341 goto out_unlock;
342 }
343 kfree(dmabuf->name);
344 dmabuf->name = name;
345
346out_unlock:
347 mutex_unlock(&dmabuf->lock);
348 return ret;
349}
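
/*
 * Illustrative userspace sketch (not part of this file): naming a dma-buf
 * through the DMA_BUF_SET_NAME ioctl handled below. This must happen before
 * the buffer is attached to any device, otherwise -EBUSY is returned.
 * "dmabuf_fd" is a hypothetical dma-buf file descriptor.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int name_dmabuf(int dmabuf_fd, const char *name)
 *	{
 *		// the kernel copies at most DMA_BUF_NAME_LEN bytes
 *		return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *	}
 */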
350
351static long dma_buf_ioctl(struct file *file,
352 unsigned int cmd, unsigned long arg)
353{
354 struct dma_buf *dmabuf;
355 struct dma_buf_sync sync;
356 enum dma_data_direction direction;
357 int ret;
358
359 dmabuf = file->private_data;
360
361 switch (cmd) {
362 case DMA_BUF_IOCTL_SYNC:
363 if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
364 return -EFAULT;
365
366 if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
367 return -EINVAL;
368
369 switch (sync.flags & DMA_BUF_SYNC_RW) {
370 case DMA_BUF_SYNC_READ:
371 direction = DMA_FROM_DEVICE;
372 break;
373 case DMA_BUF_SYNC_WRITE:
374 direction = DMA_TO_DEVICE;
375 break;
376 case DMA_BUF_SYNC_RW:
377 direction = DMA_BIDIRECTIONAL;
378 break;
379 default:
380 return -EINVAL;
381 }
382
383 if (sync.flags & DMA_BUF_SYNC_END)
384 ret = dma_buf_end_cpu_access(dmabuf, direction);
385 else
386 ret = dma_buf_begin_cpu_access(dmabuf, direction);
387
388 return ret;
389
390 case DMA_BUF_SET_NAME:
391 return dma_buf_set_name(dmabuf, (const char __user *)arg);
392
393 default:
394 return -ENOTTY;
395 }
396}
397
398static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
399{
400 struct dma_buf *dmabuf = file->private_data;
401
402 seq_printf(m, "size:\t%zu\n", dmabuf->size);
403 /* Don't count the temporary reference taken inside procfs seq_show */
404 seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
405 seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
406 mutex_lock(&dmabuf->lock);
407 if (dmabuf->name)
408 seq_printf(m, "name:\t%s\n", dmabuf->name);
409 mutex_unlock(&dmabuf->lock);
410}
411
412static const struct file_operations dma_buf_fops = {
413 .release = dma_buf_release,
414 .mmap = dma_buf_mmap_internal,
415 .llseek = dma_buf_llseek,
416 .poll = dma_buf_poll,
417 .unlocked_ioctl = dma_buf_ioctl,
418#ifdef CONFIG_COMPAT
419 .compat_ioctl = dma_buf_ioctl,
420#endif
421 .show_fdinfo = dma_buf_show_fdinfo,
422};
423
424/*
425 * is_dma_buf_file - Check if struct file* is associated with dma_buf
426 */
427static inline int is_dma_buf_file(struct file *file)
428{
429 return file->f_op == &dma_buf_fops;
430}
431
432static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
433{
434 struct file *file;
435 struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
436
437 if (IS_ERR(inode))
438 return ERR_CAST(inode);
439
440 inode->i_size = dmabuf->size;
441 inode_set_bytes(inode, dmabuf->size);
442
443 file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
444 flags, &dma_buf_fops);
445 if (IS_ERR(file))
446 goto err_alloc_file;
447 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
448 file->private_data = dmabuf;
449 file->f_path.dentry->d_fsdata = dmabuf;
450
451 return file;
452
453err_alloc_file:
454 iput(inode);
455 return file;
456}
457
458/**
459 * DOC: dma buf device access
460 *
461 * For device DMA access to a shared DMA buffer the usual sequence of operations
462 * is fairly simple:
463 *
464 * 1. The exporter defines its exporter instance using
465 * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
466 * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
467 * as a file descriptor by calling dma_buf_fd().
468 *
469 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
470 * to share with: first the file descriptor is converted to a &dma_buf using
471 * dma_buf_get(). Then the buffer is attached to the device using
472 * dma_buf_attach().
473 *
474 * Up to this stage the exporter is still free to migrate or reallocate the
475 * backing storage.
476 *
477 * 3. Once the buffer is attached to all devices userspace can initiate DMA
478 * access to the shared buffer. In the kernel this is done by calling
479 * dma_buf_map_attachment() and dma_buf_unmap_attachment().
480 *
481 * 4. Once a driver is done with a shared buffer it needs to call
482 * dma_buf_detach() (after cleaning up any mappings) and then release the
483 * reference acquired with dma_buf_get by calling dma_buf_put().
484 *
485 * For the detailed semantics exporters are expected to implement see
486 * &dma_buf_ops.
487 */
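
/*
 * Illustrative kernel-side sketch of the sequence described above
 * (assumptions: "dev" is the importing struct device and "fd" the file
 * descriptor received from userspace).
 *
 *	struct dma_buf *buf;
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	buf = dma_buf_get(fd);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *
 *	attach = dma_buf_attach(buf, dev);
 *	if (IS_ERR(attach)) {
 *		dma_buf_put(buf);
 *		return PTR_ERR(attach);
 *	}
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt)) {
 *		dma_buf_detach(buf, attach);
 *		dma_buf_put(buf);
 *		return PTR_ERR(sgt);
 *	}
 *
 *	// ... program the device with the sg_table entries, run the DMA ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, attach);
 *	dma_buf_put(buf);
 */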
488
489/**
490 * dma_buf_export - Creates a new dma_buf, and associates an anon file
491 * with this buffer, so it can be exported.
492 * Also connects the allocator-specific data and ops to the buffer.
493 * Additionally, provides a name string for the exporter; useful in debugging.
494 *
495 * @exp_info: [in] holds all the export related information provided
496 * by the exporter. see &struct dma_buf_export_info
497 * for further details.
498 *
499 * Returns, on success, a newly created struct dma_buf object, which wraps the
500 * supplied private data and operations for dma_buf_ops. On missing ops, or an
501 * error in allocating the struct dma_buf, an ERR_PTR with a negative error is returned.
502 *
503 * For most cases the easiest way to create @exp_info is through the
504 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
505 */
506struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
507{
508 struct dma_buf *dmabuf;
509 struct dma_resv *resv = exp_info->resv;
510 struct file *file;
511 size_t alloc_size = sizeof(struct dma_buf);
512 int ret;
513
514 if (!exp_info->resv)
515 alloc_size += sizeof(struct dma_resv);
516 else
517 /* prevent &dma_buf[1] == dma_buf->resv */
518 alloc_size += 1;
519
520 if (WARN_ON(!exp_info->priv
521 || !exp_info->ops
522 || !exp_info->ops->map_dma_buf
523 || !exp_info->ops->unmap_dma_buf
524 || !exp_info->ops->release)) {
525 return ERR_PTR(-EINVAL);
526 }
527
528 if (!try_module_get(exp_info->owner))
529 return ERR_PTR(-ENOENT);
530
531 dmabuf = kzalloc(alloc_size, GFP_KERNEL);
532 if (!dmabuf) {
533 ret = -ENOMEM;
534 goto err_module;
535 }
536
537 dmabuf->priv = exp_info->priv;
538 dmabuf->ops = exp_info->ops;
539 dmabuf->size = exp_info->size;
540 dmabuf->exp_name = exp_info->exp_name;
541 dmabuf->owner = exp_info->owner;
542 init_waitqueue_head(&dmabuf->poll);
543 dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
544 dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
545
546 if (!resv) {
547 resv = (struct dma_resv *)&dmabuf[1];
548 dma_resv_init(resv);
549 }
550 dmabuf->resv = resv;
551
552 file = dma_buf_getfile(dmabuf, exp_info->flags);
553 if (IS_ERR(file)) {
554 ret = PTR_ERR(file);
555 goto err_dmabuf;
556 }
557
558 file->f_mode |= FMODE_LSEEK;
559 dmabuf->file = file;
560
561 mutex_init(&dmabuf->lock);
562 INIT_LIST_HEAD(&dmabuf->attachments);
563
564 mutex_lock(&db_list.lock);
565 list_add(&dmabuf->list_node, &db_list.head);
566 mutex_unlock(&db_list.lock);
567
568 return dmabuf;
569
570err_dmabuf:
571 kfree(dmabuf);
572err_module:
573 module_put(exp_info->owner);
574 return ERR_PTR(ret);
575}
576EXPORT_SYMBOL_GPL(dma_buf_export);
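
/*
 * Illustrative exporter-side sketch (assumptions: "my_dmabuf_ops" and
 * "my_obj" are a driver's &dma_buf_ops and private buffer object): wrapping
 * a private object with dma_buf_export() and handing it to userspace as a
 * file descriptor via dma_buf_fd().
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_obj->size;
 *	exp_info.flags = O_RDWR;
 *	exp_info.priv = my_obj;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *	if (fd < 0)
 *		dma_buf_put(dmabuf);	// drops the only reference on failure
 *	return fd;
 */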
577
578/**
579 * dma_buf_fd - returns a file descriptor for the given dma_buf
580 * @dmabuf: [in] pointer to dma_buf for which fd is required.
581 * @flags: [in] flags to give to fd
582 *
583 * On success, returns an associated 'fd'. Else, returns error.
584 */
585int dma_buf_fd(struct dma_buf *dmabuf, int flags)
586{
587 int fd;
588
589 if (!dmabuf || !dmabuf->file)
590 return -EINVAL;
591
592 fd = get_unused_fd_flags(flags);
593 if (fd < 0)
594 return fd;
595
596 fd_install(fd, dmabuf->file);
597
598 return fd;
599}
600EXPORT_SYMBOL_GPL(dma_buf_fd);
601
602/**
603 * dma_buf_get - returns the dma_buf structure related to an fd
604 * @fd: [in] fd associated with the dma_buf to be returned
605 *
606 * On success, returns the dma_buf structure associated with an fd; uses
607 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
608 * otherwise.
609 */
610struct dma_buf *dma_buf_get(int fd)
611{
612 struct file *file;
613
614 file = fget(fd);
615
616 if (!file)
617 return ERR_PTR(-EBADF);
618
619 if (!is_dma_buf_file(file)) {
620 fput(file);
621 return ERR_PTR(-EINVAL);
622 }
623
624 return file->private_data;
625}
626EXPORT_SYMBOL_GPL(dma_buf_get);
627
628/**
629 * dma_buf_put - decreases refcount of the buffer
630 * @dmabuf: [in] buffer to reduce refcount of
631 *
632 * Uses file's refcounting done implicitly by fput().
633 *
634 * If, as a result of this call, the refcount becomes 0, the 'release' file
635 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
636 * in turn, and frees the memory allocated for dmabuf when exported.
637 */
638void dma_buf_put(struct dma_buf *dmabuf)
639{
640 if (WARN_ON(!dmabuf || !dmabuf->file))
641 return;
642
643 fput(dmabuf->file);
644}
645EXPORT_SYMBOL_GPL(dma_buf_put);
646
647/**
648 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
649 * calls attach() of dma_buf_ops to allow device-specific attach functionality
650 * @dmabuf: [in] buffer to attach device to.
651 * @dev: [in] device to be attached.
652 *
653 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
654 * must be cleaned up by calling dma_buf_detach().
655 *
656 * Returns:
657 *
658 * A pointer to newly created &dma_buf_attachment on success, or a negative
659 * error code wrapped into a pointer on failure.
660 *
661 * Note that this can fail if the backing storage of @dmabuf is in a place not
662 * accessible to @dev, and cannot be moved to a more suitable place. This is
663 * indicated with the error code -EBUSY.
664 */
665struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
666 struct device *dev)
667{
668 struct dma_buf_attachment *attach;
669 int ret;
670
671 if (WARN_ON(!dmabuf || !dev))
672 return ERR_PTR(-EINVAL);
673
674 attach = kzalloc(sizeof(*attach), GFP_KERNEL);
675 if (!attach)
676 return ERR_PTR(-ENOMEM);
677
678 attach->dev = dev;
679 attach->dmabuf = dmabuf;
680
681 mutex_lock(&dmabuf->lock);
682
683 if (dmabuf->ops->attach) {
684 ret = dmabuf->ops->attach(dmabuf, attach);
685 if (ret)
686 goto err_attach;
687 }
688 list_add(&attach->node, &dmabuf->attachments);
689
690 mutex_unlock(&dmabuf->lock);
691
692 return attach;
693
694err_attach:
695 kfree(attach);
696 mutex_unlock(&dmabuf->lock);
697 return ERR_PTR(ret);
698}
699EXPORT_SYMBOL_GPL(dma_buf_attach);
700
701/**
702 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
703 * optionally calls detach() of dma_buf_ops for device-specific detach
704 * @dmabuf: [in] buffer to detach from.
705 * @attach: [in] attachment to be detached; is free'd after this call.
706 *
707 * Clean up a device attachment obtained by calling dma_buf_attach().
708 */
709void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
710{
711 if (WARN_ON(!dmabuf || !attach))
712 return;
713
714 if (attach->sgt)
715 dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
716
717 mutex_lock(&dmabuf->lock);
718 list_del(&attach->node);
719 if (dmabuf->ops->detach)
720 dmabuf->ops->detach(dmabuf, attach);
721
722 mutex_unlock(&dmabuf->lock);
723 kfree(attach);
724}
725EXPORT_SYMBOL_GPL(dma_buf_detach);
726
727/**
728 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
729 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
730 * dma_buf_ops.
731 * @attach: [in] attachment whose scatterlist is to be returned
732 * @direction: [in] direction of DMA transfer
733 *
734 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
735 * on error. May return -EINTR if it is interrupted by a signal.
736 *
737 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
738 * the underlying backing storage is pinned for as long as a mapping exists,
739 * therefore users/importers should not hold onto a mapping for undue amounts of
740 * time.
741 */
742struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
743 enum dma_data_direction direction)
744{
745 struct sg_table *sg_table;
746
747 might_sleep();
748
749 if (WARN_ON(!attach || !attach->dmabuf))
750 return ERR_PTR(-EINVAL);
751
752 if (attach->sgt) {
753 /*
754 * Two mappings with different directions for the same
755 * attachment are not allowed.
756 */
757 if (attach->dir != direction &&
758 attach->dir != DMA_BIDIRECTIONAL)
759 return ERR_PTR(-EBUSY);
760
761 return attach->sgt;
762 }
763
764 sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
765 if (!sg_table)
766 sg_table = ERR_PTR(-ENOMEM);
767
768 if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
769 attach->sgt = sg_table;
770 attach->dir = direction;
771 }
772
773 return sg_table;
774}
775EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
776
777/**
778 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
779 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
780 * dma_buf_ops.
781 * @attach: [in] attachment to unmap buffer from
782 * @sg_table: [in] scatterlist info of the buffer to unmap
783 * @direction: [in] direction of DMA transfer
784 *
785 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
786 */
787void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
788 struct sg_table *sg_table,
789 enum dma_data_direction direction)
790{
791 might_sleep();
792
793 if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
794 return;
795
796 if (attach->sgt == sg_table)
797 return;
798
799 attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
800}
801EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
802
803/**
804 * DOC: cpu access
805 *
806 * There are multiple reasons for supporting CPU access to a dma buffer object:
807 *
808 * - Fallback operations in the kernel, for example when a device is connected
809 * over USB and the kernel needs to shuffle the data around first before
810 * sending it away. Cache coherency is handled by bracketing any transactions
811 * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
812 * around the access.
813 *
814 * To support dma_buf objects residing in highmem cpu access is page-based
815 * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
816 * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
817 * returns a pointer in kernel virtual address space. Afterwards the chunk
818 * needs to be unmapped again. There is no limit on how often a given chunk
819 * can be mapped and unmapped, i.e. the importer does not need to call
820 * begin_cpu_access again before mapping the same chunk again.
821 *
822 * Interfaces::
823 * void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
824 * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
825 *
826 * Implementing the functions is optional for exporters and for importers all
827 * the restrictions of using kmap apply.
828 *
829 * dma_buf kmap calls outside of the range specified in begin_cpu_access are
830 * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
831 * the partial chunks at the beginning and end but may return stale or bogus
832 * data outside of the range (in these partial chunks).
833 *
834 * For some cases the overhead of kmap can be too high, so a vmap interface
835 * is introduced. This interface should be used very carefully, as vmalloc
836 * space is a limited resource on many architectures.
837 *
838 * Interfaces::
839 * void \*dma_buf_vmap(struct dma_buf \*dmabuf)
840 * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
841 *
842 * The vmap call can fail if there is no vmap support in the exporter, or if
843 * it runs out of vmalloc space. Fallback to kmap should be implemented. Note
844 * that the dma-buf layer keeps a reference count for all vmap access and
845 * calls down into the exporter's vmap function only when no vmapping exists,
846 * and only unmaps it once. Protection against concurrent vmap/vunmap calls is
847 * provided by taking the dma_buf->lock mutex.
848 *
849 * - For full compatibility on the importer side with existing userspace
850 * interfaces, which might already support mmap'ing buffers. This is needed in
851 * many processing pipelines (e.g. feeding a software rendered image into a
852 * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
853 * framework already supported this, so mmap support was needed for DMA buffer
854 * file descriptors to replace ION buffers.
855 *
856 * There are no special interfaces; userspace simply calls mmap on the dma-buf
857 * fd. But as with CPU access there's a need to bracket the actual access,
858 * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
859 * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
860 * be restarted.
861 *
862 * Some systems might need some sort of cache coherency management e.g. when
863 * CPU and GPU domains are being accessed through dma-buf at the same time.
864 * To circumvent this problem there are begin/end coherency markers, that
865 * forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
866 * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
867 * sequence would be used as follows (a concrete userspace sketch follows this comment block):
868 *
869 * - mmap dma-buf fd
870 * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
871 * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
872 * want (with the new data being consumed by say the GPU or the scanout
873 * device)
874 * - munmap once you don't need the buffer any more
875 *
876 * For correctness and optimal performance, it is always required to use
877 * SYNC_START and SYNC_END before and after, respectively, when accessing the
878 * mapped address. Userspace cannot rely on coherent access, even when there
879 * are systems where it just works without calling these ioctls.
880 *
881 * - And as a CPU fallback in userspace processing pipelines.
882 *
883 * Similar to the motivation for kernel cpu access it is again important that
884 * the userspace code of a given importing subsystem can use the same
885 * interfaces with an imported dma-buf buffer object as with a native buffer
886 * object. This is especially important for drm where the userspace part of
887 * contemporary OpenGL, X, and other drivers is huge, and reworking them to
888 * use a different way to mmap a buffer would be rather invasive.
889 *
890 * The assumption in the current dma-buf interfaces is that redirecting the
891 * initial mmap is all that's needed. A survey of some of the existing
892 * subsystems shows that no driver seems to do any nefarious thing like
893 * syncing up with outstanding asynchronous processing on the device or
894 * allocating special resources at fault time. So hopefully this is good
895 * enough, since adding interfaces to intercept pagefaults and allow pte
896 * shootdowns would increase the complexity quite a bit.
897 *
898 * Interface::
899 * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
900 * unsigned long);
901 *
902 * If the importing subsystem simply provides a special-purpose mmap call to
903 * set up a mapping in userspace, calling do_mmap with dma_buf->file will
904 * equally achieve that for a dma-buf object.
905 */
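
/*
 * Illustrative userspace sketch (not part of this file): the mmap plus
 * DMA_BUF_IOCTL_SYNC bracketing described in the comment above. "dmabuf_fd"
 * and "size" are hypothetical; SYNC can fail with -EAGAIN/-EINTR and must
 * then be restarted, which is omitted here for brevity.
 *
 *	#include <sys/mman.h>
 *	#include <sys/ioctl.h>
 *	#include <string.h>
 *	#include <linux/dma-buf.h>
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *	struct dma_buf_sync sync = { 0 };
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	memset(map, 0, size);	// CPU access to the shared buffer
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *	munmap(map, size);
 */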
906
907static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
908 enum dma_data_direction direction)
909{
910 bool write = (direction == DMA_BIDIRECTIONAL ||
911 direction == DMA_TO_DEVICE);
912 struct dma_resv *resv = dmabuf->resv;
913 long ret;
914
915 /* Wait on any implicit rendering fences */
916 ret = dma_resv_wait_timeout_rcu(resv, write, true,
917 MAX_SCHEDULE_TIMEOUT);
918 if (ret < 0)
919 return ret;
920
921 return 0;
922}
923
924/**
925 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
926 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
927 * preparations. Coherency is only guaranteed in the specified range for the
928 * specified access direction.
929 * @dmabuf: [in] buffer to prepare cpu access for.
930 * @direction: [in] direction of the CPU access (read and/or write).
931 *
932 * After the cpu access is complete the caller should call
933 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
934 * it guaranteed to be coherent with other DMA access.
935 *
936 * Can return negative error values, returns 0 on success.
937 */
938int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
939 enum dma_data_direction direction)
940{
941 int ret = 0;
942
943 if (WARN_ON(!dmabuf))
944 return -EINVAL;
945
946 if (dmabuf->ops->begin_cpu_access)
947 ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
948
949 /* Ensure that all fences are waited upon - but we first allow
950 * the native handler the chance to do so more efficiently if it
951 * chooses. A double invocation here will be a reasonably cheap no-op.
952 */
953 if (ret == 0)
954 ret = __dma_buf_begin_cpu_access(dmabuf, direction);
955
956 return ret;
957}
958EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
959
960/**
961 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
962 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
963 * actions. Coherency is only guaranteed in the specified range for the
964 * specified access direction.
965 * @dmabuf: [in] buffer to complete cpu access for.
966 * @direction: [in] direction of the CPU access (read and/or write).
967 *
968 * This terminates CPU access started with dma_buf_begin_cpu_access().
969 *
970 * Can return negative error values, returns 0 on success.
971 */
972int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
973 enum dma_data_direction direction)
974{
975 int ret = 0;
976
977 WARN_ON(!dmabuf);
978
979 if (dmabuf->ops->end_cpu_access)
980 ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
981
982 return ret;
983}
984EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
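
/*
 * Illustrative kernel-side sketch (assumption: "dmabuf" is an attached
 * &dma_buf the importer may touch with the CPU): bracketing a kmap access
 * with the begin/end calls above so it is coherent with device DMA.
 *
 *	void *vaddr;
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);	// map the first PAGE_SIZE chunk
 *	if (vaddr) {
 *		// ... read the page through vaddr ...
 *		dma_buf_kunmap(dmabuf, 0, vaddr);
 *	}
 *
 *	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */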
985
986/**
987 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
988 * same restrictions as for kmap and friends apply.
989 * @dmabuf: [in] buffer to map page from.
990 * @page_num: [in] page in PAGE_SIZE units to map.
991 *
992 * This call must always succeed, any necessary preparations that might fail
993 * need to be done in begin_cpu_access.
994 */
995void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
996{
997 WARN_ON(!dmabuf);
998
999 if (!dmabuf->ops->map)
1000 return NULL;
1001 return dmabuf->ops->map(dmabuf, page_num);
1002}
1003EXPORT_SYMBOL_GPL(dma_buf_kmap);
1004
1005/**
1006 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
1007 * @dmabuf: [in] buffer to unmap page from.
1008 * @page_num: [in] page in PAGE_SIZE units to unmap.
1009 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
1010 *
1011 * This call must always succeed.
1012 */
1013void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
1014 void *vaddr)
1015{
1016 WARN_ON(!dmabuf);
1017
1018 if (dmabuf->ops->unmap)
1019 dmabuf->ops->unmap(dmabuf, page_num, vaddr);
1020}
1021EXPORT_SYMBOL_GPL(dma_buf_kunmap);
1022
1023
1024/**
1025 * dma_buf_mmap - Set up a userspace mmap with the given vma
1026 * @dmabuf: [in] buffer that should back the vma
1027 * @vma: [in] vma for the mmap
1028 * @pgoff: [in] offset in pages where this mmap should start within the
1029 * dma-buf buffer.
1030 *
1031 * This function adjusts the passed in vma so that it points at the file of the
1032 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1033 * checking on the size of the vma. Then it calls the exporter's mmap function to
1034 * set up the mapping.
1035 *
1036 * Can return negative error values, returns 0 on success.
1037 */
1038int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1039 unsigned long pgoff)
1040{
1041 struct file *oldfile;
1042 int ret;
1043
1044 if (WARN_ON(!dmabuf || !vma))
1045 return -EINVAL;
1046
1047 /* check if buffer supports mmap */
1048 if (!dmabuf->ops->mmap)
1049 return -EINVAL;
1050
1051 /* check for offset overflow */
1052 if (pgoff + vma_pages(vma) < pgoff)
1053 return -EOVERFLOW;
1054
1055 /* check for overflowing the buffer's size */
1056 if (pgoff + vma_pages(vma) >
1057 dmabuf->size >> PAGE_SHIFT)
1058 return -EINVAL;
1059
1060 /* readjust the vma */
1061 get_file(dmabuf->file);
1062 oldfile = vma->vm_file;
1063 vma->vm_file = dmabuf->file;
1064 vma->vm_pgoff = pgoff;
1065
1066 ret = dmabuf->ops->mmap(dmabuf, vma);
1067 if (ret) {
1068 /* restore old parameters on failure */
1069 vma->vm_file = oldfile;
1070 fput(dmabuf->file);
1071 } else {
1072 if (oldfile)
1073 fput(oldfile);
1074 }
1075 return ret;
1076
1077}
1078EXPORT_SYMBOL_GPL(dma_buf_mmap);
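
/*
 * Illustrative importer sketch (assumptions: "my_obj" is a driver object
 * wrapping an imported &dma_buf, and this is the driver's &file_operations
 * .mmap handler): redirecting a userspace mmap to the dma-buf as described
 * in the kernel-doc above.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		// hand the whole vma to the exporter, starting at page 0
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */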
1079
1080/**
1081 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1082 * address space. Same restrictions as for vmap and friends apply.
1083 * @dmabuf: [in] buffer to vmap
1084 *
1085 * This call may fail due to lack of virtual mapping address space.
1086 * These calls are optional in drivers. The intended use for them
1087 * is to map objects linearly in kernel space for frequently accessed objects.
1088 * Please attempt to use kmap/kunmap before thinking about these interfaces.
1089 *
1090 * Returns NULL on error.
1091 */
1092void *dma_buf_vmap(struct dma_buf *dmabuf)
1093{
1094 void *ptr;
1095
1096 if (WARN_ON(!dmabuf))
1097 return NULL;
1098
1099 if (!dmabuf->ops->vmap)
1100 return NULL;
1101
1102 mutex_lock(&dmabuf->lock);
1103 if (dmabuf->vmapping_counter) {
1104 dmabuf->vmapping_counter++;
1105 BUG_ON(!dmabuf->vmap_ptr);
1106 ptr = dmabuf->vmap_ptr;
1107 goto out_unlock;
1108 }
1109
1110 BUG_ON(dmabuf->vmap_ptr);
1111
1112 ptr = dmabuf->ops->vmap(dmabuf);
1113 if (WARN_ON_ONCE(IS_ERR(ptr)))
1114 ptr = NULL;
1115 if (!ptr)
1116 goto out_unlock;
1117
1118 dmabuf->vmap_ptr = ptr;
1119 dmabuf->vmapping_counter = 1;
1120
1121out_unlock:
1122 mutex_unlock(&dmabuf->lock);
1123 return ptr;
1124}
1125EXPORT_SYMBOL_GPL(dma_buf_vmap);
1126
1127/**
1128 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1129 * @dmabuf: [in] buffer to vunmap
1130 * @vaddr: [in] vmap to vunmap
1131 */
1132void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1133{
1134 if (WARN_ON(!dmabuf))
1135 return;
1136
1137 BUG_ON(!dmabuf->vmap_ptr);
1138 BUG_ON(dmabuf->vmapping_counter == 0);
1139 BUG_ON(dmabuf->vmap_ptr != vaddr);
1140
1141 mutex_lock(&dmabuf->lock);
1142 if (--dmabuf->vmapping_counter == 0) {
1143 if (dmabuf->ops->vunmap)
1144 dmabuf->ops->vunmap(dmabuf, vaddr);
1145 dmabuf->vmap_ptr = NULL;
1146 }
1147 mutex_unlock(&dmabuf->lock);
1148}
1149EXPORT_SYMBOL_GPL(dma_buf_vunmap);
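
/*
 * Illustrative kernel-side sketch (assumption: "dmabuf" is an imported
 * &dma_buf): using the reference-counted vmap above, with the kmap fallback
 * suggested in the cpu access documentation.
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		// ... access the whole buffer linearly through vaddr ...
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	} else {
 *		// fall back to page-wise dma_buf_kmap()/dma_buf_kunmap()
 *	}
 */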
1150
1151#ifdef CONFIG_DEBUG_FS
1152static int dma_buf_debug_show(struct seq_file *s, void *unused)
1153{
1154 int ret;
1155 struct dma_buf *buf_obj;
1156 struct dma_buf_attachment *attach_obj;
1157 struct dma_resv *robj;
1158 struct dma_resv_list *fobj;
1159 struct dma_fence *fence;
1160 unsigned seq;
1161 int count = 0, attach_count, shared_count, i;
1162 size_t size = 0;
1163
1164 ret = mutex_lock_interruptible(&db_list.lock);
1165
1166 if (ret)
1167 return ret;
1168
1169 seq_puts(s, "\nDma-buf Objects:\n");
1170 seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1171 "size", "flags", "mode", "count", "ino");
1172
1173 list_for_each_entry(buf_obj, &db_list.head, list_node) {
1174 ret = mutex_lock_interruptible(&buf_obj->lock);
1175
1176 if (ret) {
1177 seq_puts(s,
1178 "\tERROR locking buffer object: skipping\n");
1179 continue;
1180 }
1181
1182 seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1183 buf_obj->size,
1184 buf_obj->file->f_flags, buf_obj->file->f_mode,
1185 file_count(buf_obj->file),
1186 buf_obj->exp_name,
1187 file_inode(buf_obj->file)->i_ino,
1188 buf_obj->name ?: "");
1189
1190 robj = buf_obj->resv;
1191 while (true) {
1192 seq = read_seqcount_begin(&robj->seq);
1193 rcu_read_lock();
1194 fobj = rcu_dereference(robj->fence);
1195 shared_count = fobj ? fobj->shared_count : 0;
1196 fence = rcu_dereference(robj->fence_excl);
1197 if (!read_seqcount_retry(&robj->seq, seq))
1198 break;
1199 rcu_read_unlock();
1200 }
1201
1202 if (fence)
1203 seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1204 fence->ops->get_driver_name(fence),
1205 fence->ops->get_timeline_name(fence),
1206 dma_fence_is_signaled(fence) ? "" : "un");
1207 for (i = 0; i < shared_count; i++) {
1208 fence = rcu_dereference(fobj->shared[i]);
1209 if (!dma_fence_get_rcu(fence))
1210 continue;
1211 seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1212 fence->ops->get_driver_name(fence),
1213 fence->ops->get_timeline_name(fence),
1214 dma_fence_is_signaled(fence) ? "" : "un");
1215 dma_fence_put(fence);
1216 }
1217 rcu_read_unlock();
1218
1219 seq_puts(s, "\tAttached Devices:\n");
1220 attach_count = 0;
1221
1222 list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1223 seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1224 attach_count++;
1225 }
1226
1227 seq_printf(s, "Total %d devices attached\n\n",
1228 attach_count);
1229
1230 count++;
1231 size += buf_obj->size;
1232 mutex_unlock(&buf_obj->lock);
1233 }
1234
1235 seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1236
1237 mutex_unlock(&db_list.lock);
1238 return 0;
1239}
1240
1241DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1242
1243static struct dentry *dma_buf_debugfs_dir;
1244
1245static int dma_buf_init_debugfs(void)
1246{
1247 struct dentry *d;
1248 int err = 0;
1249
1250 d = debugfs_create_dir("dma_buf", NULL);
1251 if (IS_ERR(d))
1252 return PTR_ERR(d);
1253
1254 dma_buf_debugfs_dir = d;
1255
1256 d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1257 NULL, &dma_buf_debug_fops);
1258 if (IS_ERR(d)) {
1259 pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1260 debugfs_remove_recursive(dma_buf_debugfs_dir);
1261 dma_buf_debugfs_dir = NULL;
1262 err = PTR_ERR(d);
1263 }
1264
1265 return err;
1266}
1267
1268static void dma_buf_uninit_debugfs(void)
1269{
1270 debugfs_remove_recursive(dma_buf_debugfs_dir);
1271}
1272#else
1273static inline int dma_buf_init_debugfs(void)
1274{
1275 return 0;
1276}
1277static inline void dma_buf_uninit_debugfs(void)
1278{
1279}
1280#endif
1281
1282static int __init dma_buf_init(void)
1283{
1284 dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1285 if (IS_ERR(dma_buf_mnt))
1286 return PTR_ERR(dma_buf_mnt);
1287
1288 mutex_init(&db_list.lock);
1289 INIT_LIST_HEAD(&db_list.head);
1290 dma_buf_init_debugfs();
1291 return 0;
1292}
1293subsys_initcall(dma_buf_init);
1294
1295static void __exit dma_buf_deinit(void)
1296{
1297 dma_buf_uninit_debugfs();
1298 kern_unmount(dma_buf_mnt);
1299}
1300__exitcall(dma_buf_deinit);