v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Framework for buffer objects that can be shared across devices/subsystems.
   4 *
   5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
   6 * Author: Sumit Semwal <sumit.semwal@ti.com>
   7 *
   8 * Many thanks to linaro-mm-sig list, and specially
   9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  11 * refining of this idea.
  12 */
  13
  14#include <linux/fs.h>
  15#include <linux/slab.h>
  16#include <linux/dma-buf.h>
  17#include <linux/dma-fence.h>
  18#include <linux/anon_inodes.h>
  19#include <linux/export.h>
  20#include <linux/debugfs.h>
  21#include <linux/module.h>
  22#include <linux/seq_file.h>
  23#include <linux/poll.h>
  24#include <linux/dma-resv.h>
  25#include <linux/mm.h>
  26#include <linux/mount.h>
  27#include <linux/pseudo_fs.h>
  28
  29#include <uapi/linux/dma-buf.h>
  30#include <uapi/linux/magic.h>
  31
  32static inline int is_dma_buf_file(struct file *);
  33
  34struct dma_buf_list {
  35	struct list_head head;
  36	struct mutex lock;
  37};
  38
  39static struct dma_buf_list db_list;
  40
  41static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
  42{
  43	struct dma_buf *dmabuf;
  44	char name[DMA_BUF_NAME_LEN];
  45	size_t ret = 0;
  46
  47	dmabuf = dentry->d_fsdata;
  48	spin_lock(&dmabuf->name_lock);
  49	if (dmabuf->name)
  50		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
  51	spin_unlock(&dmabuf->name_lock);
  52
  53	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
  54			     dentry->d_name.name, ret > 0 ? name : "");
  55}
  56
  57static void dma_buf_release(struct dentry *dentry)
  58{
  59	struct dma_buf *dmabuf;
  60
  61	dmabuf = dentry->d_fsdata;
  62	if (unlikely(!dmabuf))
  63		return;
  64
  65	BUG_ON(dmabuf->vmapping_counter);
  66
  67	/*
  68	 * Any fences that a dma-buf poll can wait on should be signaled
  69	 * before releasing dma-buf. This is the responsibility of each
  70	 * driver that uses the reservation objects.
  71	 *
  72	 * If you hit this BUG() it means someone dropped their ref to the
  73	 * dma-buf while still having pending operation to the buffer.
  74	 */
  75	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
  76
  77	dmabuf->ops->release(dmabuf);
  78
  79	mutex_lock(&db_list.lock);
  80	list_del(&dmabuf->list_node);
  81	mutex_unlock(&db_list.lock);
  82
  83	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
  84		dma_resv_fini(dmabuf->resv);
  85
  86	module_put(dmabuf->owner);
  87	kfree(dmabuf->name);
  88	kfree(dmabuf);
  89}
  90
  91static const struct dentry_operations dma_buf_dentry_ops = {
  92	.d_dname = dmabuffs_dname,
  93	.d_release = dma_buf_release,
  94};
  95
  96static struct vfsmount *dma_buf_mnt;
  97
  98static int dma_buf_fs_init_context(struct fs_context *fc)
  99{
 100	struct pseudo_fs_context *ctx;
 101
 102	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
 103	if (!ctx)
 104		return -ENOMEM;
 105	ctx->dops = &dma_buf_dentry_ops;
 106	return 0;
 107}
 108
 109static struct file_system_type dma_buf_fs_type = {
 110	.name = "dmabuf",
 111	.init_fs_context = dma_buf_fs_init_context,
 112	.kill_sb = kill_anon_super,
 113};
 114
 115static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 116{
 117	struct dma_buf *dmabuf;
 118
 119	if (!is_dma_buf_file(file))
 120		return -EINVAL;
 121
 122	dmabuf = file->private_data;
 123
 124	/* check if buffer supports mmap */
 125	if (!dmabuf->ops->mmap)
 126		return -EINVAL;
 127
 128	/* check for overflowing the buffer's size */
 129	if (vma->vm_pgoff + vma_pages(vma) >
 130	    dmabuf->size >> PAGE_SHIFT)
 131		return -EINVAL;
 132
 133	return dmabuf->ops->mmap(dmabuf, vma);
 134}
 135
 136static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 137{
 138	struct dma_buf *dmabuf;
 139	loff_t base;
 140
 141	if (!is_dma_buf_file(file))
 142		return -EBADF;
 143
 144	dmabuf = file->private_data;
 145
 146	/* only support discovering the end of the buffer,
 147	   but also allow SEEK_SET to maintain the idiomatic
 148	   SEEK_END(0), SEEK_CUR(0) pattern */
 149	if (whence == SEEK_END)
 150		base = dmabuf->size;
 151	else if (whence == SEEK_SET)
 152		base = 0;
 153	else
 154		return -EINVAL;
 155
 156	if (offset != 0)
 157		return -EINVAL;
 158
 159	return base + offset;
 160}
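The handler above only accepts SEEK_SET and SEEK_END with a zero offset, which lets userspace discover the buffer size. A minimal userspace sketch (illustrative only, not part of this file):

#include <unistd.h>
#include <sys/types.h>

/* Sketch: query a dma-buf's size through llseek. */
static off_t dmabuf_size(int dmabuf_fd)
{
	off_t size = lseek(dmabuf_fd, 0, SEEK_END);	/* returns dmabuf->size */

	if (size >= 0)
		lseek(dmabuf_fd, 0, SEEK_SET);		/* idiomatic rewind */
	return size;
}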
 161
 162/**
 163 * DOC: implicit fence polling
 164 *
 165 * To support cross-device and cross-driver synchronization of buffer access
 166 * implicit fences (represented internally in the kernel with &struct dma_fence)
 167 * can be attached to a &dma_buf. The glue for that and a few related things are
 168 * provided in the &dma_resv structure.
 169 *
 170 * Userspace can query the state of these implicitly tracked fences using poll()
 171 * and related system calls:
 172 *
  173 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 174 *   most recent write or exclusive fence.
 175 *
 176 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 177 *   all attached fences, shared and exclusive ones.
 178 *
 179 * Note that this only signals the completion of the respective fences, i.e. the
 180 * DMA transfers are complete. Cache flushing and any other necessary
 181 * preparations before CPU access can begin still need to happen.
 182 */
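As a userspace illustration of the semantics above (a sketch, not part of this file): waiting for POLLOUT blocks until all attached fences have signalled, while POLLIN only waits for the exclusive fence.

#include <poll.h>

/* Sketch: block until all implicit fences on the dma-buf have signalled. */
static int wait_for_idle(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLOUT,	/* shared and exclusive fences */
	};

	return poll(&pfd, 1, timeout_ms);	/* > 0 once they have signalled */
}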
 183
 184static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 185{
 186	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 187	unsigned long flags;
 188
 189	spin_lock_irqsave(&dcb->poll->lock, flags);
 190	wake_up_locked_poll(dcb->poll, dcb->active);
 191	dcb->active = 0;
 192	spin_unlock_irqrestore(&dcb->poll->lock, flags);
 193}
 194
 195static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 196{
 197	struct dma_buf *dmabuf;
 198	struct dma_resv *resv;
 199	struct dma_resv_list *fobj;
 200	struct dma_fence *fence_excl;
 201	__poll_t events;
 202	unsigned shared_count, seq;
 203
 204	dmabuf = file->private_data;
 205	if (!dmabuf || !dmabuf->resv)
 206		return EPOLLERR;
 207
 208	resv = dmabuf->resv;
 209
 210	poll_wait(file, &dmabuf->poll, poll);
 211
 212	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
 213	if (!events)
 214		return 0;
 215
 216retry:
 217	seq = read_seqcount_begin(&resv->seq);
 218	rcu_read_lock();
 219
 220	fobj = rcu_dereference(resv->fence);
 221	if (fobj)
 222		shared_count = fobj->shared_count;
 223	else
 224		shared_count = 0;
 225	fence_excl = rcu_dereference(resv->fence_excl);
 226	if (read_seqcount_retry(&resv->seq, seq)) {
 227		rcu_read_unlock();
 228		goto retry;
 229	}
 230
 231	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 232		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 233		__poll_t pevents = EPOLLIN;
 234
 235		if (shared_count == 0)
 236			pevents |= EPOLLOUT;
 237
 238		spin_lock_irq(&dmabuf->poll.lock);
 239		if (dcb->active) {
 240			dcb->active |= pevents;
 241			events &= ~pevents;
 242		} else
 243			dcb->active = pevents;
 244		spin_unlock_irq(&dmabuf->poll.lock);
 245
 246		if (events & pevents) {
 247			if (!dma_fence_get_rcu(fence_excl)) {
 248				/* force a recheck */
 249				events &= ~pevents;
 250				dma_buf_poll_cb(NULL, &dcb->cb);
 251			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
 252							   dma_buf_poll_cb)) {
 253				events &= ~pevents;
 254				dma_fence_put(fence_excl);
 255			} else {
 256				/*
 257				 * No callback queued, wake up any additional
 258				 * waiters.
 259				 */
 260				dma_fence_put(fence_excl);
 261				dma_buf_poll_cb(NULL, &dcb->cb);
 262			}
 263		}
 264	}
 265
 266	if ((events & EPOLLOUT) && shared_count > 0) {
 267		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
 268		int i;
 269
 270		/* Only queue a new callback if no event has fired yet */
 271		spin_lock_irq(&dmabuf->poll.lock);
 272		if (dcb->active)
 273			events &= ~EPOLLOUT;
 274		else
 275			dcb->active = EPOLLOUT;
 276		spin_unlock_irq(&dmabuf->poll.lock);
 277
 278		if (!(events & EPOLLOUT))
 279			goto out;
 280
 281		for (i = 0; i < shared_count; ++i) {
 282			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 283
 284			if (!dma_fence_get_rcu(fence)) {
 285				/*
 286				 * fence refcount dropped to zero, this means
 287				 * that fobj has been freed
 288				 *
 289				 * call dma_buf_poll_cb and force a recheck!
 290				 */
 291				events &= ~EPOLLOUT;
 292				dma_buf_poll_cb(NULL, &dcb->cb);
 293				break;
 294			}
 295			if (!dma_fence_add_callback(fence, &dcb->cb,
 296						    dma_buf_poll_cb)) {
 297				dma_fence_put(fence);
 298				events &= ~EPOLLOUT;
 299				break;
 300			}
 301			dma_fence_put(fence);
 302		}
 303
 304		/* No callback queued, wake up any additional waiters. */
 305		if (i == shared_count)
 306			dma_buf_poll_cb(NULL, &dcb->cb);
 307	}
 308
 309out:
 310	rcu_read_unlock();
 311	return events;
 312}
 313
 314/**
 315 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 316 * The name of the dma-buf buffer can only be set when the dma-buf is not
  317 * attached to any devices. It could theoretically support changing the
  318 * name of the dma-buf if the same piece of memory is used for multiple
  319 * purposes between different devices.
 320 *
 321 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 322 * @buf:    [in]     A piece of userspace memory that contains the name of
 323 *                   the dma-buf.
 324 *
 325 * Returns 0 on success. If the dma-buf buffer is already attached to
 326 * devices, return -EBUSY.
 327 *
 328 */
 329static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 330{
 331	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
 332	long ret = 0;
 333
 334	if (IS_ERR(name))
 335		return PTR_ERR(name);
 336
 337	dma_resv_lock(dmabuf->resv, NULL);
 338	if (!list_empty(&dmabuf->attachments)) {
 339		ret = -EBUSY;
 340		kfree(name);
 341		goto out_unlock;
 342	}
 343	spin_lock(&dmabuf->name_lock);
 344	kfree(dmabuf->name);
 345	dmabuf->name = name;
 346	spin_unlock(&dmabuf->name_lock);
 347
 348out_unlock:
 349	dma_resv_unlock(dmabuf->resv);
 350	return ret;
 351}
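For reference, userspace reaches this path through the DMA_BUF_SET_NAME ioctl from the uapi header. A minimal sketch (illustrative only, assuming an already-open dma-buf fd):

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Sketch: label a dma-buf so it shows up in fdinfo and debugfs. */
static int label_dmabuf(int dmabuf_fd, const char *name)
{
	/* must happen before any device attaches, otherwise -EBUSY */
	return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
}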
 352
 353static long dma_buf_ioctl(struct file *file,
 354			  unsigned int cmd, unsigned long arg)
 355{
 356	struct dma_buf *dmabuf;
 357	struct dma_buf_sync sync;
 358	enum dma_data_direction direction;
 359	int ret;
 360
 361	dmabuf = file->private_data;
 362
 363	switch (cmd) {
 364	case DMA_BUF_IOCTL_SYNC:
 365		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
 366			return -EFAULT;
 367
 368		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
 369			return -EINVAL;
 370
 371		switch (sync.flags & DMA_BUF_SYNC_RW) {
 372		case DMA_BUF_SYNC_READ:
 373			direction = DMA_FROM_DEVICE;
 374			break;
 375		case DMA_BUF_SYNC_WRITE:
 376			direction = DMA_TO_DEVICE;
 377			break;
 378		case DMA_BUF_SYNC_RW:
 379			direction = DMA_BIDIRECTIONAL;
 380			break;
 381		default:
 382			return -EINVAL;
 383		}
 384
 385		if (sync.flags & DMA_BUF_SYNC_END)
 386			ret = dma_buf_end_cpu_access(dmabuf, direction);
 387		else
 388			ret = dma_buf_begin_cpu_access(dmabuf, direction);
 389
 390		return ret;
 391
 392	case DMA_BUF_SET_NAME_A:
 393	case DMA_BUF_SET_NAME_B:
 394		return dma_buf_set_name(dmabuf, (const char __user *)arg);
 395
 396	default:
 397		return -ENOTTY;
 398	}
 399}
 400
 401static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 402{
 403	struct dma_buf *dmabuf = file->private_data;
 404
 405	seq_printf(m, "size:\t%zu\n", dmabuf->size);
 406	/* Don't count the temporary reference taken inside procfs seq_show */
 407	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
 408	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
 409	spin_lock(&dmabuf->name_lock);
 410	if (dmabuf->name)
 411		seq_printf(m, "name:\t%s\n", dmabuf->name);
 412	spin_unlock(&dmabuf->name_lock);
 413}
 414
 415static const struct file_operations dma_buf_fops = {
 416	.mmap		= dma_buf_mmap_internal,
 417	.llseek		= dma_buf_llseek,
 418	.poll		= dma_buf_poll,
 419	.unlocked_ioctl	= dma_buf_ioctl,
 420	.compat_ioctl	= compat_ptr_ioctl,
 421	.show_fdinfo	= dma_buf_show_fdinfo,
 422};
 423
 424/*
 425 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 426 */
 427static inline int is_dma_buf_file(struct file *file)
 428{
 429	return file->f_op == &dma_buf_fops;
 430}
 431
 432static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
 433{
 434	struct file *file;
 435	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
 436
 437	if (IS_ERR(inode))
 438		return ERR_CAST(inode);
 439
 440	inode->i_size = dmabuf->size;
 441	inode_set_bytes(inode, dmabuf->size);
 442
 443	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
 444				 flags, &dma_buf_fops);
 445	if (IS_ERR(file))
 446		goto err_alloc_file;
 447	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
 448	file->private_data = dmabuf;
 449	file->f_path.dentry->d_fsdata = dmabuf;
 450
 451	return file;
 452
 453err_alloc_file:
 454	iput(inode);
 455	return file;
 456}
 457
 458/**
 459 * DOC: dma buf device access
 460 *
 461 * For device DMA access to a shared DMA buffer the usual sequence of operations
 462 * is fairly simple:
 463 *
  464 * 1. The exporter defines its exporter instance using
 465 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 466 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 467 *    as a file descriptor by calling dma_buf_fd().
 468 *
  469 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
  470 *    to share with: first the file descriptor is converted to a &dma_buf using
 471 *    dma_buf_get(). Then the buffer is attached to the device using
 472 *    dma_buf_attach().
 473 *
 474 *    Up to this stage the exporter is still free to migrate or reallocate the
 475 *    backing storage.
 476 *
 477 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 478 *    access to the shared buffer. In the kernel this is done by calling
 479 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 480 *
 481 * 4. Once a driver is done with a shared buffer it needs to call
 482 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 483 *    reference acquired with dma_buf_get by calling dma_buf_put().
 484 *
 485 * For the detailed semantics exporters are expected to implement see
 486 * &dma_buf_ops.
 487 */
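A condensed importer-side sketch of steps 2-4 above (illustrative only; dev is the importing device and error paths are abbreviated; it assumes <linux/dma-buf.h>):

/* Importer sketch: resolve an fd, attach, map, use, then tear down. */
static int importer_use_buffer(int fd, struct device *dev)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);			/* step 2: fd -> dma_buf */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);		/* step 2: attach device */
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); /* step 3 */
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	/* ... program the device with the sg_table ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); /* step 4 */
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
	return 0;
}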
 488
 489/**
 490 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 491 * with this buffer, so it can be exported.
  492 * Also connects the allocator-specific data and ops to the buffer.
  493 * Additionally, provides a name string for the exporter; useful in debugging.
 494 *
 495 * @exp_info:	[in]	holds all the export related information provided
 496 *			by the exporter. see &struct dma_buf_export_info
 497 *			for further details.
 498 *
 499 * Returns, on success, a newly created dma_buf object, which wraps the
  500 * supplied private data and operations for dma_buf_ops. On either missing
  501 * ops or an error in allocating struct dma_buf, a negative error is returned.
 502 *
 503 * For most cases the easiest way to create @exp_info is through the
 504 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 505 */
 506struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 507{
 508	struct dma_buf *dmabuf;
 509	struct dma_resv *resv = exp_info->resv;
 510	struct file *file;
 511	size_t alloc_size = sizeof(struct dma_buf);
 512	int ret;
 513
 514	if (!exp_info->resv)
 515		alloc_size += sizeof(struct dma_resv);
 516	else
 517		/* prevent &dma_buf[1] == dma_buf->resv */
 518		alloc_size += 1;
 519
 520	if (WARN_ON(!exp_info->priv
 521			  || !exp_info->ops
 522			  || !exp_info->ops->map_dma_buf
 523			  || !exp_info->ops->unmap_dma_buf
 524			  || !exp_info->ops->release)) {
 525		return ERR_PTR(-EINVAL);
 526	}
 527
 528	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 529		    (exp_info->ops->pin || exp_info->ops->unpin)))
 530		return ERR_PTR(-EINVAL);
 531
 532	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
 533		return ERR_PTR(-EINVAL);
 534
 535	if (!try_module_get(exp_info->owner))
 536		return ERR_PTR(-ENOENT);
 537
 538	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 539	if (!dmabuf) {
 540		ret = -ENOMEM;
 541		goto err_module;
 542	}
 543
 544	dmabuf->priv = exp_info->priv;
 545	dmabuf->ops = exp_info->ops;
 546	dmabuf->size = exp_info->size;
 547	dmabuf->exp_name = exp_info->exp_name;
 548	dmabuf->owner = exp_info->owner;
 549	spin_lock_init(&dmabuf->name_lock);
 550	init_waitqueue_head(&dmabuf->poll);
 551	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
 552	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 553
 554	if (!resv) {
 555		resv = (struct dma_resv *)&dmabuf[1];
 556		dma_resv_init(resv);
 557	}
 558	dmabuf->resv = resv;
 559
 560	file = dma_buf_getfile(dmabuf, exp_info->flags);
 561	if (IS_ERR(file)) {
 562		ret = PTR_ERR(file);
 563		goto err_dmabuf;
 564	}
 565
 566	file->f_mode |= FMODE_LSEEK;
 567	dmabuf->file = file;
 568
 569	mutex_init(&dmabuf->lock);
 570	INIT_LIST_HEAD(&dmabuf->attachments);
 571
 572	mutex_lock(&db_list.lock);
 573	list_add(&dmabuf->list_node, &db_list.head);
 574	mutex_unlock(&db_list.lock);
 575
 576	return dmabuf;
 577
 578err_dmabuf:
 579	kfree(dmabuf);
 580err_module:
 581	module_put(exp_info->owner);
 582	return ERR_PTR(ret);
 583}
 584EXPORT_SYMBOL_GPL(dma_buf_export);
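For illustration, a minimal exporter call site using DEFINE_DMA_BUF_EXPORT_INFO() as described above (struct my_buf and my_buf_ops are hypothetical names; a sketch only, assuming <linux/dma-buf.h>):

/* Exporter sketch: wrap a private buffer object and hand out an fd. */
static int my_export(struct my_buf *buf, size_t size)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = &my_buf_ops;	/* must provide map/unmap/release */
	exp_info.size = size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);	/* step 1: fd for userspace */
	if (fd < 0)
		dma_buf_put(dmabuf);		/* drops the file, releases the buffer */
	return fd;
}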
 585
 586/**
 587 * dma_buf_fd - returns a file descriptor for the given dma_buf
 588 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 589 * @flags:      [in]    flags to give to fd
 590 *
 591 * On success, returns an associated 'fd'. Else, returns error.
 592 */
 593int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 594{
 595	int fd;
 596
 597	if (!dmabuf || !dmabuf->file)
 598		return -EINVAL;
 599
 600	fd = get_unused_fd_flags(flags);
 601	if (fd < 0)
 602		return fd;
 603
 604	fd_install(fd, dmabuf->file);
 605
 606	return fd;
 607}
 608EXPORT_SYMBOL_GPL(dma_buf_fd);
 609
 610/**
 611 * dma_buf_get - returns the dma_buf structure related to an fd
 612 * @fd:	[in]	fd associated with the dma_buf to be returned
 613 *
 614 * On success, returns the dma_buf structure associated with an fd; uses
  615 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
  616 * otherwise.
 617 */
 618struct dma_buf *dma_buf_get(int fd)
 619{
 620	struct file *file;
 621
 622	file = fget(fd);
 623
 624	if (!file)
 625		return ERR_PTR(-EBADF);
 626
 627	if (!is_dma_buf_file(file)) {
 628		fput(file);
 629		return ERR_PTR(-EINVAL);
 630	}
 631
 632	return file->private_data;
 633}
 634EXPORT_SYMBOL_GPL(dma_buf_get);
 635
 636/**
 637 * dma_buf_put - decreases refcount of the buffer
 638 * @dmabuf:	[in]	buffer to reduce refcount of
 639 *
 640 * Uses file's refcounting done implicitly by fput().
 641 *
 642 * If, as a result of this call, the refcount becomes 0, the 'release' file
 643 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 644 * in turn, and frees the memory allocated for dmabuf when exported.
 645 */
 646void dma_buf_put(struct dma_buf *dmabuf)
 647{
 648	if (WARN_ON(!dmabuf || !dmabuf->file))
 649		return;
 650
 651	fput(dmabuf->file);
 652}
 653EXPORT_SYMBOL_GPL(dma_buf_put);
 654
 655/**
 656 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list; optionally,
 657 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 658 * @dmabuf:		[in]	buffer to attach device to.
 659 * @dev:		[in]	device to be attached.
 660 * @importer_ops:	[in]	importer operations for the attachment
 661 * @importer_priv:	[in]	importer private pointer for the attachment
 662 *
 663 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 664 * must be cleaned up by calling dma_buf_detach().
 665 *
 666 * Returns:
 667 *
 668 * A pointer to newly created &dma_buf_attachment on success, or a negative
 669 * error code wrapped into a pointer on failure.
 670 *
 671 * Note that this can fail if the backing storage of @dmabuf is in a place not
 672 * accessible to @dev, and cannot be moved to a more suitable place. This is
 673 * indicated with the error code -EBUSY.
 674 */
 675struct dma_buf_attachment *
 676dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 677		       const struct dma_buf_attach_ops *importer_ops,
 678		       void *importer_priv)
 679{
 680	struct dma_buf_attachment *attach;
 681	int ret;
 682
 683	if (WARN_ON(!dmabuf || !dev))
 684		return ERR_PTR(-EINVAL);
 685
 686	if (WARN_ON(importer_ops && !importer_ops->move_notify))
 687		return ERR_PTR(-EINVAL);
 688
 689	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 690	if (!attach)
 691		return ERR_PTR(-ENOMEM);
 692
 693	attach->dev = dev;
 694	attach->dmabuf = dmabuf;
 695	if (importer_ops)
 696		attach->peer2peer = importer_ops->allow_peer2peer;
 697	attach->importer_ops = importer_ops;
 698	attach->importer_priv = importer_priv;
 699
 700	if (dmabuf->ops->attach) {
 701		ret = dmabuf->ops->attach(dmabuf, attach);
 702		if (ret)
 703			goto err_attach;
 704	}
 705	dma_resv_lock(dmabuf->resv, NULL);
 706	list_add(&attach->node, &dmabuf->attachments);
 707	dma_resv_unlock(dmabuf->resv);
 708
 709	/* When either the importer or the exporter can't handle dynamic
 710	 * mappings we cache the mapping here to avoid issues with the
 711	 * reservation object lock.
 712	 */
 713	if (dma_buf_attachment_is_dynamic(attach) !=
 714	    dma_buf_is_dynamic(dmabuf)) {
 715		struct sg_table *sgt;
 716
 717		if (dma_buf_is_dynamic(attach->dmabuf)) {
 718			dma_resv_lock(attach->dmabuf->resv, NULL);
 719			ret = dma_buf_pin(attach);
 720			if (ret)
 721				goto err_unlock;
 722		}
 723
 724		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
 725		if (!sgt)
 726			sgt = ERR_PTR(-ENOMEM);
 727		if (IS_ERR(sgt)) {
 728			ret = PTR_ERR(sgt);
 729			goto err_unpin;
 730		}
 731		if (dma_buf_is_dynamic(attach->dmabuf))
 732			dma_resv_unlock(attach->dmabuf->resv);
 733		attach->sgt = sgt;
 734		attach->dir = DMA_BIDIRECTIONAL;
 735	}
 736
 737	return attach;
 738
 739err_attach:
 740	kfree(attach);
 741	return ERR_PTR(ret);
 742
 743err_unpin:
 744	if (dma_buf_is_dynamic(attach->dmabuf))
 745		dma_buf_unpin(attach);
 746
 747err_unlock:
 748	if (dma_buf_is_dynamic(attach->dmabuf))
 749		dma_resv_unlock(attach->dmabuf->resv);
 750
 751	dma_buf_detach(dmabuf, attach);
 752	return ERR_PTR(ret);
 753}
 754EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
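To illustrate the dynamic path described above, a hypothetical importer might wire up &dma_buf_attach_ops like this (my_importer, my_importer_invalidate and imp are made-up names; a sketch only):

/* Sketch of a dynamic importer: cached mappings are dropped on move_notify. */
static void my_move_notify(struct dma_buf_attachment *attach)
{
	struct my_importer *imp = attach->importer_priv;	/* hypothetical type */

	/* The exporter is about to move the backing storage: drop cached
	 * mappings now and re-map on next use, under dmabuf->resv. */
	my_importer_invalidate(imp);				/* hypothetical helper */
}

static const struct dma_buf_attach_ops my_importer_ops = {
	.allow_peer2peer = true,
	.move_notify = my_move_notify,
};

/* attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops, imp); */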
 755
 756/**
 757 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 758 * @dmabuf:	[in]	buffer to attach device to.
 759 * @dev:	[in]	device to be attached.
 760 *
 761 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 762 * mapping.
 763 */
 764struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 765					  struct device *dev)
 766{
 767	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 768}
 769EXPORT_SYMBOL_GPL(dma_buf_attach);
 770
 771/**
 772 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 773 * optionally calls detach() of dma_buf_ops for device-specific detach
 774 * @dmabuf:	[in]	buffer to detach from.
 775 * @attach:	[in]	attachment to be detached; is free'd after this call.
 776 *
 777 * Clean up a device attachment obtained by calling dma_buf_attach().
 778 */
 779void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 780{
 781	if (WARN_ON(!dmabuf || !attach))
 782		return;
 783
 784	if (attach->sgt) {
 785		if (dma_buf_is_dynamic(attach->dmabuf))
 786			dma_resv_lock(attach->dmabuf->resv, NULL);
 787
 788		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
 789
 790		if (dma_buf_is_dynamic(attach->dmabuf)) {
 791			dma_buf_unpin(attach);
 792			dma_resv_unlock(attach->dmabuf->resv);
 793		}
 794	}
 795
 796	dma_resv_lock(dmabuf->resv, NULL);
 797	list_del(&attach->node);
 798	dma_resv_unlock(dmabuf->resv);
 799	if (dmabuf->ops->detach)
 800		dmabuf->ops->detach(dmabuf, attach);
 801
 802	kfree(attach);
 803}
 804EXPORT_SYMBOL_GPL(dma_buf_detach);
 805
 806/**
 807 * dma_buf_pin - Lock down the DMA-buf
 808 *
 809 * @attach:	[in]	attachment which should be pinned
 810 *
 811 * Returns:
 812 * 0 on success, negative error code on failure.
 813 */
 814int dma_buf_pin(struct dma_buf_attachment *attach)
 815{
 816	struct dma_buf *dmabuf = attach->dmabuf;
 817	int ret = 0;
 818
 819	dma_resv_assert_held(dmabuf->resv);
 820
 821	if (dmabuf->ops->pin)
 822		ret = dmabuf->ops->pin(attach);
 823
 824	return ret;
 825}
 826EXPORT_SYMBOL_GPL(dma_buf_pin);
 827
 828/**
 829 * dma_buf_unpin - Remove lock from DMA-buf
 830 *
 831 * @attach:	[in]	attachment which should be unpinned
 832 */
 833void dma_buf_unpin(struct dma_buf_attachment *attach)
 834{
 835	struct dma_buf *dmabuf = attach->dmabuf;
 836
 837	dma_resv_assert_held(dmabuf->resv);
 838
 839	if (dmabuf->ops->unpin)
 840		dmabuf->ops->unpin(attach);
 841}
 842EXPORT_SYMBOL_GPL(dma_buf_unpin);
 843
 844/**
 845 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 846 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 847 * dma_buf_ops.
 848 * @attach:	[in]	attachment whose scatterlist is to be returned
 849 * @direction:	[in]	direction of DMA transfer
 850 *
 851 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 852 * on error. May return -EINTR if it is interrupted by a signal.
 853 *
 854 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 855 * the underlying backing storage is pinned for as long as a mapping exists,
 856 * therefore users/importers should not hold onto a mapping for undue amounts of
 857 * time.
 858 */
 859struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 860					enum dma_data_direction direction)
 861{
 862	struct sg_table *sg_table;
 863	int r;
 864
 865	might_sleep();
 866
 867	if (WARN_ON(!attach || !attach->dmabuf))
 868		return ERR_PTR(-EINVAL);
 869
 870	if (dma_buf_attachment_is_dynamic(attach))
 871		dma_resv_assert_held(attach->dmabuf->resv);
 872
 873	if (attach->sgt) {
 874		/*
 875		 * Two mappings with different directions for the same
 876		 * attachment are not allowed.
 877		 */
 878		if (attach->dir != direction &&
 879		    attach->dir != DMA_BIDIRECTIONAL)
 880			return ERR_PTR(-EBUSY);
 881
 882		return attach->sgt;
 883	}
 884
 885	if (dma_buf_is_dynamic(attach->dmabuf)) {
 886		dma_resv_assert_held(attach->dmabuf->resv);
 887		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
 888			r = dma_buf_pin(attach);
 889			if (r)
 890				return ERR_PTR(r);
 891		}
 892	}
 893
 894	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 895	if (!sg_table)
 896		sg_table = ERR_PTR(-ENOMEM);
 897
 898	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
 899	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
 900		dma_buf_unpin(attach);
 901
 902	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
 903		attach->sgt = sg_table;
 904		attach->dir = direction;
 905	}
 906
 907	return sg_table;
 908}
 909EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
 910
 911/**
  912 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 913 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 914 * dma_buf_ops.
 915 * @attach:	[in]	attachment to unmap buffer from
 916 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 917 * @direction:  [in]    direction of DMA transfer
 918 *
  919 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 920 */
 921void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
 922				struct sg_table *sg_table,
 923				enum dma_data_direction direction)
 924{
 925	might_sleep();
 926
 927	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 928		return;
 929
 930	if (dma_buf_attachment_is_dynamic(attach))
 931		dma_resv_assert_held(attach->dmabuf->resv);
 932
 933	if (attach->sgt == sg_table)
 934		return;
 935
 936	if (dma_buf_is_dynamic(attach->dmabuf))
 937		dma_resv_assert_held(attach->dmabuf->resv);
 938
 939	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 940
 941	if (dma_buf_is_dynamic(attach->dmabuf) &&
 942	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
 943		dma_buf_unpin(attach);
 944}
 945EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 946
 947/**
 948 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 949 *
 950 * @dmabuf:	[in]	buffer which is moving
 951 *
  952 * Informs all attachments that they need to destroy and recreate all their
 953 * mappings.
 954 */
 955void dma_buf_move_notify(struct dma_buf *dmabuf)
 956{
 957	struct dma_buf_attachment *attach;
 958
 959	dma_resv_assert_held(dmabuf->resv);
 960
 961	list_for_each_entry(attach, &dmabuf->attachments, node)
 962		if (attach->importer_ops)
 963			attach->importer_ops->move_notify(attach);
 964}
 965EXPORT_SYMBOL_GPL(dma_buf_move_notify);
 966
 967/**
 968 * DOC: cpu access
 969 *
  970 * There are multiple reasons for supporting CPU access to a dma buffer object:
 971 *
 972 * - Fallback operations in the kernel, for example when a device is connected
 973 *   over USB and the kernel needs to shuffle the data around first before
  974 *   sending it away. Cache coherency is handled by bracketing any
  975 *   transactions with calls to dma_buf_begin_cpu_access() and
  976 *   dma_buf_end_cpu_access().
 977 *
  978 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 979 *   vmap interface is introduced. Note that on very old 32-bit architectures
 980 *   vmalloc space might be limited and result in vmap calls failing.
 981 *
 982 *   Interfaces::
 983 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 984 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 985 *
 986 *   The vmap call can fail if there is no vmap support in the exporter, or if
 987 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 988 *   that the dma-buf layer keeps a reference count for all vmap access and
 989 *   calls down into the exporter's vmap function only when no vmapping exists,
 990 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 991 *   provided by taking the dma_buf->lock mutex.
 992 *
 993 * - For full compatibility on the importer side with existing userspace
 994 *   interfaces, which might already support mmap'ing buffers. This is needed in
 995 *   many processing pipelines (e.g. feeding a software rendered image into a
 996 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 997 *   framework already supported this and for DMA buffer file descriptors to
 998 *   replace ION buffers mmap support was needed.
 999 *
 1000 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 1001 *   fd. But as with CPU access there's a need to bracket the actual access,
1002 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1003 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1004 *   be restarted.
1005 *
1006 *   Some systems might need some sort of cache coherency management e.g. when
1007 *   CPU and GPU domains are being accessed through dma-buf at the same time.
1008 *   To circumvent this problem there are begin/end coherency markers, that
1009 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
1010 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 1011 *   sequence would be used as follows (a userspace sketch follows this comment):
1012 *
1013 *     - mmap dma-buf fd
1014 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
1015 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
1016 *       want (with the new data being consumed by say the GPU or the scanout
1017 *       device)
1018 *     - munmap once you don't need the buffer any more
1019 *
1020 *    For correctness and optimal performance, it is always required to use
1021 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1022 *    mapped address. Userspace cannot rely on coherent access, even when there
1023 *    are systems where it just works without calling these ioctls.
1024 *
1025 * - And as a CPU fallback in userspace processing pipelines.
1026 *
1027 *   Similar to the motivation for kernel cpu access it is again important that
1028 *   the userspace code of a given importing subsystem can use the same
 1029 *   interfaces with an imported dma-buf buffer object as with a native buffer
1030 *   object. This is especially important for drm where the userspace part of
1031 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 1032 *   use a different way to mmap a buffer would be rather invasive.
1033 *
1034 *   The assumption in the current dma-buf interfaces is that redirecting the
1035 *   initial mmap is all that's needed. A survey of some of the existing
1036 *   subsystems shows that no driver seems to do any nefarious thing like
1037 *   syncing up with outstanding asynchronous processing on the device or
1038 *   allocating special resources at fault time. So hopefully this is good
1039 *   enough, since adding interfaces to intercept pagefaults and allow pte
1040 *   shootdowns would increase the complexity quite a bit.
1041 *
1042 *   Interface::
1043 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1044 *		       unsigned long);
1045 *
1046 *   If the importing subsystem simply provides a special-purpose mmap call to
1047 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
1048 *   equally achieve that for a dma-buf object.
1049 */
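A userspace sketch of the mmap bracketing sequence described above (illustrative only; uses the uapi <linux/dma-buf.h>):

#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

/* DMA_BUF_IOCTL_SYNC may fail with EAGAIN/EINTR and must then be restarted. */
static int dmabuf_sync(int fd, __u64 flags)
{
	struct dma_buf_sync sync = { .flags = flags };
	int err;

	do {
		err = ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	} while (err && (errno == EAGAIN || errno == EINTR));

	return err;
}

/* Sketch: one CPU write cycle into a mapped dma-buf, bracketed by SYNC ioctls. */
static int cpu_fill(int dmabuf_fd, size_t size, unsigned char pattern)
{
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	if (map == MAP_FAILED)
		return -1;

	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE);
	memset(map, pattern, size);		/* CPU access happens here */
	dmabuf_sync(dmabuf_fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);

	munmap(map, size);
	return 0;
}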
1050
1051static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1052				      enum dma_data_direction direction)
1053{
1054	bool write = (direction == DMA_BIDIRECTIONAL ||
1055		      direction == DMA_TO_DEVICE);
1056	struct dma_resv *resv = dmabuf->resv;
1057	long ret;
1058
1059	/* Wait on any implicit rendering fences */
1060	ret = dma_resv_wait_timeout_rcu(resv, write, true,
1061						  MAX_SCHEDULE_TIMEOUT);
1062	if (ret < 0)
1063		return ret;
1064
1065	return 0;
1066}
1067
1068/**
1069 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1070 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1071 * preparations. Coherency is only guaranteed in the specified range for the
1072 * specified access direction.
1073 * @dmabuf:	[in]	buffer to prepare cpu access for.
 1074 * @direction:	[in]	direction of access.
1075 *
1076 * After the cpu access is complete the caller should call
1077 * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
1078 * it guaranteed to be coherent with other DMA access.
1079 *
1080 * Can return negative error values, returns 0 on success.
1081 */
1082int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1083			     enum dma_data_direction direction)
1084{
1085	int ret = 0;
1086
1087	if (WARN_ON(!dmabuf))
1088		return -EINVAL;
1089
1090	if (dmabuf->ops->begin_cpu_access)
1091		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1092
1093	/* Ensure that all fences are waited upon - but we first allow
1094	 * the native handler the chance to do so more efficiently if it
1095	 * chooses. A double invocation here will be reasonably cheap no-op.
1096	 */
1097	if (ret == 0)
1098		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1099
1100	return ret;
1101}
1102EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
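For illustration, a kernel-side caller might bracket a vmap'ed CPU read like this (a sketch only; error handling is trimmed and it assumes the exporter implements vmap):

/* Sketch: read a dma-buf's contents from kernel context. */
static int kernel_peek(struct dma_buf *dmabuf)
{
	void *vaddr;
	int ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	vaddr = dma_buf_vmap(dmabuf);
	if (vaddr) {
		/* ... read through vaddr ... */
		dma_buf_vunmap(dmabuf, vaddr);
	}

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}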
1103
1104/**
1105 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1106 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1107 * actions. Coherency is only guaranteed in the specified range for the
1108 * specified access direction.
1109 * @dmabuf:	[in]	buffer to complete cpu access for.
 1110 * @direction:	[in]	direction of access.
1111 *
1112 * This terminates CPU access started with dma_buf_begin_cpu_access().
1113 *
1114 * Can return negative error values, returns 0 on success.
1115 */
1116int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1117			   enum dma_data_direction direction)
1118{
1119	int ret = 0;
1120
1121	WARN_ON(!dmabuf);
1122
1123	if (dmabuf->ops->end_cpu_access)
1124		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1125
1126	return ret;
1127}
1128EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
1129
1130
1131/**
1132 * dma_buf_mmap - Setup up a userspace mmap with the given vma
1133 * @dmabuf:	[in]	buffer that should back the vma
1134 * @vma:	[in]	vma for the mmap
1135 * @pgoff:	[in]	offset in pages where this mmap should start within the
1136 *			dma-buf buffer.
1137 *
1138 * This function adjusts the passed in vma so that it points at the file of the
1139 * dma_buf operation. It also adjusts the starting pgoff and does bounds
1140 * checking on the size of the vma. Then it calls the exporters mmap function to
1141 * set up the mapping.
1142 *
1143 * Can return negative error values, returns 0 on success.
1144 */
1145int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1146		 unsigned long pgoff)
1147{
1148	struct file *oldfile;
1149	int ret;
1150
1151	if (WARN_ON(!dmabuf || !vma))
1152		return -EINVAL;
1153
1154	/* check if buffer supports mmap */
1155	if (!dmabuf->ops->mmap)
1156		return -EINVAL;
1157
1158	/* check for offset overflow */
1159	if (pgoff + vma_pages(vma) < pgoff)
1160		return -EOVERFLOW;
1161
1162	/* check for overflowing the buffer's size */
1163	if (pgoff + vma_pages(vma) >
1164	    dmabuf->size >> PAGE_SHIFT)
1165		return -EINVAL;
1166
1167	/* readjust the vma */
1168	get_file(dmabuf->file);
1169	oldfile = vma->vm_file;
1170	vma->vm_file = dmabuf->file;
1171	vma->vm_pgoff = pgoff;
1172
1173	ret = dmabuf->ops->mmap(dmabuf, vma);
1174	if (ret) {
1175		/* restore old parameters on failure */
1176		vma->vm_file = oldfile;
1177		fput(dmabuf->file);
1178	} else {
1179		if (oldfile)
1180			fput(oldfile);
1181	}
1182	return ret;
1183
1184}
1185EXPORT_SYMBOL_GPL(dma_buf_mmap);
1186
1187/**
1188 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1189 * address space. Same restrictions as for vmap and friends apply.
1190 * @dmabuf:	[in]	buffer to vmap
1191 *
1192 * This call may fail due to lack of virtual mapping address space.
1193 * These calls are optional in drivers. The intended use for them
 1194 * is for mapping objects linearly in kernel space for high-use objects.
1195 * Please attempt to use kmap/kunmap before thinking about these interfaces.
1196 *
1197 * Returns NULL on error.
1198 */
1199void *dma_buf_vmap(struct dma_buf *dmabuf)
1200{
1201	void *ptr;
1202
1203	if (WARN_ON(!dmabuf))
1204		return NULL;
1205
1206	if (!dmabuf->ops->vmap)
1207		return NULL;
1208
1209	mutex_lock(&dmabuf->lock);
1210	if (dmabuf->vmapping_counter) {
1211		dmabuf->vmapping_counter++;
1212		BUG_ON(!dmabuf->vmap_ptr);
1213		ptr = dmabuf->vmap_ptr;
1214		goto out_unlock;
1215	}
1216
1217	BUG_ON(dmabuf->vmap_ptr);
1218
1219	ptr = dmabuf->ops->vmap(dmabuf);
1220	if (WARN_ON_ONCE(IS_ERR(ptr)))
1221		ptr = NULL;
1222	if (!ptr)
1223		goto out_unlock;
1224
1225	dmabuf->vmap_ptr = ptr;
1226	dmabuf->vmapping_counter = 1;
1227
1228out_unlock:
1229	mutex_unlock(&dmabuf->lock);
1230	return ptr;
1231}
1232EXPORT_SYMBOL_GPL(dma_buf_vmap);
1233
1234/**
1235 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1236 * @dmabuf:	[in]	buffer to vunmap
1237 * @vaddr:	[in]	vmap to vunmap
1238 */
1239void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
1240{
1241	if (WARN_ON(!dmabuf))
1242		return;
1243
1244	BUG_ON(!dmabuf->vmap_ptr);
1245	BUG_ON(dmabuf->vmapping_counter == 0);
1246	BUG_ON(dmabuf->vmap_ptr != vaddr);
1247
1248	mutex_lock(&dmabuf->lock);
1249	if (--dmabuf->vmapping_counter == 0) {
1250		if (dmabuf->ops->vunmap)
1251			dmabuf->ops->vunmap(dmabuf, vaddr);
1252		dmabuf->vmap_ptr = NULL;
1253	}
1254	mutex_unlock(&dmabuf->lock);
1255}
1256EXPORT_SYMBOL_GPL(dma_buf_vunmap);
1257
1258#ifdef CONFIG_DEBUG_FS
1259static int dma_buf_debug_show(struct seq_file *s, void *unused)
1260{
1261	int ret;
1262	struct dma_buf *buf_obj;
1263	struct dma_buf_attachment *attach_obj;
1264	struct dma_resv *robj;
1265	struct dma_resv_list *fobj;
1266	struct dma_fence *fence;
1267	unsigned seq;
1268	int count = 0, attach_count, shared_count, i;
1269	size_t size = 0;
1270
1271	ret = mutex_lock_interruptible(&db_list.lock);
1272
1273	if (ret)
1274		return ret;
1275
1276	seq_puts(s, "\nDma-buf Objects:\n");
1277	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
1278		   "size", "flags", "mode", "count", "ino");
1279
1280	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1281
1282		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1283		if (ret)
1284			goto error_unlock;
1285
1286		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1287				buf_obj->size,
1288				buf_obj->file->f_flags, buf_obj->file->f_mode,
1289				file_count(buf_obj->file),
1290				buf_obj->exp_name,
1291				file_inode(buf_obj->file)->i_ino,
1292				buf_obj->name ?: "");
1293
1294		robj = buf_obj->resv;
1295		while (true) {
1296			seq = read_seqcount_begin(&robj->seq);
1297			rcu_read_lock();
1298			fobj = rcu_dereference(robj->fence);
1299			shared_count = fobj ? fobj->shared_count : 0;
1300			fence = rcu_dereference(robj->fence_excl);
1301			if (!read_seqcount_retry(&robj->seq, seq))
1302				break;
1303			rcu_read_unlock();
1304		}
1305
1306		if (fence)
1307			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
1308				   fence->ops->get_driver_name(fence),
1309				   fence->ops->get_timeline_name(fence),
1310				   dma_fence_is_signaled(fence) ? "" : "un");
1311		for (i = 0; i < shared_count; i++) {
1312			fence = rcu_dereference(fobj->shared[i]);
1313			if (!dma_fence_get_rcu(fence))
1314				continue;
1315			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
1316				   fence->ops->get_driver_name(fence),
1317				   fence->ops->get_timeline_name(fence),
1318				   dma_fence_is_signaled(fence) ? "" : "un");
1319			dma_fence_put(fence);
1320		}
1321		rcu_read_unlock();
1322
1323		seq_puts(s, "\tAttached Devices:\n");
1324		attach_count = 0;
1325
1326		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1327			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1328			attach_count++;
1329		}
1330		dma_resv_unlock(buf_obj->resv);
1331
1332		seq_printf(s, "Total %d devices attached\n\n",
1333				attach_count);
1334
1335		count++;
1336		size += buf_obj->size;
1337	}
1338
1339	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1340
1341	mutex_unlock(&db_list.lock);
1342	return 0;
1343
1344error_unlock:
1345	mutex_unlock(&db_list.lock);
1346	return ret;
1347}
1348
1349DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1350
1351static struct dentry *dma_buf_debugfs_dir;
1352
1353static int dma_buf_init_debugfs(void)
1354{
1355	struct dentry *d;
1356	int err = 0;
1357
1358	d = debugfs_create_dir("dma_buf", NULL);
1359	if (IS_ERR(d))
1360		return PTR_ERR(d);
1361
1362	dma_buf_debugfs_dir = d;
1363
1364	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1365				NULL, &dma_buf_debug_fops);
1366	if (IS_ERR(d)) {
1367		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1368		debugfs_remove_recursive(dma_buf_debugfs_dir);
1369		dma_buf_debugfs_dir = NULL;
1370		err = PTR_ERR(d);
1371	}
1372
1373	return err;
1374}
1375
1376static void dma_buf_uninit_debugfs(void)
1377{
1378	debugfs_remove_recursive(dma_buf_debugfs_dir);
1379}
1380#else
1381static inline int dma_buf_init_debugfs(void)
1382{
1383	return 0;
1384}
1385static inline void dma_buf_uninit_debugfs(void)
1386{
1387}
1388#endif
1389
1390static int __init dma_buf_init(void)
1391{
1392	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1393	if (IS_ERR(dma_buf_mnt))
1394		return PTR_ERR(dma_buf_mnt);
1395
1396	mutex_init(&db_list.lock);
1397	INIT_LIST_HEAD(&db_list.head);
1398	dma_buf_init_debugfs();
1399	return 0;
1400}
1401subsys_initcall(dma_buf_init);
1402
1403static void __exit dma_buf_deinit(void)
1404{
1405	dma_buf_uninit_debugfs();
1406	kern_unmount(dma_buf_mnt);
1407}
1408__exitcall(dma_buf_deinit);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Framework for buffer objects that can be shared across devices/subsystems.
   4 *
   5 * Copyright(C) 2011 Linaro Limited. All rights reserved.
   6 * Author: Sumit Semwal <sumit.semwal@ti.com>
   7 *
   8 * Many thanks to linaro-mm-sig list, and specially
   9 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
  10 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
  11 * refining of this idea.
  12 */
  13
  14#include <linux/fs.h>
  15#include <linux/slab.h>
  16#include <linux/dma-buf.h>
  17#include <linux/dma-fence.h>
  18#include <linux/dma-fence-unwrap.h>
  19#include <linux/anon_inodes.h>
  20#include <linux/export.h>
  21#include <linux/debugfs.h>
  22#include <linux/module.h>
  23#include <linux/seq_file.h>
  24#include <linux/sync_file.h>
  25#include <linux/poll.h>
  26#include <linux/dma-resv.h>
  27#include <linux/mm.h>
  28#include <linux/mount.h>
  29#include <linux/pseudo_fs.h>
  30
  31#include <uapi/linux/dma-buf.h>
  32#include <uapi/linux/magic.h>
  33
  34#include "dma-buf-sysfs-stats.h"
  35
  36static inline int is_dma_buf_file(struct file *);
  37
  38struct dma_buf_list {
  39	struct list_head head;
  40	struct mutex lock;
  41};
  42
  43static struct dma_buf_list db_list;
  44
  45static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
  46{
  47	struct dma_buf *dmabuf;
  48	char name[DMA_BUF_NAME_LEN];
  49	size_t ret = 0;
  50
  51	dmabuf = dentry->d_fsdata;
  52	spin_lock(&dmabuf->name_lock);
  53	if (dmabuf->name)
  54		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
  55	spin_unlock(&dmabuf->name_lock);
  56
  57	return dynamic_dname(buffer, buflen, "/%s:%s",
  58			     dentry->d_name.name, ret > 0 ? name : "");
  59}
  60
  61static void dma_buf_release(struct dentry *dentry)
  62{
  63	struct dma_buf *dmabuf;
  64
  65	dmabuf = dentry->d_fsdata;
  66	if (unlikely(!dmabuf))
  67		return;
  68
  69	BUG_ON(dmabuf->vmapping_counter);
  70
  71	/*
  72	 * If you hit this BUG() it could mean:
  73	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
  74	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
  75	 */
  76	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
  77
  78	dma_buf_stats_teardown(dmabuf);
  79	dmabuf->ops->release(dmabuf);
  80
  81	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
  82		dma_resv_fini(dmabuf->resv);
  83
  84	WARN_ON(!list_empty(&dmabuf->attachments));
  85	module_put(dmabuf->owner);
  86	kfree(dmabuf->name);
  87	kfree(dmabuf);
  88}
  89
  90static int dma_buf_file_release(struct inode *inode, struct file *file)
  91{
  92	struct dma_buf *dmabuf;
  93
  94	if (!is_dma_buf_file(file))
  95		return -EINVAL;
  96
  97	dmabuf = file->private_data;
  98	if (dmabuf) {
  99		mutex_lock(&db_list.lock);
 100		list_del(&dmabuf->list_node);
 101		mutex_unlock(&db_list.lock);
 102	}
 103
 104	return 0;
 105}
 106
 107static const struct dentry_operations dma_buf_dentry_ops = {
 108	.d_dname = dmabuffs_dname,
 109	.d_release = dma_buf_release,
 110};
 111
 112static struct vfsmount *dma_buf_mnt;
 113
 114static int dma_buf_fs_init_context(struct fs_context *fc)
 115{
 116	struct pseudo_fs_context *ctx;
 117
 118	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
 119	if (!ctx)
 120		return -ENOMEM;
 121	ctx->dops = &dma_buf_dentry_ops;
 122	return 0;
 123}
 124
 125static struct file_system_type dma_buf_fs_type = {
 126	.name = "dmabuf",
 127	.init_fs_context = dma_buf_fs_init_context,
 128	.kill_sb = kill_anon_super,
 129};
 130
 131static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 132{
 133	struct dma_buf *dmabuf;
 134	int ret;
 135
 136	if (!is_dma_buf_file(file))
 137		return -EINVAL;
 138
 139	dmabuf = file->private_data;
 140
 141	/* check if buffer supports mmap */
 142	if (!dmabuf->ops->mmap)
 143		return -EINVAL;
 144
 145	/* check for overflowing the buffer's size */
 146	if (vma->vm_pgoff + vma_pages(vma) >
 147	    dmabuf->size >> PAGE_SHIFT)
 148		return -EINVAL;
 149
 150	dma_resv_lock(dmabuf->resv, NULL);
 151	ret = dmabuf->ops->mmap(dmabuf, vma);
 152	dma_resv_unlock(dmabuf->resv);
 153
 154	return ret;
 155}
 156
 157static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
 158{
 159	struct dma_buf *dmabuf;
 160	loff_t base;
 161
 162	if (!is_dma_buf_file(file))
 163		return -EBADF;
 164
 165	dmabuf = file->private_data;
 166
 167	/* only support discovering the end of the buffer,
 168	   but also allow SEEK_SET to maintain the idiomatic
 169	   SEEK_END(0), SEEK_CUR(0) pattern */
 170	if (whence == SEEK_END)
 171		base = dmabuf->size;
 172	else if (whence == SEEK_SET)
 173		base = 0;
 174	else
 175		return -EINVAL;
 176
 177	if (offset != 0)
 178		return -EINVAL;
 179
 180	return base + offset;
 181}
 182
 183/**
 184 * DOC: implicit fence polling
 185 *
 186 * To support cross-device and cross-driver synchronization of buffer access
 187 * implicit fences (represented internally in the kernel with &struct dma_fence)
 188 * can be attached to a &dma_buf. The glue for that and a few related things are
 189 * provided in the &dma_resv structure.
 190 *
 191 * Userspace can query the state of these implicitly tracked fences using poll()
 192 * and related system calls:
 193 *
  194 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 195 *   most recent write or exclusive fence.
 196 *
 197 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 198 *   all attached fences, shared and exclusive ones.
 199 *
 200 * Note that this only signals the completion of the respective fences, i.e. the
 201 * DMA transfers are complete. Cache flushing and any other necessary
 202 * preparations before CPU access can begin still need to happen.
 203 *
  204 * As an alternative to poll(), the set of fences on a DMA buffer can be
 205 * exported as a &sync_file using &dma_buf_sync_file_export.
 206 */
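A userspace sketch of that sync_file alternative (illustrative only; the ioctl and struct come from the uapi <linux/dma-buf.h> of this kernel version):

#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Sketch: snapshot the current fences of a dma-buf as a sync_file fd. */
static int export_fences(int dmabuf_fd)
{
	struct dma_buf_export_sync_file arg = {
		.flags = DMA_BUF_SYNC_RW,	/* both read and write fences */
	};

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
		return -1;

	return arg.fd;	/* poll()able sync_file file descriptor */
}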
 207
 208static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 209{
 210	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
 211	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
 212	unsigned long flags;
 213
 214	spin_lock_irqsave(&dcb->poll->lock, flags);
 215	wake_up_locked_poll(dcb->poll, dcb->active);
 216	dcb->active = 0;
 217	spin_unlock_irqrestore(&dcb->poll->lock, flags);
 218	dma_fence_put(fence);
 219	/* Paired with get_file in dma_buf_poll */
 220	fput(dmabuf->file);
 221}
 222
 223static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
 224				struct dma_buf_poll_cb_t *dcb)
 225{
 226	struct dma_resv_iter cursor;
 227	struct dma_fence *fence;
 228	int r;
 229
 230	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
 231				fence) {
 232		dma_fence_get(fence);
 233		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
 234		if (!r)
 235			return true;
 236		dma_fence_put(fence);
 237	}
 238
 239	return false;
 240}
 241
 242static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 243{
 244	struct dma_buf *dmabuf;
 245	struct dma_resv *resv;
 246	__poll_t events;
 247
 248	dmabuf = file->private_data;
 249	if (!dmabuf || !dmabuf->resv)
 250		return EPOLLERR;
 251
 252	resv = dmabuf->resv;
 253
 254	poll_wait(file, &dmabuf->poll, poll);
 255
 256	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
 257	if (!events)
 258		return 0;
 259
 260	dma_resv_lock(resv, NULL);
 261
 262	if (events & EPOLLOUT) {
 263		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
 264
 265		/* Check that callback isn't busy */
 266		spin_lock_irq(&dmabuf->poll.lock);
 267		if (dcb->active)
 268			events &= ~EPOLLOUT;
 269		else
 270			dcb->active = EPOLLOUT;
 271		spin_unlock_irq(&dmabuf->poll.lock);
 272
 273		if (events & EPOLLOUT) {
 274			/* Paired with fput in dma_buf_poll_cb */
 275			get_file(dmabuf->file);
 276
 277			if (!dma_buf_poll_add_cb(resv, true, dcb))
 278				/* No callback queued, wake up any other waiters */
 279				dma_buf_poll_cb(NULL, &dcb->cb);
 280			else
 281				events &= ~EPOLLOUT;
 282		}
 283	}
 284
 285	if (events & EPOLLIN) {
 286		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
 287
 288		/* Check that callback isn't busy */
 289		spin_lock_irq(&dmabuf->poll.lock);
 290		if (dcb->active)
 291			events &= ~EPOLLIN;
 292		else
 293			dcb->active = EPOLLIN;
 294		spin_unlock_irq(&dmabuf->poll.lock);
 295
 296		if (events & EPOLLIN) {
 297			/* Paired with fput in dma_buf_poll_cb */
 298			get_file(dmabuf->file);
 299
 300			if (!dma_buf_poll_add_cb(resv, false, dcb))
 301				/* No callback queued, wake up any other waiters */
 302				dma_buf_poll_cb(NULL, &dcb->cb);
 303			else
 304				events &= ~EPOLLIN;
 305		}
 306	}
 307
 308	dma_resv_unlock(resv);
 309	return events;
 310}
 311
 312/**
 313 * dma_buf_set_name - Set a name to a specific dma_buf to track the usage.
 314 * It could support changing the name of the dma-buf if the same
  315 * piece of memory is used for multiple purposes between different devices.
 316 *
 317 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 318 * @buf:    [in]     A piece of userspace memory that contains the name of
 319 *                   the dma-buf.
 320 *
 321 * Returns 0 on success. If the dma-buf buffer is already attached to
 322 * devices, return -EBUSY.
 323 *
 324 */
 325static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
 326{
 327	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
 328
 329	if (IS_ERR(name))
 330		return PTR_ERR(name);
 331
 332	spin_lock(&dmabuf->name_lock);
 333	kfree(dmabuf->name);
 334	dmabuf->name = name;
 335	spin_unlock(&dmabuf->name_lock);
 336
 337	return 0;
 338}
 339
 340#if IS_ENABLED(CONFIG_SYNC_FILE)
 341static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
 342				     void __user *user_data)
 343{
 344	struct dma_buf_export_sync_file arg;
 345	enum dma_resv_usage usage;
 346	struct dma_fence *fence = NULL;
 347	struct sync_file *sync_file;
 348	int fd, ret;
 349
 350	if (copy_from_user(&arg, user_data, sizeof(arg)))
 351		return -EFAULT;
 352
 353	if (arg.flags & ~DMA_BUF_SYNC_RW)
 354		return -EINVAL;
 355
 356	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
 357		return -EINVAL;
 358
 359	fd = get_unused_fd_flags(O_CLOEXEC);
 360	if (fd < 0)
 361		return fd;
 362
 363	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
 364	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
 365	if (ret)
 366		goto err_put_fd;
 367
 368	if (!fence)
 369		fence = dma_fence_get_stub();
 370
 371	sync_file = sync_file_create(fence);
 372
 373	dma_fence_put(fence);
 374
 375	if (!sync_file) {
 376		ret = -ENOMEM;
 377		goto err_put_fd;
 378	}
 379
 380	arg.fd = fd;
 381	if (copy_to_user(user_data, &arg, sizeof(arg))) {
 382		ret = -EFAULT;
 383		goto err_put_file;
 384	}
 385
 386	fd_install(fd, sync_file->file);
 387
 388	return 0;
 389
 390err_put_file:
 391	fput(sync_file->file);
 392err_put_fd:
 393	put_unused_fd(fd);
 394	return ret;
 395}
 396
 397static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
 398				     const void __user *user_data)
 399{
 400	struct dma_buf_import_sync_file arg;
 401	struct dma_fence *fence, *f;
 402	enum dma_resv_usage usage;
 403	struct dma_fence_unwrap iter;
 404	unsigned int num_fences;
 405	int ret = 0;
 406
 407	if (copy_from_user(&arg, user_data, sizeof(arg)))
 408		return -EFAULT;
 409
 410	if (arg.flags & ~DMA_BUF_SYNC_RW)
 411		return -EINVAL;
 412
 413	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
 414		return -EINVAL;
 415
 416	fence = sync_file_get_fence(arg.fd);
 417	if (!fence)
 418		return -EINVAL;
 419
 420	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
 421						   DMA_RESV_USAGE_READ;
 422
 423	num_fences = 0;
 424	dma_fence_unwrap_for_each(f, &iter, fence)
 425		++num_fences;
 426
 427	if (num_fences > 0) {
 428		dma_resv_lock(dmabuf->resv, NULL);
 429
 430		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
 431		if (!ret) {
 432			dma_fence_unwrap_for_each(f, &iter, fence)
 433				dma_resv_add_fence(dmabuf->resv, f, usage);
 434		}
 435
 436		dma_resv_unlock(dmabuf->resv);
 437	}
 438
 439	dma_fence_put(fence);
 440
 441	return ret;
 442}
 443#endif
 444
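/*
 * Illustrative sketch (not part of the original file): a userspace caller of
 * the DMA_BUF_IOCTL_EXPORT_SYNC_FILE ioctl implemented above. The function
 * name example_export_fence() is hypothetical.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	// Snapshot the fences a reader would have to wait on as a sync_file
 *	// fd that can be polled or handed to another API.
 *	int example_export_fence(int dmabuf_fd)
 *	{
 *		struct dma_buf_export_sync_file arg = {
 *			.flags = DMA_BUF_SYNC_READ,
 *		};
 *
 *		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
 *			return -1;
 *
 *		return arg.fd;	// close() when no longer needed
 *	}
 */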
 445static long dma_buf_ioctl(struct file *file,
 446			  unsigned int cmd, unsigned long arg)
 447{
 448	struct dma_buf *dmabuf;
 449	struct dma_buf_sync sync;
 450	enum dma_data_direction direction;
 451	int ret;
 452
 453	dmabuf = file->private_data;
 454
 455	switch (cmd) {
 456	case DMA_BUF_IOCTL_SYNC:
 457		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
 458			return -EFAULT;
 459
 460		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
 461			return -EINVAL;
 462
 463		switch (sync.flags & DMA_BUF_SYNC_RW) {
 464		case DMA_BUF_SYNC_READ:
 465			direction = DMA_FROM_DEVICE;
 466			break;
 467		case DMA_BUF_SYNC_WRITE:
 468			direction = DMA_TO_DEVICE;
 469			break;
 470		case DMA_BUF_SYNC_RW:
 471			direction = DMA_BIDIRECTIONAL;
 472			break;
 473		default:
 474			return -EINVAL;
 475		}
 476
 477		if (sync.flags & DMA_BUF_SYNC_END)
 478			ret = dma_buf_end_cpu_access(dmabuf, direction);
 479		else
 480			ret = dma_buf_begin_cpu_access(dmabuf, direction);
 481
 482		return ret;
 483
 484	case DMA_BUF_SET_NAME_A:
 485	case DMA_BUF_SET_NAME_B:
 486		return dma_buf_set_name(dmabuf, (const char __user *)arg);
 487
 488#if IS_ENABLED(CONFIG_SYNC_FILE)
 489	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
 490		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
 491	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
 492		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
 493#endif
 494
 495	default:
 496		return -ENOTTY;
 497	}
 498}
 499
 500static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 501{
 502	struct dma_buf *dmabuf = file->private_data;
 503
 504	seq_printf(m, "size:\t%zu\n", dmabuf->size);
 505	/* Don't count the temporary reference taken inside procfs seq_show */
 506	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
 507	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
 508	spin_lock(&dmabuf->name_lock);
 509	if (dmabuf->name)
 510		seq_printf(m, "name:\t%s\n", dmabuf->name);
 511	spin_unlock(&dmabuf->name_lock);
 512}
 513
 514static const struct file_operations dma_buf_fops = {
 515	.release	= dma_buf_file_release,
 516	.mmap		= dma_buf_mmap_internal,
 517	.llseek		= dma_buf_llseek,
 518	.poll		= dma_buf_poll,
 519	.unlocked_ioctl	= dma_buf_ioctl,
 520	.compat_ioctl	= compat_ptr_ioctl,
 521	.show_fdinfo	= dma_buf_show_fdinfo,
 522};
 523
 524/*
 525 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 526 */
 527static inline int is_dma_buf_file(struct file *file)
 528{
 529	return file->f_op == &dma_buf_fops;
 530}
 531
 532static struct file *dma_buf_getfile(size_t size, int flags)
 533{
 534	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
 535	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
 536	struct file *file;
 537
 538	if (IS_ERR(inode))
 539		return ERR_CAST(inode);
 540
 541	inode->i_size = size;
 542	inode_set_bytes(inode, size);
 543
 544	/*
  545	 * The ->i_ino acquired from get_next_ino() is not unique and thus
  546	 * not suitable for use as the dentry name by dmabuf stats.
  547	 * Override ->i_ino with a unique, dmabuffs-specific
  548	 * value.
 549	 */
 550	inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
 551	flags &= O_ACCMODE | O_NONBLOCK;
 552	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
 553				 flags, &dma_buf_fops);
 554	if (IS_ERR(file))
 555		goto err_alloc_file;
 556
 557	return file;
 558
 559err_alloc_file:
 560	iput(inode);
 561	return file;
 562}
 563
 564/**
 565 * DOC: dma buf device access
 566 *
 567 * For device DMA access to a shared DMA buffer the usual sequence of operations
 568 * is fairly simple:
 569 *
  570 * 1. The exporter defines its exporter instance using
 571 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 572 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 573 *    as a file descriptor by calling dma_buf_fd().
 574 *
  575 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 576 *    to share with: First the file descriptor is converted to a &dma_buf using
 577 *    dma_buf_get(). Then the buffer is attached to the device using
 578 *    dma_buf_attach().
 579 *
 580 *    Up to this stage the exporter is still free to migrate or reallocate the
 581 *    backing storage.
 582 *
 583 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 584 *    access to the shared buffer. In the kernel this is done by calling
 585 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 586 *
 587 * 4. Once a driver is done with a shared buffer it needs to call
 588 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 589 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 590 *
  591 * For the detailed semantics exporters are expected to implement see
  592 * &dma_buf_ops. A condensed importer-side sketch of this sequence follows below.
 593 */
 594
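/*
 * Illustrative sketch (not part of the original file) of the importer-side
 * sequence described above, using the unlocked mapping helpers. The name
 * example_import(), the device pointer and the error handling are assumptions
 * for illustration only.
 *
 *	int example_import(struct device *dev, int fd)
 *	{
 *		struct dma_buf *dmabuf;
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *		int ret = 0;
 *
 *		dmabuf = dma_buf_get(fd);		// step 2: fd -> dma_buf
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		attach = dma_buf_attach(dmabuf, dev);	// step 2: attach device
 *		if (IS_ERR(attach)) {
 *			ret = PTR_ERR(attach);
 *			goto out_put;
 *		}
 *
 *		sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *		if (IS_ERR(sgt)) {			// step 3: map for DMA
 *			ret = PTR_ERR(sgt);
 *			goto out_detach;
 *		}
 *
 *		// program the device with the addresses in sgt here
 *
 *		dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	out_detach:
 *		dma_buf_detach(dmabuf, attach);		// step 4: clean up
 *	out_put:
 *		dma_buf_put(dmabuf);
 *		return ret;
 *	}
 */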
 595/**
 596 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 597 * with this buffer, so it can be exported.
  598 * It also connects the allocator-specific data and ops to the buffer
  599 * and records a name string for the exporter; useful in debugging.
 600 *
 601 * @exp_info:	[in]	holds all the export related information provided
 602 *			by the exporter. see &struct dma_buf_export_info
 603 *			for further details.
 604 *
  605 * On success, returns a newly created struct dma_buf object, which wraps the
  606 * supplied private data and operations for &struct dma_buf_ops. If the
  607 * required ops are missing or allocating the struct dma_buf fails, a negative
  608 * error pointer is returned.
 609 *
 610 * For most cases the easiest way to create @exp_info is through the
 611 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 612 */
 613struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 614{
 615	struct dma_buf *dmabuf;
 616	struct dma_resv *resv = exp_info->resv;
 617	struct file *file;
 618	size_t alloc_size = sizeof(struct dma_buf);
 619	int ret;
 620
 621	if (WARN_ON(!exp_info->priv || !exp_info->ops
 622		    || !exp_info->ops->map_dma_buf
 623		    || !exp_info->ops->unmap_dma_buf
 624		    || !exp_info->ops->release))
 625		return ERR_PTR(-EINVAL);
 626
 627	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
 628		    (exp_info->ops->pin || exp_info->ops->unpin)))
 629		return ERR_PTR(-EINVAL);
 630
 631	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
 632		return ERR_PTR(-EINVAL);
 633
 634	if (!try_module_get(exp_info->owner))
 635		return ERR_PTR(-ENOENT);
 636
 637	file = dma_buf_getfile(exp_info->size, exp_info->flags);
 638	if (IS_ERR(file)) {
 639		ret = PTR_ERR(file);
 640		goto err_module;
 641	}
 642
 643	if (!exp_info->resv)
 644		alloc_size += sizeof(struct dma_resv);
 645	else
 646		/* prevent &dma_buf[1] == dma_buf->resv */
 647		alloc_size += 1;
 648	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 649	if (!dmabuf) {
 650		ret = -ENOMEM;
 651		goto err_file;
 652	}
 653
 654	dmabuf->priv = exp_info->priv;
 655	dmabuf->ops = exp_info->ops;
 656	dmabuf->size = exp_info->size;
 657	dmabuf->exp_name = exp_info->exp_name;
 658	dmabuf->owner = exp_info->owner;
 659	spin_lock_init(&dmabuf->name_lock);
 660	init_waitqueue_head(&dmabuf->poll);
 661	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
 662	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
 663	INIT_LIST_HEAD(&dmabuf->attachments);
 664
 665	if (!resv) {
 666		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
 667		dma_resv_init(dmabuf->resv);
 668	} else {
 669		dmabuf->resv = resv;
 670	}
 671
 672	ret = dma_buf_stats_setup(dmabuf, file);
 673	if (ret)
 674		goto err_dmabuf;
 675
 676	file->private_data = dmabuf;
 677	file->f_path.dentry->d_fsdata = dmabuf;
 678	dmabuf->file = file;
 679
 680	mutex_lock(&db_list.lock);
 681	list_add(&dmabuf->list_node, &db_list.head);
 682	mutex_unlock(&db_list.lock);
 683
 684	return dmabuf;
 685
 686err_dmabuf:
 687	if (!resv)
 688		dma_resv_fini(dmabuf->resv);
 689	kfree(dmabuf);
 690err_file:
 691	fput(file);
 692err_module:
 693	module_put(exp_info->owner);
 694	return ERR_PTR(ret);
 695}
 696EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
 697
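/*
 * Illustrative sketch (not part of the original file): an exporter wrapping a
 * driver-private object into a dma-buf and handing userspace an fd for it.
 * struct example_buffer and example_dmabuf_ops are hypothetical and assumed
 * to be defined by the exporting driver.
 *
 *	int example_export(struct example_buffer *obj, size_t size)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *		int fd;
 *
 *		exp_info.ops = &example_dmabuf_ops;
 *		exp_info.size = size;
 *		exp_info.flags = O_RDWR;
 *		exp_info.priv = obj;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 *		if (fd < 0)
 *			dma_buf_put(dmabuf);	// drop the export reference on failure
 *		return fd;
 *	}
 */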
 698/**
 699 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 700 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 701 * @flags:      [in]    flags to give to fd
 702 *
 703 * On success, returns an associated 'fd'. Else, returns error.
 704 */
 705int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 706{
 707	int fd;
 708
 709	if (!dmabuf || !dmabuf->file)
 710		return -EINVAL;
 711
 712	fd = get_unused_fd_flags(flags);
 713	if (fd < 0)
 714		return fd;
 715
 716	fd_install(fd, dmabuf->file);
 717
 718	return fd;
 719}
 720EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
 721
 722/**
 723 * dma_buf_get - returns the struct dma_buf related to an fd
 724 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 725 *
  726 * On success, returns the struct dma_buf associated with an fd; uses the
  727 * file's refcounting done by fget() to increase the refcount. Returns an
  728 * ERR_PTR otherwise.
 729 */
 730struct dma_buf *dma_buf_get(int fd)
 731{
 732	struct file *file;
 733
 734	file = fget(fd);
 735
 736	if (!file)
 737		return ERR_PTR(-EBADF);
 738
 739	if (!is_dma_buf_file(file)) {
 740		fput(file);
 741		return ERR_PTR(-EINVAL);
 742	}
 743
 744	return file->private_data;
 745}
 746EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
 747
 748/**
 749 * dma_buf_put - decreases refcount of the buffer
 750 * @dmabuf:	[in]	buffer to reduce refcount of
 751 *
 752 * Uses file's refcounting done implicitly by fput().
 753 *
 754 * If, as a result of this call, the refcount becomes 0, the 'release' file
 755 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 756 * in turn, and frees the memory allocated for dmabuf when exported.
 757 */
 758void dma_buf_put(struct dma_buf *dmabuf)
 759{
 760	if (WARN_ON(!dmabuf || !dmabuf->file))
 761		return;
 762
 763	fput(dmabuf->file);
 764}
 765EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
 766
 767static void mangle_sg_table(struct sg_table *sg_table)
 768{
 769#ifdef CONFIG_DMABUF_DEBUG
 770	int i;
 771	struct scatterlist *sg;
 772
 773	/* To catch abuse of the underlying struct page by importers mix
 774	 * up the bits, but take care to preserve the low SG_ bits to
 775	 * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
 776	 * before passing the sgt back to the exporter. */
 777	for_each_sgtable_sg(sg_table, sg, i)
 778		sg->page_link ^= ~0xffUL;
 779#endif
 780
 781}
 782static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
 783				       enum dma_data_direction direction)
 784{
 785	struct sg_table *sg_table;
 786	signed long ret;
 787
 788	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 789	if (IS_ERR_OR_NULL(sg_table))
 790		return sg_table;
 791
 792	if (!dma_buf_attachment_is_dynamic(attach)) {
 793		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
 794					    DMA_RESV_USAGE_KERNEL, true,
 795					    MAX_SCHEDULE_TIMEOUT);
 796		if (ret < 0) {
 797			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
 798							   direction);
 799			return ERR_PTR(ret);
 800		}
 801	}
 802
 803	mangle_sg_table(sg_table);
 804	return sg_table;
 805}
 806
 807/**
 808 * DOC: locking convention
 809 *
 810 * In order to avoid deadlock situations between dma-buf exports and importers,
 811 * all dma-buf API users must follow the common dma-buf locking convention.
 812 *
 813 * Convention for importers
 814 *
 815 * 1. Importers must hold the dma-buf reservation lock when calling these
 816 *    functions:
 817 *
 818 *     - dma_buf_pin()
 819 *     - dma_buf_unpin()
 820 *     - dma_buf_map_attachment()
 821 *     - dma_buf_unmap_attachment()
 822 *     - dma_buf_vmap()
 823 *     - dma_buf_vunmap()
 824 *
 825 * 2. Importers must not hold the dma-buf reservation lock when calling these
 826 *    functions:
 827 *
 828 *     - dma_buf_attach()
 829 *     - dma_buf_dynamic_attach()
 830 *     - dma_buf_detach()
  831 *     - dma_buf_export()
 832 *     - dma_buf_fd()
 833 *     - dma_buf_get()
 834 *     - dma_buf_put()
 835 *     - dma_buf_mmap()
 836 *     - dma_buf_begin_cpu_access()
 837 *     - dma_buf_end_cpu_access()
 838 *     - dma_buf_map_attachment_unlocked()
 839 *     - dma_buf_unmap_attachment_unlocked()
 840 *     - dma_buf_vmap_unlocked()
 841 *     - dma_buf_vunmap_unlocked()
 842 *
 843 * Convention for exporters
 844 *
 845 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
 846 *    reservation and exporter can take the lock:
 847 *
 848 *     - &dma_buf_ops.attach()
 849 *     - &dma_buf_ops.detach()
 850 *     - &dma_buf_ops.release()
 851 *     - &dma_buf_ops.begin_cpu_access()
 852 *     - &dma_buf_ops.end_cpu_access()
 853 *
 854 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
 855 *    reservation and exporter can't take the lock:
 856 *
 857 *     - &dma_buf_ops.pin()
 858 *     - &dma_buf_ops.unpin()
 859 *     - &dma_buf_ops.map_dma_buf()
 860 *     - &dma_buf_ops.unmap_dma_buf()
 861 *     - &dma_buf_ops.mmap()
 862 *     - &dma_buf_ops.vmap()
 863 *     - &dma_buf_ops.vunmap()
 864 *
 865 * 3. Exporters must hold the dma-buf reservation lock when calling these
 866 *    functions:
 867 *
 868 *     - dma_buf_move_notify()
 869 */
 870
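/*
 * Illustrative sketch (not part of the original file) of the importer
 * convention above: dma_buf_pin() is on the "reservation lock held" list, so
 * a caller not already holding the lock takes dma_resv_lock() around the
 * call. The name example_pin_for_scanout() is hypothetical.
 *
 *	int example_pin_for_scanout(struct dma_buf_attachment *attach)
 *	{
 *		struct dma_buf *dmabuf = attach->dmabuf;
 *		int ret;
 *
 *		dma_resv_lock(dmabuf->resv, NULL);
 *		ret = dma_buf_pin(attach);	// must not be called unlocked
 *		dma_resv_unlock(dmabuf->resv);
 *
 *		return ret;
 *	}
 */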
 871/**
 872 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 873 * @dmabuf:		[in]	buffer to attach device to.
 874 * @dev:		[in]	device to be attached.
 875 * @importer_ops:	[in]	importer operations for the attachment
 876 * @importer_priv:	[in]	importer private pointer for the attachment
 877 *
 878 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 879 * must be cleaned up by calling dma_buf_detach().
 880 *
 881 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 882 * functionality.
 883 *
 884 * Returns:
 885 *
 886 * A pointer to newly created &dma_buf_attachment on success, or a negative
 887 * error code wrapped into a pointer on failure.
 888 *
 889 * Note that this can fail if the backing storage of @dmabuf is in a place not
 890 * accessible to @dev, and cannot be moved to a more suitable place. This is
 891 * indicated with the error code -EBUSY.
 892 */
 893struct dma_buf_attachment *
 894dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
 895		       const struct dma_buf_attach_ops *importer_ops,
 896		       void *importer_priv)
 897{
 898	struct dma_buf_attachment *attach;
 899	int ret;
 900
 901	if (WARN_ON(!dmabuf || !dev))
 902		return ERR_PTR(-EINVAL);
 903
 904	if (WARN_ON(importer_ops && !importer_ops->move_notify))
 905		return ERR_PTR(-EINVAL);
 906
 907	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
 908	if (!attach)
 909		return ERR_PTR(-ENOMEM);
 910
 911	attach->dev = dev;
 912	attach->dmabuf = dmabuf;
 913	if (importer_ops)
 914		attach->peer2peer = importer_ops->allow_peer2peer;
 915	attach->importer_ops = importer_ops;
 916	attach->importer_priv = importer_priv;
 917
 918	if (dmabuf->ops->attach) {
 919		ret = dmabuf->ops->attach(dmabuf, attach);
 920		if (ret)
 921			goto err_attach;
 922	}
 923	dma_resv_lock(dmabuf->resv, NULL);
 924	list_add(&attach->node, &dmabuf->attachments);
 925	dma_resv_unlock(dmabuf->resv);
 926
 927	/* When either the importer or the exporter can't handle dynamic
 928	 * mappings we cache the mapping here to avoid issues with the
 929	 * reservation object lock.
 930	 */
 931	if (dma_buf_attachment_is_dynamic(attach) !=
 932	    dma_buf_is_dynamic(dmabuf)) {
 933		struct sg_table *sgt;
 934
 935		dma_resv_lock(attach->dmabuf->resv, NULL);
 936		if (dma_buf_is_dynamic(attach->dmabuf)) {
 937			ret = dmabuf->ops->pin(attach);
 938			if (ret)
 939				goto err_unlock;
 940		}
 941
 942		sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
 943		if (!sgt)
 944			sgt = ERR_PTR(-ENOMEM);
 945		if (IS_ERR(sgt)) {
 946			ret = PTR_ERR(sgt);
 947			goto err_unpin;
 948		}
 949		dma_resv_unlock(attach->dmabuf->resv);
 950		attach->sgt = sgt;
 951		attach->dir = DMA_BIDIRECTIONAL;
 952	}
 953
 954	return attach;
 955
 956err_attach:
 957	kfree(attach);
 958	return ERR_PTR(ret);
 959
 960err_unpin:
 961	if (dma_buf_is_dynamic(attach->dmabuf))
 962		dmabuf->ops->unpin(attach);
 963
 964err_unlock:
 965	dma_resv_unlock(attach->dmabuf->resv);
 966
 967	dma_buf_detach(dmabuf, attach);
 968	return ERR_PTR(ret);
 969}
 970EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
 971
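/*
 * Illustrative sketch (not part of the original file): a dynamic importer
 * supplies &dma_buf_attach_ops.move_notify so the exporter may relocate the
 * backing storage while attached. example_move_notify() and example_attach()
 * are hypothetical names.
 *
 *	static void example_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// tear down cached device mappings of attach here; remapping
 *		// happens later with the reservation lock held
 *	}
 *
 *	static const struct dma_buf_attach_ops example_attach_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = example_move_notify,
 *	};
 *
 *	struct dma_buf_attachment *example_attach(struct dma_buf *dmabuf,
 *						  struct device *dev, void *priv)
 *	{
 *		return dma_buf_dynamic_attach(dmabuf, dev, &example_attach_ops, priv);
 *	}
 */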
 972/**
 973 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 974 * @dmabuf:	[in]	buffer to attach device to.
 975 * @dev:	[in]	device to be attached.
 976 *
 977 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 978 * mapping.
 979 */
 980struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 981					  struct device *dev)
 982{
 983	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
 984}
 985EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
 986
 987static void __unmap_dma_buf(struct dma_buf_attachment *attach,
 988			    struct sg_table *sg_table,
 989			    enum dma_data_direction direction)
 990{
 991	/* uses XOR, hence this unmangles */
 992	mangle_sg_table(sg_table);
 993
 994	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
 995}
 996
 997/**
 998 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 999 * @dmabuf:	[in]	buffer to detach from.
1000 * @attach:	[in]	attachment to be detached; is free'd after this call.
1001 *
1002 * Clean up a device attachment obtained by calling dma_buf_attach().
1003 *
1004 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
1005 */
1006void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1007{
1008	if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
1009		return;
1010
1011	dma_resv_lock(dmabuf->resv, NULL);
1012
1013	if (attach->sgt) {
1014
1015		__unmap_dma_buf(attach, attach->sgt, attach->dir);
1016
1017		if (dma_buf_is_dynamic(attach->dmabuf))
1018			dmabuf->ops->unpin(attach);
1019	}
1020	list_del(&attach->node);
1021
1022	dma_resv_unlock(dmabuf->resv);
1023
1024	if (dmabuf->ops->detach)
1025		dmabuf->ops->detach(dmabuf, attach);
1026
1027	kfree(attach);
1028}
1029EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
1030
1031/**
1032 * dma_buf_pin - Lock down the DMA-buf
1033 * @attach:	[in]	attachment which should be pinned
1034 *
1035 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1036 * call this, and only for limited use cases like scanout and not for temporary
1037 * pin operations. It is not permitted to allow userspace to pin arbitrary
 1038 * numbers of buffers through this interface.
1039 *
1040 * Buffers must be unpinned by calling dma_buf_unpin().
1041 *
1042 * Returns:
1043 * 0 on success, negative error code on failure.
1044 */
1045int dma_buf_pin(struct dma_buf_attachment *attach)
1046{
1047	struct dma_buf *dmabuf = attach->dmabuf;
1048	int ret = 0;
1049
1050	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1051
1052	dma_resv_assert_held(dmabuf->resv);
1053
1054	if (dmabuf->ops->pin)
1055		ret = dmabuf->ops->pin(attach);
1056
1057	return ret;
1058}
1059EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
1060
1061/**
1062 * dma_buf_unpin - Unpin a DMA-buf
1063 * @attach:	[in]	attachment which should be unpinned
1064 *
1065 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1066 * any mapping of @attach again and inform the importer through
1067 * &dma_buf_attach_ops.move_notify.
1068 */
1069void dma_buf_unpin(struct dma_buf_attachment *attach)
1070{
1071	struct dma_buf *dmabuf = attach->dmabuf;
1072
1073	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
1074
1075	dma_resv_assert_held(dmabuf->resv);
1076
1077	if (dmabuf->ops->unpin)
1078		dmabuf->ops->unpin(attach);
1079}
1080EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
1081
1082/**
1083 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1084 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1085 * dma_buf_ops.
1086 * @attach:	[in]	attachment whose scatterlist is to be returned
1087 * @direction:	[in]	direction of DMA transfer
1088 *
1089 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
1090 * on error. May return -EINTR if it is interrupted by a signal.
1091 *
1092 * On success, the DMA addresses and lengths in the returned scatterlist are
1093 * PAGE_SIZE aligned.
1094 *
1095 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
1096 * the underlying backing storage is pinned for as long as a mapping exists,
1097 * therefore users/importers should not hold onto a mapping for undue amounts of
1098 * time.
1099 *
1100 * Important: Dynamic importers must wait for the exclusive fence of the struct
1101 * dma_resv attached to the DMA-BUF first.
1102 */
1103struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1104					enum dma_data_direction direction)
1105{
1106	struct sg_table *sg_table;
1107	int r;
1108
1109	might_sleep();
1110
1111	if (WARN_ON(!attach || !attach->dmabuf))
1112		return ERR_PTR(-EINVAL);
1113
1114	dma_resv_assert_held(attach->dmabuf->resv);
1115
1116	if (attach->sgt) {
1117		/*
1118		 * Two mappings with different directions for the same
1119		 * attachment are not allowed.
1120		 */
1121		if (attach->dir != direction &&
1122		    attach->dir != DMA_BIDIRECTIONAL)
1123			return ERR_PTR(-EBUSY);
1124
1125		return attach->sgt;
1126	}
1127
1128	if (dma_buf_is_dynamic(attach->dmabuf)) {
1129		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
1130			r = attach->dmabuf->ops->pin(attach);
1131			if (r)
1132				return ERR_PTR(r);
1133		}
1134	}
1135
1136	sg_table = __map_dma_buf(attach, direction);
1137	if (!sg_table)
1138		sg_table = ERR_PTR(-ENOMEM);
1139
1140	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
1141	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1142		attach->dmabuf->ops->unpin(attach);
1143
1144	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
1145		attach->sgt = sg_table;
1146		attach->dir = direction;
1147	}
1148
1149#ifdef CONFIG_DMA_API_DEBUG
1150	if (!IS_ERR(sg_table)) {
1151		struct scatterlist *sg;
1152		u64 addr;
1153		int len;
1154		int i;
1155
1156		for_each_sgtable_dma_sg(sg_table, sg, i) {
1157			addr = sg_dma_address(sg);
1158			len = sg_dma_len(sg);
1159			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1160				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1161					 __func__, addr, len);
1162			}
1163		}
1164	}
1165#endif /* CONFIG_DMA_API_DEBUG */
1166	return sg_table;
1167}
1168EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
1169
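/*
 * Illustrative sketch (not part of the original file): walking the DMA-mapped
 * scatterlist returned by dma_buf_map_attachment() to program a device. The
 * example_dev type and example_dev_queue_dma() helper are hypothetical.
 *
 *	void example_program_sgt(struct example_dev *edev, struct sg_table *sgt)
 *	{
 *		struct scatterlist *sg;
 *		int i;
 *
 *		// only the DMA addresses and lengths are valid here, not the pages
 *		for_each_sgtable_dma_sg(sgt, sg, i)
 *			example_dev_queue_dma(edev, sg_dma_address(sg),
 *					      sg_dma_len(sg));
 *	}
 */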
1170/**
1171 * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
1172 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1173 * dma_buf_ops.
1174 * @attach:	[in]	attachment whose scatterlist is to be returned
1175 * @direction:	[in]	direction of DMA transfer
1176 *
1177 * Unlocked variant of dma_buf_map_attachment().
1178 */
1179struct sg_table *
1180dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
1181				enum dma_data_direction direction)
1182{
1183	struct sg_table *sg_table;
1184
1185	might_sleep();
1186
1187	if (WARN_ON(!attach || !attach->dmabuf))
1188		return ERR_PTR(-EINVAL);
1189
1190	dma_resv_lock(attach->dmabuf->resv, NULL);
1191	sg_table = dma_buf_map_attachment(attach, direction);
1192	dma_resv_unlock(attach->dmabuf->resv);
1193
1194	return sg_table;
1195}
1196EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
1197
1198/**
 1199 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
1200 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1201 * dma_buf_ops.
1202 * @attach:	[in]	attachment to unmap buffer from
1203 * @sg_table:	[in]	scatterlist info of the buffer to unmap
1204 * @direction:  [in]    direction of DMA transfer
1205 *
 1206 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
1207 */
1208void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1209				struct sg_table *sg_table,
1210				enum dma_data_direction direction)
1211{
1212	might_sleep();
1213
1214	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1215		return;
1216
1217	dma_resv_assert_held(attach->dmabuf->resv);
1218
1219	if (attach->sgt == sg_table)
1220		return;
1221
1222	__unmap_dma_buf(attach, sg_table, direction);
1223
1224	if (dma_buf_is_dynamic(attach->dmabuf) &&
1225	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
1226		dma_buf_unpin(attach);
1227}
1228EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
1229
1230/**
 1231 * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
1232 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
1233 * dma_buf_ops.
1234 * @attach:	[in]	attachment to unmap buffer from
1235 * @sg_table:	[in]	scatterlist info of the buffer to unmap
1236 * @direction:	[in]	direction of DMA transfer
1237 *
1238 * Unlocked variant of dma_buf_unmap_attachment().
1239 */
1240void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
1241				       struct sg_table *sg_table,
1242				       enum dma_data_direction direction)
1243{
1244	might_sleep();
1245
1246	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
1247		return;
1248
1249	dma_resv_lock(attach->dmabuf->resv, NULL);
1250	dma_buf_unmap_attachment(attach, sg_table, direction);
1251	dma_resv_unlock(attach->dmabuf->resv);
1252}
1253EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
1254
1255/**
1256 * dma_buf_move_notify - notify attachments that DMA-buf is moving
1257 *
1258 * @dmabuf:	[in]	buffer which is moving
1259 *
 1260 * Informs all attachments that they need to destroy and recreate all their
 1261 * mappings.
1262 */
1263void dma_buf_move_notify(struct dma_buf *dmabuf)
1264{
1265	struct dma_buf_attachment *attach;
1266
1267	dma_resv_assert_held(dmabuf->resv);
1268
1269	list_for_each_entry(attach, &dmabuf->attachments, node)
1270		if (attach->importer_ops)
1271			attach->importer_ops->move_notify(attach);
1272}
1273EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
1274
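/*
 * Illustrative sketch (not part of the original file): an exporter calls
 * dma_buf_move_notify() with the reservation lock held before migrating the
 * backing storage. example_migrate() and the migration step are hypothetical.
 *
 *	int example_migrate(struct dma_buf *dmabuf)
 *	{
 *		dma_resv_lock(dmabuf->resv, NULL);
 *
 *		dma_buf_move_notify(dmabuf);	// importers drop their mappings
 *		// ... move or reallocate the backing storage here ...
 *
 *		dma_resv_unlock(dmabuf->resv);
 *		return 0;
 *	}
 */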
1275/**
1276 * DOC: cpu access
1277 *
 1278 * There are multiple reasons for supporting CPU access to a dma buffer object:
1279 *
1280 * - Fallback operations in the kernel, for example when a device is connected
1281 *   over USB and the kernel needs to shuffle the data around first before
 1282 *   sending it away. Cache coherency is handled by bracketing any transactions
 1283 *   with calls to dma_buf_begin_cpu_access() and
 1284 *   dma_buf_end_cpu_access().
1285 *
 1286 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
1287 *   vmap interface is introduced. Note that on very old 32-bit architectures
1288 *   vmalloc space might be limited and result in vmap calls failing.
1289 *
1290 *   Interfaces::
1291 *
 1292 *      int dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1293 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
1294 *
1295 *   The vmap call can fail if there is no vmap support in the exporter, or if
1296 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
1297 *   count for all vmap access and calls down into the exporter's vmap function
 1298 *   only when no vmapping exists, and only unmaps it once. Protection against
 1299 *   concurrent vmap/vunmap calls is provided by the dma-buf reservation lock.
1300 *
1301 * - For full compatibility on the importer side with existing userspace
1302 *   interfaces, which might already support mmap'ing buffers. This is needed in
1303 *   many processing pipelines (e.g. feeding a software rendered image into a
1304 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 1305 *   framework already supported this, and mmap support was needed for DMA
 1306 *   buffer file descriptors to replace ION buffers.
1307 *
 1308 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 1309 *   fd. But as with CPU access there's a need to bracket the actual access,
1310 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
1311 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
1312 *   be restarted.
1313 *
1314 *   Some systems might need some sort of cache coherency management e.g. when
1315 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 1316 *   To circumvent this problem there are begin/end coherency markers that
 1317 *   forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
 1318 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl (a
 1319 *   userspace sketch follows after this section). The sequence would be used as follows:
1320 *
1321 *     - mmap dma-buf fd
 1322 *     - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
 1323 *       to the mmap'ed area, 3. SYNC_END ioctl. This can be repeated as often as you
1324 *       want (with the new data being consumed by say the GPU or the scanout
1325 *       device)
1326 *     - munmap once you don't need the buffer any more
1327 *
1328 *    For correctness and optimal performance, it is always required to use
1329 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
1330 *    mapped address. Userspace cannot rely on coherent access, even when there
1331 *    are systems where it just works without calling these ioctls.
1332 *
1333 * - And as a CPU fallback in userspace processing pipelines.
1334 *
1335 *   Similar to the motivation for kernel cpu access it is again important that
1336 *   the userspace code of a given importing subsystem can use the same
 1337 *   interfaces with an imported dma-buf buffer object as with a native buffer
1338 *   object. This is especially important for drm where the userspace part of
1339 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 1340 *   use a different way to mmap a buffer would be rather invasive.
1341 *
1342 *   The assumption in the current dma-buf interfaces is that redirecting the
1343 *   initial mmap is all that's needed. A survey of some of the existing
1344 *   subsystems shows that no driver seems to do any nefarious thing like
1345 *   syncing up with outstanding asynchronous processing on the device or
1346 *   allocating special resources at fault time. So hopefully this is good
1347 *   enough, since adding interfaces to intercept pagefaults and allow pte
1348 *   shootdowns would increase the complexity quite a bit.
1349 *
1350 *   Interface::
1351 *
1352 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
1353 *		       unsigned long);
1354 *
1355 *   If the importing subsystem simply provides a special-purpose mmap call to
1356 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1357 *   equally achieve that for a dma-buf object.
1358 */
1359
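/*
 * Illustrative sketch (not part of the original file) of the userspace
 * mmap + DMA_BUF_IOCTL_SYNC sequence described above. example_cpu_fill() is a
 * hypothetical name; for brevity the ioctl calls are not restarted on
 * -EAGAIN/-EINTR as a production caller would have to do.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/dma-buf.h>
 *
 *	int example_cpu_fill(int dmabuf_fd, size_t size)
 *	{
 *		struct dma_buf_sync sync = { 0 };
 *		void *ptr;
 *
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   dmabuf_fd, 0);
 *		if (ptr == MAP_FAILED)
 *			return -1;
 *
 *		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *		memset(ptr, 0, size);		// CPU access to the buffer
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *		munmap(ptr, size);
 *		return 0;
 *	}
 */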
1360static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1361				      enum dma_data_direction direction)
1362{
1363	bool write = (direction == DMA_BIDIRECTIONAL ||
1364		      direction == DMA_TO_DEVICE);
1365	struct dma_resv *resv = dmabuf->resv;
1366	long ret;
1367
1368	/* Wait on any implicit rendering fences */
1369	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1370				    true, MAX_SCHEDULE_TIMEOUT);
1371	if (ret < 0)
1372		return ret;
1373
1374	return 0;
1375}
1376
1377/**
1378 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1379 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1380 * preparations. Coherency is only guaranteed in the specified range for the
1381 * specified access direction.
1382 * @dmabuf:	[in]	buffer to prepare cpu access for.
 1383 * @direction:	[in]	direction of the CPU access.
1384 *
1385 * After the cpu access is complete the caller should call
 1386 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1387 * it guaranteed to be coherent with other DMA access.
1388 *
1389 * This function will also wait for any DMA transactions tracked through
1390 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1391 * synchronization this function will only ensure cache coherency, callers must
1392 * ensure synchronization with such DMA transactions on their own.
1393 *
1394 * Can return negative error values, returns 0 on success.
1395 */
1396int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1397			     enum dma_data_direction direction)
1398{
1399	int ret = 0;
1400
1401	if (WARN_ON(!dmabuf))
1402		return -EINVAL;
1403
1404	might_lock(&dmabuf->resv->lock.base);
1405
1406	if (dmabuf->ops->begin_cpu_access)
1407		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
1408
1409	/* Ensure that all fences are waited upon - but we first allow
1410	 * the native handler the chance to do so more efficiently if it
1411	 * chooses. A double invocation here will be reasonably cheap no-op.
1412	 */
1413	if (ret == 0)
1414		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1415
1416	return ret;
1417}
1418EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
1419
1420/**
1421 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
1422 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1423 * actions. Coherency is only guaranteed in the specified range for the
1424 * specified access direction.
1425 * @dmabuf:	[in]	buffer to complete cpu access for.
 1426 * @direction:	[in]	direction of the CPU access.
1427 *
1428 * This terminates CPU access started with dma_buf_begin_cpu_access().
1429 *
1430 * Can return negative error values, returns 0 on success.
1431 */
1432int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1433			   enum dma_data_direction direction)
1434{
1435	int ret = 0;
1436
1437	WARN_ON(!dmabuf);
1438
1439	might_lock(&dmabuf->resv->lock.base);
1440
1441	if (dmabuf->ops->end_cpu_access)
1442		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
1443
1444	return ret;
1445}
1446EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
1447
1448
1449/**
 1450 * dma_buf_mmap - Set up a userspace mmap with the given vma
1451 * @dmabuf:	[in]	buffer that should back the vma
1452 * @vma:	[in]	vma for the mmap
1453 * @pgoff:	[in]	offset in pages where this mmap should start within the
1454 *			dma-buf buffer.
1455 *
1456 * This function adjusts the passed in vma so that it points at the file of the
1457 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 1458 * checking on the size of the vma. Then it calls the exporter's mmap function to
1459 * set up the mapping.
1460 *
1461 * Can return negative error values, returns 0 on success.
1462 */
1463int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1464		 unsigned long pgoff)
1465{
1466	int ret;
1467
1468	if (WARN_ON(!dmabuf || !vma))
1469		return -EINVAL;
1470
1471	/* check if buffer supports mmap */
1472	if (!dmabuf->ops->mmap)
1473		return -EINVAL;
1474
1475	/* check for offset overflow */
1476	if (pgoff + vma_pages(vma) < pgoff)
1477		return -EOVERFLOW;
1478
1479	/* check for overflowing the buffer's size */
1480	if (pgoff + vma_pages(vma) >
1481	    dmabuf->size >> PAGE_SHIFT)
1482		return -EINVAL;
1483
1484	/* readjust the vma */
1485	vma_set_file(vma, dmabuf->file);
1486	vma->vm_pgoff = pgoff;
1487
1488	dma_resv_lock(dmabuf->resv, NULL);
1489	ret = dmabuf->ops->mmap(dmabuf, vma);
1490	dma_resv_unlock(dmabuf->resv);
1491
1492	return ret;
1493}
1494EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
1495
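/*
 * Illustrative sketch (not part of the original file): an importing driver
 * forwards its own mmap file operation to the exporter via dma_buf_mmap().
 * struct example_buffer and its dmabuf member are hypothetical.
 *
 *	static int example_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct example_buffer *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */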
1496/**
1497 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1498 * address space. Same restrictions as for vmap and friends apply.
1499 * @dmabuf:	[in]	buffer to vmap
1500 * @map:	[out]	returns the vmap pointer
1501 *
1502 * This call may fail due to lack of virtual mapping address space.
 1503 * These calls are optional in drivers. The intended use for them is mapping
 1504 * objects linearly into kernel address space for frequently used objects.
1505 *
1506 * To ensure coherency users must call dma_buf_begin_cpu_access() and
1507 * dma_buf_end_cpu_access() around any cpu access performed through this
1508 * mapping.
1509 *
1510 * Returns 0 on success, or a negative errno code otherwise.
1511 */
1512int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
1513{
1514	struct iosys_map ptr;
1515	int ret;
1516
1517	iosys_map_clear(map);
1518
1519	if (WARN_ON(!dmabuf))
1520		return -EINVAL;
1521
1522	dma_resv_assert_held(dmabuf->resv);
1523
1524	if (!dmabuf->ops->vmap)
1525		return -EINVAL;
1526
1527	if (dmabuf->vmapping_counter) {
1528		dmabuf->vmapping_counter++;
1529		BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1530		*map = dmabuf->vmap_ptr;
1531		return 0;
1532	}
1533
1534	BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
1535
1536	ret = dmabuf->ops->vmap(dmabuf, &ptr);
1537	if (WARN_ON_ONCE(ret))
1538		return ret;
1539
1540	dmabuf->vmap_ptr = ptr;
1541	dmabuf->vmapping_counter = 1;
1542
1543	*map = dmabuf->vmap_ptr;
1544
1545	return 0;
1546}
1547EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
1548
1549/**
1550 * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
1551 * address space. Same restrictions as for vmap and friends apply.
1552 * @dmabuf:	[in]	buffer to vmap
1553 * @map:	[out]	returns the vmap pointer
1554 *
1555 * Unlocked version of dma_buf_vmap()
1556 *
1557 * Returns 0 on success, or a negative errno code otherwise.
1558 */
1559int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1560{
1561	int ret;
1562
1563	iosys_map_clear(map);
1564
1565	if (WARN_ON(!dmabuf))
1566		return -EINVAL;
1567
1568	dma_resv_lock(dmabuf->resv, NULL);
1569	ret = dma_buf_vmap(dmabuf, map);
1570	dma_resv_unlock(dmabuf->resv);
1571
1572	return ret;
1573}
1574EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
1575
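/*
 * Illustrative sketch (not part of the original file): kernel-internal CPU
 * access through a vmap, bracketed by begin/end_cpu_access as required above.
 * example_cpu_clear() is a hypothetical name.
 *
 *	int example_cpu_clear(struct dma_buf *dmabuf)
 *	{
 *		struct iosys_map map;
 *		int ret;
 *
 *		ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *		if (ret)
 *			return ret;
 *
 *		// CPU writes data that a device will read later
 *		ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *		if (!ret) {
 *			iosys_map_memset(&map, 0, 0, dmabuf->size);
 *			ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *		}
 *
 *		dma_buf_vunmap_unlocked(dmabuf, &map);
 *		return ret;
 *	}
 */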
1576/**
1577 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
1578 * @dmabuf:	[in]	buffer to vunmap
1579 * @map:	[in]	vmap pointer to vunmap
1580 */
1581void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1582{
1583	if (WARN_ON(!dmabuf))
1584		return;
1585
1586	dma_resv_assert_held(dmabuf->resv);
1587
1588	BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1589	BUG_ON(dmabuf->vmapping_counter == 0);
1590	BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
1591
1592	if (--dmabuf->vmapping_counter == 0) {
1593		if (dmabuf->ops->vunmap)
1594			dmabuf->ops->vunmap(dmabuf, map);
1595		iosys_map_clear(&dmabuf->vmap_ptr);
1596	}
1597}
1598EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
1599
1600/**
1601 * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
1602 * @dmabuf:	[in]	buffer to vunmap
1603 * @map:	[in]	vmap pointer to vunmap
1604 */
1605void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1606{
1607	if (WARN_ON(!dmabuf))
1608		return;
1609
1610	dma_resv_lock(dmabuf->resv, NULL);
1611	dma_buf_vunmap(dmabuf, map);
1612	dma_resv_unlock(dmabuf->resv);
1613}
1614EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
1615
1616#ifdef CONFIG_DEBUG_FS
1617static int dma_buf_debug_show(struct seq_file *s, void *unused)
1618{
1619	struct dma_buf *buf_obj;
1620	struct dma_buf_attachment *attach_obj;
1621	int count = 0, attach_count;
1622	size_t size = 0;
1623	int ret;
1624
1625	ret = mutex_lock_interruptible(&db_list.lock);
1626
1627	if (ret)
1628		return ret;
1629
1630	seq_puts(s, "\nDma-buf Objects:\n");
1631	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1632		   "size", "flags", "mode", "count", "ino");
1633
1634	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1635
1636		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1637		if (ret)
1638			goto error_unlock;
1639
1640
1641		spin_lock(&buf_obj->name_lock);
1642		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1643				buf_obj->size,
1644				buf_obj->file->f_flags, buf_obj->file->f_mode,
1645				file_count(buf_obj->file),
1646				buf_obj->exp_name,
1647				file_inode(buf_obj->file)->i_ino,
1648				buf_obj->name ?: "<none>");
1649		spin_unlock(&buf_obj->name_lock);
1650
1651		dma_resv_describe(buf_obj->resv, s);
1652
1653		seq_puts(s, "\tAttached Devices:\n");
1654		attach_count = 0;
1655
1656		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1657			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1658			attach_count++;
1659		}
1660		dma_resv_unlock(buf_obj->resv);
1661
1662		seq_printf(s, "Total %d devices attached\n\n",
1663				attach_count);
1664
1665		count++;
1666		size += buf_obj->size;
1667	}
1668
1669	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1670
1671	mutex_unlock(&db_list.lock);
1672	return 0;
1673
1674error_unlock:
1675	mutex_unlock(&db_list.lock);
1676	return ret;
1677}
1678
1679DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1680
1681static struct dentry *dma_buf_debugfs_dir;
1682
1683static int dma_buf_init_debugfs(void)
1684{
1685	struct dentry *d;
1686	int err = 0;
1687
1688	d = debugfs_create_dir("dma_buf", NULL);
1689	if (IS_ERR(d))
1690		return PTR_ERR(d);
1691
1692	dma_buf_debugfs_dir = d;
1693
1694	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1695				NULL, &dma_buf_debug_fops);
1696	if (IS_ERR(d)) {
1697		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1698		debugfs_remove_recursive(dma_buf_debugfs_dir);
1699		dma_buf_debugfs_dir = NULL;
1700		err = PTR_ERR(d);
1701	}
1702
1703	return err;
1704}
1705
1706static void dma_buf_uninit_debugfs(void)
1707{
1708	debugfs_remove_recursive(dma_buf_debugfs_dir);
1709}
1710#else
1711static inline int dma_buf_init_debugfs(void)
1712{
1713	return 0;
1714}
1715static inline void dma_buf_uninit_debugfs(void)
1716{
1717}
1718#endif
1719
1720static int __init dma_buf_init(void)
1721{
1722	int ret;
1723
1724	ret = dma_buf_init_sysfs_statistics();
1725	if (ret)
1726		return ret;
1727
1728	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1729	if (IS_ERR(dma_buf_mnt))
1730		return PTR_ERR(dma_buf_mnt);
1731
1732	mutex_init(&db_list.lock);
1733	INIT_LIST_HEAD(&db_list.head);
1734	dma_buf_init_debugfs();
1735	return 0;
1736}
1737subsys_initcall(dma_buf_init);
1738
1739static void __exit dma_buf_deinit(void)
1740{
1741	dma_buf_uninit_debugfs();
1742	kern_unmount(dma_buf_mnt);
1743	dma_buf_uninit_sysfs_statistics();
1744}
1745__exitcall(dma_buf_deinit);