   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include <linux/dma-map-ops.h>
   8#include <linux/vmalloc.h>
   9#include <linux/spinlock.h>
  10#include <linux/shmem_fs.h>
  11#include <linux/dma-buf.h>
  12#include <linux/pfn_t.h>
  13
  14#include <drm/drm_prime.h>
  15#include <drm/drm_file.h>
  16
  17#include <trace/events/gpu_mem.h>
  18
  19#include "msm_drv.h"
  20#include "msm_fence.h"
  21#include "msm_gem.h"
  22#include "msm_gpu.h"
  23#include "msm_mmu.h"
  24
  25static dma_addr_t physaddr(struct drm_gem_object *obj)
  26{
  27	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  28	struct msm_drm_private *priv = obj->dev->dev_private;
  29	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  30			priv->vram.paddr;
  31}
  32
  33static bool use_pages(struct drm_gem_object *obj)
  34{
  35	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  36	return !msm_obj->vram_node;
  37}
  38
  39static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
  40{
  41	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
  42	trace_gpu_mem_total(0, 0, total_mem);
  43}
  44
  45static void update_ctx_mem(struct drm_file *file, ssize_t size)
  46{
  47	struct msm_file_private *ctx = file->driver_priv;
  48	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
  49
  50	rcu_read_lock(); /* Locks file->pid! */
  51	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
  52	rcu_read_unlock();
  53
  54}
  55
  56static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
  57{
  58	update_ctx_mem(file, obj->size);
  59	return 0;
  60}
  61
  62static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
  63{
  64	update_ctx_mem(file, -obj->size);
  65}
  66
  67/*
  68 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
  69 * API.  Really GPU cache is out of scope here (handled on cmdstream)
  70 * and all we need to do is invalidate newly allocated pages before
  71 * mapping to CPU as uncached/writecombine.
  72 *
  73 * On top of this, we have the added headache, that depending on
  74 * display generation, the display's iommu may be wired up to either
  75 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
  76 * that here we either have dma-direct or iommu ops.
  77 *
   78 * Let this be a cautionary tale of abstraction gone wrong.
  79 */
  80
  81static void sync_for_device(struct msm_gem_object *msm_obj)
  82{
  83	struct device *dev = msm_obj->base.dev->dev;
  84
  85	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  86}
  87
  88static void sync_for_cpu(struct msm_gem_object *msm_obj)
  89{
  90	struct device *dev = msm_obj->base.dev->dev;
  91
  92	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  93}
  94
  95static void update_lru_active(struct drm_gem_object *obj)
  96{
  97	struct msm_drm_private *priv = obj->dev->dev_private;
  98	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  99
 100	GEM_WARN_ON(!msm_obj->pages);
 101
 102	if (msm_obj->pin_count) {
 103		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
 104	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
 105		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
 106	} else {
 107		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
 108
 109		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
 110	}
 111}
 112
 113static void update_lru_locked(struct drm_gem_object *obj)
 114{
 115	struct msm_drm_private *priv = obj->dev->dev_private;
 116	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 117
 118	msm_gem_assert_locked(&msm_obj->base);
 119
 120	if (!msm_obj->pages) {
 121		GEM_WARN_ON(msm_obj->pin_count);
 122
 123		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
 124	} else {
 125		update_lru_active(obj);
 126	}
 127}
 128
 129static void update_lru(struct drm_gem_object *obj)
 130{
 131	struct msm_drm_private *priv = obj->dev->dev_private;
 132
 133	mutex_lock(&priv->lru.lock);
 134	update_lru_locked(obj);
 135	mutex_unlock(&priv->lru.lock);
 136}
 137
 138/* allocate pages from VRAM carveout, used when no IOMMU: */
 139static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 140{
 141	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 142	struct msm_drm_private *priv = obj->dev->dev_private;
 143	dma_addr_t paddr;
 144	struct page **p;
 145	int ret, i;
 146
 147	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 148	if (!p)
 149		return ERR_PTR(-ENOMEM);
 150
 151	spin_lock(&priv->vram.lock);
 152	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 153	spin_unlock(&priv->vram.lock);
 154	if (ret) {
 155		kvfree(p);
 156		return ERR_PTR(ret);
 157	}
 158
 159	paddr = physaddr(obj);
 160	for (i = 0; i < npages; i++) {
 161		p[i] = pfn_to_page(__phys_to_pfn(paddr));
 162		paddr += PAGE_SIZE;
 163	}
 164
 165	return p;
 166}
 167
 168static struct page **get_pages(struct drm_gem_object *obj)
 169{
 170	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 171
 172	msm_gem_assert_locked(obj);
 173
 174	if (!msm_obj->pages) {
 175		struct drm_device *dev = obj->dev;
 176		struct page **p;
 177		int npages = obj->size >> PAGE_SHIFT;
 178
 179		if (use_pages(obj))
 180			p = drm_gem_get_pages(obj);
 181		else
 182			p = get_pages_vram(obj, npages);
 183
 184		if (IS_ERR(p)) {
 185			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 186					PTR_ERR(p));
 187			return p;
 188		}
 189
 190		update_device_mem(dev->dev_private, obj->size);
 191
 192		msm_obj->pages = p;
 193
 194		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
 195		if (IS_ERR(msm_obj->sgt)) {
 196			void *ptr = ERR_CAST(msm_obj->sgt);
 197
 198			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 199			msm_obj->sgt = NULL;
 200			return ptr;
 201		}
 202
 203		/* For non-cached buffers, ensure the new pages are clean
 204		 * because display controller, GPU, etc. are not coherent:
 205		 */
 206		if (msm_obj->flags & MSM_BO_WC)
 207			sync_for_device(msm_obj);
 208
 209		update_lru(obj);
 210	}
 211
 212	return msm_obj->pages;
 213}
 214
 215static void put_pages_vram(struct drm_gem_object *obj)
 216{
 217	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 218	struct msm_drm_private *priv = obj->dev->dev_private;
 219
 220	spin_lock(&priv->vram.lock);
 221	drm_mm_remove_node(msm_obj->vram_node);
 222	spin_unlock(&priv->vram.lock);
 223
 224	kvfree(msm_obj->pages);
 225}
 226
 227static void put_pages(struct drm_gem_object *obj)
 228{
 229	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 230
 231	if (msm_obj->pages) {
 232		if (msm_obj->sgt) {
 233			/* For non-cached buffers, ensure the new
 234			 * pages are clean because display controller,
 235			 * GPU, etc. are not coherent:
 236			 */
 237			if (msm_obj->flags & MSM_BO_WC)
 238				sync_for_cpu(msm_obj);
 239
 240			sg_free_table(msm_obj->sgt);
 241			kfree(msm_obj->sgt);
 242			msm_obj->sgt = NULL;
 243		}
 244
 245		update_device_mem(obj->dev->dev_private, -obj->size);
 246
 247		if (use_pages(obj))
 248			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 249		else
 250			put_pages_vram(obj);
 251
 252		msm_obj->pages = NULL;
 253		update_lru(obj);
 254	}
 255}
 256
 257static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
 258					      unsigned madv)
 259{
 260	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 261
 262	msm_gem_assert_locked(obj);
 263
 264	if (msm_obj->madv > madv) {
 265		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 266				     msm_obj->madv, madv);
 267		return ERR_PTR(-EBUSY);
 268	}
 269
 270	return get_pages(obj);
 271}
 272
 273/*
  274 * Update the pin count of the object; called with lru.lock held
 275 */
 276void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
 277{
 278	struct msm_drm_private *priv = obj->dev->dev_private;
 279
 280	msm_gem_assert_locked(obj);
 281
 282	to_msm_bo(obj)->pin_count++;
 283	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
 284}
 285
 286static void pin_obj_locked(struct drm_gem_object *obj)
 287{
 288	struct msm_drm_private *priv = obj->dev->dev_private;
 289
 290	mutex_lock(&priv->lru.lock);
 291	msm_gem_pin_obj_locked(obj);
 292	mutex_unlock(&priv->lru.lock);
 293}
 294
 295struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
 296{
 297	struct page **p;
 298
 299	msm_gem_assert_locked(obj);
 300
 301	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
 302	if (!IS_ERR(p))
 303		pin_obj_locked(obj);
 304
 305	return p;
 306}
 307
 308void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
 309{
 310	msm_gem_assert_locked(obj);
 311
 312	msm_gem_unpin_locked(obj);
 313}
 314
 315static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
 316{
 317	if (msm_obj->flags & MSM_BO_WC)
 318		return pgprot_writecombine(prot);
 319	return prot;
 320}
 321
 322static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 323{
 324	struct vm_area_struct *vma = vmf->vma;
 325	struct drm_gem_object *obj = vma->vm_private_data;
 326	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 327	struct page **pages;
 328	unsigned long pfn;
 329	pgoff_t pgoff;
 330	int err;
 331	vm_fault_t ret;
 332
 333	/*
 334	 * vm_ops.open/drm_gem_mmap_obj and close get and put
  335	 * a reference on obj. So, we don't need to hold one here.
 336	 */
 337	err = msm_gem_lock_interruptible(obj);
 338	if (err) {
 339		ret = VM_FAULT_NOPAGE;
 340		goto out;
 341	}
 342
 343	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 344		msm_gem_unlock(obj);
 345		return VM_FAULT_SIGBUS;
 346	}
 347
 348	/* make sure we have pages attached now */
 349	pages = get_pages(obj);
 350	if (IS_ERR(pages)) {
 351		ret = vmf_error(PTR_ERR(pages));
 352		goto out_unlock;
 353	}
 354
 355	/* We don't use vmf->pgoff since that has the fake offset: */
 356	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 357
 358	pfn = page_to_pfn(pages[pgoff]);
 359
 360	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 361			pfn, pfn << PAGE_SHIFT);
 362
 363	ret = vmf_insert_pfn(vma, vmf->address, pfn);
 364
 365out_unlock:
 366	msm_gem_unlock(obj);
 367out:
 368	return ret;
 369}
 370
 371/** get mmap offset */
 372static uint64_t mmap_offset(struct drm_gem_object *obj)
 373{
 374	struct drm_device *dev = obj->dev;
 375	int ret;
 376
 377	msm_gem_assert_locked(obj);
 378
 379	/* Make it mmapable */
 380	ret = drm_gem_create_mmap_offset(obj);
 381
 382	if (ret) {
 383		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 384		return 0;
 385	}
 386
 387	return drm_vma_node_offset_addr(&obj->vma_node);
 388}
 389
 390uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 391{
 392	uint64_t offset;
 393
 394	msm_gem_lock(obj);
 395	offset = mmap_offset(obj);
 396	msm_gem_unlock(obj);
 397	return offset;
 398}
 399
 400static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 401		struct msm_gem_address_space *aspace)
 402{
 403	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 404	struct msm_gem_vma *vma;
 405
 406	msm_gem_assert_locked(obj);
 407
 408	vma = msm_gem_vma_new(aspace);
 409	if (!vma)
 410		return ERR_PTR(-ENOMEM);
 411
 412	list_add_tail(&vma->list, &msm_obj->vmas);
 413
 414	return vma;
 415}
 416
 417static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 418		struct msm_gem_address_space *aspace)
 419{
 420	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 421	struct msm_gem_vma *vma;
 422
 423	msm_gem_assert_locked(obj);
 424
 425	list_for_each_entry(vma, &msm_obj->vmas, list) {
 426		if (vma->aspace == aspace)
 427			return vma;
 428	}
 429
 430	return NULL;
 431}
 432
 433static void del_vma(struct msm_gem_vma *vma)
 434{
 435	if (!vma)
 436		return;
 437
 438	list_del(&vma->list);
 439	kfree(vma);
 440}
 441
 442/*
 443 * If close is true, this also closes the VMA (releasing the allocated
 444 * iova range) in addition to removing the iommu mapping.  In the eviction
 445 * case (!close), we keep the iova allocated, but only remove the iommu
 446 * mapping.
 447 */
 448static void
 449put_iova_spaces(struct drm_gem_object *obj, bool close)
 450{
 451	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 452	struct msm_gem_vma *vma;
 453
 454	msm_gem_assert_locked(obj);
 455
 456	list_for_each_entry(vma, &msm_obj->vmas, list) {
 457		if (vma->aspace) {
 458			msm_gem_vma_purge(vma);
 459			if (close)
 460				msm_gem_vma_close(vma);
 461		}
 462	}
 463}
 464
 465/* Called with msm_obj locked */
 466static void
 467put_iova_vmas(struct drm_gem_object *obj)
 468{
 469	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 470	struct msm_gem_vma *vma, *tmp;
 471
 472	msm_gem_assert_locked(obj);
 473
 474	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 475		del_vma(vma);
 476	}
 477}
 478
 479static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
 480		struct msm_gem_address_space *aspace,
 481		u64 range_start, u64 range_end)
 482{
 483	struct msm_gem_vma *vma;
 484
 485	msm_gem_assert_locked(obj);
 486
 487	vma = lookup_vma(obj, aspace);
 488
 489	if (!vma) {
 490		int ret;
 491
 492		vma = add_vma(obj, aspace);
 493		if (IS_ERR(vma))
 494			return vma;
 495
 496		ret = msm_gem_vma_init(vma, obj->size,
 497			range_start, range_end);
 498		if (ret) {
 499			del_vma(vma);
 500			return ERR_PTR(ret);
 501		}
 502	} else {
 503		GEM_WARN_ON(vma->iova < range_start);
 504		GEM_WARN_ON((vma->iova + obj->size) > range_end);
 505	}
 506
 507	return vma;
 508}
 509
 510int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 511{
 512	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 513	struct page **pages;
 514	int prot = IOMMU_READ;
 515
 516	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 517		prot |= IOMMU_WRITE;
 518
 519	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 520		prot |= IOMMU_PRIV;
 521
 522	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
 523		prot |= IOMMU_CACHE;
 524
 525	msm_gem_assert_locked(obj);
 526
 527	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
 528	if (IS_ERR(pages))
 529		return PTR_ERR(pages);
 530
 531	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
 532}
 533
 534void msm_gem_unpin_locked(struct drm_gem_object *obj)
 535{
 536	struct msm_drm_private *priv = obj->dev->dev_private;
 537	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 538
 539	msm_gem_assert_locked(obj);
 540
 541	mutex_lock(&priv->lru.lock);
 542	msm_obj->pin_count--;
 543	GEM_WARN_ON(msm_obj->pin_count < 0);
 544	update_lru_locked(obj);
 545	mutex_unlock(&priv->lru.lock);
 546}
 547
 548/* Special unpin path for use in fence-signaling path, avoiding the need
  549 * to hold the obj lock by only depending on things that are protected by
  550 * the LRU lock.  In particular we know that we already have backing
  551 * and that the object's dma_resv has the fence for the current
 552 * submit/job which will prevent us racing against page eviction.
 553 */
 554void msm_gem_unpin_active(struct drm_gem_object *obj)
 555{
 556	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 557
 558	msm_obj->pin_count--;
 559	GEM_WARN_ON(msm_obj->pin_count < 0);
 560	update_lru_active(obj);
 561}
 562
 563struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
 564					   struct msm_gem_address_space *aspace)
 565{
 566	return get_vma_locked(obj, aspace, 0, U64_MAX);
 567}
 568
 569static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 570		struct msm_gem_address_space *aspace, uint64_t *iova,
 571		u64 range_start, u64 range_end)
 572{
 573	struct msm_gem_vma *vma;
 574	int ret;
 575
 576	msm_gem_assert_locked(obj);
 577
 578	vma = get_vma_locked(obj, aspace, range_start, range_end);
 579	if (IS_ERR(vma))
 580		return PTR_ERR(vma);
 581
 582	ret = msm_gem_pin_vma_locked(obj, vma);
 583	if (!ret) {
 584		*iova = vma->iova;
 585		pin_obj_locked(obj);
 586	}
 587
 588	return ret;
 589}
 590
 591/*
 592 * get iova and pin it. Should have a matching put
 593 * limits iova to specified range (in pages)
 594 */
 595int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 596		struct msm_gem_address_space *aspace, uint64_t *iova,
 597		u64 range_start, u64 range_end)
 598{
 599	int ret;
 600
 601	msm_gem_lock(obj);
 602	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
 603	msm_gem_unlock(obj);
 604
 605	return ret;
 606}
 607
 608/* get iova and pin it. Should have a matching put */
 609int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 610		struct msm_gem_address_space *aspace, uint64_t *iova)
 611{
 612	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
 613}
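/*
 * Editor's note (illustrative sketch, not part of the kernel source): a
 * caller that already holds a reference on @obj and has a valid address
 * space pointer would typically pair the pin with a later unpin:
 *
 *	uint64_t iova;
 *	int ret;
 *
 *	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	// ... emit cmdstream that references 'iova' ...
 *	msm_gem_unpin_iova(obj, aspace);
 */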
 614
 615/*
 616 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 617 * valid for the life of the object
 618 */
 619int msm_gem_get_iova(struct drm_gem_object *obj,
 620		struct msm_gem_address_space *aspace, uint64_t *iova)
 621{
 622	struct msm_gem_vma *vma;
 623	int ret = 0;
 624
 625	msm_gem_lock(obj);
 626	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
 627	if (IS_ERR(vma)) {
 628		ret = PTR_ERR(vma);
 629	} else {
 630		*iova = vma->iova;
 631	}
 632	msm_gem_unlock(obj);
 633
 634	return ret;
 635}
 636
 637static int clear_iova(struct drm_gem_object *obj,
 638		      struct msm_gem_address_space *aspace)
 639{
 640	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
 641
 642	if (!vma)
 643		return 0;
 644
 645	msm_gem_vma_purge(vma);
 646	msm_gem_vma_close(vma);
 647	del_vma(vma);
 648
 649	return 0;
 650}
 651
 652/*
 653 * Get the requested iova but don't pin it.  Fails if the requested iova is
 654 * not available.  Doesn't need a put because iovas are currently valid for
 655 * the life of the object.
 656 *
 657 * Setting an iova of zero will clear the vma.
 658 */
 659int msm_gem_set_iova(struct drm_gem_object *obj,
 660		     struct msm_gem_address_space *aspace, uint64_t iova)
 661{
 662	int ret = 0;
 663
 664	msm_gem_lock(obj);
 665	if (!iova) {
 666		ret = clear_iova(obj, aspace);
 667	} else {
 668		struct msm_gem_vma *vma;
 669		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
 670		if (IS_ERR(vma)) {
 671			ret = PTR_ERR(vma);
 672		} else if (GEM_WARN_ON(vma->iova != iova)) {
 673			clear_iova(obj, aspace);
 674			ret = -EBUSY;
 675		}
 676	}
 677	msm_gem_unlock(obj);
 678
 679	return ret;
 680}
 681
 682/*
  683 * Unpin an iova by updating the reference counts. The memory isn't actually
 684 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 685 * to get rid of it
 686 */
 687void msm_gem_unpin_iova(struct drm_gem_object *obj,
 688		struct msm_gem_address_space *aspace)
 689{
 690	struct msm_gem_vma *vma;
 691
 692	msm_gem_lock(obj);
 693	vma = lookup_vma(obj, aspace);
 694	if (!GEM_WARN_ON(!vma)) {
 695		msm_gem_unpin_locked(obj);
 696	}
 697	msm_gem_unlock(obj);
 698}
 699
 700int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 701		struct drm_mode_create_dumb *args)
 702{
 703	args->pitch = align_pitch(args->width, args->bpp);
 704	args->size  = PAGE_ALIGN(args->pitch * args->height);
 705	return msm_gem_new_handle(dev, file, args->size,
 706			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 707}
 708
 709int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 710		uint32_t handle, uint64_t *offset)
 711{
 712	struct drm_gem_object *obj;
 713	int ret = 0;
 714
 715	/* GEM does all our handle to object mapping */
 716	obj = drm_gem_object_lookup(file, handle);
 717	if (obj == NULL) {
 718		ret = -ENOENT;
 719		goto fail;
 720	}
 721
 722	*offset = msm_gem_mmap_offset(obj);
 723
 724	drm_gem_object_put(obj);
 725
 726fail:
 727	return ret;
 728}
 729
 730static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 731{
 732	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 733	struct page **pages;
 734	int ret = 0;
 735
 736	msm_gem_assert_locked(obj);
 737
 738	if (obj->import_attach)
 739		return ERR_PTR(-ENODEV);
 740
 741	pages = msm_gem_get_pages_locked(obj, madv);
 742	if (IS_ERR(pages))
 743		return ERR_CAST(pages);
 744
 745	pin_obj_locked(obj);
 746
 747	/* increment vmap_count *before* vmap() call, so shrinker can
 748	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 749	 * This guarantees that we won't try to msm_gem_vunmap() this
 750	 * same object from within the vmap() call (while we already
 751	 * hold msm_obj lock)
 752	 */
 753	msm_obj->vmap_count++;
 754
 755	if (!msm_obj->vaddr) {
 756		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 757				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
 758		if (msm_obj->vaddr == NULL) {
 759			ret = -ENOMEM;
 760			goto fail;
 761		}
 762	}
 763
 764	return msm_obj->vaddr;
 765
 766fail:
 767	msm_obj->vmap_count--;
 768	msm_gem_unpin_locked(obj);
 769	return ERR_PTR(ret);
 770}
 771
 772void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 773{
 774	return get_vaddr(obj, MSM_MADV_WILLNEED);
 775}
 776
 777void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 778{
 779	void *ret;
 780
 781	msm_gem_lock(obj);
 782	ret = msm_gem_get_vaddr_locked(obj);
 783	msm_gem_unlock(obj);
 784
 785	return ret;
 786}
 787
 788/*
 789 * Don't use this!  It is for the very special case of dumping
  790 * submits from GPU hangs or faults, where the bo may already
 791 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 792 * active list.
 793 */
 794void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 795{
 796	return get_vaddr(obj, __MSM_MADV_PURGED);
 797}
 798
 799void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 800{
 801	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 802
 803	msm_gem_assert_locked(obj);
 804	GEM_WARN_ON(msm_obj->vmap_count < 1);
 805
 806	msm_obj->vmap_count--;
 807	msm_gem_unpin_locked(obj);
 808}
 809
 810void msm_gem_put_vaddr(struct drm_gem_object *obj)
 811{
 812	msm_gem_lock(obj);
 813	msm_gem_put_vaddr_locked(obj);
 814	msm_gem_unlock(obj);
 815}
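/*
 * Editor's note (illustrative sketch, not part of the original file):
 * kernel-internal CPU access brackets the mapping with get/put; 'data'
 * and 'len' below are assumed to fit within the object:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */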
 816
 817/* Update madvise status, returns true if not purged, else
 818 * false or -errno.
 819 */
 820int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 821{
 822	struct msm_drm_private *priv = obj->dev->dev_private;
 823	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 824
 825	msm_gem_lock(obj);
 826
 827	mutex_lock(&priv->lru.lock);
 828
 829	if (msm_obj->madv != __MSM_MADV_PURGED)
 830		msm_obj->madv = madv;
 831
 832	madv = msm_obj->madv;
 833
 834	/* If the obj is inactive, we might need to move it
 835	 * between inactive lists
 836	 */
 837	update_lru_locked(obj);
 838
 839	mutex_unlock(&priv->lru.lock);
 840
 841	msm_gem_unlock(obj);
 842
 843	return (madv != __MSM_MADV_PURGED);
 844}
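/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the return value means "still has backing pages" rather than
 * success/failure, so a madvise-ioctl style caller is assumed to check:
 *
 *	if (msm_gem_madvise(obj, MSM_MADV_DONTNEED)) {
 *		// pages still resident; object is now purgeable
 *	} else {
 *		// object had already been purged
 *	}
 */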
 845
 846void msm_gem_purge(struct drm_gem_object *obj)
 847{
 848	struct drm_device *dev = obj->dev;
 849	struct msm_drm_private *priv = obj->dev->dev_private;
 850	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 851
 852	msm_gem_assert_locked(obj);
 853	GEM_WARN_ON(!is_purgeable(msm_obj));
 854
 855	/* Get rid of any iommu mapping(s): */
 856	put_iova_spaces(obj, true);
 857
 858	msm_gem_vunmap(obj);
 859
 860	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 861
 862	put_pages(obj);
 863
 864	put_iova_vmas(obj);
 865
 866	mutex_lock(&priv->lru.lock);
 867	/* A one-way transition: */
 868	msm_obj->madv = __MSM_MADV_PURGED;
 869	mutex_unlock(&priv->lru.lock);
 870
 871	drm_gem_free_mmap_offset(obj);
 872
 873	/* Our goal here is to return as much of the memory as
 874	 * is possible back to the system as we are called from OOM.
 875	 * To do this we must instruct the shmfs to drop all of its
 876	 * backing pages, *now*.
 877	 */
 878	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 879
 880	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 881			0, (loff_t)-1);
 882}
 883
 884/*
 885 * Unpin the backing pages and make them available to be swapped out.
 886 */
 887void msm_gem_evict(struct drm_gem_object *obj)
 888{
 889	struct drm_device *dev = obj->dev;
 890	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 891
 892	msm_gem_assert_locked(obj);
 893	GEM_WARN_ON(is_unevictable(msm_obj));
 894
 895	/* Get rid of any iommu mapping(s): */
 896	put_iova_spaces(obj, false);
 897
 898	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 899
 900	put_pages(obj);
 901}
 902
 903void msm_gem_vunmap(struct drm_gem_object *obj)
 904{
 905	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 906
 907	msm_gem_assert_locked(obj);
 908
 909	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
 910		return;
 911
 912	vunmap(msm_obj->vaddr);
 913	msm_obj->vaddr = NULL;
 914}
 915
 916bool msm_gem_active(struct drm_gem_object *obj)
 917{
 918	msm_gem_assert_locked(obj);
 919
 920	if (to_msm_bo(obj)->pin_count)
 921		return true;
 922
 923	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
 924}
 925
 926int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 927{
 928	bool write = !!(op & MSM_PREP_WRITE);
 929	unsigned long remain =
 930		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 931	long ret;
 932
 933	if (op & MSM_PREP_BOOST) {
 934		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
 935				      ktime_get());
 936	}
 937
 938	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
 939				    true,  remain);
 940	if (ret == 0)
 941		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 942	else if (ret < 0)
 943		return ret;
 944
 945	/* TODO cache maintenance */
 946
 947	return 0;
 948}
 949
 950int msm_gem_cpu_fini(struct drm_gem_object *obj)
 951{
 952	/* TODO cache maintenance */
 953	return 0;
 954}
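/*
 * Editor's note (usage sketch, not part of the original file): CPU access
 * is bracketed by the prep/fini pair; the absolute timeout shown below is
 * an assumption for illustration only:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 100);
 *	int ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &timeout);
 *
 *	if (ret)
 *		return ret;
 *	// ... CPU writes to the buffer ...
 *	msm_gem_cpu_fini(obj);
 */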
 955
 956#ifdef CONFIG_DEBUG_FS
 957void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 958		struct msm_gem_stats *stats)
 959{
 960	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 961	struct dma_resv *robj = obj->resv;
 962	struct msm_gem_vma *vma;
 963	uint64_t off = drm_vma_node_start(&obj->vma_node);
 964	const char *madv;
 965
 966	msm_gem_lock(obj);
 967
 968	stats->all.count++;
 969	stats->all.size += obj->size;
 970
 971	if (msm_gem_active(obj)) {
 972		stats->active.count++;
 973		stats->active.size += obj->size;
 974	}
 975
 976	if (msm_obj->pages) {
 977		stats->resident.count++;
 978		stats->resident.size += obj->size;
 979	}
 980
 981	switch (msm_obj->madv) {
 982	case __MSM_MADV_PURGED:
 983		stats->purged.count++;
 984		stats->purged.size += obj->size;
 985		madv = " purged";
 986		break;
 987	case MSM_MADV_DONTNEED:
 988		stats->purgeable.count++;
 989		stats->purgeable.size += obj->size;
 990		madv = " purgeable";
 991		break;
 992	case MSM_MADV_WILLNEED:
 993	default:
 994		madv = "";
 995		break;
 996	}
 997
 998	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 999			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
1000			obj->name, kref_read(&obj->refcount),
1001			off, msm_obj->vaddr);
1002
1003	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
1004
1005	if (!list_empty(&msm_obj->vmas)) {
1006
1007		seq_puts(m, "      vmas:");
1008
1009		list_for_each_entry(vma, &msm_obj->vmas, list) {
1010			const char *name, *comm;
1011			if (vma->aspace) {
1012				struct msm_gem_address_space *aspace = vma->aspace;
1013				struct task_struct *task =
1014					get_pid_task(aspace->pid, PIDTYPE_PID);
1015				if (task) {
1016					comm = kstrdup(task->comm, GFP_KERNEL);
1017					put_task_struct(task);
1018				} else {
1019					comm = NULL;
1020				}
1021				name = aspace->name;
1022			} else {
1023				name = comm = NULL;
1024			}
1025			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
1026				name, comm ? ":" : "", comm ? comm : "",
1027				vma->aspace, vma->iova,
1028				vma->mapped ? "mapped" : "unmapped");
1029			kfree(comm);
1030		}
1031
1032		seq_puts(m, "\n");
1033	}
1034
1035	dma_resv_describe(robj, m);
1036	msm_gem_unlock(obj);
1037}
1038
1039void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1040{
1041	struct msm_gem_stats stats = {};
1042	struct msm_gem_object *msm_obj;
1043
1044	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1045	list_for_each_entry(msm_obj, list, node) {
1046		struct drm_gem_object *obj = &msm_obj->base;
1047		seq_puts(m, "   ");
1048		msm_gem_describe(obj, m, &stats);
1049	}
1050
1051	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1052			stats.all.count, stats.all.size);
1053	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1054			stats.active.count, stats.active.size);
1055	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1056			stats.resident.count, stats.resident.size);
1057	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1058			stats.purgeable.count, stats.purgeable.size);
1059	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1060			stats.purged.count, stats.purged.size);
1061}
1062#endif
1063
1064/* don't call directly!  Use drm_gem_object_put() */
1065static void msm_gem_free_object(struct drm_gem_object *obj)
1066{
1067	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1068	struct drm_device *dev = obj->dev;
1069	struct msm_drm_private *priv = dev->dev_private;
1070
1071	mutex_lock(&priv->obj_lock);
1072	list_del(&msm_obj->node);
1073	mutex_unlock(&priv->obj_lock);
1074
1075	put_iova_spaces(obj, true);
1076
1077	if (obj->import_attach) {
1078		GEM_WARN_ON(msm_obj->vaddr);
1079
1080		/* Don't drop the pages for imported dmabuf, as they are not
1081		 * ours, just free the array we allocated:
1082		 */
1083		kvfree(msm_obj->pages);
1084
1085		put_iova_vmas(obj);
1086
1087		drm_prime_gem_destroy(obj, msm_obj->sgt);
1088	} else {
1089		msm_gem_vunmap(obj);
1090		put_pages(obj);
1091		put_iova_vmas(obj);
1092	}
1093
1094	drm_gem_object_release(obj);
1095
1096	kfree(msm_obj->metadata);
1097	kfree(msm_obj);
1098}
1099
1100static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1101{
1102	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1103
1104	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1105	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1106
1107	return 0;
1108}
1109
1110/* convenience method to construct a GEM buffer object, and userspace handle */
1111int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1112		uint32_t size, uint32_t flags, uint32_t *handle,
1113		char *name)
1114{
1115	struct drm_gem_object *obj;
1116	int ret;
1117
1118	obj = msm_gem_new(dev, size, flags);
1119
1120	if (IS_ERR(obj))
1121		return PTR_ERR(obj);
1122
1123	if (name)
1124		msm_gem_object_set_name(obj, "%s", name);
1125
1126	ret = drm_gem_handle_create(file, obj, handle);
1127
1128	/* drop reference from allocate - handle holds it now */
1129	drm_gem_object_put(obj);
1130
1131	return ret;
1132}
1133
1134static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1135{
1136	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1137	enum drm_gem_object_status status = 0;
1138
1139	if (msm_obj->pages)
1140		status |= DRM_GEM_OBJECT_RESIDENT;
1141
1142	if (msm_obj->madv == MSM_MADV_DONTNEED)
1143		status |= DRM_GEM_OBJECT_PURGEABLE;
1144
1145	return status;
1146}
1147
1148static const struct vm_operations_struct vm_ops = {
1149	.fault = msm_gem_fault,
1150	.open = drm_gem_vm_open,
1151	.close = drm_gem_vm_close,
1152};
1153
1154static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1155	.free = msm_gem_free_object,
1156	.open = msm_gem_open,
1157	.close = msm_gem_close,
1158	.pin = msm_gem_prime_pin,
1159	.unpin = msm_gem_prime_unpin,
1160	.get_sg_table = msm_gem_prime_get_sg_table,
1161	.vmap = msm_gem_prime_vmap,
1162	.vunmap = msm_gem_prime_vunmap,
1163	.mmap = msm_gem_object_mmap,
1164	.status = msm_gem_status,
1165	.vm_ops = &vm_ops,
1166};
1167
1168static int msm_gem_new_impl(struct drm_device *dev,
1169		uint32_t size, uint32_t flags,
1170		struct drm_gem_object **obj)
1171{
1172	struct msm_drm_private *priv = dev->dev_private;
1173	struct msm_gem_object *msm_obj;
1174
1175	switch (flags & MSM_BO_CACHE_MASK) {
1176	case MSM_BO_CACHED:
1177	case MSM_BO_WC:
1178		break;
1179	case MSM_BO_CACHED_COHERENT:
1180		if (priv->has_cached_coherent)
1181			break;
1182		fallthrough;
1183	default:
1184		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1185				(flags & MSM_BO_CACHE_MASK));
1186		return -EINVAL;
1187	}
1188
1189	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1190	if (!msm_obj)
1191		return -ENOMEM;
1192
1193	msm_obj->flags = flags;
1194	msm_obj->madv = MSM_MADV_WILLNEED;
1195
1196	INIT_LIST_HEAD(&msm_obj->node);
1197	INIT_LIST_HEAD(&msm_obj->vmas);
1198
1199	*obj = &msm_obj->base;
1200	(*obj)->funcs = &msm_gem_object_funcs;
1201
1202	return 0;
1203}
1204
1205struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1206{
1207	struct msm_drm_private *priv = dev->dev_private;
1208	struct msm_gem_object *msm_obj;
1209	struct drm_gem_object *obj = NULL;
1210	bool use_vram = false;
1211	int ret;
1212
1213	size = PAGE_ALIGN(size);
1214
1215	if (!msm_use_mmu(dev))
1216		use_vram = true;
1217	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1218		use_vram = true;
1219
1220	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1221		return ERR_PTR(-EINVAL);
1222
1223	/* Disallow zero sized objects as they make the underlying
1224	 * infrastructure grumpy
1225	 */
1226	if (size == 0)
1227		return ERR_PTR(-EINVAL);
1228
1229	ret = msm_gem_new_impl(dev, size, flags, &obj);
1230	if (ret)
1231		return ERR_PTR(ret);
1232
1233	msm_obj = to_msm_bo(obj);
1234
1235	if (use_vram) {
1236		struct msm_gem_vma *vma;
1237		struct page **pages;
1238
1239		drm_gem_private_object_init(dev, obj, size);
1240
1241		msm_gem_lock(obj);
1242
1243		vma = add_vma(obj, NULL);
1244		msm_gem_unlock(obj);
1245		if (IS_ERR(vma)) {
1246			ret = PTR_ERR(vma);
1247			goto fail;
1248		}
1249
1250		to_msm_bo(obj)->vram_node = &vma->node;
1251
1252		msm_gem_lock(obj);
1253		pages = get_pages(obj);
1254		msm_gem_unlock(obj);
1255		if (IS_ERR(pages)) {
1256			ret = PTR_ERR(pages);
1257			goto fail;
1258		}
1259
1260		vma->iova = physaddr(obj);
1261	} else {
1262		ret = drm_gem_object_init(dev, obj, size);
1263		if (ret)
1264			goto fail;
1265		/*
1266		 * Our buffers are kept pinned, so allocating them from the
1267		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1268		 * See comments above new_inode() why this is required _and_
1269		 * expected if you're going to pin these pages.
1270		 */
1271		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1272	}
1273
1274	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1275
1276	mutex_lock(&priv->obj_lock);
1277	list_add_tail(&msm_obj->node, &priv->objects);
1278	mutex_unlock(&priv->obj_lock);
1279
1280	ret = drm_gem_create_mmap_offset(obj);
1281	if (ret)
1282		goto fail;
1283
1284	return obj;
1285
1286fail:
1287	drm_gem_object_put(obj);
1288	return ERR_PTR(ret);
1289}
1290
1291struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1292		struct dma_buf *dmabuf, struct sg_table *sgt)
1293{
1294	struct msm_drm_private *priv = dev->dev_private;
1295	struct msm_gem_object *msm_obj;
1296	struct drm_gem_object *obj;
1297	uint32_t size;
1298	int ret, npages;
1299
1300	/* if we don't have IOMMU, don't bother pretending we can import: */
1301	if (!msm_use_mmu(dev)) {
1302		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1303		return ERR_PTR(-EINVAL);
1304	}
1305
1306	size = PAGE_ALIGN(dmabuf->size);
1307
1308	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1309	if (ret)
1310		return ERR_PTR(ret);
1311
1312	drm_gem_private_object_init(dev, obj, size);
1313
1314	npages = size / PAGE_SIZE;
1315
1316	msm_obj = to_msm_bo(obj);
1317	msm_gem_lock(obj);
1318	msm_obj->sgt = sgt;
1319	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1320	if (!msm_obj->pages) {
1321		msm_gem_unlock(obj);
1322		ret = -ENOMEM;
1323		goto fail;
1324	}
1325
1326	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1327	if (ret) {
1328		msm_gem_unlock(obj);
1329		goto fail;
1330	}
1331
1332	msm_gem_unlock(obj);
1333
1334	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1335
1336	mutex_lock(&priv->obj_lock);
1337	list_add_tail(&msm_obj->node, &priv->objects);
1338	mutex_unlock(&priv->obj_lock);
1339
1340	ret = drm_gem_create_mmap_offset(obj);
1341	if (ret)
1342		goto fail;
1343
1344	return obj;
1345
1346fail:
1347	drm_gem_object_put(obj);
1348	return ERR_PTR(ret);
1349}
1350
1351void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1352		uint32_t flags, struct msm_gem_address_space *aspace,
1353		struct drm_gem_object **bo, uint64_t *iova)
1354{
1355	void *vaddr;
1356	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1357	int ret;
1358
1359	if (IS_ERR(obj))
1360		return ERR_CAST(obj);
1361
1362	if (iova) {
1363		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1364		if (ret)
1365			goto err;
1366	}
1367
1368	vaddr = msm_gem_get_vaddr(obj);
1369	if (IS_ERR(vaddr)) {
1370		msm_gem_unpin_iova(obj, aspace);
1371		ret = PTR_ERR(vaddr);
1372		goto err;
1373	}
1374
1375	if (bo)
1376		*bo = obj;
1377
1378	return vaddr;
1379err:
1380	drm_gem_object_put(obj);
1381
1382	return ERR_PTR(ret);
1383
1384}
1385
1386void msm_gem_kernel_put(struct drm_gem_object *bo,
1387		struct msm_gem_address_space *aspace)
1388{
1389	if (IS_ERR_OR_NULL(bo))
1390		return;
1391
1392	msm_gem_put_vaddr(bo);
1393	msm_gem_unpin_iova(bo, aspace);
1394	drm_gem_object_put(bo);
1395}
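/*
 * Editor's note (illustrative sketch, not part of the original file): a
 * driver-internal buffer gets its kernel mapping and pinned iova in one
 * call, and all three references are dropped again by _put():
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *vaddr;
 *
 *	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	// ... CPU writes via 'vaddr', GPU access via 'iova' ...
 *	msm_gem_kernel_put(bo, aspace);
 */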
1396
1397void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1398{
1399	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1400	va_list ap;
1401
1402	if (!fmt)
1403		return;
1404
1405	va_start(ap, fmt);
1406	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1407	va_end(ap);
1408}
v4.17
 
   1/*
   2 * Copyright (C) 2013 Red Hat
   3 * Author: Rob Clark <robdclark@gmail.com>
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published by
   7 * the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program.  If not, see <http://www.gnu.org/licenses/>.
  16 */
  17
 
 
  18#include <linux/spinlock.h>
  19#include <linux/shmem_fs.h>
  20#include <linux/dma-buf.h>
  21#include <linux/pfn_t.h>
  22
 
 
 
 
 
  23#include "msm_drv.h"
  24#include "msm_fence.h"
  25#include "msm_gem.h"
  26#include "msm_gpu.h"
  27#include "msm_mmu.h"
  28
  29static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
  30
  31
  32static dma_addr_t physaddr(struct drm_gem_object *obj)
  33{
  34	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  35	struct msm_drm_private *priv = obj->dev->dev_private;
  36	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  37			priv->vram.paddr;
  38}
  39
  40static bool use_pages(struct drm_gem_object *obj)
  41{
  42	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  43	return !msm_obj->vram_node;
  44}
  45
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  46/* allocate pages from VRAM carveout, used when no IOMMU: */
  47static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  48{
  49	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  50	struct msm_drm_private *priv = obj->dev->dev_private;
  51	dma_addr_t paddr;
  52	struct page **p;
  53	int ret, i;
  54
  55	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  56	if (!p)
  57		return ERR_PTR(-ENOMEM);
  58
  59	spin_lock(&priv->vram.lock);
  60	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
  61	spin_unlock(&priv->vram.lock);
  62	if (ret) {
  63		kvfree(p);
  64		return ERR_PTR(ret);
  65	}
  66
  67	paddr = physaddr(obj);
  68	for (i = 0; i < npages; i++) {
  69		p[i] = phys_to_page(paddr);
  70		paddr += PAGE_SIZE;
  71	}
  72
  73	return p;
  74}
  75
  76static struct page **get_pages(struct drm_gem_object *obj)
  77{
  78	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  79
 
 
  80	if (!msm_obj->pages) {
  81		struct drm_device *dev = obj->dev;
  82		struct page **p;
  83		int npages = obj->size >> PAGE_SHIFT;
  84
  85		if (use_pages(obj))
  86			p = drm_gem_get_pages(obj);
  87		else
  88			p = get_pages_vram(obj, npages);
  89
  90		if (IS_ERR(p)) {
  91			dev_err(dev->dev, "could not get pages: %ld\n",
  92					PTR_ERR(p));
  93			return p;
  94		}
  95
 
 
  96		msm_obj->pages = p;
  97
  98		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
  99		if (IS_ERR(msm_obj->sgt)) {
 100			void *ptr = ERR_CAST(msm_obj->sgt);
 101
 102			dev_err(dev->dev, "failed to allocate sgt\n");
 103			msm_obj->sgt = NULL;
 104			return ptr;
 105		}
 106
 107		/* For non-cached buffers, ensure the new pages are clean
 108		 * because display controller, GPU, etc. are not coherent:
 109		 */
 110		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 111			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
 112					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 
 113	}
 114
 115	return msm_obj->pages;
 116}
 117
 118static void put_pages_vram(struct drm_gem_object *obj)
 119{
 120	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 121	struct msm_drm_private *priv = obj->dev->dev_private;
 122
 123	spin_lock(&priv->vram.lock);
 124	drm_mm_remove_node(msm_obj->vram_node);
 125	spin_unlock(&priv->vram.lock);
 126
 127	kvfree(msm_obj->pages);
 128}
 129
 130static void put_pages(struct drm_gem_object *obj)
 131{
 132	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 133
 134	if (msm_obj->pages) {
 135		if (msm_obj->sgt) {
 136			/* For non-cached buffers, ensure the new
 137			 * pages are clean because display controller,
 138			 * GPU, etc. are not coherent:
 139			 */
 140			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 141				dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
 142					     msm_obj->sgt->nents,
 143					     DMA_BIDIRECTIONAL);
 144
 145			sg_free_table(msm_obj->sgt);
 146			kfree(msm_obj->sgt);
 
 147		}
 148
 
 
 149		if (use_pages(obj))
 150			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 151		else
 152			put_pages_vram(obj);
 153
 154		msm_obj->pages = NULL;
 
 155	}
 156}
 157
 158struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 
 159{
 160	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 161	struct page **p;
 162
 163	mutex_lock(&msm_obj->lock);
 164
 165	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 166		mutex_unlock(&msm_obj->lock);
 
 167		return ERR_PTR(-EBUSY);
 168	}
 169
 170	p = get_pages(obj);
 171	mutex_unlock(&msm_obj->lock);
 172	return p;
 173}
 174
 175void msm_gem_put_pages(struct drm_gem_object *obj)
 
 
 
 176{
 177	/* when we start tracking the pin count, then do something here */
 
 
 
 
 
 178}
 179
 180int msm_gem_mmap_obj(struct drm_gem_object *obj,
 181		struct vm_area_struct *vma)
 182{
 183	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 
 
 
 
 184
 185	vma->vm_flags &= ~VM_PFNMAP;
 186	vma->vm_flags |= VM_MIXEDMAP;
 
 187
 188	if (msm_obj->flags & MSM_BO_WC) {
 189		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 190	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
 191		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 192	} else {
 193		/*
 194		 * Shunt off cached objs to shmem file so they have their own
 195		 * address_space (so unmap_mapping_range does what we want,
 196		 * in particular in the case of mmap'd dmabufs)
 197		 */
 198		fput(vma->vm_file);
 199		get_file(obj->filp);
 200		vma->vm_pgoff = 0;
 201		vma->vm_file  = obj->filp;
 202
 203		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 204	}
 
 205
 206	return 0;
 207}
 208
 209int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 210{
 211	int ret;
 212
 213	ret = drm_gem_mmap(filp, vma);
 214	if (ret) {
 215		DBG("mmap failed: %d", ret);
 216		return ret;
 217	}
 218
 219	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 
 
 
 
 220}
 221
 222int msm_gem_fault(struct vm_fault *vmf)
 223{
 224	struct vm_area_struct *vma = vmf->vma;
 225	struct drm_gem_object *obj = vma->vm_private_data;
 226	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 227	struct page **pages;
 228	unsigned long pfn;
 229	pgoff_t pgoff;
 230	int ret;
 
 231
 232	/*
 233	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 234	 * a reference on obj. So, we dont need to hold one here.
 235	 */
 236	ret = mutex_lock_interruptible(&msm_obj->lock);
 237	if (ret)
 
 238		goto out;
 
 239
 240	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 241		mutex_unlock(&msm_obj->lock);
 242		return VM_FAULT_SIGBUS;
 243	}
 244
 245	/* make sure we have pages attached now */
 246	pages = get_pages(obj);
 247	if (IS_ERR(pages)) {
 248		ret = PTR_ERR(pages);
 249		goto out_unlock;
 250	}
 251
 252	/* We don't use vmf->pgoff since that has the fake offset: */
 253	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 254
 255	pfn = page_to_pfn(pages[pgoff]);
 256
 257	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 258			pfn, pfn << PAGE_SHIFT);
 259
 260	ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 261
 262out_unlock:
 263	mutex_unlock(&msm_obj->lock);
 264out:
 265	switch (ret) {
 266	case -EAGAIN:
 267	case 0:
 268	case -ERESTARTSYS:
 269	case -EINTR:
 270	case -EBUSY:
 271		/*
 272		 * EBUSY is ok: this just means that another thread
 273		 * already did the job.
 274		 */
 275		return VM_FAULT_NOPAGE;
 276	case -ENOMEM:
 277		return VM_FAULT_OOM;
 278	default:
 279		return VM_FAULT_SIGBUS;
 280	}
 281}
 282
 283/** get mmap offset */
 284static uint64_t mmap_offset(struct drm_gem_object *obj)
 285{
 286	struct drm_device *dev = obj->dev;
 287	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 288	int ret;
 289
 290	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 291
 292	/* Make it mmapable */
 293	ret = drm_gem_create_mmap_offset(obj);
 294
 295	if (ret) {
 296		dev_err(dev->dev, "could not allocate mmap offset\n");
 297		return 0;
 298	}
 299
 300	return drm_vma_node_offset_addr(&obj->vma_node);
 301}
 302
 303uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 304{
 305	uint64_t offset;
 306	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 307
 308	mutex_lock(&msm_obj->lock);
 309	offset = mmap_offset(obj);
 310	mutex_unlock(&msm_obj->lock);
 311	return offset;
 312}
 313
 314static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 315		struct msm_gem_address_space *aspace)
 316{
 317	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 318	struct msm_gem_vma *vma;
 319
 320	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 321
 322	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 323	if (!vma)
 324		return ERR_PTR(-ENOMEM);
 325
 326	vma->aspace = aspace;
 327
 328	list_add_tail(&vma->list, &msm_obj->vmas);
 329
 330	return vma;
 331}
 332
 333static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 334		struct msm_gem_address_space *aspace)
 335{
 336	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 337	struct msm_gem_vma *vma;
 338
 339	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 340
 341	list_for_each_entry(vma, &msm_obj->vmas, list) {
 342		if (vma->aspace == aspace)
 343			return vma;
 344	}
 345
 346	return NULL;
 347}
 348
 349static void del_vma(struct msm_gem_vma *vma)
 350{
 351	if (!vma)
 352		return;
 353
 354	list_del(&vma->list);
 355	kfree(vma);
 356}
 357
 358/* Called with msm_obj->lock locked */
 
 
 
 
 
 359static void
 360put_iova(struct drm_gem_object *obj)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 361{
 362	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 363	struct msm_gem_vma *vma, *tmp;
 364
 365	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 366
 367	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 368		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
 369		del_vma(vma);
 370	}
 371}
 372
 373/* get iova, taking a reference.  Should have a matching put */
 374int msm_gem_get_iova(struct drm_gem_object *obj,
 375		struct msm_gem_address_space *aspace, uint64_t *iova)
 376{
 377	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 378	struct msm_gem_vma *vma;
 379	int ret = 0;
 380
 381	mutex_lock(&msm_obj->lock);
 382
 383	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 384		mutex_unlock(&msm_obj->lock);
 385		return -EBUSY;
 386	}
 387
 388	vma = lookup_vma(obj, aspace);
 389
 390	if (!vma) {
 391		struct page **pages;
 392
 393		vma = add_vma(obj, aspace);
 394		if (IS_ERR(vma)) {
 395			ret = PTR_ERR(vma);
 396			goto unlock;
 
 
 
 
 
 397		}
 
 
 
 
 
 
 
 398
 399		pages = get_pages(obj);
 400		if (IS_ERR(pages)) {
 401			ret = PTR_ERR(pages);
 402			goto fail;
 403		}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 404
 405		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
 406				obj->size >> PAGE_SHIFT);
 407		if (ret)
 408			goto fail;
 
 
 
 
 409	}
 410
 411	*iova = vma->iova;
 
 
 
 
 
 
 
 
 
 
 
 412
 413	mutex_unlock(&msm_obj->lock);
 414	return 0;
 
 415
 416fail:
 417	del_vma(vma);
 418unlock:
 419	mutex_unlock(&msm_obj->lock);
 420	return ret;
 421}
 422
 423/* get iova without taking a reference, used in places where you have
 424 * already done a 'msm_gem_get_iova()'.
 
 
 
 
 
 
 
 
 425 */
 426uint64_t msm_gem_iova(struct drm_gem_object *obj,
 427		struct msm_gem_address_space *aspace)
 428{
 429	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 430	struct msm_gem_vma *vma;
 
 431
 432	mutex_lock(&msm_obj->lock);
 433	vma = lookup_vma(obj, aspace);
 434	mutex_unlock(&msm_obj->lock);
 435	WARN_ON(!vma);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 436
 437	return vma ? vma->iova : 0;
 438}
 439
 440void msm_gem_put_iova(struct drm_gem_object *obj,
 
 
 
 
 
 441		struct msm_gem_address_space *aspace)
 442{
 443	// XXX TODO ..
 444	// NOTE: probably don't need a _locked() version.. we wouldn't
 445	// normally unmap here, but instead just mark that it could be
 446	// unmapped (if the iova refcnt drops to zero), but then later
 447	// if another _get_iova_locked() fails we can start unmapping
 448	// things that are no longer needed..
 
 
 449}
 450
 451int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 452		struct drm_mode_create_dumb *args)
 453{
 454	args->pitch = align_pitch(args->width, args->bpp);
 455	args->size  = PAGE_ALIGN(args->pitch * args->height);
 456	return msm_gem_new_handle(dev, file, args->size,
 457			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
 458}
 459
 460int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 461		uint32_t handle, uint64_t *offset)
 462{
 463	struct drm_gem_object *obj;
 464	int ret = 0;
 465
 466	/* GEM does all our handle to object mapping */
 467	obj = drm_gem_object_lookup(file, handle);
 468	if (obj == NULL) {
 469		ret = -ENOENT;
 470		goto fail;
 471	}
 472
 473	*offset = msm_gem_mmap_offset(obj);
 474
 475	drm_gem_object_put_unlocked(obj);
 476
 477fail:
 478	return ret;
 479}
 480
 481static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 482{
 483	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 484	int ret = 0;
 485
 486	mutex_lock(&msm_obj->lock);
 
 
 
 
 
 
 
 487
 488	if (WARN_ON(msm_obj->madv > madv)) {
 489		dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 490			msm_obj->madv, madv);
 491		mutex_unlock(&msm_obj->lock);
 492		return ERR_PTR(-EBUSY);
 493	}
 494
 495	/* increment vmap_count *before* vmap() call, so shrinker can
 496	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
 497	 * This guarantees that we won't try to msm_gem_vunmap() this
 498	 * same object from within the vmap() call (while we already
 499	 * hold msm_obj->lock)
 500	 */
 501	msm_obj->vmap_count++;
 502
 503	if (!msm_obj->vaddr) {
 504		struct page **pages = get_pages(obj);
 505		if (IS_ERR(pages)) {
 506			ret = PTR_ERR(pages);
 507			goto fail;
 508		}
 509		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 510				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 511		if (msm_obj->vaddr == NULL) {
 512			ret = -ENOMEM;
 513			goto fail;
 514		}
 515	}
 516
 517	mutex_unlock(&msm_obj->lock);
 518	return msm_obj->vaddr;
 519
 520fail:
 521	msm_obj->vmap_count--;
 522	mutex_unlock(&msm_obj->lock);
 523	return ERR_PTR(ret);
 524}
 525
 
 
 
 
 
 526void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 527{
 528	return get_vaddr(obj, MSM_MADV_WILLNEED);
 
 
 
 
 
 
 529}
 530
 531/*
 532 * Don't use this!  It is for the very special case of dumping
 533 * submits from GPU hangs or faults, were the bo may already
 534 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 535 * active list.
 536 */
 537void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 538{
 539	return get_vaddr(obj, __MSM_MADV_PURGED);
 540}
 541
 542void msm_gem_put_vaddr(struct drm_gem_object *obj)
 543{
 544	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 545
 546	mutex_lock(&msm_obj->lock);
 547	WARN_ON(msm_obj->vmap_count < 1);
 
 548	msm_obj->vmap_count--;
 549	mutex_unlock(&msm_obj->lock);
 
 
 
 
 
 
 
 550}
 551
 552/* Update madvise status, returns true if not purged, else
 553 * false or -errno.
 554 */
 555int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 556{
 
 557	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 558
 559	mutex_lock(&msm_obj->lock);
 560
 561	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 562
 563	if (msm_obj->madv != __MSM_MADV_PURGED)
 564		msm_obj->madv = madv;
 565
 566	madv = msm_obj->madv;
 567
 568	mutex_unlock(&msm_obj->lock);
 
 
 
 
 
 
 
 569
 570	return (madv != __MSM_MADV_PURGED);
 571}
 572
 573void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 574{
 575	struct drm_device *dev = obj->dev;
 
 576	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 577
 578	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 579	WARN_ON(!is_purgeable(msm_obj));
 580	WARN_ON(obj->import_attach);
 581
 582	mutex_lock_nested(&msm_obj->lock, subclass);
 
 583
 584	put_iova(obj);
 585
 586	msm_gem_vunmap_locked(obj);
 587
 588	put_pages(obj);
 589
 
 
 
 
 590	msm_obj->madv = __MSM_MADV_PURGED;
 
 591
 592	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 593	drm_gem_free_mmap_offset(obj);
 594
 595	/* Our goal here is to return as much of the memory as
 596	 * is possible back to the system as we are called from OOM.
 597	 * To do this we must instruct the shmfs to drop all of its
 598	 * backing pages, *now*.
 599	 */
 600	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 601
 602	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 603			0, (loff_t)-1);
 604
 605	mutex_unlock(&msm_obj->lock);
 606}
 607
 608static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 
 
 
 609{
 
 610	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 611
 612	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 613
 614	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 615		return;
 616
 617	vunmap(msm_obj->vaddr);
 618	msm_obj->vaddr = NULL;
 619}
 620
 621void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 622{
 623	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 624
 625	mutex_lock_nested(&msm_obj->lock, subclass);
 626	msm_gem_vunmap_locked(obj);
 627	mutex_unlock(&msm_obj->lock);
 628}
 629
 630/* must be called before _move_to_active().. */
 631int msm_gem_sync_object(struct drm_gem_object *obj,
 632		struct msm_fence_context *fctx, bool exclusive)
 633{
 634	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 635	struct reservation_object_list *fobj;
 636	struct dma_fence *fence;
 637	int i, ret;
 638
 639	fobj = reservation_object_get_list(msm_obj->resv);
 640	if (!fobj || (fobj->shared_count == 0)) {
 641		fence = reservation_object_get_excl(msm_obj->resv);
 642		/* don't need to wait on our own fences, since ring is fifo */
 643		if (fence && (fence->context != fctx->context)) {
 644			ret = dma_fence_wait(fence, true);
 645			if (ret)
 646				return ret;
 647		}
 648	}
 649
 650	if (!exclusive || !fobj)
 651		return 0;
 652
 653	for (i = 0; i < fobj->shared_count; i++) {
 654		fence = rcu_dereference_protected(fobj->shared[i],
 655						reservation_object_held(msm_obj->resv));
 656		if (fence->context != fctx->context) {
 657			ret = dma_fence_wait(fence, true);
 658			if (ret)
 659				return ret;
 660		}
 661	}
 662
 663	return 0;
 664}
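/*
 * Sketch of the intended call site (assumed submit path): a job that
 * writes the bo must sync against all fences, while a read-only job
 * only needs the exclusive fence, which the !exclusive early-return
 * above implements:
 *
 *	ret = msm_gem_sync_object(obj, gpu->fctx,
 *			!!(submit_flags & MSM_SUBMIT_BO_WRITE));
 */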
 665
 666void msm_gem_move_to_active(struct drm_gem_object *obj,
 667		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 668{
 669	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 670	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 671	msm_obj->gpu = gpu;
 672	if (exclusive)
 673		reservation_object_add_excl_fence(msm_obj->resv, fence);
 674	else
 675		reservation_object_add_shared_fence(msm_obj->resv, fence);
 676	list_del_init(&msm_obj->mm_list);
 677	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 678}
 679
 680void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 681{
 682	struct drm_device *dev = obj->dev;
 683	struct msm_drm_private *priv = dev->dev_private;
 684	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 685
 686	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 687
 688	msm_obj->gpu = NULL;
 689	list_del_init(&msm_obj->mm_list);
 690	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 691}
 692
 693int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 694{
 695	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 696	bool write = !!(op & MSM_PREP_WRITE);
 697	unsigned long remain =
 698		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 699	long ret;
 700
 701	ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
 702						  true,  remain);
 703	if (ret == 0)
 704		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 705	else if (ret < 0)
 706		return ret;
 707
 708	/* TODO cache maintenance */
 709
 710	return 0;
 711}
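/*
 * Example (hypothetical): block for up to one second for pending GPU
 * writes before the CPU reads the buffer; MSM_PREP_NOSYNC would poll
 * instead, returning -EBUSY if the bo is still busy:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 */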
 712
 713int msm_gem_cpu_fini(struct drm_gem_object *obj)
 714{
 715	/* TODO cache maintenance */
 716	return 0;
 717}
 718
 719#ifdef CONFIG_DEBUG_FS
 720static void describe_fence(struct dma_fence *fence, const char *type,
 721		struct seq_file *m)
 722{
 723	if (!dma_fence_is_signaled(fence))
 724		seq_printf(m, "\t%9s: %s %s seq %u\n", type,
 725				fence->ops->get_driver_name(fence),
 726				fence->ops->get_timeline_name(fence),
 727				fence->seqno);
 728}
 729
 730void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 731{
 732	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 733	struct reservation_object *robj = msm_obj->resv;
 734	struct reservation_object_list *fobj;
 735	struct dma_fence *fence;
 736	struct msm_gem_vma *vma;
 737	uint64_t off = drm_vma_node_start(&obj->vma_node);
 738	const char *madv;
 739
 740	mutex_lock(&msm_obj->lock);
 741
 742	switch (msm_obj->madv) {
 743	case __MSM_MADV_PURGED:
 744		madv = " purged";
 745		break;
 746	case MSM_MADV_DONTNEED:
 747		madv = " purgeable";
 748		break;
 749	case MSM_MADV_WILLNEED:
 750	default:
 751		madv = "";
 752		break;
 753	}
 754
 755	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
 756			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 757			obj->name, kref_read(&obj->refcount),
 758			off, msm_obj->vaddr);
 759
 760	/* FIXME: we need to print the address space here too */
 761	list_for_each_entry(vma, &msm_obj->vmas, list)
 762		seq_printf(m, " %08llx", vma->iova);
 763
 764	seq_printf(m, " %zu%s\n", obj->size, madv);
 765
 766	rcu_read_lock();
 767	fobj = rcu_dereference(robj->fence);
 768	if (fobj) {
 769		unsigned int i, shared_count = fobj->shared_count;
 770
 771		for (i = 0; i < shared_count; i++) {
 772			fence = rcu_dereference(fobj->shared[i]);
 773			describe_fence(fence, "Shared", m);
 774		}
 775	}
 776
 777	fence = rcu_dereference(robj->fence_excl);
 778	if (fence)
 779		describe_fence(fence, "Exclusive", m);
 780	rcu_read_unlock();
 781
 782	mutex_unlock(&msm_obj->lock);
 783}
 784
 785void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 786{
 787	struct msm_gem_object *msm_obj;
 788	int count = 0;
 789	size_t size = 0;
 790
 791	list_for_each_entry(msm_obj, list, mm_list) {
 792		struct drm_gem_object *obj = &msm_obj->base;
 793		seq_printf(m, "   ");
 794		msm_gem_describe(obj, m);
 795		count++;
 796		size += obj->size;
 797	}
 798
 799	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
 800}
 801#endif
 802
 803/* don't call directly!  Use drm_gem_object_put() and friends */
 804void msm_gem_free_object(struct drm_gem_object *obj)
 805{
 806	struct drm_device *dev = obj->dev;
 807	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 808
 809	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 810
 811	/* object should not be on active list: */
 812	WARN_ON(is_active(msm_obj));
 813
 814	list_del(&msm_obj->mm_list);
 815
 816	mutex_lock(&msm_obj->lock);
 817
 818	put_iova(obj);
 819
 820	if (obj->import_attach) {
 821		if (msm_obj->vaddr)
 822			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
 823
 824		/* Don't drop the pages for imported dmabuf, as they are not
 825		 * ours, just free the array we allocated:
 826		 */
 827		if (msm_obj->pages)
 828			kvfree(msm_obj->pages);
 829
 830		drm_prime_gem_destroy(obj, msm_obj->sgt);
 831	} else {
 832		msm_gem_vunmap_locked(obj);
 833		put_pages(obj);
 834	}
 835
 836	if (msm_obj->resv == &msm_obj->_resv)
 837		reservation_object_fini(msm_obj->resv);
 838
 839	drm_gem_object_release(obj);
 840
 841	mutex_unlock(&msm_obj->lock);
 842	kfree(msm_obj);
 843}
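/*
 * i.e. the normal teardown path is reference counting: when the last
 * reference is dropped, the drm core invokes the driver's free callback
 * (this function). A sketch of a typical caller (assumed):
 *
 *	drm_gem_object_put_unlocked(obj);
 */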
 844
 845	/* convenience method to construct a GEM buffer object and a userspace handle */
 846int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 847		uint32_t size, uint32_t flags, uint32_t *handle)
 848{
 849	struct drm_gem_object *obj;
 850	int ret;
 851
 852	obj = msm_gem_new(dev, size, flags);
 853
 854	if (IS_ERR(obj))
 855		return PTR_ERR(obj);
 856
 857	ret = drm_gem_handle_create(file, obj, handle);
 858
 859	/* drop reference from allocate - handle holds it now */
 860	drm_gem_object_put_unlocked(obj);
 861
 862	return ret;
 863}
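/*
 * Sketch of a typical caller (assumed, e.g. a GEM_NEW ioctl handler;
 * 'args' fields are hypothetical): the returned handle holds the only
 * remaining reference, the allocation reference having been dropped
 * above:
 *
 *	uint32_t handle;
 *	ret = msm_gem_new_handle(dev, file, args->size, args->flags, &handle);
 */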
 864
 865static int msm_gem_new_impl(struct drm_device *dev,
 866		uint32_t size, uint32_t flags,
 867		struct reservation_object *resv,
 868		struct drm_gem_object **obj,
 869		bool struct_mutex_locked)
 870{
 871	struct msm_drm_private *priv = dev->dev_private;
 872	struct msm_gem_object *msm_obj;
 873
 874	switch (flags & MSM_BO_CACHE_MASK) {
 875	case MSM_BO_UNCACHED:
 876	case MSM_BO_CACHED:
 877	case MSM_BO_WC:
 878		break;
 879	default:
 880		dev_err(dev->dev, "invalid cache flag: %x\n",
 881				(flags & MSM_BO_CACHE_MASK));
 882		return -EINVAL;
 883	}
 884
 885	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 886	if (!msm_obj)
 887		return -ENOMEM;
 888
 889	mutex_init(&msm_obj->lock);
 890
 891	msm_obj->flags = flags;
 892	msm_obj->madv = MSM_MADV_WILLNEED;
 893
 894	if (resv) {
 895		msm_obj->resv = resv;
 896	} else {
 897		msm_obj->resv = &msm_obj->_resv;
 898		reservation_object_init(msm_obj->resv);
 899	}
 900
 901	INIT_LIST_HEAD(&msm_obj->submit_entry);
 902	INIT_LIST_HEAD(&msm_obj->vmas);
 903
 904	if (struct_mutex_locked) {
 905		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 906		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 907	} else {
 908		mutex_lock(&dev->struct_mutex);
 909		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 910		mutex_unlock(&dev->struct_mutex);
 911	}
 912
 913	*obj = &msm_obj->base;
 914
 915	return 0;
 916}
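/*
 * Note the two ways 'resv' is used by the callers below: ordinary
 * allocations pass NULL and get the embedded _resv, while dma-buf
 * imports share the exporter's reservation object so fencing stays
 * coherent across drivers:
 *
 *	msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
 *	msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
 */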
 917
 918static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 919		uint32_t size, uint32_t flags, bool struct_mutex_locked)
 920{
 921	struct msm_drm_private *priv = dev->dev_private;
 922	struct drm_gem_object *obj = NULL;
 923	bool use_vram = false;
 924	int ret;
 925
 926	size = PAGE_ALIGN(size);
 927
 928	if (!iommu_present(&platform_bus_type))
 929		use_vram = true;
 930	else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
 931		use_vram = true;
 932
 933	if (WARN_ON(use_vram && !priv->vram.size))
 934		return ERR_PTR(-EINVAL);
 935
 936	/* Disallow zero sized objects as they make the underlying
 937	 * infrastructure grumpy
 938	 */
 939	if (size == 0)
 940		return ERR_PTR(-EINVAL);
 941
 942	ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
 943	if (ret)
 944		goto fail;
 945
 946	if (use_vram) {
 947		struct msm_gem_vma *vma;
 948		struct page **pages;
 949		struct msm_gem_object *msm_obj = to_msm_bo(obj);
 950
 951		mutex_lock(&msm_obj->lock);
 952
 953		vma = add_vma(obj, NULL);
 954		mutex_unlock(&msm_obj->lock);
 955		if (IS_ERR(vma)) {
 956			ret = PTR_ERR(vma);
 957			goto fail;
 958		}
 959
 960		to_msm_bo(obj)->vram_node = &vma->node;
 961
 962		drm_gem_private_object_init(dev, obj, size);
 963
 964		pages = get_pages(obj);
 965		if (IS_ERR(pages)) {
 966			ret = PTR_ERR(pages);
 967			goto fail;
 968		}
 969
 970		vma->iova = physaddr(obj);
 971	} else {
 972		ret = drm_gem_object_init(dev, obj, size);
 973		if (ret)
 974			goto fail;
 975	}
 976
 977	return obj;
 978
 979fail:
 980	drm_gem_object_put_unlocked(obj);
 981	return ERR_PTR(ret);
 982}
 983
 984struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
 985		uint32_t size, uint32_t flags)
 986{
 987	return _msm_gem_new(dev, size, flags, true);
 988}
 989
 990struct drm_gem_object *msm_gem_new(struct drm_device *dev,
 991		uint32_t size, uint32_t flags)
 992{
 993	return _msm_gem_new(dev, size, flags, false);
 994}
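/*
 * Example (hypothetical): allocate a page-sized write-combined bo;
 * callers already holding dev->struct_mutex use msm_gem_new_locked()
 * instead, as the two wrappers differ only in that flag:
 *
 *	struct drm_gem_object *obj = msm_gem_new(dev, SZ_4K, MSM_BO_WC);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */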
 995
 996struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 997		struct dma_buf *dmabuf, struct sg_table *sgt)
 998{
 999	struct msm_gem_object *msm_obj;
1000	struct drm_gem_object *obj;
1001	uint32_t size;
1002	int ret, npages;
1003
1004	/* if we don't have IOMMU, don't bother pretending we can import: */
1005	if (!iommu_present(&platform_bus_type)) {
1006		dev_err(dev->dev, "cannot import without IOMMU\n");
1007		return ERR_PTR(-EINVAL);
1008	}
1009
1010	size = PAGE_ALIGN(dmabuf->size);
1011
1012	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
1013	if (ret)
1014		goto fail;
1015
1016	drm_gem_private_object_init(dev, obj, size);
1017
1018	npages = size / PAGE_SIZE;
1019
1020	msm_obj = to_msm_bo(obj);
1021	mutex_lock(&msm_obj->lock);
1022	msm_obj->sgt = sgt;
1023	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1024	if (!msm_obj->pages) {
1025		mutex_unlock(&msm_obj->lock);
1026		ret = -ENOMEM;
1027		goto fail;
1028	}
1029
1030	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
1031	if (ret) {
1032		mutex_unlock(&msm_obj->lock);
1033		goto fail;
1034	}
1035
1036	mutex_unlock(&msm_obj->lock);
1037	return obj;
1038
1039fail:
1040	drm_gem_object_put_unlocked(obj);
1041	return ERR_PTR(ret);
1042}
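/*
 * msm_gem_import() is normally reached through the drm prime layer
 * rather than called directly; roughly (assumed, following the usual
 * prime import flow):
 *
 *	drm_prime_fd_to_handle()
 *	  -> drm_gem_prime_import()
 *	    -> .gem_prime_import_sg_table(), which wraps msm_gem_import()
 */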
1043
1044static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1045		uint32_t flags, struct msm_gem_address_space *aspace,
1046		struct drm_gem_object **bo, uint64_t *iova, bool locked)
1047{
1048	void *vaddr;
1049	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
1050	int ret;
1051
1052	if (IS_ERR(obj))
1053		return ERR_CAST(obj);
1054
1055	if (iova) {
1056		ret = msm_gem_get_iova(obj, aspace, iova);
1057		if (ret) {
1058			drm_gem_object_put(obj);
1059			return ERR_PTR(ret);
1060		}
1061	}
1062
1063	vaddr = msm_gem_get_vaddr(obj);
1064	if (IS_ERR(vaddr)) {
1065		msm_gem_put_iova(obj, aspace);
1066		drm_gem_object_put(obj);
1067		return ERR_CAST(vaddr);
1068	}
1069
1070	if (bo)
1071		*bo = obj;
1072
1073	return vaddr;
1074}
1075
1076void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1077		uint32_t flags, struct msm_gem_address_space *aspace,
1078		struct drm_gem_object **bo, uint64_t *iova)
1079{
1080	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
1081}
1082
1083void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
1084		uint32_t flags, struct msm_gem_address_space *aspace,
1085		struct drm_gem_object **bo, uint64_t *iova)
1086{
1087	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
1088}
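/*
 * Usage sketch (hypothetical; mirrors how GPU-internal buffers such as
 * ringbuffers are typically allocated): one call yields the bo, its GPU
 * iova and a kernel mapping:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *			gpu->aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 */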