v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include <linux/dma-map-ops.h>
   8#include <linux/vmalloc.h>
   9#include <linux/spinlock.h>
  10#include <linux/shmem_fs.h>
  11#include <linux/dma-buf.h>
  12#include <linux/pfn_t.h>
  13
  14#include <drm/drm_prime.h>
  15#include <drm/drm_file.h>
  16
  17#include <trace/events/gpu_mem.h>
  18
  19#include "msm_drv.h"
  20#include "msm_fence.h"
  21#include "msm_gem.h"
  22#include "msm_gpu.h"
  23#include "msm_mmu.h"
  24
 
 
 
  25static dma_addr_t physaddr(struct drm_gem_object *obj)
  26{
  27	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  28	struct msm_drm_private *priv = obj->dev->dev_private;
  29	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  30			priv->vram.paddr;
  31}
  32
  33static bool use_pages(struct drm_gem_object *obj)
  34{
  35	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  36	return !msm_obj->vram_node;
  37}
  38
  39static void update_device_mem(struct msm_drm_private *priv, ssize_t size)
  40{
  41	uint64_t total_mem = atomic64_add_return(size, &priv->total_mem);
  42	trace_gpu_mem_total(0, 0, total_mem);
  43}
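/* Note: trace_gpu_mem_total() is called here with pid 0, i.e. this reports
 * the device-global total; per-process totals are emitted from
 * update_ctx_mem() below using the owning file's pid.
 */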
  44
  45static void update_ctx_mem(struct drm_file *file, ssize_t size)
  46{
  47	struct msm_file_private *ctx = file->driver_priv;
  48	uint64_t ctx_mem = atomic64_add_return(size, &ctx->ctx_mem);
  49
  50	rcu_read_lock(); /* Locks file->pid! */
  51	trace_gpu_mem_total(0, pid_nr(rcu_dereference(file->pid)), ctx_mem);
  52	rcu_read_unlock();
  53
  54}
  55
  56static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file)
  57{
  58	update_ctx_mem(file, obj->size);
  59	return 0;
  60}
  61
  62static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file)
  63{
  64	update_ctx_mem(file, -obj->size);
  65}
  66
  67/*
  68 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
  69 * API.  Really GPU cache is out of scope here (handled on cmdstream)
  70 * and all we need to do is invalidate newly allocated pages before
  71 * mapping to CPU as uncached/writecombine.
  72 *
  73 * On top of this, we have the added headache, that depending on
  74 * display generation, the display's iommu may be wired up to either
  75 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
  76 * that here we either have dma-direct or iommu ops.
  77 *
   78 * Let this be a cautionary tale of abstraction gone wrong.
  79 */
  80
  81static void sync_for_device(struct msm_gem_object *msm_obj)
  82{
  83	struct device *dev = msm_obj->base.dev->dev;
  84
  85	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  86}
  87
  88static void sync_for_cpu(struct msm_gem_object *msm_obj)
  89{
  90	struct device *dev = msm_obj->base.dev->dev;
  91
  92	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  93}
  94
  95static void update_lru_active(struct drm_gem_object *obj)
  96{
  97	struct msm_drm_private *priv = obj->dev->dev_private;
  98	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  99
 100	GEM_WARN_ON(!msm_obj->pages);
 101
 102	if (msm_obj->pin_count) {
 103		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
 104	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
 105		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
 106	} else {
 107		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
 108
 109		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
 110	}
 111}
 112
 113static void update_lru_locked(struct drm_gem_object *obj)
 114{
 115	struct msm_drm_private *priv = obj->dev->dev_private;
 116	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 117
 118	msm_gem_assert_locked(&msm_obj->base);
 119
 120	if (!msm_obj->pages) {
 121		GEM_WARN_ON(msm_obj->pin_count);
 122
 123		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
 
 
 124	} else {
 125		update_lru_active(obj);
 
 126	}
 127}
 128
 129static void update_lru(struct drm_gem_object *obj)
 130{
 131	struct msm_drm_private *priv = obj->dev->dev_private;
 132
 133	mutex_lock(&priv->lru.lock);
 134	update_lru_locked(obj);
 135	mutex_unlock(&priv->lru.lock);
 136}
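/* Summary of the LRU buckets used above: objects without backing pages sit
 * on lru.unbacked, pinned objects on lru.pinned, and unpinned objects on
 * lru.willneed or lru.dontneed according to their madvise state; the
 * dontneed/willneed buckets are presumably what the shrinker walks when
 * reclaiming memory.
 */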
 137
 138/* allocate pages from VRAM carveout, used when no IOMMU: */
 139static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 140{
 141	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 142	struct msm_drm_private *priv = obj->dev->dev_private;
 143	dma_addr_t paddr;
 144	struct page **p;
 145	int ret, i;
 146
 147	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 148	if (!p)
 149		return ERR_PTR(-ENOMEM);
 150
 151	spin_lock(&priv->vram.lock);
 152	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 153	spin_unlock(&priv->vram.lock);
 154	if (ret) {
 155		kvfree(p);
 156		return ERR_PTR(ret);
 157	}
 158
 159	paddr = physaddr(obj);
 160	for (i = 0; i < npages; i++) {
 161		p[i] = pfn_to_page(__phys_to_pfn(paddr));
 162		paddr += PAGE_SIZE;
 163	}
 164
 165	return p;
 166}
 167
 168static struct page **get_pages(struct drm_gem_object *obj)
 169{
 170	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 171
 172	msm_gem_assert_locked(obj);
 173
 174	if (!msm_obj->pages) {
 175		struct drm_device *dev = obj->dev;
 176		struct page **p;
 177		int npages = obj->size >> PAGE_SHIFT;
 178
 179		if (use_pages(obj))
 180			p = drm_gem_get_pages(obj);
 181		else
 182			p = get_pages_vram(obj, npages);
 183
 184		if (IS_ERR(p)) {
 185			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 186					PTR_ERR(p));
 187			return p;
 188		}
 189
 190		update_device_mem(dev->dev_private, obj->size);
 191
 192		msm_obj->pages = p;
 193
 194		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
 195		if (IS_ERR(msm_obj->sgt)) {
 196			void *ptr = ERR_CAST(msm_obj->sgt);
 197
 198			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 199			msm_obj->sgt = NULL;
 200			return ptr;
 201		}
 202
 203		/* For non-cached buffers, ensure the new pages are clean
 204		 * because display controller, GPU, etc. are not coherent:
 205		 */
 206		if (msm_obj->flags & MSM_BO_WC)
 207			sync_for_device(msm_obj);
 208
 209		update_lru(obj);
 210	}
 211
 212	return msm_obj->pages;
 213}
 214
 215static void put_pages_vram(struct drm_gem_object *obj)
 216{
 217	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 218	struct msm_drm_private *priv = obj->dev->dev_private;
 219
 220	spin_lock(&priv->vram.lock);
 221	drm_mm_remove_node(msm_obj->vram_node);
 222	spin_unlock(&priv->vram.lock);
 223
 224	kvfree(msm_obj->pages);
 225}
 226
 227static void put_pages(struct drm_gem_object *obj)
 228{
 229	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 230
 231	if (msm_obj->pages) {
 232		if (msm_obj->sgt) {
 233			/* For non-cached buffers, ensure the new
 234			 * pages are clean because display controller,
 235			 * GPU, etc. are not coherent:
 236			 */
 237			if (msm_obj->flags & MSM_BO_WC)
 238				sync_for_cpu(msm_obj);
 239
 240			sg_free_table(msm_obj->sgt);
 241			kfree(msm_obj->sgt);
 242			msm_obj->sgt = NULL;
 243		}
 244
 245		update_device_mem(obj->dev->dev_private, -obj->size);
 246
 247		if (use_pages(obj))
 248			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 249		else
 250			put_pages_vram(obj);
 251
 252		msm_obj->pages = NULL;
 253		update_lru(obj);
 254	}
 255}
 256
 257static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj,
 258					      unsigned madv)
 259{
 260	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 261
 262	msm_gem_assert_locked(obj);
 263
 264	if (msm_obj->madv > madv) {
 265		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 266				     msm_obj->madv, madv);
 267		return ERR_PTR(-EBUSY);
 268	}
 269
 270	return get_pages(obj);
 
 
 271}
 272
 273/*
 274 * Update the pin count of the object, call under lru.lock
 275 */
 276void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
 277{
 278	struct msm_drm_private *priv = obj->dev->dev_private;
 279
 280	msm_gem_assert_locked(obj);
 281
 282	to_msm_bo(obj)->pin_count++;
 283	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
 284}
 285
 286static void pin_obj_locked(struct drm_gem_object *obj)
 
 287{
 288	struct msm_drm_private *priv = obj->dev->dev_private;
 289
 290	mutex_lock(&priv->lru.lock);
 291	msm_gem_pin_obj_locked(obj);
 292	mutex_unlock(&priv->lru.lock);
 293}
 294
 295struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
 296{
 297	struct page **p;
 298
 299	msm_gem_assert_locked(obj);
 300
 301	p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
 302	if (!IS_ERR(p))
 303		pin_obj_locked(obj);
 304
 305	return p;
 306}
 307
 308void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
 309{
 310	msm_gem_assert_locked(obj);
 311
 312	msm_gem_unpin_locked(obj);
 313}
 
 
 
 314
 315static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
 316{
 317	if (msm_obj->flags & MSM_BO_WC)
 318		return pgprot_writecombine(prot);
 319	return prot;
 320}
 321
 322static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 323{
 324	struct vm_area_struct *vma = vmf->vma;
 325	struct drm_gem_object *obj = vma->vm_private_data;
 326	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 327	struct page **pages;
 328	unsigned long pfn;
 329	pgoff_t pgoff;
 330	int err;
 331	vm_fault_t ret;
 332
 333	/*
 334	 * vm_ops.open/drm_gem_mmap_obj and close get and put
  335	 * a reference on obj. So, we don't need to hold one here.
 336	 */
 337	err = msm_gem_lock_interruptible(obj);
 338	if (err) {
 339		ret = VM_FAULT_NOPAGE;
 340		goto out;
 341	}
 342
 343	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 344		msm_gem_unlock(obj);
 345		return VM_FAULT_SIGBUS;
 346	}
 347
 348	/* make sure we have pages attached now */
 349	pages = get_pages(obj);
 350	if (IS_ERR(pages)) {
 351		ret = vmf_error(PTR_ERR(pages));
 352		goto out_unlock;
 353	}
 354
 355	/* We don't use vmf->pgoff since that has the fake offset: */
 356	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 357
 358	pfn = page_to_pfn(pages[pgoff]);
 359
 360	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 361			pfn, pfn << PAGE_SHIFT);
 362
 363	ret = vmf_insert_pfn(vma, vmf->address, pfn);
 364
 365out_unlock:
 366	msm_gem_unlock(obj);
 367out:
 368	return ret;
 369}
 370
 371/** get mmap offset */
 372static uint64_t mmap_offset(struct drm_gem_object *obj)
 373{
 374	struct drm_device *dev = obj->dev;
 
 375	int ret;
 376
 377	msm_gem_assert_locked(obj);
 378
 379	/* Make it mmapable */
 380	ret = drm_gem_create_mmap_offset(obj);
 381
 382	if (ret) {
 383		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 384		return 0;
 385	}
 386
 387	return drm_vma_node_offset_addr(&obj->vma_node);
 388}
 389
 390uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 391{
 392	uint64_t offset;
 
 393
 394	msm_gem_lock(obj);
 395	offset = mmap_offset(obj);
 396	msm_gem_unlock(obj);
 397	return offset;
 398}
 399
 400static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 401		struct msm_gem_address_space *aspace)
 402{
 403	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 404	struct msm_gem_vma *vma;
 405
 406	msm_gem_assert_locked(obj);
 407
 408	vma = msm_gem_vma_new(aspace);
 409	if (!vma)
 410		return ERR_PTR(-ENOMEM);
 411
 
 
 412	list_add_tail(&vma->list, &msm_obj->vmas);
 413
 414	return vma;
 415}
 416
 417static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 418		struct msm_gem_address_space *aspace)
 419{
 420	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 421	struct msm_gem_vma *vma;
 422
 423	msm_gem_assert_locked(obj);
 424
 425	list_for_each_entry(vma, &msm_obj->vmas, list) {
 426		if (vma->aspace == aspace)
 427			return vma;
 428	}
 429
 430	return NULL;
 431}
 432
 433static void del_vma(struct msm_gem_vma *vma)
 434{
 435	if (!vma)
 436		return;
 437
 438	list_del(&vma->list);
 439	kfree(vma);
 440}
 441
 442/*
 443 * If close is true, this also closes the VMA (releasing the allocated
 444 * iova range) in addition to removing the iommu mapping.  In the eviction
 445 * case (!close), we keep the iova allocated, but only remove the iommu
 446 * mapping.
 447 */
 448static void
 449put_iova_spaces(struct drm_gem_object *obj, bool close)
 450{
 451	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 452	struct msm_gem_vma *vma;
 453
 454	msm_gem_assert_locked(obj);
 455
 456	list_for_each_entry(vma, &msm_obj->vmas, list) {
 457		if (vma->aspace) {
 458			msm_gem_vma_purge(vma);
 459			if (close)
 460				msm_gem_vma_close(vma);
 461		}
 462	}
 463}
 464
 465/* Called with msm_obj locked */
 466static void
 467put_iova_vmas(struct drm_gem_object *obj)
 468{
 469	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 470	struct msm_gem_vma *vma, *tmp;
 471
 472	msm_gem_assert_locked(obj);
 473
 474	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 475		del_vma(vma);
 476	}
 477}
 478
 479static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
 480		struct msm_gem_address_space *aspace,
 481		u64 range_start, u64 range_end)
 482{
 
 483	struct msm_gem_vma *vma;
 
 484
 485	msm_gem_assert_locked(obj);
 486
 487	vma = lookup_vma(obj, aspace);
 488
 489	if (!vma) {
 490		int ret;
 491
 492		vma = add_vma(obj, aspace);
 493		if (IS_ERR(vma))
 494			return vma;
 495
 496		ret = msm_gem_vma_init(vma, obj->size,
 497			range_start, range_end);
 498		if (ret) {
 499			del_vma(vma);
 500			return ERR_PTR(ret);
 501		}
 502	} else {
 503		GEM_WARN_ON(vma->iova < range_start);
 504		GEM_WARN_ON((vma->iova + obj->size) > range_end);
 505	}
 506
 507	return vma;
 
 508}
 509
 510int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 
 511{
 512	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 513	struct page **pages;
 514	int prot = IOMMU_READ;
 515
 516	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 517		prot |= IOMMU_WRITE;
 518
 519	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 520		prot |= IOMMU_PRIV;
 521
 522	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
 523		prot |= IOMMU_CACHE;
 524
 525	msm_gem_assert_locked(obj);
 
 
 526
 527	pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
 528	if (IS_ERR(pages))
 529		return PTR_ERR(pages);
 530
 531	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
 532}
 533
 534void msm_gem_unpin_locked(struct drm_gem_object *obj)
 535{
 536	struct msm_drm_private *priv = obj->dev->dev_private;
 537	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 538
 539	msm_gem_assert_locked(obj);
 540
 541	mutex_lock(&priv->lru.lock);
 542	msm_obj->pin_count--;
 543	GEM_WARN_ON(msm_obj->pin_count < 0);
 544	update_lru_locked(obj);
 545	mutex_unlock(&priv->lru.lock);
 546}
 547
 548/* Special unpin path for use in fence-signaling path, avoiding the need
  549 * to hold the obj lock by only depending on things that are protected by
  550 * the LRU lock.  In particular we know that we already have backing
  551 * and that the object's dma_resv has the fence for the current
 552 * submit/job which will prevent us racing against page eviction.
 553 */
 554void msm_gem_unpin_active(struct drm_gem_object *obj)
 555{
 556	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 557
 558	msm_obj->pin_count--;
 559	GEM_WARN_ON(msm_obj->pin_count < 0);
 560	update_lru_active(obj);
 561}
 562
 563struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
 564					   struct msm_gem_address_space *aspace)
 565{
 566	return get_vma_locked(obj, aspace, 0, U64_MAX);
 567}
 568
 569static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 570		struct msm_gem_address_space *aspace, uint64_t *iova,
 571		u64 range_start, u64 range_end)
 572{
 573	struct msm_gem_vma *vma;
 574	int ret;
 575
 576	msm_gem_assert_locked(obj);
 577
 578	vma = get_vma_locked(obj, aspace, range_start, range_end);
 579	if (IS_ERR(vma))
 580		return PTR_ERR(vma);
 581
 582	ret = msm_gem_pin_vma_locked(obj, vma);
 583	if (!ret) {
 584		*iova = vma->iova;
 585		pin_obj_locked(obj);
 586	}
 587
 588	return ret;
 589}
 590
 591/*
 592 * get iova and pin it. Should have a matching put
 593 * limits iova to specified range (in pages)
 594 */
 595int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 596		struct msm_gem_address_space *aspace, uint64_t *iova,
 597		u64 range_start, u64 range_end)
 598{
 599	int ret;
 600
 601	msm_gem_lock(obj);
 602	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
 603	msm_gem_unlock(obj);
 604
 
 605	return ret;
 606}
 607
 608/* get iova and pin it. Should have a matching put */
 609int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 610		struct msm_gem_address_space *aspace, uint64_t *iova)
 611{
 612	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
 613}
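/* Illustrative (hypothetical) caller pattern, assuming a driver-owned
 * address space pointer; each successful get-and-pin is balanced by
 * msm_gem_unpin_iova() on the same address space:
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
 *	if (ret)
 *		return ret;
 *	... hand iova to the GPU/display ...
 *	msm_gem_unpin_iova(obj, aspace);
 */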
 614
 615/*
 616 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 617 * valid for the life of the object
 618 */
 619int msm_gem_get_iova(struct drm_gem_object *obj,
 620		struct msm_gem_address_space *aspace, uint64_t *iova)
 621{
 622	struct msm_gem_vma *vma;
 623	int ret = 0;
 624
 625	msm_gem_lock(obj);
 626	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
 627	if (IS_ERR(vma)) {
 628		ret = PTR_ERR(vma);
 629	} else {
 630		*iova = vma->iova;
 631	}
 632	msm_gem_unlock(obj);
 633
 634	return ret;
 635}
 636
 637static int clear_iova(struct drm_gem_object *obj,
 638		      struct msm_gem_address_space *aspace)
 639{
 640	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
 641
 642	if (!vma)
 643		return 0;
 644
 645	msm_gem_vma_purge(vma);
 646	msm_gem_vma_close(vma);
 647	del_vma(vma);
 648
 649	return 0;
 650}
 651
 652/*
 653 * Get the requested iova but don't pin it.  Fails if the requested iova is
 654 * not available.  Doesn't need a put because iovas are currently valid for
 655 * the life of the object.
 656 *
 657 * Setting an iova of zero will clear the vma.
 658 */
 659int msm_gem_set_iova(struct drm_gem_object *obj,
 660		     struct msm_gem_address_space *aspace, uint64_t iova)
 661{
 662	int ret = 0;
 
 663
 664	msm_gem_lock(obj);
 665	if (!iova) {
 666		ret = clear_iova(obj, aspace);
 667	} else {
 668		struct msm_gem_vma *vma;
 669		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
 670		if (IS_ERR(vma)) {
 671			ret = PTR_ERR(vma);
 672		} else if (GEM_WARN_ON(vma->iova != iova)) {
 673			clear_iova(obj, aspace);
 674			ret = -EBUSY;
 675		}
 676	}
 677	msm_gem_unlock(obj);
 678
 679	return ret;
 680}
 681
 682/*
  683 * Unpin an iova by updating the reference counts. The memory isn't actually
 684 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 685 * to get rid of it
 686 */
 687void msm_gem_unpin_iova(struct drm_gem_object *obj,
 688		struct msm_gem_address_space *aspace)
 689{
 
 690	struct msm_gem_vma *vma;
 691
 692	msm_gem_lock(obj);
 693	vma = lookup_vma(obj, aspace);
 694	if (!GEM_WARN_ON(!vma)) {
 695		msm_gem_unpin_locked(obj);
 696	}
 697	msm_gem_unlock(obj);
 
 698}
 699
 700int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 701		struct drm_mode_create_dumb *args)
 702{
 703	args->pitch = align_pitch(args->width, args->bpp);
 704	args->size  = PAGE_ALIGN(args->pitch * args->height);
 705	return msm_gem_new_handle(dev, file, args->size,
 706			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 707}
 708
 709int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 710		uint32_t handle, uint64_t *offset)
 711{
 712	struct drm_gem_object *obj;
 713	int ret = 0;
 714
 715	/* GEM does all our handle to object mapping */
 716	obj = drm_gem_object_lookup(file, handle);
 717	if (obj == NULL) {
 718		ret = -ENOENT;
 719		goto fail;
 720	}
 721
 722	*offset = msm_gem_mmap_offset(obj);
 723
 724	drm_gem_object_put(obj);
 725
 726fail:
 727	return ret;
 728}
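/* A minimal sketch of the matching userspace flow, assuming a dumb-buffer
 * handle created earlier via DRM_IOCTL_MODE_CREATE_DUMB on the same fd:
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 fd, map.offset);
 */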
 729
 730static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 731{
 732	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 733	struct page **pages;
 734	int ret = 0;
 735
 736	msm_gem_assert_locked(obj);
 737
 738	if (obj->import_attach)
 739		return ERR_PTR(-ENODEV);
 740
 741	pages = msm_gem_get_pages_locked(obj, madv);
 742	if (IS_ERR(pages))
 743		return ERR_CAST(pages);
 744
 745	pin_obj_locked(obj);
 
 746
 747	/* increment vmap_count *before* vmap() call, so shrinker can
 748	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 749	 * This guarantees that we won't try to msm_gem_vunmap() this
 750	 * same object from within the vmap() call (while we already
 751	 * hold msm_obj lock)
 752	 */
 753	msm_obj->vmap_count++;
 754
 755	if (!msm_obj->vaddr) {
 756		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 757				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
 758		if (msm_obj->vaddr == NULL) {
 759			ret = -ENOMEM;
 760			goto fail;
 761		}
 762	}
 763
 
 764	return msm_obj->vaddr;
 765
 766fail:
 767	msm_obj->vmap_count--;
 768	msm_gem_unpin_locked(obj);
 769	return ERR_PTR(ret);
 770}
 771
 772void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 773{
 774	return get_vaddr(obj, MSM_MADV_WILLNEED);
 775}
 776
 777void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 778{
 779	void *ret;
 780
 781	msm_gem_lock(obj);
 782	ret = msm_gem_get_vaddr_locked(obj);
 783	msm_gem_unlock(obj);
 784
 785	return ret;
 786}
 787
 788/*
 789 * Don't use this!  It is for the very special case of dumping
  790 * submits from GPU hangs or faults, where the bo may already
 791 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 792 * active list.
 793 */
 794void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 795{
 796	return get_vaddr(obj, __MSM_MADV_PURGED);
 797}
 798
 799void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 800{
 801	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 802
 803	msm_gem_assert_locked(obj);
 804	GEM_WARN_ON(msm_obj->vmap_count < 1);
 805
 806	msm_obj->vmap_count--;
 807	msm_gem_unpin_locked(obj);
 808}
 809
 810void msm_gem_put_vaddr(struct drm_gem_object *obj)
 811{
 812	msm_gem_lock(obj);
 813	msm_gem_put_vaddr_locked(obj);
 814	msm_gem_unlock(obj);
 815}
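/* Illustrative kernel-internal usage, assuming the caller can sleep and
 * holds its own reference on obj; every msm_gem_get_vaddr() is paired with
 * msm_gem_put_vaddr() once CPU access is finished:
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy(ptr, data, len);
 *	msm_gem_put_vaddr(obj);
 */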
 816
 817/* Update madvise status, returns true if not purged, else
 818 * false or -errno.
 819 */
 820int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 821{
 822	struct msm_drm_private *priv = obj->dev->dev_private;
 823	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 824
 825	msm_gem_lock(obj);
 826
 827	mutex_lock(&priv->lru.lock);
 828
 829	if (msm_obj->madv != __MSM_MADV_PURGED)
 830		msm_obj->madv = madv;
 831
 832	madv = msm_obj->madv;
 833
 834	/* If the obj is inactive, we might need to move it
 835	 * between inactive lists
 836	 */
 837	update_lru_locked(obj);
 838
 839	mutex_unlock(&priv->lru.lock);
 840
 841	msm_gem_unlock(obj);
 842
 843	return (madv != __MSM_MADV_PURGED);
 844}
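/* Illustrative use of the return value (hypothetical caller): a non-zero
 * result means the backing pages are still present, zero means the object
 * was already purged and its contents are lost:
 *
 *	if (!msm_gem_madvise(obj, MSM_MADV_WILLNEED))
 *		... buffer contents gone, caller must reinitialize ...
 */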
 845
 846void msm_gem_purge(struct drm_gem_object *obj)
 847{
 848	struct drm_device *dev = obj->dev;
 849	struct msm_drm_private *priv = obj->dev->dev_private;
 850	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 851
 852	msm_gem_assert_locked(obj);
 853	GEM_WARN_ON(!is_purgeable(msm_obj));
 
 854
 855	/* Get rid of any iommu mapping(s): */
 856	put_iova_spaces(obj, true);
 857
 858	msm_gem_vunmap(obj);
 859
 860	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 861
 862	put_pages(obj);
 863
 864	put_iova_vmas(obj);
 865
 866	mutex_lock(&priv->lru.lock);
 867	/* A one-way transition: */
 868	msm_obj->madv = __MSM_MADV_PURGED;
 869	mutex_unlock(&priv->lru.lock);
 870
 
 871	drm_gem_free_mmap_offset(obj);
 872
 873	/* Our goal here is to return as much of the memory as
 874	 * is possible back to the system as we are called from OOM.
 875	 * To do this we must instruct the shmfs to drop all of its
 876	 * backing pages, *now*.
 877	 */
 878	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 879
 880	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 881			0, (loff_t)-1);
 
 
 882}
 883
 884/*
 885 * Unpin the backing pages and make them available to be swapped out.
 886 */
 887void msm_gem_evict(struct drm_gem_object *obj)
 888{
 889	struct drm_device *dev = obj->dev;
 890	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 891
 892	msm_gem_assert_locked(obj);
 893	GEM_WARN_ON(is_unevictable(msm_obj));
 894
 895	/* Get rid of any iommu mapping(s): */
 896	put_iova_spaces(obj, false);
 897
 898	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 899
 900	put_pages(obj);
 
 901}
 902
 903void msm_gem_vunmap(struct drm_gem_object *obj)
 904{
 905	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 906
 907	msm_gem_assert_locked(obj);
 
 
 
 908
 909	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
 910		return;
 911
 912	vunmap(msm_obj->vaddr);
 913	msm_obj->vaddr = NULL;
 914}
 915
 916bool msm_gem_active(struct drm_gem_object *obj)
 
 917{
 918	msm_gem_assert_locked(obj);
 919
 920	if (to_msm_bo(obj)->pin_count)
 921		return true;
 922
 923	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
 
 
 924}
 925
 926int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 927{
 928	bool write = !!(op & MSM_PREP_WRITE);
 929	unsigned long remain =
 930		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 931	long ret;
 932
 933	if (op & MSM_PREP_BOOST) {
 934		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
 935				      ktime_get());
 936	}
 937
 938	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
 939				    true,  remain);
 940	if (ret == 0)
 941		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 942	else if (ret < 0)
 943		return ret;
 944
 945	/* TODO cache maintenance */
 946
 947	return 0;
 948}
 949
 950int msm_gem_cpu_fini(struct drm_gem_object *obj)
 951{
 952	/* TODO cache maintenance */
 953	return 0;
 954}
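/* Sketch of the prep/fini pairing around CPU access (hypothetical caller,
 * timeout handling elided); MSM_PREP_READ/MSM_PREP_WRITE select which fences
 * to wait on and MSM_PREP_NOSYNC turns the wait into a non-blocking check:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU writes to the buffer ...
 *	msm_gem_cpu_fini(obj);
 */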
 955
 956#ifdef CONFIG_DEBUG_FS
 957void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 958		struct msm_gem_stats *stats)
 959{
 960	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 961	struct dma_resv *robj = obj->resv;
 
 
 962	struct msm_gem_vma *vma;
 963	uint64_t off = drm_vma_node_start(&obj->vma_node);
 964	const char *madv;
 965
 966	msm_gem_lock(obj);
 967
 968	stats->all.count++;
 969	stats->all.size += obj->size;
 970
 971	if (msm_gem_active(obj)) {
 972		stats->active.count++;
 973		stats->active.size += obj->size;
 974	}
 975
 976	if (msm_obj->pages) {
 977		stats->resident.count++;
 978		stats->resident.size += obj->size;
 979	}
 980
 981	switch (msm_obj->madv) {
 982	case __MSM_MADV_PURGED:
 983		stats->purged.count++;
 984		stats->purged.size += obj->size;
 985		madv = " purged";
 986		break;
 987	case MSM_MADV_DONTNEED:
 988		stats->purgeable.count++;
 989		stats->purgeable.size += obj->size;
 990		madv = " purgeable";
 991		break;
 992	case MSM_MADV_WILLNEED:
 993	default:
 994		madv = "";
 995		break;
 996	}
 997
 998	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 999			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
1000			obj->name, kref_read(&obj->refcount),
1001			off, msm_obj->vaddr);
1002
1003	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
1004
1005	if (!list_empty(&msm_obj->vmas)) {
1006
1007		seq_puts(m, "      vmas:");
1008
1009		list_for_each_entry(vma, &msm_obj->vmas, list) {
1010			const char *name, *comm;
1011			if (vma->aspace) {
1012				struct msm_gem_address_space *aspace = vma->aspace;
1013				struct task_struct *task =
1014					get_pid_task(aspace->pid, PIDTYPE_PID);
1015				if (task) {
1016					comm = kstrdup(task->comm, GFP_KERNEL);
1017					put_task_struct(task);
1018				} else {
1019					comm = NULL;
1020				}
1021				name = aspace->name;
1022			} else {
1023				name = comm = NULL;
1024			}
1025			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
1026				name, comm ? ":" : "", comm ? comm : "",
1027				vma->aspace, vma->iova,
1028				vma->mapped ? "mapped" : "unmapped");
1029			kfree(comm);
1030		}
1031
1032		seq_puts(m, "\n");
1033	}
1034
1035	dma_resv_describe(robj, m);
1036	msm_gem_unlock(obj);
1037}
1038
1039void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1040{
1041	struct msm_gem_stats stats = {};
1042	struct msm_gem_object *msm_obj;
 
 
1043
1044	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1045	list_for_each_entry(msm_obj, list, node) {
1046		struct drm_gem_object *obj = &msm_obj->base;
1047		seq_puts(m, "   ");
1048		msm_gem_describe(obj, m, &stats);
 
 
1049	}
1050
1051	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1052			stats.all.count, stats.all.size);
1053	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1054			stats.active.count, stats.active.size);
1055	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1056			stats.resident.count, stats.resident.size);
1057	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1058			stats.purgeable.count, stats.purgeable.size);
1059	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1060			stats.purged.count, stats.purged.size);
1061}
1062#endif
1063
1064/* don't call directly!  Use drm_gem_object_put() */
1065static void msm_gem_free_object(struct drm_gem_object *obj)
1066{
1067	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1068	struct drm_device *dev = obj->dev;
1069	struct msm_drm_private *priv = dev->dev_private;
1070
1071	mutex_lock(&priv->obj_lock);
1072	list_del(&msm_obj->node);
1073	mutex_unlock(&priv->obj_lock);
1074
1075	put_iova_spaces(obj, true);
1076
1077	if (obj->import_attach) {
1078		GEM_WARN_ON(msm_obj->vaddr);
 
1079
1080		/* Don't drop the pages for imported dmabuf, as they are not
1081		 * ours, just free the array we allocated:
1082		 */
1083		kvfree(msm_obj->pages);
1084
1085		put_iova_vmas(obj);
1086
1087		drm_prime_gem_destroy(obj, msm_obj->sgt);
1088	} else {
1089		msm_gem_vunmap(obj);
1090		put_pages(obj);
1091		put_iova_vmas(obj);
1092	}
1093
1094	drm_gem_object_release(obj);
1095
1096	kfree(msm_obj->metadata);
1097	kfree(msm_obj);
1098}
1099
1100static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1101{
1102	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1103
1104	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1105	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1106
1107	return 0;
1108}
1109
1110/* convenience method to construct a GEM buffer object, and userspace handle */
1111int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1112		uint32_t size, uint32_t flags, uint32_t *handle,
1113		char *name)
1114{
1115	struct drm_gem_object *obj;
1116	int ret;
1117
1118	obj = msm_gem_new(dev, size, flags);
1119
1120	if (IS_ERR(obj))
1121		return PTR_ERR(obj);
1122
1123	if (name)
1124		msm_gem_object_set_name(obj, "%s", name);
1125
1126	ret = drm_gem_handle_create(file, obj, handle);
1127
1128	/* drop reference from allocate - handle holds it now */
1129	drm_gem_object_put(obj);
1130
1131	return ret;
1132}
1133
1134static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1135{
1136	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1137	enum drm_gem_object_status status = 0;
1138
1139	if (msm_obj->pages)
1140		status |= DRM_GEM_OBJECT_RESIDENT;
1141
1142	if (msm_obj->madv == MSM_MADV_DONTNEED)
1143		status |= DRM_GEM_OBJECT_PURGEABLE;
1144
1145	return status;
1146}
1147
1148static const struct vm_operations_struct vm_ops = {
1149	.fault = msm_gem_fault,
1150	.open = drm_gem_vm_open,
1151	.close = drm_gem_vm_close,
1152};
1153
1154static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1155	.free = msm_gem_free_object,
1156	.open = msm_gem_open,
1157	.close = msm_gem_close,
1158	.pin = msm_gem_prime_pin,
1159	.unpin = msm_gem_prime_unpin,
1160	.get_sg_table = msm_gem_prime_get_sg_table,
1161	.vmap = msm_gem_prime_vmap,
1162	.vunmap = msm_gem_prime_vunmap,
1163	.mmap = msm_gem_object_mmap,
1164	.status = msm_gem_status,
1165	.vm_ops = &vm_ops,
1166};
1167
1168static int msm_gem_new_impl(struct drm_device *dev,
1169		uint32_t size, uint32_t flags,
1170		struct drm_gem_object **obj)
 
1171{
1172	struct msm_drm_private *priv = dev->dev_private;
1173	struct msm_gem_object *msm_obj;
1174
1175	switch (flags & MSM_BO_CACHE_MASK) {
 
1176	case MSM_BO_CACHED:
1177	case MSM_BO_WC:
1178		break;
1179	case MSM_BO_CACHED_COHERENT:
1180		if (priv->has_cached_coherent)
1181			break;
1182		fallthrough;
1183	default:
1184		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1185				(flags & MSM_BO_CACHE_MASK));
1186		return -EINVAL;
1187	}
1188
1189	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1190	if (!msm_obj)
1191		return -ENOMEM;
1192
 
 
1193	msm_obj->flags = flags;
1194	msm_obj->madv = MSM_MADV_WILLNEED;
1195
1196	INIT_LIST_HEAD(&msm_obj->node);
1197	INIT_LIST_HEAD(&msm_obj->vmas);
1198
1199	*obj = &msm_obj->base;
1200	(*obj)->funcs = &msm_gem_object_funcs;
1201
1202	return 0;
1203}
1204
1205struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
 
1206{
1207	struct msm_drm_private *priv = dev->dev_private;
1208	struct msm_gem_object *msm_obj;
1209	struct drm_gem_object *obj = NULL;
1210	bool use_vram = false;
1211	int ret;
1212
1213	size = PAGE_ALIGN(size);
1214
1215	if (!msm_use_mmu(dev))
1216		use_vram = true;
1217	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1218		use_vram = true;
1219
1220	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1221		return ERR_PTR(-EINVAL);
1222
1223	/* Disallow zero sized objects as they make the underlying
1224	 * infrastructure grumpy
1225	 */
1226	if (size == 0)
1227		return ERR_PTR(-EINVAL);
1228
1229	ret = msm_gem_new_impl(dev, size, flags, &obj);
1230	if (ret)
1231		return ERR_PTR(ret);
1232
1233	msm_obj = to_msm_bo(obj);
1234
1235	if (use_vram) {
1236		struct msm_gem_vma *vma;
1237		struct page **pages;
 
1238
1239		drm_gem_private_object_init(dev, obj, size);
1240
1241		msm_gem_lock(obj);
1242
1243		vma = add_vma(obj, NULL);
1244		msm_gem_unlock(obj);
1245		if (IS_ERR(vma)) {
1246			ret = PTR_ERR(vma);
1247			goto fail;
1248		}
1249
1250		to_msm_bo(obj)->vram_node = &vma->node;
1251
1252		msm_gem_lock(obj);
 
1253		pages = get_pages(obj);
1254		msm_gem_unlock(obj);
1255		if (IS_ERR(pages)) {
1256			ret = PTR_ERR(pages);
1257			goto fail;
1258		}
1259
1260		vma->iova = physaddr(obj);
1261	} else {
1262		ret = drm_gem_object_init(dev, obj, size);
1263		if (ret)
1264			goto fail;
1265		/*
1266		 * Our buffers are kept pinned, so allocating them from the
1267		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1268		 * See comments above new_inode() why this is required _and_
1269		 * expected if you're going to pin these pages.
1270		 */
1271		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1272	}
1273
1274	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1275
1276	mutex_lock(&priv->obj_lock);
1277	list_add_tail(&msm_obj->node, &priv->objects);
1278	mutex_unlock(&priv->obj_lock);
1279
1280	ret = drm_gem_create_mmap_offset(obj);
1281	if (ret)
1282		goto fail;
1283
1284	return obj;
1285
1286fail:
1287	drm_gem_object_put(obj);
1288	return ERR_PTR(ret);
1289}
1290
1291struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1292		struct dma_buf *dmabuf, struct sg_table *sgt)
1293{
1294	struct msm_drm_private *priv = dev->dev_private;
1295	struct msm_gem_object *msm_obj;
1296	struct drm_gem_object *obj;
1297	uint32_t size;
1298	int ret, npages;
1299
1300	/* if we don't have IOMMU, don't bother pretending we can import: */
1301	if (!msm_use_mmu(dev)) {
1302		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1303		return ERR_PTR(-EINVAL);
1304	}
1305
1306	size = PAGE_ALIGN(dmabuf->size);
1307
1308	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1309	if (ret)
1310		return ERR_PTR(ret);
1311
1312	drm_gem_private_object_init(dev, obj, size);
1313
1314	npages = size / PAGE_SIZE;
1315
1316	msm_obj = to_msm_bo(obj);
1317	msm_gem_lock(obj);
1318	msm_obj->sgt = sgt;
1319	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1320	if (!msm_obj->pages) {
1321		msm_gem_unlock(obj);
1322		ret = -ENOMEM;
1323		goto fail;
1324	}
1325
1326	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1327	if (ret) {
1328		msm_gem_unlock(obj);
1329		goto fail;
1330	}
1331
1332	msm_gem_unlock(obj);
1333
1334	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1335
1336	mutex_lock(&priv->obj_lock);
1337	list_add_tail(&msm_obj->node, &priv->objects);
1338	mutex_unlock(&priv->obj_lock);
1339
1340	ret = drm_gem_create_mmap_offset(obj);
1341	if (ret)
1342		goto fail;
1343
1344	return obj;
1345
1346fail:
1347	drm_gem_object_put(obj);
1348	return ERR_PTR(ret);
1349}
1350
1351void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1352		uint32_t flags, struct msm_gem_address_space *aspace,
1353		struct drm_gem_object **bo, uint64_t *iova)
1354{
1355	void *vaddr;
1356	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1357	int ret;
1358
1359	if (IS_ERR(obj))
1360		return ERR_CAST(obj);
1361
1362	if (iova) {
1363		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1364		if (ret)
1365			goto err;
1366	}
1367
1368	vaddr = msm_gem_get_vaddr(obj);
1369	if (IS_ERR(vaddr)) {
1370		msm_gem_unpin_iova(obj, aspace);
1371		ret = PTR_ERR(vaddr);
1372		goto err;
1373	}
1374
1375	if (bo)
1376		*bo = obj;
1377
1378	return vaddr;
1379err:
1380	drm_gem_object_put(obj);
 
 
 
1381
1382	return ERR_PTR(ret);
1383
1384}
1385
1386void msm_gem_kernel_put(struct drm_gem_object *bo,
1387		struct msm_gem_address_space *aspace)
1388{
1389	if (IS_ERR_OR_NULL(bo))
1390		return;
1391
1392	msm_gem_put_vaddr(bo);
1393	msm_gem_unpin_iova(bo, aspace);
1394	drm_gem_object_put(bo);
1395}
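/* Minimal sketch of the kernel_new/kernel_put convenience pair (hypothetical
 * size and flags; the address space comes from the caller's GPU context):
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	... ptr is the CPU mapping, iova the pinned GPU address ...
 *	msm_gem_kernel_put(bo, aspace);
 */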
1396
1397void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1398{
1399	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1400	va_list ap;
1401
1402	if (!fmt)
1403		return;
1404
1405	va_start(ap, fmt);
1406	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1407	va_end(ap);
1408}
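/* Illustrative naming call (hypothetical ring variable): the name is only
 * used for debugfs/describe output and is truncated to sizeof(msm_obj->name):
 *
 *	msm_gem_object_set_name(obj, "ring%d", ring->id);
 */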
v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
 
 
   7#include <linux/spinlock.h>
   8#include <linux/shmem_fs.h>
   9#include <linux/dma-buf.h>
  10#include <linux/pfn_t.h>
  11
  12#include <drm/drm_prime.h>
 
 
 
  13
  14#include "msm_drv.h"
  15#include "msm_fence.h"
  16#include "msm_gem.h"
  17#include "msm_gpu.h"
  18#include "msm_mmu.h"
  19
  20static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
  21
  22
  23static dma_addr_t physaddr(struct drm_gem_object *obj)
  24{
  25	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  26	struct msm_drm_private *priv = obj->dev->dev_private;
  27	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  28			priv->vram.paddr;
  29}
  30
  31static bool use_pages(struct drm_gem_object *obj)
  32{
  33	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  34	return !msm_obj->vram_node;
  35}
  36
  37/*
  38 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
  39 * API.  Really GPU cache is out of scope here (handled on cmdstream)
  40 * and all we need to do is invalidate newly allocated pages before
  41 * mapping to CPU as uncached/writecombine.
  42 *
  43 * On top of this, we have the added headache, that depending on
  44 * display generation, the display's iommu may be wired up to either
  45 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
  46 * that here we either have dma-direct or iommu ops.
  47 *
   48 * Let this be a cautionary tale of abstraction gone wrong.
  49 */
  50
  51static void sync_for_device(struct msm_gem_object *msm_obj)
  52{
  53	struct device *dev = msm_obj->base.dev->dev;
  54
  55	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
  56		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
  57			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  58	} else {
  59		dma_map_sg(dev, msm_obj->sgt->sgl,
  60			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 
  61	}
  62}
  63
  64static void sync_for_cpu(struct msm_gem_object *msm_obj)
  65{
  66	struct device *dev = msm_obj->base.dev->dev;
  67
  68	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
  69		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
  70			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  71	} else {
  72		dma_unmap_sg(dev, msm_obj->sgt->sgl,
  73			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  74	}
  75}
  76
  77/* allocate pages from VRAM carveout, used when no IOMMU: */
  78static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  79{
  80	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  81	struct msm_drm_private *priv = obj->dev->dev_private;
  82	dma_addr_t paddr;
  83	struct page **p;
  84	int ret, i;
  85
  86	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  87	if (!p)
  88		return ERR_PTR(-ENOMEM);
  89
  90	spin_lock(&priv->vram.lock);
  91	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
  92	spin_unlock(&priv->vram.lock);
  93	if (ret) {
  94		kvfree(p);
  95		return ERR_PTR(ret);
  96	}
  97
  98	paddr = physaddr(obj);
  99	for (i = 0; i < npages; i++) {
 100		p[i] = phys_to_page(paddr);
 101		paddr += PAGE_SIZE;
 102	}
 103
 104	return p;
 105}
 106
 107static struct page **get_pages(struct drm_gem_object *obj)
 108{
 109	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 110
 
 
 111	if (!msm_obj->pages) {
 112		struct drm_device *dev = obj->dev;
 113		struct page **p;
 114		int npages = obj->size >> PAGE_SHIFT;
 115
 116		if (use_pages(obj))
 117			p = drm_gem_get_pages(obj);
 118		else
 119			p = get_pages_vram(obj, npages);
 120
 121		if (IS_ERR(p)) {
 122			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 123					PTR_ERR(p));
 124			return p;
 125		}
 126
 
 
 127		msm_obj->pages = p;
 128
 129		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
 130		if (IS_ERR(msm_obj->sgt)) {
 131			void *ptr = ERR_CAST(msm_obj->sgt);
 132
 133			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 134			msm_obj->sgt = NULL;
 135			return ptr;
 136		}
 137
 138		/* For non-cached buffers, ensure the new pages are clean
 139		 * because display controller, GPU, etc. are not coherent:
 140		 */
 141		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 142			sync_for_device(msm_obj);
 
 
 143	}
 144
 145	return msm_obj->pages;
 146}
 147
 148static void put_pages_vram(struct drm_gem_object *obj)
 149{
 150	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 151	struct msm_drm_private *priv = obj->dev->dev_private;
 152
 153	spin_lock(&priv->vram.lock);
 154	drm_mm_remove_node(msm_obj->vram_node);
 155	spin_unlock(&priv->vram.lock);
 156
 157	kvfree(msm_obj->pages);
 158}
 159
 160static void put_pages(struct drm_gem_object *obj)
 161{
 162	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 163
 164	if (msm_obj->pages) {
 165		if (msm_obj->sgt) {
 166			/* For non-cached buffers, ensure the new
 167			 * pages are clean because display controller,
 168			 * GPU, etc. are not coherent:
 169			 */
 170			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 171				sync_for_cpu(msm_obj);
 172
 173			sg_free_table(msm_obj->sgt);
 174			kfree(msm_obj->sgt);
 
 175		}
 176
 
 
 177		if (use_pages(obj))
 178			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 179		else
 180			put_pages_vram(obj);
 181
 182		msm_obj->pages = NULL;
 
 183	}
 184}
 185
 186struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 
 187{
 188	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 189	struct page **p;
 190
 191	mutex_lock(&msm_obj->lock);
 192
 193	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 194		mutex_unlock(&msm_obj->lock);
 
 195		return ERR_PTR(-EBUSY);
 196	}
 197
 198	p = get_pages(obj);
 199	mutex_unlock(&msm_obj->lock);
 200	return p;
 201}
 202
 203void msm_gem_put_pages(struct drm_gem_object *obj)
 
 
 
 204{
 205	/* when we start tracking the pin count, then do something here */
 206}
 207
 208int msm_gem_mmap_obj(struct drm_gem_object *obj,
 209		struct vm_area_struct *vma)
 210{
 211	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 212
 213	vma->vm_flags &= ~VM_PFNMAP;
 214	vma->vm_flags |= VM_MIXEDMAP;
 
 
 215
 216	if (msm_obj->flags & MSM_BO_WC) {
 217		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 218	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
 219		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 220	} else {
 221		/*
 222		 * Shunt off cached objs to shmem file so they have their own
 223		 * address_space (so unmap_mapping_range does what we want,
 224		 * in particular in the case of mmap'd dmabufs)
 225		 */
 226		fput(vma->vm_file);
 227		get_file(obj->filp);
 228		vma->vm_pgoff = 0;
 229		vma->vm_file  = obj->filp;
 230
 231		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 232	}
 
 233
 234	return 0;
 235}
 236
 237int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 238{
 239	int ret;
 240
 241	ret = drm_gem_mmap(filp, vma);
 242	if (ret) {
 243		DBG("mmap failed: %d", ret);
 244		return ret;
 245	}
 246
 247	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 248}
 249
 250vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 251{
 252	struct vm_area_struct *vma = vmf->vma;
 253	struct drm_gem_object *obj = vma->vm_private_data;
 254	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 255	struct page **pages;
 256	unsigned long pfn;
 257	pgoff_t pgoff;
 258	int err;
 259	vm_fault_t ret;
 260
 261	/*
 262	 * vm_ops.open/drm_gem_mmap_obj and close get and put
  263	 * a reference on obj. So, we don't need to hold one here.
 264	 */
 265	err = mutex_lock_interruptible(&msm_obj->lock);
 266	if (err) {
 267		ret = VM_FAULT_NOPAGE;
 268		goto out;
 269	}
 270
 271	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 272		mutex_unlock(&msm_obj->lock);
 273		return VM_FAULT_SIGBUS;
 274	}
 275
 276	/* make sure we have pages attached now */
 277	pages = get_pages(obj);
 278	if (IS_ERR(pages)) {
 279		ret = vmf_error(PTR_ERR(pages));
 280		goto out_unlock;
 281	}
 282
 283	/* We don't use vmf->pgoff since that has the fake offset: */
 284	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 285
 286	pfn = page_to_pfn(pages[pgoff]);
 287
 288	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 289			pfn, pfn << PAGE_SHIFT);
 290
 291	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 292out_unlock:
 293	mutex_unlock(&msm_obj->lock);
 294out:
 295	return ret;
 296}
 297
 298/** get mmap offset */
 299static uint64_t mmap_offset(struct drm_gem_object *obj)
 300{
 301	struct drm_device *dev = obj->dev;
 302	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 303	int ret;
 304
 305	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 306
 307	/* Make it mmapable */
 308	ret = drm_gem_create_mmap_offset(obj);
 309
 310	if (ret) {
 311		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 312		return 0;
 313	}
 314
 315	return drm_vma_node_offset_addr(&obj->vma_node);
 316}
 317
 318uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 319{
 320	uint64_t offset;
 321	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 322
 323	mutex_lock(&msm_obj->lock);
 324	offset = mmap_offset(obj);
 325	mutex_unlock(&msm_obj->lock);
 326	return offset;
 327}
 328
 329static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 330		struct msm_gem_address_space *aspace)
 331{
 332	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 333	struct msm_gem_vma *vma;
 334
 335	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 336
 337	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 338	if (!vma)
 339		return ERR_PTR(-ENOMEM);
 340
 341	vma->aspace = aspace;
 342
 343	list_add_tail(&vma->list, &msm_obj->vmas);
 344
 345	return vma;
 346}
 347
 348static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 349		struct msm_gem_address_space *aspace)
 350{
 351	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 352	struct msm_gem_vma *vma;
 353
 354	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 355
 356	list_for_each_entry(vma, &msm_obj->vmas, list) {
 357		if (vma->aspace == aspace)
 358			return vma;
 359	}
 360
 361	return NULL;
 362}
 363
 364static void del_vma(struct msm_gem_vma *vma)
 365{
 366	if (!vma)
 367		return;
 368
 369	list_del(&vma->list);
 370	kfree(vma);
 371}
 372
 373/* Called with msm_obj->lock locked */
 374static void
 375put_iova(struct drm_gem_object *obj)
 376{
 377	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 378	struct msm_gem_vma *vma, *tmp;
 379
 380	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 381
 382	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 383		if (vma->aspace) {
 384			msm_gem_purge_vma(vma->aspace, vma);
 385			msm_gem_close_vma(vma->aspace, vma);
 386		}
 387		del_vma(vma);
 388	}
 389}
 390
 391static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 392		struct msm_gem_address_space *aspace, uint64_t *iova)
 
 393{
 394	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 395	struct msm_gem_vma *vma;
 396	int ret = 0;
 397
 398	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 399
 400	vma = lookup_vma(obj, aspace);
 401
 402	if (!vma) {
 
 
 403		vma = add_vma(obj, aspace);
 404		if (IS_ERR(vma))
 405			return PTR_ERR(vma);
 406
 407		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
 
 408		if (ret) {
 409			del_vma(vma);
 410			return ret;
 411		}
 
 
 
 412	}
 413
 414	*iova = vma->iova;
 415	return 0;
 416}
 417
 418static int msm_gem_pin_iova(struct drm_gem_object *obj,
 419		struct msm_gem_address_space *aspace)
 420{
 421	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 422	struct msm_gem_vma *vma;
 423	struct page **pages;
 424	int prot = IOMMU_READ;
 425
 426	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 427		prot |= IOMMU_WRITE;
 428
 429	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 430
 431	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 432		return -EBUSY;
 433
 434	vma = lookup_vma(obj, aspace);
 435	if (WARN_ON(!vma))
 436		return -EINVAL;
 437
 438	pages = get_pages(obj);
 439	if (IS_ERR(pages))
 440		return PTR_ERR(pages);
 441
 442	return msm_gem_map_vma(aspace, vma, prot,
 443			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 444}
 445
 446/* get iova and pin it. Should have a matching put */
 447int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 448		struct msm_gem_address_space *aspace, uint64_t *iova)
 449{
 450	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 451	u64 local;
 452	int ret;
 453
 454	mutex_lock(&msm_obj->lock);
 455
 456	ret = msm_gem_get_iova_locked(obj, aspace, &local);
 
 457
 458	if (!ret)
 459		ret = msm_gem_pin_iova(obj, aspace);
 460
 461	if (!ret)
 462		*iova = local;
 
 463
 464	mutex_unlock(&msm_obj->lock);
 465	return ret;
 466}
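/* Illustrative (hypothetical) caller pattern for this older API, mirroring
 * the current code: a successful get-and-pin is balanced with
 * msm_gem_unpin_iova() on the same address space:
 *
 *	uint64_t iova;
 *	if (!msm_gem_get_and_pin_iova(obj, aspace, &iova)) {
 *		... use iova ...
 *		msm_gem_unpin_iova(obj, aspace);
 *	}
 */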
 467
 468/*
 469 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 470 * valid for the life of the object
 471 */
 472int msm_gem_get_iova(struct drm_gem_object *obj,
 473		struct msm_gem_address_space *aspace, uint64_t *iova)
 474{
 475	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 476	int ret;
 477
 478	mutex_lock(&msm_obj->lock);
 479	ret = msm_gem_get_iova_locked(obj, aspace, iova);
 480	mutex_unlock(&msm_obj->lock);
 481
 482	return ret;
 483}
 484
 485/* get iova without taking a reference, used in places where you have
 486 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 487 */
 488uint64_t msm_gem_iova(struct drm_gem_object *obj,
 489		struct msm_gem_address_space *aspace)
 490{
 491	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 492	struct msm_gem_vma *vma;
 493
 494	mutex_lock(&msm_obj->lock);
 495	vma = lookup_vma(obj, aspace);
 496	mutex_unlock(&msm_obj->lock);
 497	WARN_ON(!vma);
 498
 499	return vma ? vma->iova : 0;
 500}
 501
 502/*
  503 * Unpin an iova by updating the reference counts. The memory isn't actually
 504 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 505 * to get rid of it
 506 */
 507void msm_gem_unpin_iova(struct drm_gem_object *obj,
 508		struct msm_gem_address_space *aspace)
 509{
 510	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 511	struct msm_gem_vma *vma;
 512
 513	mutex_lock(&msm_obj->lock);
 514	vma = lookup_vma(obj, aspace);
 515
 516	if (!WARN_ON(!vma))
 517		msm_gem_unmap_vma(aspace, vma);
 518
 519	mutex_unlock(&msm_obj->lock);
 520}
 521
 522int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 523		struct drm_mode_create_dumb *args)
 524{
 525	args->pitch = align_pitch(args->width, args->bpp);
 526	args->size  = PAGE_ALIGN(args->pitch * args->height);
 527	return msm_gem_new_handle(dev, file, args->size,
 528			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 529}
 530
 531int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 532		uint32_t handle, uint64_t *offset)
 533{
 534	struct drm_gem_object *obj;
 535	int ret = 0;
 536
 537	/* GEM does all our handle to object mapping */
 538	obj = drm_gem_object_lookup(file, handle);
 539	if (obj == NULL) {
 540		ret = -ENOENT;
 541		goto fail;
 542	}
 543
 544	*offset = msm_gem_mmap_offset(obj);
 545
 546	drm_gem_object_put_unlocked(obj);
 547
 548fail:
 549	return ret;
 550}
 551
 552static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 553{
 554	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 555	int ret = 0;
 556
 557	mutex_lock(&msm_obj->lock);
 
 
 
 558
 559	if (WARN_ON(msm_obj->madv > madv)) {
 560		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 561			msm_obj->madv, madv);
 562		mutex_unlock(&msm_obj->lock);
 563		return ERR_PTR(-EBUSY);
 564	}
 565
 566	/* increment vmap_count *before* vmap() call, so shrinker can
 567	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
 568	 * This guarantees that we won't try to msm_gem_vunmap() this
 569	 * same object from within the vmap() call (while we already
 570	 * hold msm_obj->lock)
 571	 */
 572	msm_obj->vmap_count++;
 573
 574	if (!msm_obj->vaddr) {
 575		struct page **pages = get_pages(obj);
 576		if (IS_ERR(pages)) {
 577			ret = PTR_ERR(pages);
 578			goto fail;
 579		}
 580		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 581				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 582		if (msm_obj->vaddr == NULL) {
 583			ret = -ENOMEM;
 584			goto fail;
 585		}
 586	}
 587
 588	mutex_unlock(&msm_obj->lock);
 589	return msm_obj->vaddr;
 590
 591fail:
 592	msm_obj->vmap_count--;
 593	mutex_unlock(&msm_obj->lock);
 594	return ERR_PTR(ret);
 595}
 596
 597void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 598{
 599	return get_vaddr(obj, MSM_MADV_WILLNEED);
 600}
 601
 602/*
 603 * Don't use this!  It is for the very special case of dumping
  604 * submits from GPU hangs or faults, where the bo may already
 605 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 606 * active list.
 607 */
 608void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 609{
 610	return get_vaddr(obj, __MSM_MADV_PURGED);
 611}
 612
 613void msm_gem_put_vaddr(struct drm_gem_object *obj)
 614{
 615	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 616
 617	mutex_lock(&msm_obj->lock);
 618	WARN_ON(msm_obj->vmap_count < 1);
 
 619	msm_obj->vmap_count--;
 620	mutex_unlock(&msm_obj->lock);
 621}
 622
 623/* Update madvise status, returns true if not purged, else
 624 * false or -errno.
 625 */
 626int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 627{
 
 628	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 629
 630	mutex_lock(&msm_obj->lock);
 631
 632	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 633
 634	if (msm_obj->madv != __MSM_MADV_PURGED)
 635		msm_obj->madv = madv;
 636
 637	madv = msm_obj->madv;
 638
 639	mutex_unlock(&msm_obj->lock);
 640
 641	return (madv != __MSM_MADV_PURGED);
 642}
 643
 644void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 645{
 646	struct drm_device *dev = obj->dev;
 
 647	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 648
 649	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 650	WARN_ON(!is_purgeable(msm_obj));
 651	WARN_ON(obj->import_attach);
 652
 653	mutex_lock_nested(&msm_obj->lock, subclass);
 
 654
 655	put_iova(obj);
 656
 657	msm_gem_vunmap_locked(obj);
 658
 659	put_pages(obj);
 660
 661	msm_obj->madv = __MSM_MADV_PURGED;
 662
 663	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 664	drm_gem_free_mmap_offset(obj);
 665
 666	/* Our goal here is to return as much of the memory as possible
 667	 * back to the system, since we are called from the OOM path.
 668	 * To do this we must instruct the shmfs to drop all of its
 669	 * backing pages, *now*.
 670	 */
 671	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 672
 673	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 674			0, (loff_t)-1);
 675
 676	mutex_unlock(&msm_obj->lock);
 677}
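/*
 * Illustrative sketch, not part of this file: the shrinker side (see
 * msm_gem_shrinker.c) consumes msm_gem_purge() roughly like this,
 * walking the inactive list and purging objects that userspace marked
 * MSM_MADV_DONTNEED.  The helper name is made up; OBJ_LOCK_SHRINKER is
 * the lock subclass used to keep lockdep happy when the shrinker
 * recurses into msm_obj->lock.
 */
static unsigned long example_purge_some(struct msm_drm_private *priv,
		unsigned long nr_to_scan)
{
	struct msm_gem_object *msm_obj;
	unsigned long freed = 0;

	/* caller is assumed to hold dev->struct_mutex */
	list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
		if (freed >= nr_to_scan)
			break;
		if (is_purgeable(msm_obj)) {
			msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
			freed += msm_obj->base.size >> PAGE_SHIFT;
		}
	}

	return freed;
}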
 678
 679static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 680{
 681	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 682
 683	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 684
 685	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 686		return;
 687
 688	vunmap(msm_obj->vaddr);
 689	msm_obj->vaddr = NULL;
 690}
 691
 692void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 693{
 694	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 695
 696	mutex_lock_nested(&msm_obj->lock, subclass);
 697	msm_gem_vunmap_locked(obj);
 698	mutex_unlock(&msm_obj->lock);
 699}
 700
 701/* must be called before _move_to_active().. */
 702int msm_gem_sync_object(struct drm_gem_object *obj,
 703		struct msm_fence_context *fctx, bool exclusive)
 704{
 705	struct dma_resv_list *fobj;
 706	struct dma_fence *fence;
 707	int i, ret;
 708
 709	fobj = dma_resv_get_list(obj->resv);
 710	if (!fobj || (fobj->shared_count == 0)) {
 711		fence = dma_resv_get_excl(obj->resv);
 712		/* don't need to wait on our own fences, since ring is fifo */
 713		if (fence && (fence->context != fctx->context)) {
 714			ret = dma_fence_wait(fence, true);
 715			if (ret)
 716				return ret;
 717		}
 718	}
 719
 720	if (!exclusive || !fobj)
 721		return 0;
 722
 723	for (i = 0; i < fobj->shared_count; i++) {
 724		fence = rcu_dereference_protected(fobj->shared[i],
 725						dma_resv_held(obj->resv));
 726		if (fence->context != fctx->context) {
 727			ret = dma_fence_wait(fence, true);
 728			if (ret)
 729				return ret;
 730		}
 731	}
 732
 733	return 0;
 734}
 735
 736void msm_gem_move_to_active(struct drm_gem_object *obj,
 737		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 738{
 739	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 740	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 741	msm_obj->gpu = gpu;
 742	if (exclusive)
 743		dma_resv_add_excl_fence(obj->resv, fence);
 744	else
 745		dma_resv_add_shared_fence(obj->resv, fence);
 746	list_del_init(&msm_obj->mm_list);
 747	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 748}
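/*
 * Illustrative sketch, not part of this file: per the comment above
 * msm_gem_sync_object(), the submit path is expected to sync against
 * other contexts' fences first and only then publish its own fence via
 * msm_gem_move_to_active().  The wrapper name is made up.
 */
static int example_attach_fence(struct drm_gem_object *obj,
		struct msm_gpu *gpu, struct msm_fence_context *fctx,
		struct dma_fence *fence, bool exclusive)
{
	int ret = msm_gem_sync_object(obj, fctx, exclusive);

	if (ret)
		return ret;

	msm_gem_move_to_active(obj, gpu, exclusive, fence);
	return 0;
}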
 749
 750void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 751{
 752	struct drm_device *dev = obj->dev;
 753	struct msm_drm_private *priv = dev->dev_private;
 754	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 755
 756	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 757
 758	msm_obj->gpu = NULL;
 759	list_del_init(&msm_obj->mm_list);
 760	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 761}
 762
 763int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 764{
 765	bool write = !!(op & MSM_PREP_WRITE);
 766	unsigned long remain =
 767		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 768	long ret;
 769
 770	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
 771						  true,  remain);
 772	if (ret == 0)
 773		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 774	else if (ret < 0)
 775		return ret;
 776
 777	/* TODO cache maintenance */
 778
 779	return 0;
 780}
 781
 782int msm_gem_cpu_fini(struct drm_gem_object *obj)
 783{
 784	/* TODO cache maintenance */
 785	return 0;
 786}
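/*
 * Illustrative sketch, not part of this file: CPU access from the
 * ioctl path brackets the actual reads or writes with
 * msm_gem_cpu_prep() and msm_gem_cpu_fini(), so pending GPU fences are
 * waited on first.  MSM_PREP_READ comes from the msm uapi; the wrapper
 * name is made up.
 */
static int example_cpu_read(struct drm_gem_object *obj, ktime_t *timeout)
{
	int ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, timeout);

	if (ret)
		return ret;

	/* ... CPU reads the buffer contents here ... */

	return msm_gem_cpu_fini(obj);
}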
 787
 788#ifdef CONFIG_DEBUG_FS
 789static void describe_fence(struct dma_fence *fence, const char *type,
 790		struct seq_file *m)
 791{
 792	if (!dma_fence_is_signaled(fence))
 793		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
 794				fence->ops->get_driver_name(fence),
 795				fence->ops->get_timeline_name(fence),
 796				fence->seqno);
 797}
 798
 799void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 800{
 801	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 802	struct dma_resv *robj = obj->resv;
 803	struct dma_resv_list *fobj;
 804	struct dma_fence *fence;
 805	struct msm_gem_vma *vma;
 806	uint64_t off = drm_vma_node_start(&obj->vma_node);
 807	const char *madv;
 808
 809	mutex_lock(&msm_obj->lock);
 810
 811	switch (msm_obj->madv) {
 812	case __MSM_MADV_PURGED:
 813		madv = " purged";
 814		break;
 815	case MSM_MADV_DONTNEED:
 816		madv = " purgeable";
 817		break;
 818	case MSM_MADV_WILLNEED:
 819	default:
 820		madv = "";
 821		break;
 822	}
 823
 824	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 825			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 826			obj->name, kref_read(&obj->refcount),
 827			off, msm_obj->vaddr);
 828
 829	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
 830
 831	if (!list_empty(&msm_obj->vmas)) {
 832
 833		seq_puts(m, "      vmas:");
 834
 835		list_for_each_entry(vma, &msm_obj->vmas, list)
 836			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
 837				vma->aspace != NULL ? vma->aspace->name : NULL,
 838				vma->iova, vma->mapped ? "mapped" : "unmapped",
 839				vma->inuse);
 840
 841		seq_puts(m, "\n");
 842	}
 843
 844	rcu_read_lock();
 845	fobj = rcu_dereference(robj->fence);
 846	if (fobj) {
 847		unsigned int i, shared_count = fobj->shared_count;
 848
 849		for (i = 0; i < shared_count; i++) {
 850			fence = rcu_dereference(fobj->shared[i]);
 851			describe_fence(fence, "Shared", m);
 852		}
 853	}
 854
 855	fence = rcu_dereference(robj->fence_excl);
 856	if (fence)
 857		describe_fence(fence, "Exclusive", m);
 858	rcu_read_unlock();
 859
 860	mutex_unlock(&msm_obj->lock);
 861}
 862
 863void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 864{
 865	struct msm_gem_object *msm_obj;
 866	int count = 0;
 867	size_t size = 0;
 868
 869	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 870	list_for_each_entry(msm_obj, list, mm_list) {
 871		struct drm_gem_object *obj = &msm_obj->base;
 872		seq_puts(m, "   ");
 873		msm_gem_describe(obj, m);
 874		count++;
 875		size += obj->size;
 876	}
 877
 878	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
 879}
 880#endif
 881
 882/* don't call directly!  Use drm_gem_object_put() and friends */
 883void msm_gem_free_object(struct drm_gem_object *obj)
 884{
 885	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 886	struct drm_device *dev = obj->dev;
 887	struct msm_drm_private *priv = dev->dev_private;
 888
 889	if (llist_add(&msm_obj->freed, &priv->free_list))
 890		queue_work(priv->wq, &priv->free_work);
 891}
 892
 893static void free_object(struct msm_gem_object *msm_obj)
 894{
 895	struct drm_gem_object *obj = &msm_obj->base;
 896	struct drm_device *dev = obj->dev;
 897
 898	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 899
 900	/* object should not be on active list: */
 901	WARN_ON(is_active(msm_obj));
 902
 903	list_del(&msm_obj->mm_list);
 904
 905	mutex_lock(&msm_obj->lock);
 906
 907	put_iova(obj);
 908
 909	if (obj->import_attach) {
 910		if (msm_obj->vaddr)
 911			dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
 912
 913		/* Don't drop the pages for imported dmabuf, as they are not
 914		 * ours, just free the array we allocated:
 915		 */
 916		if (msm_obj->pages)
 917			kvfree(msm_obj->pages);
 918
 919		drm_prime_gem_destroy(obj, msm_obj->sgt);
 920	} else {
 921		msm_gem_vunmap_locked(obj);
 922		put_pages(obj);
 923	}
 924
 925	drm_gem_object_release(obj);
 926
 927	mutex_unlock(&msm_obj->lock);
 928	kfree(msm_obj);
 929}
 930
 931void msm_gem_free_work(struct work_struct *work)
 932{
 933	struct msm_drm_private *priv =
 934		container_of(work, struct msm_drm_private, free_work);
 935	struct drm_device *dev = priv->dev;
 936	struct llist_node *freed;
 937	struct msm_gem_object *msm_obj, *next;
 938
 939	while ((freed = llist_del_all(&priv->free_list))) {
 940
 941		mutex_lock(&dev->struct_mutex);
 942
 943		llist_for_each_entry_safe(msm_obj, next,
 944					  freed, freed)
 945			free_object(msm_obj);
 946
 947		mutex_unlock(&dev->struct_mutex);
 948
 949		if (need_resched())
 950			break;
 951	}
 952}
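/*
 * Illustrative sketch, not part of this file: the deferred-free
 * machinery above only works if the driver init path (msm_drv.c) wires
 * up the llist and the work item, roughly like this.
 */
static void example_init_deferred_free(struct msm_drm_private *priv)
{
	init_llist_head(&priv->free_list);
	INIT_WORK(&priv->free_work, msm_gem_free_work);
}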
 953
 954/* Convenience method to construct a GEM buffer object and a userspace handle */
 955int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 956		uint32_t size, uint32_t flags, uint32_t *handle,
 957		char *name)
 958{
 959	struct drm_gem_object *obj;
 960	int ret;
 961
 962	obj = msm_gem_new(dev, size, flags);
 963
 964	if (IS_ERR(obj))
 965		return PTR_ERR(obj);
 966
 967	if (name)
 968		msm_gem_object_set_name(obj, "%s", name);
 969
 970	ret = drm_gem_handle_create(file, obj, handle);
 971
 972	/* drop reference from allocate - handle holds it now */
 973	drm_gem_object_put_unlocked(obj);
 974
 975	return ret;
 976}
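/*
 * Illustrative sketch, not part of this file: the MSM_GEM_NEW ioctl
 * (handled in msm_drv.c) essentially reduces to one call to the
 * convenience helper above; flag validation is omitted here.
 */
static int example_gem_new_ioctl(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	return msm_gem_new_handle(dev, file, args->size, args->flags,
			&args->handle, NULL);
}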
 977
 978static int msm_gem_new_impl(struct drm_device *dev,
 979		uint32_t size, uint32_t flags,
 980		struct drm_gem_object **obj,
 981		bool struct_mutex_locked)
 982{
 983	struct msm_drm_private *priv = dev->dev_private;
 984	struct msm_gem_object *msm_obj;
 985
 986	switch (flags & MSM_BO_CACHE_MASK) {
 987	case MSM_BO_UNCACHED:
 988	case MSM_BO_CACHED:
 989	case MSM_BO_WC:
 990		break;
 991	default:
 992		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
 993				(flags & MSM_BO_CACHE_MASK));
 994		return -EINVAL;
 995	}
 996
 997	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
 998	if (!msm_obj)
 999		return -ENOMEM;
1000
1001	mutex_init(&msm_obj->lock);
1002
1003	msm_obj->flags = flags;
1004	msm_obj->madv = MSM_MADV_WILLNEED;
1005
1006	INIT_LIST_HEAD(&msm_obj->submit_entry);
1007	INIT_LIST_HEAD(&msm_obj->vmas);
1008
1009	if (struct_mutex_locked) {
1010		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1011		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1012	} else {
1013		mutex_lock(&dev->struct_mutex);
1014		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1015		mutex_unlock(&dev->struct_mutex);
1016	}
1017
1018	*obj = &msm_obj->base;
1019
1020	return 0;
1021}
1022
1023static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
1024		uint32_t size, uint32_t flags, bool struct_mutex_locked)
1025{
1026	struct msm_drm_private *priv = dev->dev_private;
1027	struct drm_gem_object *obj = NULL;
1028	bool use_vram = false;
1029	int ret;
1030
1031	size = PAGE_ALIGN(size);
1032
1033	if (!msm_use_mmu(dev))
1034		use_vram = true;
1035	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1036		use_vram = true;
1037
1038	if (WARN_ON(use_vram && !priv->vram.size))
1039		return ERR_PTR(-EINVAL);
1040
1041	/* Disallow zero-sized objects as they make the underlying
1042	 * infrastructure grumpy
1043	 */
1044	if (size == 0)
1045		return ERR_PTR(-EINVAL);
1046
1047	ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
1048	if (ret)
1049		goto fail;
1050
1051	if (use_vram) {
1052		struct msm_gem_vma *vma;
1053		struct page **pages;
1054		struct msm_gem_object *msm_obj = to_msm_bo(obj);
1055
1056		mutex_lock(&msm_obj->lock);
1057
1058		vma = add_vma(obj, NULL);
1059		mutex_unlock(&msm_obj->lock);
1060		if (IS_ERR(vma)) {
1061			ret = PTR_ERR(vma);
1062			goto fail;
1063		}
1064
1065		to_msm_bo(obj)->vram_node = &vma->node;
1066
1067		drm_gem_private_object_init(dev, obj, size);
1068
1069		pages = get_pages(obj);
1070		if (IS_ERR(pages)) {
1071			ret = PTR_ERR(pages);
1072			goto fail;
1073		}
1074
1075		vma->iova = physaddr(obj);
1076	} else {
1077		ret = drm_gem_object_init(dev, obj, size);
1078		if (ret)
1079			goto fail;
1080		/*
1081		 * Our buffers are kept pinned, so allocating them from the
1082		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1083		 * See comments above new_inode() for why this is required _and_
1084		 * expected if you're going to pin these pages.
1085		 */
1086		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1087	}
1088
1089	return obj;
1090
1091fail:
1092	drm_gem_object_put_unlocked(obj);
1093	return ERR_PTR(ret);
1094}
1095
1096struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
1097		uint32_t size, uint32_t flags)
1098{
1099	return _msm_gem_new(dev, size, flags, true);
1100}
1101
1102struct drm_gem_object *msm_gem_new(struct drm_device *dev,
1103		uint32_t size, uint32_t flags)
1104{
1105	return _msm_gem_new(dev, size, flags, false);
1106}
1107
1108struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1109		struct dma_buf *dmabuf, struct sg_table *sgt)
1110{
1111	struct msm_gem_object *msm_obj;
1112	struct drm_gem_object *obj;
1113	uint32_t size;
1114	int ret, npages;
1115
1116	/* if we don't have an IOMMU, don't bother pretending we can import: */
1117	if (!msm_use_mmu(dev)) {
1118		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1119		return ERR_PTR(-EINVAL);
1120	}
1121
1122	size = PAGE_ALIGN(dmabuf->size);
1123
1124	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
1125	if (ret)
1126		goto fail;
1127
1128	drm_gem_private_object_init(dev, obj, size);
1129
1130	npages = size / PAGE_SIZE;
1131
1132	msm_obj = to_msm_bo(obj);
1133	mutex_lock(&msm_obj->lock);
1134	msm_obj->sgt = sgt;
1135	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1136	if (!msm_obj->pages) {
1137		mutex_unlock(&msm_obj->lock);
1138		ret = -ENOMEM;
1139		goto fail;
1140	}
1141
1142	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
1143	if (ret) {
1144		mutex_unlock(&msm_obj->lock);
1145		goto fail;
1146	}
1147
1148	mutex_unlock(&msm_obj->lock);
1149	return obj;
1150
1151fail:
1152	drm_gem_object_put_unlocked(obj);
1153	return ERR_PTR(ret);
1154}
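/*
 * Illustrative sketch, not part of this file: msm_gem_import() is the
 * backend for the driver's ->gem_prime_import_sg_table() hook (see
 * msm_gem_prime.c), which the generic dma-buf import path calls with
 * the attachment's sg_table.
 */
static struct drm_gem_object *example_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach, struct sg_table *sgt)
{
	return msm_gem_import(dev, attach->dmabuf, sgt);
}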
1155
1156static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1157		uint32_t flags, struct msm_gem_address_space *aspace,
1158		struct drm_gem_object **bo, uint64_t *iova, bool locked)
1159{
1160	void *vaddr;
1161	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
1162	int ret;
1163
1164	if (IS_ERR(obj))
1165		return ERR_CAST(obj);
1166
1167	if (iova) {
1168		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1169		if (ret)
1170			goto err;
1171	}
1172
1173	vaddr = msm_gem_get_vaddr(obj);
1174	if (IS_ERR(vaddr)) {
1175		msm_gem_unpin_iova(obj, aspace);
1176		ret = PTR_ERR(vaddr);
1177		goto err;
1178	}
1179
1180	if (bo)
1181		*bo = obj;
1182
1183	return vaddr;
1184err:
1185	if (locked)
1186		drm_gem_object_put(obj);
1187	else
1188		drm_gem_object_put_unlocked(obj);
1189
1190	return ERR_PTR(ret);
1191
1192}
1193
1194void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1195		uint32_t flags, struct msm_gem_address_space *aspace,
1196		struct drm_gem_object **bo, uint64_t *iova)
1197{
1198	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
1199}
1200
1201void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
1202		uint32_t flags, struct msm_gem_address_space *aspace,
1203		struct drm_gem_object **bo, uint64_t *iova)
1204{
1205	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
1206}
1207
1208void msm_gem_kernel_put(struct drm_gem_object *bo,
1209		struct msm_gem_address_space *aspace, bool locked)
1210{
1211	if (IS_ERR_OR_NULL(bo))
1212		return;
1213
1214	msm_gem_put_vaddr(bo);
1215	msm_gem_unpin_iova(bo, aspace);
1216
1217	if (locked)
1218		drm_gem_object_put(bo);
1219	else
1220		drm_gem_object_put_unlocked(bo);
1221}
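/*
 * Illustrative sketch, not part of this file: kernel-internal buffers
 * (ringbuffers, memptrs, etc.) pair msm_gem_kernel_new() with
 * msm_gem_kernel_put().  The size and flags below are placeholders.
 */
static int example_kernel_bo(struct drm_device *dev,
		struct msm_gem_address_space *aspace)
{
	struct drm_gem_object *bo = NULL;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(dev, SZ_4K, MSM_BO_WC, aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* ... fill the buffer via vaddr, hand iova to the GPU ... */

	msm_gem_kernel_put(bo, aspace, false);
	return 0;
}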
1222
1223void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1224{
1225	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1226	va_list ap;
1227
1228	if (!fmt)
1229		return;
1230
1231	va_start(ap, fmt);
1232	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1233	va_end(ap);
1234}
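/*
 * Illustrative sketch, not part of this file: callers typically give
 * buffers a short printf-style name right after allocation, which then
 * shows up in the debugfs dump produced by msm_gem_describe().
 */
static void example_name_ring_bo(struct drm_gem_object *bo, int id)
{
	msm_gem_object_set_name(bo, "ring%d", id);
}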