drivers/gpu/drm/msm/msm_gem.c (Linux v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
   7#include <linux/dma-map-ops.h>
   8#include <linux/vmalloc.h>
   9#include <linux/spinlock.h>
  10#include <linux/shmem_fs.h>
  11#include <linux/dma-buf.h>
  12#include <linux/pfn_t.h>
  13
  14#include <drm/drm_prime.h>
  15
  16#include "msm_drv.h"
  17#include "msm_fence.h"
  18#include "msm_gem.h"
  19#include "msm_gpu.h"
  20#include "msm_mmu.h"
  21
  22static dma_addr_t physaddr(struct drm_gem_object *obj)
  23{
  24	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  25	struct msm_drm_private *priv = obj->dev->dev_private;
  26	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  27			priv->vram.paddr;
  28}
  29
  30static bool use_pages(struct drm_gem_object *obj)
  31{
  32	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  33	return !msm_obj->vram_node;
  34}
  35
  36/*
  37 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
  38 * API.  Really GPU cache is out of scope here (handled on cmdstream)
  39 * and all we need to do is invalidate newly allocated pages before
  40 * mapping to CPU as uncached/writecombine.
  41 *
  42 * On top of this, we have the added headache, that depending on
  43 * display generation, the display's iommu may be wired up to either
  44 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
  45 * that here we either have dma-direct or iommu ops.
  46 *
  47 * Let this be a cautionary tale of abstraction gone wrong.
  48 */
  49
  50static void sync_for_device(struct msm_gem_object *msm_obj)
  51{
  52	struct device *dev = msm_obj->base.dev->dev;
  53
  54	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  55}
  56
  57static void sync_for_cpu(struct msm_gem_object *msm_obj)
  58{
  59	struct device *dev = msm_obj->base.dev->dev;
  60
  61	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  62}
  63
  64static void update_lru_active(struct drm_gem_object *obj)
  65{
  66	struct msm_drm_private *priv = obj->dev->dev_private;
  67	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  68
  69	GEM_WARN_ON(!msm_obj->pages);
  70
  71	if (msm_obj->pin_count) {
  72		drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
  73	} else if (msm_obj->madv == MSM_MADV_WILLNEED) {
  74		drm_gem_lru_move_tail_locked(&priv->lru.willneed, obj);
  75	} else {
  76		GEM_WARN_ON(msm_obj->madv != MSM_MADV_DONTNEED);
  77
  78		drm_gem_lru_move_tail_locked(&priv->lru.dontneed, obj);
  79	}
  80}
  81
  82static void update_lru_locked(struct drm_gem_object *obj)
  83{
  84	struct msm_drm_private *priv = obj->dev->dev_private;
  85	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  86
  87	msm_gem_assert_locked(&msm_obj->base);
  88
  89	if (!msm_obj->pages) {
  90		GEM_WARN_ON(msm_obj->pin_count);
  91
  92		drm_gem_lru_move_tail_locked(&priv->lru.unbacked, obj);
  93	} else {
  94		update_lru_active(obj);
  95	}
  96}
  97
  98static void update_lru(struct drm_gem_object *obj)
  99{
 100	struct msm_drm_private *priv = obj->dev->dev_private;
 101
 102	mutex_lock(&priv->lru.lock);
 103	update_lru_locked(obj);
 104	mutex_unlock(&priv->lru.lock);
 105}
 106
 107/* allocate pages from VRAM carveout, used when no IOMMU: */
 108static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 109{
 110	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 111	struct msm_drm_private *priv = obj->dev->dev_private;
 112	dma_addr_t paddr;
 113	struct page **p;
 114	int ret, i;
 115
 116	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 117	if (!p)
 118		return ERR_PTR(-ENOMEM);
 119
 120	spin_lock(&priv->vram.lock);
 121	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
 122	spin_unlock(&priv->vram.lock);
 123	if (ret) {
 124		kvfree(p);
 125		return ERR_PTR(ret);
 126	}
 127
 128	paddr = physaddr(obj);
 129	for (i = 0; i < npages; i++) {
 130		p[i] = pfn_to_page(__phys_to_pfn(paddr));
 131		paddr += PAGE_SIZE;
 132	}
 133
 134	return p;
 135}
 136
 137static struct page **get_pages(struct drm_gem_object *obj)
 138{
 139	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 140
 141	msm_gem_assert_locked(obj);
 142
 143	if (!msm_obj->pages) {
 144		struct drm_device *dev = obj->dev;
 145		struct page **p;
 146		int npages = obj->size >> PAGE_SHIFT;
 147
 148		if (use_pages(obj))
 149			p = drm_gem_get_pages(obj);
 150		else
 151			p = get_pages_vram(obj, npages);
 152
 153		if (IS_ERR(p)) {
 154			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 155					PTR_ERR(p));
 156			return p;
 157		}
 158
 159		msm_obj->pages = p;
 160
 161		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
 162		if (IS_ERR(msm_obj->sgt)) {
 163			void *ptr = ERR_CAST(msm_obj->sgt);
 164
 165			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 166			msm_obj->sgt = NULL;
 167			return ptr;
 168		}
 169
 170		/* For non-cached buffers, ensure the new pages are clean
 171		 * because display controller, GPU, etc. are not coherent:
 172		 */
 173		if (msm_obj->flags & MSM_BO_WC)
 174			sync_for_device(msm_obj);
 175
 176		update_lru(obj);
 177	}
 178
 179	return msm_obj->pages;
 180}
 181
 182static void put_pages_vram(struct drm_gem_object *obj)
 183{
 184	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 185	struct msm_drm_private *priv = obj->dev->dev_private;
 186
 187	spin_lock(&priv->vram.lock);
 188	drm_mm_remove_node(msm_obj->vram_node);
 189	spin_unlock(&priv->vram.lock);
 190
 191	kvfree(msm_obj->pages);
 192}
 193
 194static void put_pages(struct drm_gem_object *obj)
 195{
 196	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 197
 198	if (msm_obj->pages) {
 199		if (msm_obj->sgt) {
 200			/* For non-cached buffers, ensure the new
 201			 * pages are clean because display controller,
 202			 * GPU, etc. are not coherent:
 203			 */
 204			if (msm_obj->flags & MSM_BO_WC)
 205				sync_for_cpu(msm_obj);
 206
 207			sg_free_table(msm_obj->sgt);
 208			kfree(msm_obj->sgt);
 209			msm_obj->sgt = NULL;
 210		}
 211
 212		if (use_pages(obj))
 213			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 214		else
 215			put_pages_vram(obj);
 216
 217		msm_obj->pages = NULL;
 218		update_lru(obj);
 219	}
 220}
 221
 222static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
 223					      unsigned madv)
 224{
 225	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 226
 227	msm_gem_assert_locked(obj);
 228
 229	if (msm_obj->madv > madv) {
 230		DRM_DEV_DEBUG_DRIVER(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 231				     msm_obj->madv, madv);
 232		return ERR_PTR(-EBUSY);
 233	}
 234
 235	return get_pages(obj);
 236}
 237
 238/*
 239 * Update the pin count of the object, call under lru.lock
 240 */
 241void msm_gem_pin_obj_locked(struct drm_gem_object *obj)
 242{
 243	struct msm_drm_private *priv = obj->dev->dev_private;
 244
 245	msm_gem_assert_locked(obj);
 246
 247	to_msm_bo(obj)->pin_count++;
 248	drm_gem_lru_move_tail_locked(&priv->lru.pinned, obj);
 249}
 250
 251static void pin_obj_locked(struct drm_gem_object *obj)
 252{
 253	struct msm_drm_private *priv = obj->dev->dev_private;
 254
 255	mutex_lock(&priv->lru.lock);
 256	msm_gem_pin_obj_locked(obj);
 257	mutex_unlock(&priv->lru.lock);
 258}
 259
 260struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
 261{
 262	struct page **p;
 263
 264	msm_gem_lock(obj);
 265	p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
 266	if (!IS_ERR(p))
 267		pin_obj_locked(obj);
 268	msm_gem_unlock(obj);
 269
 270	return p;
 271}
 272
 273void msm_gem_unpin_pages(struct drm_gem_object *obj)
 274{
 275	msm_gem_lock(obj);
 276	msm_gem_unpin_locked(obj);
 277	msm_gem_unlock(obj);
 278}
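
/*
 * Illustrative sketch (not part of the original file): a caller that needs
 * the backing pages pinned (the dma-buf export path is one example) pairs
 * msm_gem_pin_pages() with msm_gem_unpin_pages().  The helper below is
 * hypothetical.
 */
static int __maybe_unused example_pin_for_export(struct drm_gem_object *obj)
{
	struct page **pages = msm_gem_pin_pages(obj);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* ... hand the pages (or an sg_table built from them) to the importer ... */

	msm_gem_unpin_pages(obj);
	return 0;
}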
 279
 280static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
 281{
 282	if (msm_obj->flags & MSM_BO_WC)
 283		return pgprot_writecombine(prot);
 284	return prot;
 285}
 286
 287static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 288{
 289	struct vm_area_struct *vma = vmf->vma;
 290	struct drm_gem_object *obj = vma->vm_private_data;
 291	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 292	struct page **pages;
 293	unsigned long pfn;
 294	pgoff_t pgoff;
 295	int err;
 296	vm_fault_t ret;
 297
 298	/*
 299	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 300	 * a reference on obj. So, we dont need to hold one here.
 301	 */
 302	err = msm_gem_lock_interruptible(obj);
 303	if (err) {
 304		ret = VM_FAULT_NOPAGE;
 305		goto out;
 306	}
 307
 308	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 309		msm_gem_unlock(obj);
 310		return VM_FAULT_SIGBUS;
 311	}
 312
 313	/* make sure we have pages attached now */
 314	pages = get_pages(obj);
 315	if (IS_ERR(pages)) {
 316		ret = vmf_error(PTR_ERR(pages));
 317		goto out_unlock;
 318	}
 319
 320	/* We don't use vmf->pgoff since that has the fake offset: */
 321	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 322
 323	pfn = page_to_pfn(pages[pgoff]);
 324
 325	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 326			pfn, pfn << PAGE_SHIFT);
 327
 328	ret = vmf_insert_pfn(vma, vmf->address, pfn);
 329
 330out_unlock:
 331	msm_gem_unlock(obj);
 332out:
 333	return ret;
 334}
 335
 336/** get mmap offset */
 337static uint64_t mmap_offset(struct drm_gem_object *obj)
 338{
 339	struct drm_device *dev = obj->dev;
 340	int ret;
 341
 342	msm_gem_assert_locked(obj);
 343
 344	/* Make it mmapable */
 345	ret = drm_gem_create_mmap_offset(obj);
 346
 347	if (ret) {
 348		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 349		return 0;
 350	}
 351
 352	return drm_vma_node_offset_addr(&obj->vma_node);
 353}
 354
 355uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 356{
 357	uint64_t offset;
 358
 359	msm_gem_lock(obj);
 360	offset = mmap_offset(obj);
 361	msm_gem_unlock(obj);
 362	return offset;
 363}
 364
 365static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 366		struct msm_gem_address_space *aspace)
 367{
 368	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 369	struct msm_gem_vma *vma;
 370
 371	msm_gem_assert_locked(obj);
 372
 373	vma = msm_gem_vma_new(aspace);
 374	if (!vma)
 375		return ERR_PTR(-ENOMEM);
 376
 377	list_add_tail(&vma->list, &msm_obj->vmas);
 378
 379	return vma;
 380}
 381
 382static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 383		struct msm_gem_address_space *aspace)
 384{
 385	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 386	struct msm_gem_vma *vma;
 387
 388	msm_gem_assert_locked(obj);
 389
 390	list_for_each_entry(vma, &msm_obj->vmas, list) {
 391		if (vma->aspace == aspace)
 392			return vma;
 393	}
 394
 395	return NULL;
 396}
 397
 398static void del_vma(struct msm_gem_vma *vma)
 399{
 400	if (!vma)
 401		return;
 402
 403	list_del(&vma->list);
 404	kfree(vma);
 405}
 406
 407/*
 408 * If close is true, this also closes the VMA (releasing the allocated
 409 * iova range) in addition to removing the iommu mapping.  In the eviction
 410 * case (!close), we keep the iova allocated, but only remove the iommu
 411 * mapping.
 412 */
 413static void
 414put_iova_spaces(struct drm_gem_object *obj, bool close)
 415{
 416	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 417	struct msm_gem_vma *vma;
 418
 419	msm_gem_assert_locked(obj);
 420
 421	list_for_each_entry(vma, &msm_obj->vmas, list) {
 422		if (vma->aspace) {
 423			msm_gem_vma_purge(vma);
 424			if (close)
 425				msm_gem_vma_close(vma);
 426		}
 427	}
 428}
 429
 430/* Called with msm_obj locked */
 431static void
 432put_iova_vmas(struct drm_gem_object *obj)
 433{
 434	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 435	struct msm_gem_vma *vma, *tmp;
 436
 437	msm_gem_assert_locked(obj);
 438
 439	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 440		del_vma(vma);
 441	}
 442}
 443
 444static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
 445		struct msm_gem_address_space *aspace,
 446		u64 range_start, u64 range_end)
 447{
 448	struct msm_gem_vma *vma;
 449
 450	msm_gem_assert_locked(obj);
 451
 452	vma = lookup_vma(obj, aspace);
 453
 454	if (!vma) {
 455		int ret;
 456
 457		vma = add_vma(obj, aspace);
 458		if (IS_ERR(vma))
 459			return vma;
 460
 461		ret = msm_gem_vma_init(vma, obj->size,
 462			range_start, range_end);
 463		if (ret) {
 464			del_vma(vma);
 465			return ERR_PTR(ret);
 466		}
 467	} else {
 468		GEM_WARN_ON(vma->iova < range_start);
 469		GEM_WARN_ON((vma->iova + obj->size) > range_end);
 470	}
 471
 472	return vma;
 473}
 474
 475int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 476{
 477	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 478	struct page **pages;
 479	int prot = IOMMU_READ;
 480
 481	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 482		prot |= IOMMU_WRITE;
 483
 484	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 485		prot |= IOMMU_PRIV;
 486
 487	if (msm_obj->flags & MSM_BO_CACHED_COHERENT)
 488		prot |= IOMMU_CACHE;
 489
 490	msm_gem_assert_locked(obj);
 491
 492	pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
 493	if (IS_ERR(pages))
 494		return PTR_ERR(pages);
 495
 496	return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
 497}
 498
 499void msm_gem_unpin_locked(struct drm_gem_object *obj)
 500{
 501	struct msm_drm_private *priv = obj->dev->dev_private;
 502	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 503
 504	msm_gem_assert_locked(obj);
 505
 506	mutex_lock(&priv->lru.lock);
 507	msm_obj->pin_count--;
 508	GEM_WARN_ON(msm_obj->pin_count < 0);
 509	update_lru_locked(obj);
 510	mutex_unlock(&priv->lru.lock);
 511}
 512
 513/* Special unpin path for use in fence-signaling path, avoiding the need
 514 * to hold the obj lock by only depending on things that are protected by
 515 * the LRU lock.  In particular we know that we already have backing
 516 * and that the object's dma_resv has the fence for the current
 517 * submit/job which will prevent us racing against page eviction.
 518 */
 519void msm_gem_unpin_active(struct drm_gem_object *obj)
 520{
 521	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 522
 523	msm_obj->pin_count--;
 524	GEM_WARN_ON(msm_obj->pin_count < 0);
 525	update_lru_active(obj);
 526}
 527
 528struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
 529					   struct msm_gem_address_space *aspace)
 530{
 531	return get_vma_locked(obj, aspace, 0, U64_MAX);
 532}
 533
 534static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
 535		struct msm_gem_address_space *aspace, uint64_t *iova,
 536		u64 range_start, u64 range_end)
 537{
 538	struct msm_gem_vma *vma;
 539	int ret;
 540
 541	msm_gem_assert_locked(obj);
 542
 543	vma = get_vma_locked(obj, aspace, range_start, range_end);
 544	if (IS_ERR(vma))
 545		return PTR_ERR(vma);
 546
 547	ret = msm_gem_pin_vma_locked(obj, vma);
 548	if (!ret) {
 549		*iova = vma->iova;
 550		pin_obj_locked(obj);
 551	}
 552
 553	return ret;
 554}
 555
 556/*
 557 * Get the iova and pin it.  Should have a matching put.
 558 * Limits the iova to the specified range.
 559 */
 560int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 561		struct msm_gem_address_space *aspace, uint64_t *iova,
 562		u64 range_start, u64 range_end)
 563{
 564	int ret;
 565
 566	msm_gem_lock(obj);
 567	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
 568	msm_gem_unlock(obj);
 569
 570	return ret;
 571}
 572
 573/* get iova and pin it. Should have a matching put */
 574int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 575		struct msm_gem_address_space *aspace, uint64_t *iova)
 576{
 577	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
 578}
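
/*
 * Illustrative sketch (not part of the original file): a typical caller pairs
 * msm_gem_get_and_pin_iova() with msm_gem_unpin_iova() once the GPU no longer
 * references the buffer.  The helper below is hypothetical; the address space
 * is assumed to come from the caller (e.g. the GPU's aspace).
 */
static int __maybe_unused example_pin_for_gpu(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
	if (ret)
		return ret;

	/* ... emit commands that reference 'iova' ... */

	msm_gem_unpin_iova(obj, aspace);
	return 0;
}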
 579
 580/*
 581 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 582 * valid for the life of the object
 583 */
 584int msm_gem_get_iova(struct drm_gem_object *obj,
 585		struct msm_gem_address_space *aspace, uint64_t *iova)
 586{
 587	struct msm_gem_vma *vma;
 588	int ret = 0;
 589
 590	msm_gem_lock(obj);
 591	vma = get_vma_locked(obj, aspace, 0, U64_MAX);
 592	if (IS_ERR(vma)) {
 593		ret = PTR_ERR(vma);
 594	} else {
 595		*iova = vma->iova;
 596	}
 597	msm_gem_unlock(obj);
 598
 599	return ret;
 600}
 601
 602static int clear_iova(struct drm_gem_object *obj,
 603		      struct msm_gem_address_space *aspace)
 604{
 605	struct msm_gem_vma *vma = lookup_vma(obj, aspace);
 606
 607	if (!vma)
 608		return 0;
 609
 610	msm_gem_vma_purge(vma);
 611	msm_gem_vma_close(vma);
 612	del_vma(vma);
 613
 614	return 0;
 615}
 616
 617/*
 618 * Get the requested iova but don't pin it.  Fails if the requested iova is
 619 * not available.  Doesn't need a put because iovas are currently valid for
 620 * the life of the object.
 621 *
 622 * Setting an iova of zero will clear the vma.
 623 */
 624int msm_gem_set_iova(struct drm_gem_object *obj,
 625		     struct msm_gem_address_space *aspace, uint64_t iova)
 626{
 627	int ret = 0;
 628
 629	msm_gem_lock(obj);
 630	if (!iova) {
 631		ret = clear_iova(obj, aspace);
 632	} else {
 633		struct msm_gem_vma *vma;
 634		vma = get_vma_locked(obj, aspace, iova, iova + obj->size);
 635		if (IS_ERR(vma)) {
 636			ret = PTR_ERR(vma);
 637		} else if (GEM_WARN_ON(vma->iova != iova)) {
 638			clear_iova(obj, aspace);
 639			ret = -EBUSY;
 640		}
 641	}
 642	msm_gem_unlock(obj);
 643
 644	return ret;
 645}
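
/*
 * Illustrative sketch (not part of the original file): a caller that wants a
 * buffer mapped at a fixed device address (as the MSM_INFO_SET_IOVA ioctl
 * path does) sets it explicitly and can later clear the vma by passing zero.
 * The helper below is hypothetical.
 */
static int __maybe_unused example_fixed_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t fixed_iova)
{
	int ret = msm_gem_set_iova(obj, aspace, fixed_iova);

	if (ret)
		return ret;

	/* ... use the buffer at 'fixed_iova' ... */

	return msm_gem_set_iova(obj, aspace, 0);	/* clear the vma again */
}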
 646
 647/*
 648 * Unpin an iova by updating the reference counts. The memory isn't actually
 649 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 650 * to get rid of it
 651 */
 652void msm_gem_unpin_iova(struct drm_gem_object *obj,
 653		struct msm_gem_address_space *aspace)
 654{
 655	struct msm_gem_vma *vma;
 656
 657	msm_gem_lock(obj);
 658	vma = lookup_vma(obj, aspace);
 659	if (!GEM_WARN_ON(!vma)) {
 660		msm_gem_unpin_locked(obj);
 661	}
 662	msm_gem_unlock(obj);
 663}
 664
 665int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 666		struct drm_mode_create_dumb *args)
 667{
 668	args->pitch = align_pitch(args->width, args->bpp);
 669	args->size  = PAGE_ALIGN(args->pitch * args->height);
 670	return msm_gem_new_handle(dev, file, args->size,
 671			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 672}
 673
 674int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 675		uint32_t handle, uint64_t *offset)
 676{
 677	struct drm_gem_object *obj;
 678	int ret = 0;
 679
 680	/* GEM does all our handle to object mapping */
 681	obj = drm_gem_object_lookup(file, handle);
 682	if (obj == NULL) {
 683		ret = -ENOENT;
 684		goto fail;
 685	}
 686
 687	*offset = msm_gem_mmap_offset(obj);
 688
 689	drm_gem_object_put(obj);
 690
 691fail:
 692	return ret;
 693}
 694
 695static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 696{
 697	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 698	struct page **pages;
 699	int ret = 0;
 700
 701	msm_gem_assert_locked(obj);
 702
 703	if (obj->import_attach)
 704		return ERR_PTR(-ENODEV);
 705
 706	pages = msm_gem_pin_pages_locked(obj, madv);
 707	if (IS_ERR(pages))
 708		return ERR_CAST(pages);
 709
 710	pin_obj_locked(obj);
 711
 712	/* increment vmap_count *before* vmap() call, so shrinker can
 713	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 714	 * This guarantees that we won't try to msm_gem_vunmap() this
 715	 * same object from within the vmap() call (while we already
 716	 * hold msm_obj lock)
 717	 */
 718	msm_obj->vmap_count++;
 719
 720	if (!msm_obj->vaddr) {
 721		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 722				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
 723		if (msm_obj->vaddr == NULL) {
 724			ret = -ENOMEM;
 725			goto fail;
 726		}
 727	}
 728
 729	return msm_obj->vaddr;
 730
 731fail:
 732	msm_obj->vmap_count--;
 733	msm_gem_unpin_locked(obj);
 734	return ERR_PTR(ret);
 735}
 736
 737void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
 738{
 739	return get_vaddr(obj, MSM_MADV_WILLNEED);
 740}
 741
 742void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 743{
 744	void *ret;
 745
 746	msm_gem_lock(obj);
 747	ret = msm_gem_get_vaddr_locked(obj);
 748	msm_gem_unlock(obj);
 749
 750	return ret;
 751}
 752
 753/*
 754 * Don't use this!  It is for the very special case of dumping
 755 * submits from GPU hangs or faults, where the bo may already
 756 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 757 * active list.
 758 */
 759void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 760{
 761	return get_vaddr(obj, __MSM_MADV_PURGED);
 762}
 763
 764void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
 765{
 766	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 767
 768	msm_gem_assert_locked(obj);
 769	GEM_WARN_ON(msm_obj->vmap_count < 1);
 770
 771	msm_obj->vmap_count--;
 772	msm_gem_unpin_locked(obj);
 773}
 774
 775void msm_gem_put_vaddr(struct drm_gem_object *obj)
 776{
 777	msm_gem_lock(obj);
 778	msm_gem_put_vaddr_locked(obj);
 779	msm_gem_unlock(obj);
 780}
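
/*
 * Illustrative sketch (not part of the original file): kernel-side CPU access
 * pairs msm_gem_get_vaddr() with msm_gem_put_vaddr(); the vmap'ing is cached
 * in the object, so repeated calls after the first are cheap.  The helper
 * below is hypothetical.
 */
static void __maybe_unused example_cpu_fill(struct drm_gem_object *obj, u8 val)
{
	void *vaddr = msm_gem_get_vaddr(obj);

	if (IS_ERR(vaddr))
		return;

	memset(vaddr, val, obj->size);
	msm_gem_put_vaddr(obj);
}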
 781
 782/* Update madvise status, returns true if not purged, else
 783 * false or -errno.
 784 */
 785int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 786{
 787	struct msm_drm_private *priv = obj->dev->dev_private;
 788	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 789
 790	msm_gem_lock(obj);
 791
 792	mutex_lock(&priv->lru.lock);
 793
 794	if (msm_obj->madv != __MSM_MADV_PURGED)
 795		msm_obj->madv = madv;
 796
 797	madv = msm_obj->madv;
 798
 799	/* If the obj is inactive, we might need to move it
 800	 * between inactive lists
 801	 */
 802	update_lru_locked(obj);
 803
 804	mutex_unlock(&priv->lru.lock);
 805
 806	msm_gem_unlock(obj);
 807
 808	return (madv != __MSM_MADV_PURGED);
 809}
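
/*
 * Illustrative sketch (not part of the original file): the madvise ioctl path
 * reports back to userspace whether the backing pages survived, based on the
 * boolean-style return value above (negative values are errors).  The helper
 * below is hypothetical.
 */
static int __maybe_unused example_madvise(struct drm_gem_object *obj,
		unsigned madv, bool *retained)
{
	int ret = msm_gem_madvise(obj, madv);

	if (ret < 0)
		return ret;

	*retained = ret;	/* true if the pages were not already purged */
	return 0;
}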
 810
 811void msm_gem_purge(struct drm_gem_object *obj)
 812{
 813	struct drm_device *dev = obj->dev;
 814	struct msm_drm_private *priv = obj->dev->dev_private;
 815	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 816
 817	msm_gem_assert_locked(obj);
 818	GEM_WARN_ON(!is_purgeable(msm_obj));
 819
 820	/* Get rid of any iommu mapping(s): */
 821	put_iova_spaces(obj, true);
 822
 823	msm_gem_vunmap(obj);
 824
 825	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 826
 827	put_pages(obj);
 828
 829	put_iova_vmas(obj);
 830
 831	mutex_lock(&priv->lru.lock);
 832	/* A one-way transition: */
 833	msm_obj->madv = __MSM_MADV_PURGED;
 834	mutex_unlock(&priv->lru.lock);
 835
 836	drm_gem_free_mmap_offset(obj);
 837
 838	/* Our goal here is to return as much of the memory as
 839	 * is possible back to the system as we are called from OOM.
 840	 * To do this we must instruct the shmfs to drop all of its
 841	 * backing pages, *now*.
 842	 */
 843	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 844
 845	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 846			0, (loff_t)-1);
 847}
 848
 849/*
 850 * Unpin the backing pages and make them available to be swapped out.
 851 */
 852void msm_gem_evict(struct drm_gem_object *obj)
 853{
 854	struct drm_device *dev = obj->dev;
 855	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 856
 857	msm_gem_assert_locked(obj);
 858	GEM_WARN_ON(is_unevictable(msm_obj));
 859
 860	/* Get rid of any iommu mapping(s): */
 861	put_iova_spaces(obj, false);
 862
 863	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 864
 865	put_pages(obj);
 866}
 867
 868void msm_gem_vunmap(struct drm_gem_object *obj)
 869{
 870	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 871
 872	msm_gem_assert_locked(obj);
 873
 874	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
 875		return;
 876
 877	vunmap(msm_obj->vaddr);
 878	msm_obj->vaddr = NULL;
 879}
 880
 881bool msm_gem_active(struct drm_gem_object *obj)
 882{
 883	msm_gem_assert_locked(obj);
 884
 885	if (to_msm_bo(obj)->pin_count)
 886		return true;
 887
 888	return !dma_resv_test_signaled(obj->resv, dma_resv_usage_rw(true));
 889}
 890
 891int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 892{
 893	bool write = !!(op & MSM_PREP_WRITE);
 894	unsigned long remain =
 895		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 896	long ret;
 897
 898	if (op & MSM_PREP_BOOST) {
 899		dma_resv_set_deadline(obj->resv, dma_resv_usage_rw(write),
 900				      ktime_get());
 901	}
 902
 903	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
 904				    true,  remain);
 905	if (ret == 0)
 906		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 907	else if (ret < 0)
 908		return ret;
 909
 910	/* TODO cache maintenance */
 911
 912	return 0;
 913}
 914
 915int msm_gem_cpu_fini(struct drm_gem_object *obj)
 916{
 917	/* TODO cache maintenance */
 918	return 0;
 919}
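
/*
 * Illustrative sketch (not part of the original file): CPU access is
 * bracketed by msm_gem_cpu_prep()/msm_gem_cpu_fini().  The helper below is
 * hypothetical and the one-second timeout is an arbitrary example value.
 */
static int __maybe_unused example_cpu_access(struct drm_gem_object *obj)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), 1000);
	int ret;

	ret = msm_gem_cpu_prep(obj, MSM_PREP_READ | MSM_PREP_WRITE, &timeout);
	if (ret)
		return ret;

	/* ... access the buffer contents from the CPU ... */

	return msm_gem_cpu_fini(obj);
}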
 920
 921#ifdef CONFIG_DEBUG_FS
 922void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 923		struct msm_gem_stats *stats)
 924{
 925	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 926	struct dma_resv *robj = obj->resv;
 927	struct msm_gem_vma *vma;
 928	uint64_t off = drm_vma_node_start(&obj->vma_node);
 929	const char *madv;
 930
 931	msm_gem_lock(obj);
 932
 933	stats->all.count++;
 934	stats->all.size += obj->size;
 935
 936	if (msm_gem_active(obj)) {
 937		stats->active.count++;
 938		stats->active.size += obj->size;
 939	}
 940
 941	if (msm_obj->pages) {
 942		stats->resident.count++;
 943		stats->resident.size += obj->size;
 944	}
 945
 946	switch (msm_obj->madv) {
 947	case __MSM_MADV_PURGED:
 948		stats->purged.count++;
 949		stats->purged.size += obj->size;
 950		madv = " purged";
 951		break;
 952	case MSM_MADV_DONTNEED:
 953		stats->purgeable.count++;
 954		stats->purgeable.size += obj->size;
 955		madv = " purgeable";
 956		break;
 957	case MSM_MADV_WILLNEED:
 958	default:
 959		madv = "";
 960		break;
 961	}
 962
 963	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 964			msm_obj->flags, msm_gem_active(obj) ? 'A' : 'I',
 965			obj->name, kref_read(&obj->refcount),
 966			off, msm_obj->vaddr);
 967
 968	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
 969
 970	if (!list_empty(&msm_obj->vmas)) {
 971
 972		seq_puts(m, "      vmas:");
 973
 974		list_for_each_entry(vma, &msm_obj->vmas, list) {
 975			const char *name, *comm;
 976			if (vma->aspace) {
 977				struct msm_gem_address_space *aspace = vma->aspace;
 978				struct task_struct *task =
 979					get_pid_task(aspace->pid, PIDTYPE_PID);
 980				if (task) {
 981					comm = kstrdup(task->comm, GFP_KERNEL);
 982					put_task_struct(task);
 983				} else {
 984					comm = NULL;
 985				}
 986				name = aspace->name;
 987			} else {
 988				name = comm = NULL;
 989			}
 990			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s]",
 991				name, comm ? ":" : "", comm ? comm : "",
 992				vma->aspace, vma->iova,
 993				vma->mapped ? "mapped" : "unmapped");
 994			kfree(comm);
 995		}
 996
 997		seq_puts(m, "\n");
 998	}
 999
1000	dma_resv_describe(robj, m);
1001	msm_gem_unlock(obj);
1002}
1003
1004void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
1005{
1006	struct msm_gem_stats stats = {};
1007	struct msm_gem_object *msm_obj;
1008
1009	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
1010	list_for_each_entry(msm_obj, list, node) {
1011		struct drm_gem_object *obj = &msm_obj->base;
1012		seq_puts(m, "   ");
1013		msm_gem_describe(obj, m, &stats);
1014	}
1015
1016	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
1017			stats.all.count, stats.all.size);
1018	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
1019			stats.active.count, stats.active.size);
1020	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
1021			stats.resident.count, stats.resident.size);
1022	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
1023			stats.purgeable.count, stats.purgeable.size);
1024	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
1025			stats.purged.count, stats.purged.size);
1026}
1027#endif
1028
1029/* don't call directly!  Use drm_gem_object_put() */
1030static void msm_gem_free_object(struct drm_gem_object *obj)
1031{
1032	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1033	struct drm_device *dev = obj->dev;
1034	struct msm_drm_private *priv = dev->dev_private;
1035
1036	mutex_lock(&priv->obj_lock);
1037	list_del(&msm_obj->node);
1038	mutex_unlock(&priv->obj_lock);
1039
1040	put_iova_spaces(obj, true);
1041
1042	if (obj->import_attach) {
1043		GEM_WARN_ON(msm_obj->vaddr);
1044
1045		/* Don't drop the pages for imported dmabuf, as they are not
1046		 * ours, just free the array we allocated:
1047		 */
1048		kvfree(msm_obj->pages);
1049
1050		put_iova_vmas(obj);
1051
1052		drm_prime_gem_destroy(obj, msm_obj->sgt);
1053	} else {
1054		msm_gem_vunmap(obj);
1055		put_pages(obj);
1056		put_iova_vmas(obj);
1057	}
1058
1059	drm_gem_object_release(obj);
1060
1061	kfree(msm_obj->metadata);
1062	kfree(msm_obj);
1063}
1064
1065static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
1066{
1067	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1068
1069	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1070	vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
1071
1072	return 0;
1073}
1074
1075/* convenience method to construct a GEM buffer object, and userspace handle */
1076int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1077		uint32_t size, uint32_t flags, uint32_t *handle,
1078		char *name)
1079{
1080	struct drm_gem_object *obj;
1081	int ret;
1082
1083	obj = msm_gem_new(dev, size, flags);
1084
1085	if (IS_ERR(obj))
1086		return PTR_ERR(obj);
1087
1088	if (name)
1089		msm_gem_object_set_name(obj, "%s", name);
1090
1091	ret = drm_gem_handle_create(file, obj, handle);
1092
1093	/* drop reference from allocate - handle holds it now */
1094	drm_gem_object_put(obj);
1095
1096	return ret;
1097}
1098
1099static enum drm_gem_object_status msm_gem_status(struct drm_gem_object *obj)
1100{
1101	struct msm_gem_object *msm_obj = to_msm_bo(obj);
1102	enum drm_gem_object_status status = 0;
1103
1104	if (msm_obj->pages)
1105		status |= DRM_GEM_OBJECT_RESIDENT;
1106
1107	if (msm_obj->madv == MSM_MADV_DONTNEED)
1108		status |= DRM_GEM_OBJECT_PURGEABLE;
1109
1110	return status;
1111}
1112
1113static const struct vm_operations_struct vm_ops = {
1114	.fault = msm_gem_fault,
1115	.open = drm_gem_vm_open,
1116	.close = drm_gem_vm_close,
1117};
1118
1119static const struct drm_gem_object_funcs msm_gem_object_funcs = {
1120	.free = msm_gem_free_object,
1121	.pin = msm_gem_prime_pin,
1122	.unpin = msm_gem_prime_unpin,
1123	.get_sg_table = msm_gem_prime_get_sg_table,
1124	.vmap = msm_gem_prime_vmap,
1125	.vunmap = msm_gem_prime_vunmap,
1126	.mmap = msm_gem_object_mmap,
1127	.status = msm_gem_status,
1128	.vm_ops = &vm_ops,
1129};
1130
1131static int msm_gem_new_impl(struct drm_device *dev,
1132		uint32_t size, uint32_t flags,
1133		struct drm_gem_object **obj)
1134{
1135	struct msm_drm_private *priv = dev->dev_private;
1136	struct msm_gem_object *msm_obj;
1137
1138	switch (flags & MSM_BO_CACHE_MASK) {
1139	case MSM_BO_CACHED:
1140	case MSM_BO_WC:
1141		break;
1142	case MSM_BO_CACHED_COHERENT:
1143		if (priv->has_cached_coherent)
1144			break;
1145		fallthrough;
1146	default:
1147		DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
1148				(flags & MSM_BO_CACHE_MASK));
1149		return -EINVAL;
1150	}
1151
1152	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1153	if (!msm_obj)
1154		return -ENOMEM;
1155
1156	msm_obj->flags = flags;
1157	msm_obj->madv = MSM_MADV_WILLNEED;
1158
1159	INIT_LIST_HEAD(&msm_obj->node);
1160	INIT_LIST_HEAD(&msm_obj->vmas);
1161
1162	*obj = &msm_obj->base;
1163	(*obj)->funcs = &msm_gem_object_funcs;
1164
1165	return 0;
1166}
1167
1168struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
1169{
1170	struct msm_drm_private *priv = dev->dev_private;
1171	struct msm_gem_object *msm_obj;
1172	struct drm_gem_object *obj = NULL;
1173	bool use_vram = false;
1174	int ret;
1175
1176	size = PAGE_ALIGN(size);
1177
1178	if (!msm_use_mmu(dev))
1179		use_vram = true;
1180	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1181		use_vram = true;
1182
1183	if (GEM_WARN_ON(use_vram && !priv->vram.size))
1184		return ERR_PTR(-EINVAL);
1185
1186	/* Disallow zero sized objects as they make the underlying
1187	 * infrastructure grumpy
1188	 */
1189	if (size == 0)
1190		return ERR_PTR(-EINVAL);
1191
1192	ret = msm_gem_new_impl(dev, size, flags, &obj);
1193	if (ret)
1194		return ERR_PTR(ret);
1195
1196	msm_obj = to_msm_bo(obj);
1197
1198	if (use_vram) {
1199		struct msm_gem_vma *vma;
1200		struct page **pages;
1201
1202		drm_gem_private_object_init(dev, obj, size);
1203
1204		msm_gem_lock(obj);
1205
1206		vma = add_vma(obj, NULL);
1207		msm_gem_unlock(obj);
1208		if (IS_ERR(vma)) {
1209			ret = PTR_ERR(vma);
1210			goto fail;
1211		}
1212
1213		to_msm_bo(obj)->vram_node = &vma->node;
1214
1215		msm_gem_lock(obj);
1216		pages = get_pages(obj);
1217		msm_gem_unlock(obj);
1218		if (IS_ERR(pages)) {
1219			ret = PTR_ERR(pages);
1220			goto fail;
1221		}
1222
1223		vma->iova = physaddr(obj);
1224	} else {
1225		ret = drm_gem_object_init(dev, obj, size);
1226		if (ret)
1227			goto fail;
1228		/*
1229		 * Our buffers are kept pinned, so allocating them from the
1230		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1231		 * See comments above new_inode() why this is required _and_
1232		 * expected if you're going to pin these pages.
1233		 */
1234		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1235	}
1236
1237	drm_gem_lru_move_tail(&priv->lru.unbacked, obj);
1238
1239	mutex_lock(&priv->obj_lock);
1240	list_add_tail(&msm_obj->node, &priv->objects);
1241	mutex_unlock(&priv->obj_lock);
1242
1243	ret = drm_gem_create_mmap_offset(obj);
1244	if (ret)
1245		goto fail;
1246
1247	return obj;
1248
1249fail:
1250	drm_gem_object_put(obj);
1251	return ERR_PTR(ret);
1252}
1253
1254struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1255		struct dma_buf *dmabuf, struct sg_table *sgt)
1256{
1257	struct msm_drm_private *priv = dev->dev_private;
1258	struct msm_gem_object *msm_obj;
1259	struct drm_gem_object *obj;
1260	uint32_t size;
1261	int ret, npages;
1262
1263	/* if we don't have IOMMU, don't bother pretending we can import: */
1264	if (!msm_use_mmu(dev)) {
1265		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1266		return ERR_PTR(-EINVAL);
1267	}
1268
1269	size = PAGE_ALIGN(dmabuf->size);
1270
1271	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1272	if (ret)
1273		return ERR_PTR(ret);
1274
1275	drm_gem_private_object_init(dev, obj, size);
1276
1277	npages = size / PAGE_SIZE;
1278
1279	msm_obj = to_msm_bo(obj);
1280	msm_gem_lock(obj);
1281	msm_obj->sgt = sgt;
1282	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1283	if (!msm_obj->pages) {
1284		msm_gem_unlock(obj);
1285		ret = -ENOMEM;
1286		goto fail;
1287	}
1288
1289	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
1290	if (ret) {
1291		msm_gem_unlock(obj);
1292		goto fail;
1293	}
1294
1295	msm_gem_unlock(obj);
1296
1297	drm_gem_lru_move_tail(&priv->lru.pinned, obj);
1298
1299	mutex_lock(&priv->obj_lock);
1300	list_add_tail(&msm_obj->node, &priv->objects);
1301	mutex_unlock(&priv->obj_lock);
1302
1303	ret = drm_gem_create_mmap_offset(obj);
1304	if (ret)
1305		goto fail;
1306
1307	return obj;
1308
1309fail:
1310	drm_gem_object_put(obj);
1311	return ERR_PTR(ret);
1312}
1313
1314void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1315		uint32_t flags, struct msm_gem_address_space *aspace,
1316		struct drm_gem_object **bo, uint64_t *iova)
1317{
1318	void *vaddr;
1319	struct drm_gem_object *obj = msm_gem_new(dev, size, flags);
1320	int ret;
1321
1322	if (IS_ERR(obj))
1323		return ERR_CAST(obj);
1324
1325	if (iova) {
1326		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1327		if (ret)
1328			goto err;
1329	}
1330
1331	vaddr = msm_gem_get_vaddr(obj);
1332	if (IS_ERR(vaddr)) {
1333		msm_gem_unpin_iova(obj, aspace);
1334		ret = PTR_ERR(vaddr);
1335		goto err;
1336	}
1337
1338	if (bo)
1339		*bo = obj;
1340
1341	return vaddr;
1342err:
1343	drm_gem_object_put(obj);
1344
1345	return ERR_PTR(ret);
1346
1347}
1348
1349void msm_gem_kernel_put(struct drm_gem_object *bo,
1350		struct msm_gem_address_space *aspace)
1351{
1352	if (IS_ERR_OR_NULL(bo))
1353		return;
1354
1355	msm_gem_put_vaddr(bo);
1356	msm_gem_unpin_iova(bo, aspace);
1357	drm_gem_object_put(bo);
1358}
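
/*
 * Illustrative sketch (not part of the original file): kernel-internal
 * allocations (ringbuffers, crashdump buffers, etc.) pair msm_gem_kernel_new()
 * with msm_gem_kernel_put().  The helper below is hypothetical; the size and
 * flags are arbitrary example values.
 */
static void __maybe_unused example_kernel_bo(struct msm_gpu *gpu)
{
	struct drm_gem_object *bo = NULL;
	uint64_t iova;
	void *vaddr;

	vaddr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_WC,
				   gpu->aspace, &bo, &iova);
	if (IS_ERR(vaddr))
		return;

	/* ... fill the buffer via 'vaddr', point the GPU at 'iova' ... */

	msm_gem_kernel_put(bo, gpu->aspace);
}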
1359
1360void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1361{
1362	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1363	va_list ap;
1364
1365	if (!fmt)
1366		return;
1367
1368	va_start(ap, fmt);
1369	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1370	va_end(ap);
1371}
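
/*
 * Illustrative sketch (not part of the original file): naming a freshly
 * allocated object so it shows up usefully in the debugfs listing above.
 * The helper below is hypothetical.
 */
static struct drm_gem_object * __maybe_unused example_named_bo(struct drm_device *dev)
{
	struct drm_gem_object *obj = msm_gem_new(dev, SZ_8K, MSM_BO_WC);

	if (!IS_ERR(obj))
		msm_gem_object_set_name(obj, "example:%d", 0);

	return obj;
}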
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2013 Red Hat
   4 * Author: Rob Clark <robdclark@gmail.com>
   5 */
   6
 
 
   7#include <linux/spinlock.h>
   8#include <linux/shmem_fs.h>
   9#include <linux/dma-buf.h>
  10#include <linux/pfn_t.h>
  11
  12#include <drm/drm_prime.h>
  13
  14#include "msm_drv.h"
  15#include "msm_fence.h"
  16#include "msm_gem.h"
  17#include "msm_gpu.h"
  18#include "msm_mmu.h"
  19
  20static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
  21
  22
  23static dma_addr_t physaddr(struct drm_gem_object *obj)
  24{
  25	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  26	struct msm_drm_private *priv = obj->dev->dev_private;
  27	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
  28			priv->vram.paddr;
  29}
  30
  31static bool use_pages(struct drm_gem_object *obj)
  32{
  33	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  34	return !msm_obj->vram_node;
  35}
  36
  37/*
  38 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
  39 * API.  Really GPU cache is out of scope here (handled on cmdstream)
  40 * and all we need to do is invalidate newly allocated pages before
  41 * mapping to CPU as uncached/writecombine.
  42 *
  43 * On top of this, we have the added headache, that depending on
  44 * display generation, the display's iommu may be wired up to either
  45 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
  46 * that here we either have dma-direct or iommu ops.
  47 *
  48 * Let this be a cautionary tail of abstraction gone wrong.
  49 */
  50
  51static void sync_for_device(struct msm_gem_object *msm_obj)
  52{
  53	struct device *dev = msm_obj->base.dev->dev;
  54
  55	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
  56		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
  57			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  58	} else {
  59		dma_map_sg(dev, msm_obj->sgt->sgl,
  60			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 
  61	}
  62}
  63
  64static void sync_for_cpu(struct msm_gem_object *msm_obj)
  65{
  66	struct device *dev = msm_obj->base.dev->dev;
 
 
 
 
 
 
  67
  68	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
  69		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
  70			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  71	} else {
  72		dma_unmap_sg(dev, msm_obj->sgt->sgl,
  73			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
  74	}
  75}
  76
 
 
 
 
 
 
 
 
 
  77/* allocate pages from VRAM carveout, used when no IOMMU: */
  78static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  79{
  80	struct msm_gem_object *msm_obj = to_msm_bo(obj);
  81	struct msm_drm_private *priv = obj->dev->dev_private;
  82	dma_addr_t paddr;
  83	struct page **p;
  84	int ret, i;
  85
  86	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
  87	if (!p)
  88		return ERR_PTR(-ENOMEM);
  89
  90	spin_lock(&priv->vram.lock);
  91	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
  92	spin_unlock(&priv->vram.lock);
  93	if (ret) {
  94		kvfree(p);
  95		return ERR_PTR(ret);
  96	}
  97
  98	paddr = physaddr(obj);
  99	for (i = 0; i < npages; i++) {
 100		p[i] = phys_to_page(paddr);
 101		paddr += PAGE_SIZE;
 102	}
 103
 104	return p;
 105}
 106
 107static struct page **get_pages(struct drm_gem_object *obj)
 108{
 109	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 110
 
 
 111	if (!msm_obj->pages) {
 112		struct drm_device *dev = obj->dev;
 113		struct page **p;
 114		int npages = obj->size >> PAGE_SHIFT;
 115
 116		if (use_pages(obj))
 117			p = drm_gem_get_pages(obj);
 118		else
 119			p = get_pages_vram(obj, npages);
 120
 121		if (IS_ERR(p)) {
 122			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
 123					PTR_ERR(p));
 124			return p;
 125		}
 126
 127		msm_obj->pages = p;
 128
 129		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
 130		if (IS_ERR(msm_obj->sgt)) {
 131			void *ptr = ERR_CAST(msm_obj->sgt);
 132
 133			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
 134			msm_obj->sgt = NULL;
 135			return ptr;
 136		}
 137
 138		/* For non-cached buffers, ensure the new pages are clean
 139		 * because display controller, GPU, etc. are not coherent:
 140		 */
 141		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 142			sync_for_device(msm_obj);
 
 
 143	}
 144
 145	return msm_obj->pages;
 146}
 147
 148static void put_pages_vram(struct drm_gem_object *obj)
 149{
 150	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 151	struct msm_drm_private *priv = obj->dev->dev_private;
 152
 153	spin_lock(&priv->vram.lock);
 154	drm_mm_remove_node(msm_obj->vram_node);
 155	spin_unlock(&priv->vram.lock);
 156
 157	kvfree(msm_obj->pages);
 158}
 159
 160static void put_pages(struct drm_gem_object *obj)
 161{
 162	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 163
 164	if (msm_obj->pages) {
 165		if (msm_obj->sgt) {
 166			/* For non-cached buffers, ensure the new
 167			 * pages are clean because display controller,
 168			 * GPU, etc. are not coherent:
 169			 */
 170			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 171				sync_for_cpu(msm_obj);
 172
 173			sg_free_table(msm_obj->sgt);
 174			kfree(msm_obj->sgt);
 
 175		}
 176
 177		if (use_pages(obj))
 178			drm_gem_put_pages(obj, msm_obj->pages, true, false);
 179		else
 180			put_pages_vram(obj);
 181
 182		msm_obj->pages = NULL;
 
 183	}
 184}
 185
 186struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 
 187{
 188	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 189	struct page **p;
 190
 191	mutex_lock(&msm_obj->lock);
 192
 193	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 194		mutex_unlock(&msm_obj->lock);
 
 195		return ERR_PTR(-EBUSY);
 196	}
 197
 198	p = get_pages(obj);
 199	mutex_unlock(&msm_obj->lock);
 200	return p;
 201}
 202
 203void msm_gem_put_pages(struct drm_gem_object *obj)
 
 
 
 204{
 205	/* when we start tracking the pin count, then do something here */
 
 
 
 
 
 206}
 207
 208int msm_gem_mmap_obj(struct drm_gem_object *obj,
 209		struct vm_area_struct *vma)
 210{
 211	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 212
 213	vma->vm_flags &= ~VM_PFNMAP;
 214	vma->vm_flags |= VM_MIXEDMAP;
 
 
 215
 216	if (msm_obj->flags & MSM_BO_WC) {
 217		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 218	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
 219		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 220	} else {
 221		/*
 222		 * Shunt off cached objs to shmem file so they have their own
 223		 * address_space (so unmap_mapping_range does what we want,
 224		 * in particular in the case of mmap'd dmabufs)
 225		 */
 226		fput(vma->vm_file);
 227		get_file(obj->filp);
 228		vma->vm_pgoff = 0;
 229		vma->vm_file  = obj->filp;
 230
 231		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 232	}
 
 
 
 233
 234	return 0;
 235}
 236
 237int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 238{
 239	int ret;
 
 
 
 240
 241	ret = drm_gem_mmap(filp, vma);
 242	if (ret) {
 243		DBG("mmap failed: %d", ret);
 244		return ret;
 245	}
 246
 247	return msm_gem_mmap_obj(vma->vm_private_data, vma);
 248}
 249
 250vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 251{
 252	struct vm_area_struct *vma = vmf->vma;
 253	struct drm_gem_object *obj = vma->vm_private_data;
 254	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 255	struct page **pages;
 256	unsigned long pfn;
 257	pgoff_t pgoff;
 258	int err;
 259	vm_fault_t ret;
 260
 261	/*
 262	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 263	 * a reference on obj. So, we dont need to hold one here.
 264	 */
 265	err = mutex_lock_interruptible(&msm_obj->lock);
 266	if (err) {
 267		ret = VM_FAULT_NOPAGE;
 268		goto out;
 269	}
 270
 271	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
 272		mutex_unlock(&msm_obj->lock);
 273		return VM_FAULT_SIGBUS;
 274	}
 275
 276	/* make sure we have pages attached now */
 277	pages = get_pages(obj);
 278	if (IS_ERR(pages)) {
 279		ret = vmf_error(PTR_ERR(pages));
 280		goto out_unlock;
 281	}
 282
 283	/* We don't use vmf->pgoff since that has the fake offset: */
 284	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 285
 286	pfn = page_to_pfn(pages[pgoff]);
 287
 288	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
 289			pfn, pfn << PAGE_SHIFT);
 290
 291	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 
 292out_unlock:
 293	mutex_unlock(&msm_obj->lock);
 294out:
 295	return ret;
 296}
 297
 298/** get mmap offset */
 299static uint64_t mmap_offset(struct drm_gem_object *obj)
 300{
 301	struct drm_device *dev = obj->dev;
 302	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 303	int ret;
 304
 305	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 306
 307	/* Make it mmapable */
 308	ret = drm_gem_create_mmap_offset(obj);
 309
 310	if (ret) {
 311		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
 312		return 0;
 313	}
 314
 315	return drm_vma_node_offset_addr(&obj->vma_node);
 316}
 317
 318uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 319{
 320	uint64_t offset;
 321	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 322
 323	mutex_lock(&msm_obj->lock);
 324	offset = mmap_offset(obj);
 325	mutex_unlock(&msm_obj->lock);
 326	return offset;
 327}
 328
 329static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 330		struct msm_gem_address_space *aspace)
 331{
 332	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 333	struct msm_gem_vma *vma;
 334
 335	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 336
 337	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 338	if (!vma)
 339		return ERR_PTR(-ENOMEM);
 340
 341	vma->aspace = aspace;
 342
 343	list_add_tail(&vma->list, &msm_obj->vmas);
 344
 345	return vma;
 346}
 347
 348static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 349		struct msm_gem_address_space *aspace)
 350{
 351	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 352	struct msm_gem_vma *vma;
 353
 354	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 355
 356	list_for_each_entry(vma, &msm_obj->vmas, list) {
 357		if (vma->aspace == aspace)
 358			return vma;
 359	}
 360
 361	return NULL;
 362}
 363
 364static void del_vma(struct msm_gem_vma *vma)
 365{
 366	if (!vma)
 367		return;
 368
 369	list_del(&vma->list);
 370	kfree(vma);
 371}
 372
 373/* Called with msm_obj->lock locked */
 
 
 
 
 
 374static void
 375put_iova(struct drm_gem_object *obj)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 376{
 377	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 378	struct msm_gem_vma *vma, *tmp;
 379
 380	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 381
 382	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 383		if (vma->aspace) {
 384			msm_gem_purge_vma(vma->aspace, vma);
 385			msm_gem_close_vma(vma->aspace, vma);
 386		}
 387		del_vma(vma);
 388	}
 389}
 390
 391static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 392		struct msm_gem_address_space *aspace, uint64_t *iova,
 393		u64 range_start, u64 range_end)
 394{
 395	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 396	struct msm_gem_vma *vma;
 397	int ret = 0;
 398
 399	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 400
 401	vma = lookup_vma(obj, aspace);
 402
 403	if (!vma) {
 
 
 404		vma = add_vma(obj, aspace);
 405		if (IS_ERR(vma))
 406			return PTR_ERR(vma);
 407
 408		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
 409			range_start, range_end);
 410		if (ret) {
 411			del_vma(vma);
 412			return ret;
 413		}
 
 
 
 414	}
 415
 416	*iova = vma->iova;
 417	return 0;
 418}
 419
 420static int msm_gem_pin_iova(struct drm_gem_object *obj,
 421		struct msm_gem_address_space *aspace)
 422{
 423	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 424	struct msm_gem_vma *vma;
 425	struct page **pages;
 426	int prot = IOMMU_READ;
 427
 428	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
 429		prot |= IOMMU_WRITE;
 430
 431	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 432		prot |= IOMMU_PRIV;
 433
 434	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 435
 436	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 437		return -EBUSY;
 438
 439	vma = lookup_vma(obj, aspace);
 440	if (WARN_ON(!vma))
 441		return -EINVAL;
 442
 443	pages = get_pages(obj);
 444	if (IS_ERR(pages))
 445		return PTR_ERR(pages);
 446
 447	return msm_gem_map_vma(aspace, vma, prot,
 448			msm_obj->sgt, obj->size >> PAGE_SHIFT);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 449}
 450
 451/*
 452 * get iova and pin it. Should have a matching put
 453 * limits iova to specified range (in pages)
 454 */
 455int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 456		struct msm_gem_address_space *aspace, uint64_t *iova,
 457		u64 range_start, u64 range_end)
 458{
 459	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 460	u64 local;
 461	int ret;
 462
 463	mutex_lock(&msm_obj->lock);
 
 
 464
 465	ret = msm_gem_get_iova_locked(obj, aspace, &local,
 466		range_start, range_end);
 467
 468	if (!ret)
 469		ret = msm_gem_pin_iova(obj, aspace);
 470
 471	if (!ret)
 472		*iova = local;
 473
 474	mutex_unlock(&msm_obj->lock);
 475	return ret;
 476}
 477
 478/* get iova and pin it. Should have a matching put */
 479int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 480		struct msm_gem_address_space *aspace, uint64_t *iova)
 481{
 482	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
 483}
 484
 485/*
 486 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 487 * valid for the life of the object
 488 */
 489int msm_gem_get_iova(struct drm_gem_object *obj,
 490		struct msm_gem_address_space *aspace, uint64_t *iova)
 491{
 492	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 493	int ret;
 494
 495	mutex_lock(&msm_obj->lock);
 496	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
 497	mutex_unlock(&msm_obj->lock);
 
 
 
 
 
 498
 499	return ret;
 500}
 501
 502/* get iova without taking a reference, used in places where you have
 503 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 504 */
 505uint64_t msm_gem_iova(struct drm_gem_object *obj,
 506		struct msm_gem_address_space *aspace)
 507{
 508	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 509	struct msm_gem_vma *vma;
 510
 511	mutex_lock(&msm_obj->lock);
 512	vma = lookup_vma(obj, aspace);
 513	mutex_unlock(&msm_obj->lock);
 514	WARN_ON(!vma);
 
 
 
 
 
 
 
 
 
 
 515
 516	return vma ? vma->iova : 0;
 517}
 518
 519/*
 520 * Unpin a iova by updating the reference counts. The memory isn't actually
 521 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 522 * to get rid of it
 523 */
 524void msm_gem_unpin_iova(struct drm_gem_object *obj,
 525		struct msm_gem_address_space *aspace)
 526{
 527	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 528	struct msm_gem_vma *vma;
 529
 530	mutex_lock(&msm_obj->lock);
 531	vma = lookup_vma(obj, aspace);
 532
 533	if (!WARN_ON(!vma))
 534		msm_gem_unmap_vma(aspace, vma);
 535
 536	mutex_unlock(&msm_obj->lock);
 537}
 538
 539int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 540		struct drm_mode_create_dumb *args)
 541{
 542	args->pitch = align_pitch(args->width, args->bpp);
 543	args->size  = PAGE_ALIGN(args->pitch * args->height);
 544	return msm_gem_new_handle(dev, file, args->size,
 545			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
 546}
 547
 548int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 549		uint32_t handle, uint64_t *offset)
 550{
 551	struct drm_gem_object *obj;
 552	int ret = 0;
 553
 554	/* GEM does all our handle to object mapping */
 555	obj = drm_gem_object_lookup(file, handle);
 556	if (obj == NULL) {
 557		ret = -ENOENT;
 558		goto fail;
 559	}
 560
 561	*offset = msm_gem_mmap_offset(obj);
 562
 563	drm_gem_object_put(obj);
 564
 565fail:
 566	return ret;
 567}
 568
 569static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 570{
 571	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
 572	int ret = 0;
 573
 
 
 574	if (obj->import_attach)
 575		return ERR_PTR(-ENODEV);
 576
 577	mutex_lock(&msm_obj->lock);
 
 
 578
 579	if (WARN_ON(msm_obj->madv > madv)) {
 580		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 581			msm_obj->madv, madv);
 582		mutex_unlock(&msm_obj->lock);
 583		return ERR_PTR(-EBUSY);
 584	}
 585
 586	/* increment vmap_count *before* vmap() call, so shrinker can
 587	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
 588	 * This guarantees that we won't try to msm_gem_vunmap() this
 589	 * same object from within the vmap() call (while we already
 590	 * hold msm_obj->lock)
 591	 */
 592	msm_obj->vmap_count++;
 593
 594	if (!msm_obj->vaddr) {
 595		struct page **pages = get_pages(obj);
 596		if (IS_ERR(pages)) {
 597			ret = PTR_ERR(pages);
 598			goto fail;
 599		}
 600		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 601				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 602		if (msm_obj->vaddr == NULL) {
 603			ret = -ENOMEM;
 604			goto fail;
 605		}
 606	}
 607
 608	mutex_unlock(&msm_obj->lock);
 609	return msm_obj->vaddr;
 610
 611fail:
 612	msm_obj->vmap_count--;
 613	mutex_unlock(&msm_obj->lock);
 614	return ERR_PTR(ret);
 615}
 616
 
 
 
 
 
 617void *msm_gem_get_vaddr(struct drm_gem_object *obj)
 618{
 619	return get_vaddr(obj, MSM_MADV_WILLNEED);
 
 
 
 
 
 
 620}
 621
 622/*
 623 * Don't use this!  It is for the very special case of dumping
 624 * submits from GPU hangs or faults, were the bo may already
 625 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 626 * active list.
 627 */
 628void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
 629{
 630	return get_vaddr(obj, __MSM_MADV_PURGED);
 631}
 632
 633void msm_gem_put_vaddr(struct drm_gem_object *obj)
 634{
 635	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 636
 637	mutex_lock(&msm_obj->lock);
 638	WARN_ON(msm_obj->vmap_count < 1);
 
 639	msm_obj->vmap_count--;
 640	mutex_unlock(&msm_obj->lock);
 
 
 
 
 
 
 
 641}
 642
 643/* Update madvise status, returns true if not purged, else
 644 * false or -errno.
 645 */
 646int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 647{
 
 648	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 649
 650	mutex_lock(&msm_obj->lock);
 651
 652	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 653
 654	if (msm_obj->madv != __MSM_MADV_PURGED)
 655		msm_obj->madv = madv;
 656
 657	madv = msm_obj->madv;
 658
 659	mutex_unlock(&msm_obj->lock);
 
 
 
 
 
 
 
 660
 661	return (madv != __MSM_MADV_PURGED);
 662}
 663
 664void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 665{
 666	struct drm_device *dev = obj->dev;
 
 667	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 668
 669	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 670	WARN_ON(!is_purgeable(msm_obj));
 671	WARN_ON(obj->import_attach);
 672
 673	mutex_lock_nested(&msm_obj->lock, subclass);
 
 674
 675	put_iova(obj);
 676
 677	msm_gem_vunmap_locked(obj);
 678
 679	put_pages(obj);
 680
 
 
 
 
 681	msm_obj->madv = __MSM_MADV_PURGED;
 
 682
 683	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
 684	drm_gem_free_mmap_offset(obj);
 685
 686	/* Our goal here is to return as much of the memory as
 687	 * is possible back to the system as we are called from OOM.
 688	 * To do this we must instruct the shmfs to drop all of its
 689	 * backing pages, *now*.
 690	 */
 691	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
 692
 693	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 694			0, (loff_t)-1);
 695
 696	mutex_unlock(&msm_obj->lock);
 697}
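/*
 * Illustrative note (an assumption, not from this file): purging is
 * normally driven by the memory shrinker, which under this locking
 * scheme would do roughly:
 *
 *	if (is_purgeable(msm_obj))
 *		msm_gem_purge(&msm_obj->base, OBJ_LOCK_SHRINKER);
 *
 * where the OBJ_LOCK_SHRINKER subclass feeds the mutex_lock_nested()
 * call above.
 */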
 698
 699static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 700{
 701	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 702
 703	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 704
 705	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 706		return;
 707
 708	vunmap(msm_obj->vaddr);
 709	msm_obj->vaddr = NULL;
 710}
 711
 712void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 713{
 714	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 715
 716	mutex_lock_nested(&msm_obj->lock, subclass);
 717	msm_gem_vunmap_locked(obj);
 718	mutex_unlock(&msm_obj->lock);
 719}
 720
 721/* must be called before _move_to_active().. */
 722int msm_gem_sync_object(struct drm_gem_object *obj,
 723		struct msm_fence_context *fctx, bool exclusive)
 724{
 725	struct dma_resv_list *fobj;
 726	struct dma_fence *fence;
 727	int i, ret;
 728
 729	fobj = dma_resv_get_list(obj->resv);
 730	if (!fobj || (fobj->shared_count == 0)) {
 731		fence = dma_resv_get_excl(obj->resv);
 732		/* don't need to wait on our own fences, since ring is fifo */
 733		if (fence && (fence->context != fctx->context)) {
 734			ret = dma_fence_wait(fence, true);
 735			if (ret)
 736				return ret;
 737		}
 738	}
 739
 740	if (!exclusive || !fobj)
 741		return 0;
 742
 743	for (i = 0; i < fobj->shared_count; i++) {
 744		fence = rcu_dereference_protected(fobj->shared[i],
 745						dma_resv_held(obj->resv));
 746		if (fence->context != fctx->context) {
 747			ret = dma_fence_wait(fence, true);
 748			if (ret)
 749				return ret;
 750		}
 751	}
 752
 753	return 0;
 754}
 755
 756void msm_gem_move_to_active(struct drm_gem_object *obj,
 757		struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
 758{
 759	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 760	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 761	msm_obj->gpu = gpu;
 762	if (exclusive)
 763		dma_resv_add_excl_fence(obj->resv, fence);
 764	else
 765		dma_resv_add_shared_fence(obj->resv, fence);
 766	list_del_init(&msm_obj->mm_list);
 767	list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 768}
 769
 770void msm_gem_move_to_inactive(struct drm_gem_object *obj)
 771{
 772	struct drm_device *dev = obj->dev;
 773	struct msm_drm_private *priv = dev->dev_private;
 774	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 775
 776	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 777
 778	msm_obj->gpu = NULL;
 779	list_del_init(&msm_obj->mm_list);
 780	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
 781}
 782
 783int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
 784{
 785	bool write = !!(op & MSM_PREP_WRITE);
 786	unsigned long remain =
 787		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
 788	long ret;
 789
 790	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
 791						  true,  remain);
 792	if (ret == 0)
 793		return remain == 0 ? -EBUSY : -ETIMEDOUT;
 794	else if (ret < 0)
 795		return ret;
 796
 797	/* TODO cache maintenance */
 798
 799	return 0;
 800}
 801
 802int msm_gem_cpu_fini(struct drm_gem_object *obj)
 803{
 804	/* TODO cache maintenance */
 805	return 0;
 806}
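/*
 * Illustrative sketch only: preparing a BO for CPU read access with a
 * hypothetical 1 ms timeout, then finishing CPU access afterwards:
 *
 *	ktime_t timeout = ktime_add_ms(ktime_get(), 1);
 *	int ret = msm_gem_cpu_prep(obj, MSM_PREP_READ, &timeout);
 *	if (ret)
 *		return ret;
 *	... CPU reads ...
 *	msm_gem_cpu_fini(obj);
 */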
 807
 808#ifdef CONFIG_DEBUG_FS
 809static void describe_fence(struct dma_fence *fence, const char *type,
 810		struct seq_file *m)
 811{
 812	if (!dma_fence_is_signaled(fence))
 813		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
 814				fence->ops->get_driver_name(fence),
 815				fence->ops->get_timeline_name(fence),
 816				fence->seqno);
 817}
 818
 819void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 820{
 821	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 822	struct dma_resv *robj = obj->resv;
 823	struct dma_resv_list *fobj;
 824	struct dma_fence *fence;
 825	struct msm_gem_vma *vma;
 826	uint64_t off = drm_vma_node_start(&obj->vma_node);
 827	const char *madv;
 828
 829	mutex_lock(&msm_obj->lock);
 830
 831	switch (msm_obj->madv) {
 832	case __MSM_MADV_PURGED:
 833		madv = " purged";
 834		break;
 835	case MSM_MADV_DONTNEED:
 836		madv = " purgeable";
 837		break;
 838	case MSM_MADV_WILLNEED:
 839	default:
 840		madv = "";
 841		break;
 842	}
 843
 844	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
 845			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 846			obj->name, kref_read(&obj->refcount),
 847			off, msm_obj->vaddr);
 848
 849	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
 850
 851	if (!list_empty(&msm_obj->vmas)) {
 852
 853		seq_puts(m, "      vmas:");
 854
 855		list_for_each_entry(vma, &msm_obj->vmas, list)
 856			seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
 857				vma->aspace != NULL ? vma->aspace->name : NULL,
 858				vma->iova, vma->mapped ? "mapped" : "unmapped",
 859				vma->inuse);
 860
 861		seq_puts(m, "\n");
 862	}
 863
 864	rcu_read_lock();
 865	fobj = rcu_dereference(robj->fence);
 866	if (fobj) {
 867		unsigned int i, shared_count = fobj->shared_count;
 868
 869		for (i = 0; i < shared_count; i++) {
 870			fence = rcu_dereference(fobj->shared[i]);
 871			describe_fence(fence, "Shared", m);
 872		}
 873	}
 874
 875	fence = rcu_dereference(robj->fence_excl);
 876	if (fence)
 877		describe_fence(fence, "Exclusive", m);
 878	rcu_read_unlock();
 879
 880	mutex_unlock(&msm_obj->lock);
 881}
 882
 883void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
 884{
 885	struct msm_gem_object *msm_obj;
 886	int count = 0;
 887	size_t size = 0;
 888
 889	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
 890	list_for_each_entry(msm_obj, list, mm_list) {
 891		struct drm_gem_object *obj = &msm_obj->base;
 892		seq_puts(m, "   ");
 893		msm_gem_describe(obj, m);
 894		count++;
 895		size += obj->size;
 896	}
 897
 898	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
 899}
 900#endif
 901
 902/* don't call directly!  Use drm_gem_object_put_locked() and friends */
 903void msm_gem_free_object(struct drm_gem_object *obj)
 904{
 905	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 906	struct drm_device *dev = obj->dev;
 907	struct msm_drm_private *priv = dev->dev_private;
 908
 909	if (llist_add(&msm_obj->freed, &priv->free_list))
 910		queue_work(priv->wq, &priv->free_work);
 911}
 912
 913static void free_object(struct msm_gem_object *msm_obj)
 914{
 915	struct drm_gem_object *obj = &msm_obj->base;
 916	struct drm_device *dev = obj->dev;
 917
 918	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 919
 920	/* object should not be on active list: */
 921	WARN_ON(is_active(msm_obj));
 922
 923	list_del(&msm_obj->mm_list);
 924
 925	mutex_lock(&msm_obj->lock);
 926
 927	put_iova(obj);
 928
 929	if (obj->import_attach) {
 930		WARN_ON(msm_obj->vaddr);
 931
 932		/* Don't drop the pages for imported dmabuf, as they are not
 933		 * ours, just free the array we allocated:
 934		 */
 935		if (msm_obj->pages)
 936			kvfree(msm_obj->pages);
 937
 938		drm_prime_gem_destroy(obj, msm_obj->sgt);
 939	} else {
 940		msm_gem_vunmap_locked(obj);
 941		put_pages(obj);
 942	}
 943
 944	drm_gem_object_release(obj);
 945
 946	mutex_unlock(&msm_obj->lock);
 947	kfree(msm_obj);
 948}
 949
 950void msm_gem_free_work(struct work_struct *work)
 951{
 952	struct msm_drm_private *priv =
 953		container_of(work, struct msm_drm_private, free_work);
 954	struct drm_device *dev = priv->dev;
 955	struct llist_node *freed;
 956	struct msm_gem_object *msm_obj, *next;
 957
 958	while ((freed = llist_del_all(&priv->free_list))) {
 959
 960		mutex_lock(&dev->struct_mutex);
 961
 962		llist_for_each_entry_safe(msm_obj, next,
 963					  freed, freed)
 964			free_object(msm_obj);
 965
 966		mutex_unlock(&dev->struct_mutex);
 967
 968		if (need_resched())
 969			break;
 970	}
 971}
 972
 973/* convenience method to construct a GEM buffer object, and userspace handle */
 974int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
 975		uint32_t size, uint32_t flags, uint32_t *handle,
 976		char *name)
 977{
 978	struct drm_gem_object *obj;
 979	int ret;
 980
 981	obj = msm_gem_new(dev, size, flags);
 982
 983	if (IS_ERR(obj))
 984		return PTR_ERR(obj);
 985
 986	if (name)
 987		msm_gem_object_set_name(obj, "%s", name);
 988
 989	ret = drm_gem_handle_create(file, obj, handle);
 990
 991	/* drop reference from allocate - handle holds it now */
 992	drm_gem_object_put(obj);
 993
 994	return ret;
 995}
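/*
 * Illustrative sketch only ("scratch" is just a made-up debug name):
 * allocating a page-sized write-combined BO and returning its handle
 * to userspace:
 *
 *	uint32_t handle;
 *	int ret = msm_gem_new_handle(dev, file, SZ_4K, MSM_BO_WC,
 *			&handle, "scratch");
 *	if (ret)
 *		return ret;
 */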
 996
 997static int msm_gem_new_impl(struct drm_device *dev,
 998		uint32_t size, uint32_t flags,
 999		struct drm_gem_object **obj)
1000{
1001	struct msm_gem_object *msm_obj;
1002
1003	switch (flags & MSM_BO_CACHE_MASK) {
1004	case MSM_BO_UNCACHED:
1005	case MSM_BO_CACHED:
1006	case MSM_BO_WC:
1007		break;
1008	default:
1009		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
1010				(flags & MSM_BO_CACHE_MASK));
1011		return -EINVAL;
1012	}
1013
1014	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
1015	if (!msm_obj)
1016		return -ENOMEM;
1017
1018	mutex_init(&msm_obj->lock);
1019
1020	msm_obj->flags = flags;
1021	msm_obj->madv = MSM_MADV_WILLNEED;
1022
1023	INIT_LIST_HEAD(&msm_obj->submit_entry);
1024	INIT_LIST_HEAD(&msm_obj->vmas);
1025
1026	*obj = &msm_obj->base;
1027
1028	return 0;
1029}
1030
1031static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
1032		uint32_t size, uint32_t flags, bool struct_mutex_locked)
1033{
1034	struct msm_drm_private *priv = dev->dev_private;
1035	struct msm_gem_object *msm_obj;
1036	struct drm_gem_object *obj = NULL;
1037	bool use_vram = false;
1038	int ret;
1039
1040	size = PAGE_ALIGN(size);
1041
1042	if (!msm_use_mmu(dev))
1043		use_vram = true;
1044	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
1045		use_vram = true;
1046
1047	if (WARN_ON(use_vram && !priv->vram.size))
1048		return ERR_PTR(-EINVAL);
1049
1050	/* Disallow zero sized objects as they make the underlying
1051	 * infrastructure grumpy
1052	 */
1053	if (size == 0)
1054		return ERR_PTR(-EINVAL);
1055
1056	ret = msm_gem_new_impl(dev, size, flags, &obj);
1057	if (ret)
1058		goto fail;
1059
1060	msm_obj = to_msm_bo(obj);
1061
1062	if (use_vram) {
1063		struct msm_gem_vma *vma;
1064		struct page **pages;
1065
1066		mutex_lock(&msm_obj->lock);
1067
1068		vma = add_vma(obj, NULL);
1069		mutex_unlock(&msm_obj->lock);
1070		if (IS_ERR(vma)) {
1071			ret = PTR_ERR(vma);
1072			goto fail;
1073		}
1074
1075		to_msm_bo(obj)->vram_node = &vma->node;
1076
1077		drm_gem_private_object_init(dev, obj, size);
1078
1079		pages = get_pages(obj);
1080		if (IS_ERR(pages)) {
1081			ret = PTR_ERR(pages);
1082			goto fail;
1083		}
1084
1085		vma->iova = physaddr(obj);
1086	} else {
1087		ret = drm_gem_object_init(dev, obj, size);
1088		if (ret)
1089			goto fail;
1090		/*
1091		 * Our buffers are kept pinned, so allocating them from the
1092		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
1093		 * See comments above new_inode() for why this is required _and_
1094		 * expected if you're going to pin these pages.
1095		 */
1096		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
1097	}
1098
1099	if (struct_mutex_locked) {
1100		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1101		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1102	} else {
1103		mutex_lock(&dev->struct_mutex);
1104		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1105		mutex_unlock(&dev->struct_mutex);
1106	}
1107
1108	return obj;
1109
1110fail:
1111	drm_gem_object_put(obj);
1112	return ERR_PTR(ret);
1113}
1114
1115struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
1116		uint32_t size, uint32_t flags)
1117{
1118	return _msm_gem_new(dev, size, flags, true);
1119}
1120
1121struct drm_gem_object *msm_gem_new(struct drm_device *dev,
1122		uint32_t size, uint32_t flags)
1123{
1124	return _msm_gem_new(dev, size, flags, false);
1125}
1126
1127struct drm_gem_object *msm_gem_import(struct drm_device *dev,
1128		struct dma_buf *dmabuf, struct sg_table *sgt)
1129{
1130	struct msm_drm_private *priv = dev->dev_private;
1131	struct msm_gem_object *msm_obj;
1132	struct drm_gem_object *obj;
1133	uint32_t size;
1134	int ret, npages;
1135
1136	/* if we don't have IOMMU, don't bother pretending we can import: */
1137	if (!msm_use_mmu(dev)) {
1138		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
1139		return ERR_PTR(-EINVAL);
1140	}
1141
1142	size = PAGE_ALIGN(dmabuf->size);
1143
1144	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
1145	if (ret)
1146		goto fail;
1147
1148	drm_gem_private_object_init(dev, obj, size);
1149
1150	npages = size / PAGE_SIZE;
1151
1152	msm_obj = to_msm_bo(obj);
1153	mutex_lock(&msm_obj->lock);
1154	msm_obj->sgt = sgt;
1155	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
1156	if (!msm_obj->pages) {
1157		mutex_unlock(&msm_obj->lock);
1158		ret = -ENOMEM;
1159		goto fail;
1160	}
1161
1162	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
1163	if (ret) {
1164		mutex_unlock(&msm_obj->lock);
1165		goto fail;
1166	}
1167
1168	mutex_unlock(&msm_obj->lock);
1169
1170	mutex_lock(&dev->struct_mutex);
1171	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
1172	mutex_unlock(&dev->struct_mutex);
1173
1174	return obj;
1175
1176fail:
1177	drm_gem_object_put(obj);
1178	return ERR_PTR(ret);
1179}
1180
1181static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1182		uint32_t flags, struct msm_gem_address_space *aspace,
1183		struct drm_gem_object **bo, uint64_t *iova, bool locked)
1184{
1185	void *vaddr;
1186	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
1187	int ret;
1188
1189	if (IS_ERR(obj))
1190		return ERR_CAST(obj);
1191
1192	if (iova) {
1193		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
1194		if (ret)
1195			goto err;
1196	}
1197
1198	vaddr = msm_gem_get_vaddr(obj);
1199	if (IS_ERR(vaddr)) {
1200		msm_gem_unpin_iova(obj, aspace);
1201		ret = PTR_ERR(vaddr);
1202		goto err;
1203	}
1204
1205	if (bo)
1206		*bo = obj;
1207
1208	return vaddr;
1209err:
1210	if (locked)
1211		drm_gem_object_put_locked(obj);
1212	else
1213		drm_gem_object_put(obj);
1214
1215	return ERR_PTR(ret);
1216
1217}
1218
1219void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
1220		uint32_t flags, struct msm_gem_address_space *aspace,
1221		struct drm_gem_object **bo, uint64_t *iova)
1222{
1223	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
1224}
1225
1226void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
1227		uint32_t flags, struct msm_gem_address_space *aspace,
1228		struct drm_gem_object **bo, uint64_t *iova)
1229{
1230	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
1231}
1232
1233void msm_gem_kernel_put(struct drm_gem_object *bo,
1234		struct msm_gem_address_space *aspace, bool locked)
1235{
1236	if (IS_ERR_OR_NULL(bo))
1237		return;
1238
1239	msm_gem_put_vaddr(bo);
1240	msm_gem_unpin_iova(bo, aspace);
1241
1242	if (locked)
1243		drm_gem_object_put_locked(bo);
1244	else
1245		drm_gem_object_put(bo);
1246}
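/*
 * Illustrative sketch only ("aspace" stands in for a valid
 * msm_gem_address_space, e.g. the GPU's): msm_gem_kernel_new()
 * allocates a BO, pins an iova for it and maps it into the kernel,
 * and msm_gem_kernel_put() undoes all of that:
 *
 *	struct drm_gem_object *bo;
 *	uint64_t iova;
 *	void *ptr = msm_gem_kernel_new(dev, SZ_8K, MSM_BO_WC,
 *			aspace, &bo, &iova);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	...
 *	msm_gem_kernel_put(bo, aspace, false);
 */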
1247
1248void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
1249{
1250	struct msm_gem_object *msm_obj = to_msm_bo(bo);
1251	va_list ap;
1252
1253	if (!fmt)
1254		return;
1255
1256	va_start(ap, fmt);
1257	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
1258	va_end(ap);
1259}
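/*
 * Illustrative sketch only ('id' is hypothetical): attaching a short
 * debug name, which shows up in the debugfs describe output above:
 *
 *	msm_gem_object_set_name(obj, "ring%d", id);
 */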