v6.9.4
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/ktime.h>
  29#include <linux/module.h>
  30#include <linux/pagemap.h>
  31#include <linux/pci.h>
  32#include <linux/dma-buf.h>
  33
  34#include <drm/amdgpu_drm.h>
  35#include <drm/drm_drv.h>
  36#include <drm/drm_exec.h>
  37#include <drm/drm_gem_ttm_helper.h>
  38#include <drm/ttm/ttm_tt.h>
  39
  40#include "amdgpu.h"
  41#include "amdgpu_display.h"
  42#include "amdgpu_dma_buf.h"
  43#include "amdgpu_hmm.h"
  44#include "amdgpu_xgmi.h"
  45
  46static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
  47
  48static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
  49{
  50	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
  51	struct drm_device *ddev = bo->base.dev;
  52	vm_fault_t ret;
  53	int idx;
  54
  55	ret = ttm_bo_vm_reserve(bo, vmf);
  56	if (ret)
  57		return ret;
  58
  59	if (drm_dev_enter(ddev, &idx)) {
  60		ret = amdgpu_bo_fault_reserve_notify(bo);
  61		if (ret) {
  62			drm_dev_exit(idx);
  63			goto unlock;
  64		}
  65
  66		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
  67					       TTM_BO_VM_NUM_PREFAULT);
  68
  69		drm_dev_exit(idx);
  70	} else {
  71		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
  72	}
  73	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
  74		return ret;
  75
  76unlock:
  77	dma_resv_unlock(bo->base.resv);
  78	return ret;
  79}
  80
  81static const struct vm_operations_struct amdgpu_gem_vm_ops = {
  82	.fault = amdgpu_gem_fault,
  83	.open = ttm_bo_vm_open,
  84	.close = ttm_bo_vm_close,
  85	.access = ttm_bo_vm_access
  86};
  87
  88static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
  89{
  90	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
  91
  92	if (robj) {
  93		amdgpu_hmm_unregister(robj);
  94		amdgpu_bo_unref(&robj);
  95	}
  96}
  97
  98int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
  99			     int alignment, u32 initial_domain,
 100			     u64 flags, enum ttm_bo_type type,
 101			     struct dma_resv *resv,
 102			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
 103{
 104	struct amdgpu_bo *bo;
 105	struct amdgpu_bo_user *ubo;
 106	struct amdgpu_bo_param bp;
 107	int r;
 108
 109	memset(&bp, 0, sizeof(bp));
 110	*obj = NULL;
 111
 112	bp.size = size;
 113	bp.byte_align = alignment;
 114	bp.type = type;
 115	bp.resv = resv;
 116	bp.preferred_domain = initial_domain;
 117	bp.flags = flags;
 118	bp.domain = initial_domain;
 119	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 120	bp.xcp_id_plus1 = xcp_id_plus1;
 121
 122	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 123	if (r)
 124		return r;
 125
 126	bo = &ubo->bo;
 127	*obj = &bo->tbo.base;
 128	(*obj)->funcs = &amdgpu_gem_object_funcs;
 129
 130	return 0;
 131}
 132
 133void amdgpu_gem_force_release(struct amdgpu_device *adev)
 134{
 135	struct drm_device *ddev = adev_to_drm(adev);
 136	struct drm_file *file;
 137
 138	mutex_lock(&ddev->filelist_mutex);
 139
 140	list_for_each_entry(file, &ddev->filelist, lhead) {
 141		struct drm_gem_object *gobj;
 142		int handle;
 143
 144		WARN_ONCE(1, "Still active user space clients!\n");
 145		spin_lock(&file->table_lock);
 146		idr_for_each_entry(&file->object_idr, gobj, handle) {
 147			WARN_ONCE(1, "And also active allocations!\n");
 148			drm_gem_object_put(gobj);
 149		}
 150		idr_destroy(&file->object_idr);
 151		spin_unlock(&file->table_lock);
 152	}
 153
 154	mutex_unlock(&ddev->filelist_mutex);
 155}
 156
  157/*
  158 * Called from drm_gem_handle_create, which appears in both the new and
  159 * open ioctl cases.
  160 */
 161static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 162				  struct drm_file *file_priv)
 163{
 164	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
 165	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 166	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 167	struct amdgpu_vm *vm = &fpriv->vm;
 168	struct amdgpu_bo_va *bo_va;
 169	struct mm_struct *mm;
 170	int r;
 171
 172	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
 173	if (mm && mm != current->mm)
 174		return -EPERM;
 175
 176	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
 177	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 178		return -EPERM;
 179
 180	r = amdgpu_bo_reserve(abo, false);
 181	if (r)
 182		return r;
 183
 184	bo_va = amdgpu_vm_bo_find(vm, abo);
 185	if (!bo_va)
 186		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 187	else
 188		++bo_va->ref_count;
 189	amdgpu_bo_unreserve(abo);
 190
 191	/* Validate and add eviction fence to DMABuf imports with dynamic
 192	 * attachment in compute VMs. Re-validation will be done by
 193	 * amdgpu_vm_validate. Fences are on the reservation shared with the
 194	 * export, which is currently required to be validated and fenced
 195	 * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
 196	 *
 197	 * Nested locking below for the case that a GEM object is opened in
 198	 * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
 199	 * but not for export, this is a different lock class that cannot lead to
 200	 * circular lock dependencies.
 201	 */
 202	if (!vm->is_compute_context || !vm->process_info)
 203		return 0;
 204	if (!obj->import_attach ||
 205	    !dma_buf_is_dynamic(obj->import_attach->dmabuf))
 206		return 0;
 207	mutex_lock_nested(&vm->process_info->lock, 1);
 208	if (!WARN_ON(!vm->process_info->eviction_fence)) {
 209		r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
 210							&vm->process_info->eviction_fence->base);
 211		if (r) {
 212			struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
 213
 214			dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
 215			if (ti) {
 216				dev_warn(adev->dev, "pid %d\n", ti->pid);
 217				amdgpu_vm_put_task_info(ti);
 218			}
 219		}
 220	}
 221	mutex_unlock(&vm->process_info->lock);
 222
 223	return r;
 224}
 225
 226static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 227				    struct drm_file *file_priv)
 228{
 229	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 230	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 231	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 232	struct amdgpu_vm *vm = &fpriv->vm;
 233
 234	struct dma_fence *fence = NULL;
 235	struct amdgpu_bo_va *bo_va;
 236	struct drm_exec exec;
 237	long r;
 238
 239	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 240	drm_exec_until_all_locked(&exec) {
 241		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
 242		drm_exec_retry_on_contention(&exec);
 243		if (unlikely(r))
 244			goto out_unlock;
 245
 246		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 247		drm_exec_retry_on_contention(&exec);
 248		if (unlikely(r))
 249			goto out_unlock;
 250	}
 251
 252	bo_va = amdgpu_vm_bo_find(vm, bo);
 253	if (!bo_va || --bo_va->ref_count)
 254		goto out_unlock;
 255
 256	amdgpu_vm_bo_del(adev, bo_va);
 257	if (!amdgpu_vm_ready(vm))
 258		goto out_unlock;
 259
 260	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 261	if (unlikely(r < 0))
 262		dev_err(adev->dev, "failed to clear page "
 263			"tables on GEM object close (%ld)\n", r);
 264	if (r || !fence)
 265		goto out_unlock;
 266
 267	amdgpu_bo_fence(bo, fence, true);
 268	dma_fence_put(fence);
 269
 270out_unlock:
 271	if (r)
 272		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
 273	drm_exec_fini(&exec);
 274}
 275
 276static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 277{
 278	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 279
 280	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 281		return -EPERM;
 282	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 283		return -EPERM;
 284
 285	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
 286	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
 287	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
 288	 * becoming writable and makes is_cow_mapping(vm_flags) false.
 289	 */
 290	if (is_cow_mapping(vma->vm_flags) &&
 291	    !(vma->vm_flags & VM_ACCESS_FLAGS))
 292		vm_flags_clear(vma, VM_MAYWRITE);
 293
 294	return drm_gem_ttm_mmap(obj, vma);
 295}
 296
 297static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
 298	.free = amdgpu_gem_object_free,
 299	.open = amdgpu_gem_object_open,
 300	.close = amdgpu_gem_object_close,
 301	.export = amdgpu_gem_prime_export,
 302	.vmap = drm_gem_ttm_vmap,
 303	.vunmap = drm_gem_ttm_vunmap,
 304	.mmap = amdgpu_gem_object_mmap,
 305	.vm_ops = &amdgpu_gem_vm_ops,
 306};
 307
 308/*
 309 * GEM ioctls.
 310 */
 311int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 312			    struct drm_file *filp)
 313{
 314	struct amdgpu_device *adev = drm_to_adev(dev);
 315	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 316	struct amdgpu_vm *vm = &fpriv->vm;
 317	union drm_amdgpu_gem_create *args = data;
 318	uint64_t flags = args->in.domain_flags;
 319	uint64_t size = args->in.bo_size;
 320	struct dma_resv *resv = NULL;
 321	struct drm_gem_object *gobj;
 322	uint32_t handle, initial_domain;
 323	int r;
 324
 325	/* reject DOORBELLs until userspace code to use it is available */
 326	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
 327		return -EINVAL;
 328
 329	/* reject invalid gem flags */
 330	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 331		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 332		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 333		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
 334		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
 335		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
 336		      AMDGPU_GEM_CREATE_ENCRYPTED |
 337		      AMDGPU_GEM_CREATE_DISCARDABLE))
 338		return -EINVAL;
 339
 340	/* reject invalid gem domains */
 341	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
 342		return -EINVAL;
 343
 344	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
 345		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
 346		return -EINVAL;
 347	}
 348
 349	/* create a gem object to contain this object in */
 350	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 351	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
 352		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 353			/* if gds bo is created from user space, it must be
 354			 * passed to bo list
 355			 */
 356			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
 357			return -EINVAL;
 358		}
 359		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 360	}
 361
 362	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 363		r = amdgpu_bo_reserve(vm->root.bo, false);
 364		if (r)
 365			return r;
 366
 367		resv = vm->root.bo->tbo.base.resv;
 368	}
 369
 370	initial_domain = (u32)(0xffffffff & args->in.domains);
 371retry:
 372	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 373				     initial_domain,
 374				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
 375	if (r && r != -ERESTARTSYS) {
 376		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 377			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 378			goto retry;
 379		}
 380
 381		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 382			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 383			goto retry;
 384		}
 385		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
 386				size, initial_domain, args->in.alignment, r);
 387	}
 388
 389	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 390		if (!r) {
 391			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 392
 393			abo->parent = amdgpu_bo_ref(vm->root.bo);
 394		}
 395		amdgpu_bo_unreserve(vm->root.bo);
 396	}
 397	if (r)
 398		return r;
 399
 400	r = drm_gem_handle_create(filp, gobj, &handle);
 401	/* drop reference from allocate - handle holds it now */
 402	drm_gem_object_put(gobj);
 403	if (r)
 404		return r;
 405
 406	memset(args, 0, sizeof(*args));
 407	args->out.handle = handle;
 408	return 0;
 409}
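
/*
 * Editor's note: a minimal userspace sketch of driving the create ioctl
 * above; illustrative only, not part of this file. It assumes the UAPI
 * definitions from the kernel's <drm/amdgpu_drm.h> and an already-opened
 * render-node fd. Real code would loop on EINTR, e.g. via libdrm's
 * drmIoctl().
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_gem_create(int fd, uint64_t size, uint32_t *handle)
{
	union drm_amdgpu_gem_create args = {0};

	args.in.bo_size = size;
	args.in.alignment = 4096;
	/* VRAM first; the retry path above may add GTT as a fallback */
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;
	*handle = args.out.handle;
	return 0;
}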
 410
 411int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 412			     struct drm_file *filp)
 413{
 414	struct ttm_operation_ctx ctx = { true, false };
 415	struct amdgpu_device *adev = drm_to_adev(dev);
 416	struct drm_amdgpu_gem_userptr *args = data;
 417	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 418	struct drm_gem_object *gobj;
 419	struct hmm_range *range;
 420	struct amdgpu_bo *bo;
 421	uint32_t handle;
 422	int r;
 423
 424	args->addr = untagged_addr(args->addr);
 425
 426	if (offset_in_page(args->addr | args->size))
 427		return -EINVAL;
 428
 429	/* reject unknown flag values */
 430	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
 431	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
 432	    AMDGPU_GEM_USERPTR_REGISTER))
 433		return -EINVAL;
 434
 435	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
 436	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
 437
  438		/* if we want to write to it we must install an MMU notifier */
 439		return -EACCES;
 440	}
 441
 442	/* create a gem object to contain this object in */
 443	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
 444				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
 445	if (r)
 446		return r;
 447
 448	bo = gem_to_amdgpu_bo(gobj);
 449	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
 450	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 451	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 452	if (r)
 453		goto release_object;
 454
 455	r = amdgpu_hmm_register(bo, args->addr);
 456	if (r)
 457		goto release_object;
 458
 459	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
 460		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
 461						 &range);
 462		if (r)
 463			goto release_object;
 464
 465		r = amdgpu_bo_reserve(bo, true);
 466		if (r)
 467			goto user_pages_done;
 468
 469		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 470		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 471		amdgpu_bo_unreserve(bo);
 472		if (r)
 473			goto user_pages_done;
 474	}
 475
 476	r = drm_gem_handle_create(filp, gobj, &handle);
 477	if (r)
 478		goto user_pages_done;
 479
 480	args->handle = handle;
 481
 482user_pages_done:
 483	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
 484		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 485
 486release_object:
 487	drm_gem_object_put(gobj);
 488
 489	return r;
 490}
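
/*
 * Editor's note: a hedged userspace sketch of the userptr ioctl above;
 * illustrative only. The address and size must be page aligned, and writable
 * mappings require AMDGPU_GEM_USERPTR_REGISTER (the MMU-notifier check near
 * the top of the handler). The open render-node fd is assumed.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_userptr(int fd, void *buf, uint64_t size, uint32_t *handle)
{
	struct drm_amdgpu_gem_userptr args = {0};

	args.addr = (uintptr_t)buf;	/* page-aligned CPU address */
	args.size = size;		/* multiple of the page size */
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY |
		     AMDGPU_GEM_USERPTR_REGISTER |
		     AMDGPU_GEM_USERPTR_VALIDATE;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args))
		return -1;
	*handle = args.handle;
	return 0;
}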
 491
 492int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 493			  struct drm_device *dev,
 494			  uint32_t handle, uint64_t *offset_p)
 495{
 496	struct drm_gem_object *gobj;
 497	struct amdgpu_bo *robj;
 498
 499	gobj = drm_gem_object_lookup(filp, handle);
 500	if (!gobj)
 501		return -ENOENT;
 502
 503	robj = gem_to_amdgpu_bo(gobj);
 504	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 505	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
 506		drm_gem_object_put(gobj);
 507		return -EPERM;
 508	}
 509	*offset_p = amdgpu_bo_mmap_offset(robj);
 510	drm_gem_object_put(gobj);
 511	return 0;
 512}
 513
 514int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
 515			  struct drm_file *filp)
 516{
 517	union drm_amdgpu_gem_mmap *args = data;
 518	uint32_t handle = args->in.handle;
 519
 520	memset(args, 0, sizeof(*args));
 521	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
 522}
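
/*
 * Editor's note: an illustrative sketch of consuming the value this ioctl
 * returns; addr_ptr is a fake offset into the DRM fd, only meaningful as the
 * offset argument of a subsequent mmap() on that same fd. `fd`, `handle` and
 * `size` are assumptions.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/amdgpu_drm.h>

static void *example_map_bo(int fd, uint32_t handle, size_t size)
{
	union drm_amdgpu_gem_mmap args = {0};
	void *cpu;

	args.in.handle = handle;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args))
		return NULL;

	cpu = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, args.out.addr_ptr);
	return cpu == MAP_FAILED ? NULL : cpu;
}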
 523
 524/**
 525 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 526 *
 527 * @timeout_ns: timeout in ns
 528 *
 529 * Calculate the timeout in jiffies from an absolute timeout in ns.
 530 */
 531unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 532{
 533	unsigned long timeout_jiffies;
 534	ktime_t timeout;
 535
  536	/* clamp timeout if it's too large */
 537	if (((int64_t)timeout_ns) < 0)
 538		return MAX_SCHEDULE_TIMEOUT;
 539
 540	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
 541	if (ktime_to_ns(timeout) < 0)
 542		return 0;
 543
 544	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
  545	/* clamp timeout to avoid unsigned -> signed overflow */
 546	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
 547		return MAX_SCHEDULE_TIMEOUT - 1;
 548
 549	return timeout_jiffies;
 550}
 551
 552int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 553			      struct drm_file *filp)
 554{
 555	union drm_amdgpu_gem_wait_idle *args = data;
 556	struct drm_gem_object *gobj;
 557	struct amdgpu_bo *robj;
 558	uint32_t handle = args->in.handle;
 559	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
 560	int r = 0;
 561	long ret;
 562
 563	gobj = drm_gem_object_lookup(filp, handle);
 564	if (!gobj)
 565		return -ENOENT;
 566
 567	robj = gem_to_amdgpu_bo(gobj);
 568	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
 569				    true, timeout);
 570
 571	/* ret == 0 means not signaled,
 572	 * ret > 0 means signaled
 573	 * ret < 0 means interrupted before timeout
 574	 */
 575	if (ret >= 0) {
 576		memset(args, 0, sizeof(*args));
 577		args->out.status = (ret == 0);
 578	} else
 579		r = ret;
 580
 581	drm_gem_object_put(gobj);
 582	return r;
 583}
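
/*
 * Editor's note: args->in.timeout is an absolute CLOCK_MONOTONIC value in
 * nanoseconds, which amdgpu_gem_timeout() above converts to a relative
 * jiffies count by subtracting ktime_get(). A hedged sketch of how userspace
 * builds such a deadline (illustrative, not part of this file):
 */
#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_wait_idle(int fd, uint32_t handle, uint64_t rel_ns)
{
	union drm_amdgpu_gem_wait_idle args = {0};
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	args.in.handle = handle;
	args.in.timeout = (uint64_t)ts.tv_sec * 1000000000ull +
			  ts.tv_nsec + rel_ns;	/* absolute deadline */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &args))
		return -1;
	/* out.status is nonzero when the wait timed out, 0 when idle */
	return args.out.status ? -1 : 0;
}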
 584
 585int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 586				struct drm_file *filp)
 587{
 588	struct drm_amdgpu_gem_metadata *args = data;
 589	struct drm_gem_object *gobj;
 590	struct amdgpu_bo *robj;
 591	int r = -1;
 592
 593	DRM_DEBUG("%d\n", args->handle);
 594	gobj = drm_gem_object_lookup(filp, args->handle);
 595	if (gobj == NULL)
 596		return -ENOENT;
 597	robj = gem_to_amdgpu_bo(gobj);
 598
 599	r = amdgpu_bo_reserve(robj, false);
 600	if (unlikely(r != 0))
 601		goto out;
 602
 603	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
 604		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
 605		r = amdgpu_bo_get_metadata(robj, args->data.data,
 606					   sizeof(args->data.data),
 607					   &args->data.data_size_bytes,
 608					   &args->data.flags);
 609	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
 610		if (args->data.data_size_bytes > sizeof(args->data.data)) {
 611			r = -EINVAL;
 612			goto unreserve;
 613		}
 614		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
 615		if (!r)
 616			r = amdgpu_bo_set_metadata(robj, args->data.data,
 617						   args->data.data_size_bytes,
 618						   args->data.flags);
 619	}
 620
 621unreserve:
 622	amdgpu_bo_unreserve(robj);
 623out:
 624	drm_gem_object_put(gobj);
 625	return r;
 626}
 627
 628/**
  629 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 630 *
 631 * @adev: amdgpu_device pointer
 632 * @vm: vm to update
 633 * @bo_va: bo_va to update
 634 * @operation: map, unmap or clear
 635 *
 636 * Update the bo_va directly after setting its address. Errors are not
 637 * vital here, so they are not reported back to userspace.
 638 */
 639static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 640				    struct amdgpu_vm *vm,
 641				    struct amdgpu_bo_va *bo_va,
 642				    uint32_t operation)
 643{
 644	int r;
 645
 646	if (!amdgpu_vm_ready(vm))
 647		return;
 648
 649	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 650	if (r)
 651		goto error;
 652
 653	if (operation == AMDGPU_VA_OP_MAP ||
 654	    operation == AMDGPU_VA_OP_REPLACE) {
 655		r = amdgpu_vm_bo_update(adev, bo_va, false);
 656		if (r)
 657			goto error;
 658	}
 659
 660	r = amdgpu_vm_update_pdes(adev, vm, false);
 661
 662error:
 663	if (r && r != -ERESTARTSYS)
 664		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 665}
 666
 667/**
 668 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 669 *
 670 * @adev: amdgpu_device pointer
 671 * @flags: GEM UAPI flags
 672 *
 673 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 674 */
 675uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
 676{
 677	uint64_t pte_flag = 0;
 678
 679	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
 680		pte_flag |= AMDGPU_PTE_EXECUTABLE;
 681	if (flags & AMDGPU_VM_PAGE_READABLE)
 682		pte_flag |= AMDGPU_PTE_READABLE;
 683	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 684		pte_flag |= AMDGPU_PTE_WRITEABLE;
 685	if (flags & AMDGPU_VM_PAGE_PRT)
 686		pte_flag |= AMDGPU_PTE_PRT;
 687	if (flags & AMDGPU_VM_PAGE_NOALLOC)
 688		pte_flag |= AMDGPU_PTE_NOALLOC;
 689
 690	if (adev->gmc.gmc_funcs->map_mtype)
 691		pte_flag |= amdgpu_gmc_map_mtype(adev,
 692						 flags & AMDGPU_VM_MTYPE_MASK);
 693
 694	return pte_flag;
 695}
 696
 697int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 698			  struct drm_file *filp)
 699{
 700	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
 701		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 702		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
 703		AMDGPU_VM_PAGE_NOALLOC;
 704	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
 705		AMDGPU_VM_PAGE_PRT;
 706
 707	struct drm_amdgpu_gem_va *args = data;
 708	struct drm_gem_object *gobj;
 709	struct amdgpu_device *adev = drm_to_adev(dev);
 710	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 711	struct amdgpu_bo *abo;
 712	struct amdgpu_bo_va *bo_va;
 713	struct drm_exec exec;
 714	uint64_t va_flags;
 715	uint64_t vm_size;
 716	int r = 0;
 717
 718	if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
 719		dev_dbg(dev->dev,
 720			"va_address 0x%llx is in reserved area 0x%llx\n",
 721			args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
 722		return -EINVAL;
 723	}
 724
 725	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 726	    args->va_address < AMDGPU_GMC_HOLE_END) {
 727		dev_dbg(dev->dev,
 728			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
 729			args->va_address, AMDGPU_GMC_HOLE_START,
 730			AMDGPU_GMC_HOLE_END);
 731		return -EINVAL;
 732	}
 733
 734	args->va_address &= AMDGPU_GMC_HOLE_MASK;
 735
 736	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 737	vm_size -= AMDGPU_VA_RESERVED_TOP;
 738	if (args->va_address + args->map_size > vm_size) {
 739		dev_dbg(dev->dev,
 740			"va_address 0x%llx is in top reserved area 0x%llx\n",
 741			args->va_address + args->map_size, vm_size);
 742		return -EINVAL;
 743	}
 744
 745	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
 746		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
 747			args->flags);
 748		return -EINVAL;
 749	}
 750
 751	switch (args->operation) {
 752	case AMDGPU_VA_OP_MAP:
 753	case AMDGPU_VA_OP_UNMAP:
 754	case AMDGPU_VA_OP_CLEAR:
 755	case AMDGPU_VA_OP_REPLACE:
 756		break;
 757	default:
 758		dev_dbg(dev->dev, "unsupported operation %d\n",
 759			args->operation);
 760		return -EINVAL;
 761	}
 762
 763	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 764	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 765		gobj = drm_gem_object_lookup(filp, args->handle);
 766		if (gobj == NULL)
 767			return -ENOENT;
 768		abo = gem_to_amdgpu_bo(gobj);
 769	} else {
 770		gobj = NULL;
 771		abo = NULL;
 772	}
 773
 774	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
 775		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 776	drm_exec_until_all_locked(&exec) {
 777		if (gobj) {
 778			r = drm_exec_lock_obj(&exec, gobj);
 779			drm_exec_retry_on_contention(&exec);
 780			if (unlikely(r))
 781				goto error;
 782		}
 783
 784		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
 785		drm_exec_retry_on_contention(&exec);
 786		if (unlikely(r))
 787			goto error;
 788	}
 789
 790	if (abo) {
 791		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 792		if (!bo_va) {
 793			r = -ENOENT;
 794			goto error;
 795		}
 796	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
 797		bo_va = fpriv->prt_va;
 798	} else {
 799		bo_va = NULL;
 800	}
 801
 802	switch (args->operation) {
 803	case AMDGPU_VA_OP_MAP:
 804		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 805		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 806				     args->offset_in_bo, args->map_size,
 807				     va_flags);
 808		break;
 809	case AMDGPU_VA_OP_UNMAP:
 810		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
 811		break;
 812
 813	case AMDGPU_VA_OP_CLEAR:
 814		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
 815						args->va_address,
 816						args->map_size);
 817		break;
 818	case AMDGPU_VA_OP_REPLACE:
 819		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 820		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 821					     args->offset_in_bo, args->map_size,
 822					     va_flags);
 823		break;
 824	default:
 825		break;
 826	}
 827	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
 828		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
 829					args->operation);
 830
 831error:
 832	drm_exec_fini(&exec);
 833	drm_gem_object_put(gobj);
 834	return r;
 835}
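
/*
 * Editor's note: a minimal sketch of mapping a BO into this file's GPU VM
 * through the ioctl above; illustrative only. The chosen VA must avoid the
 * reserved ranges and GMC hole validated at the top of the handler; `fd`,
 * `handle`, `va` and `size` are assumptions.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_va_map(int fd, uint32_t handle, uint64_t va, uint64_t size)
{
	struct drm_amdgpu_gem_va args = {0};

	args.handle = handle;
	args.operation = AMDGPU_VA_OP_MAP;
	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	args.va_address = va;		/* GPU virtual address */
	args.offset_in_bo = 0;
	args.map_size = size;		/* page-aligned length */

	return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
}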
 836
 837int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 838			struct drm_file *filp)
 839{
 840	struct amdgpu_device *adev = drm_to_adev(dev);
 841	struct drm_amdgpu_gem_op *args = data;
 842	struct drm_gem_object *gobj;
 843	struct amdgpu_vm_bo_base *base;
 844	struct amdgpu_bo *robj;
 845	int r;
 846
 847	gobj = drm_gem_object_lookup(filp, args->handle);
 848	if (!gobj)
 849		return -ENOENT;
 850
 851	robj = gem_to_amdgpu_bo(gobj);
 852
 853	r = amdgpu_bo_reserve(robj, false);
 854	if (unlikely(r))
 855		goto out;
 856
 857	switch (args->op) {
 858	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
 859		struct drm_amdgpu_gem_create_in info;
 860		void __user *out = u64_to_user_ptr(args->value);
 861
 862		info.bo_size = robj->tbo.base.size;
 863		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 864		info.domains = robj->preferred_domains;
 865		info.domain_flags = robj->flags;
 866		amdgpu_bo_unreserve(robj);
 867		if (copy_to_user(out, &info, sizeof(info)))
 868			r = -EFAULT;
 869		break;
 870	}
 871	case AMDGPU_GEM_OP_SET_PLACEMENT:
 872		if (robj->tbo.base.import_attach &&
 873		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
 874			r = -EINVAL;
 875			amdgpu_bo_unreserve(robj);
 876			break;
 877		}
 878		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 879			r = -EPERM;
 880			amdgpu_bo_unreserve(robj);
 881			break;
 882		}
 883		for (base = robj->vm_bo; base; base = base->next)
 884			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
 885				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
 886				r = -EINVAL;
 887				amdgpu_bo_unreserve(robj);
 888				goto out;
 889			}
 890
 891
 892		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
 893							AMDGPU_GEM_DOMAIN_GTT |
 894							AMDGPU_GEM_DOMAIN_CPU);
 895		robj->allowed_domains = robj->preferred_domains;
 896		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 897			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 898
 899		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 900			amdgpu_vm_bo_invalidate(adev, robj, true);
 901
 902		amdgpu_bo_unreserve(robj);
 903		break;
 904	default:
 905		amdgpu_bo_unreserve(robj);
 906		r = -EINVAL;
 907	}
 908
 909out:
 910	drm_gem_object_put(gobj);
 911	return r;
 912}
 913
 914static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
 915				  int width,
 916				  int cpp,
 917				  bool tiled)
 918{
 919	int aligned = width;
 920	int pitch_mask = 0;
 921
 922	switch (cpp) {
 923	case 1:
 924		pitch_mask = 255;
 925		break;
 926	case 2:
 927		pitch_mask = 127;
 928		break;
 929	case 3:
 930	case 4:
 931		pitch_mask = 63;
 932		break;
 933	}
 934
 935	aligned += pitch_mask;
 936	aligned &= ~pitch_mask;
 937	return aligned * cpp;
 938}
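
/*
 * Editor's note: a standalone restatement of the pitch arithmetic above with
 * one worked case, for illustration. With cpp == 4 the mask is 63, so a
 * width of 1021 pixels rounds up to 1024 and the byte pitch becomes
 * 1024 * 4 = 4096.
 */
static int example_align_pitch(int width, int cpp)
{
	/* align to 256/128/64 pixels for 1-, 2- and 3/4-byte formats, as above */
	int mask = (cpp == 1) ? 255 : (cpp == 2) ? 127 : 63;

	return ((width + mask) & ~mask) * cpp;	/* bytes per scanline */
}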
 939
 940int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 941			    struct drm_device *dev,
 942			    struct drm_mode_create_dumb *args)
 943{
 944	struct amdgpu_device *adev = drm_to_adev(dev);
 945	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 946	struct drm_gem_object *gobj;
 947	uint32_t handle;
 948	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 949		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 950		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 951	u32 domain;
 952	int r;
 953
 954	/*
 955	 * The buffer returned from this function should be cleared, but
 956	 * it can only be done if the ring is enabled or we'll fail to
 957	 * create the buffer.
 958	 */
 959	if (adev->mman.buffer_funcs_enabled)
 960		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
 961
 962	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
 963					     DIV_ROUND_UP(args->bpp, 8), 0);
 964	args->size = (u64)args->pitch * args->height;
 965	args->size = ALIGN(args->size, PAGE_SIZE);
 966	domain = amdgpu_bo_get_preferred_domain(adev,
 967				amdgpu_display_supported_domains(adev, flags));
 968	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
 969				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
 970	if (r)
 971		return -ENOMEM;
 972
 973	r = drm_gem_handle_create(file_priv, gobj, &handle);
 974	/* drop reference from allocate - handle holds it now */
 975	drm_gem_object_put(gobj);
 976	if (r)
 977		return r;
 978
 979	args->handle = handle;
 980	return 0;
 981}
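
/*
 * Editor's note: dumb buffers are created through the generic DRM UAPI
 * rather than the amdgpu-specific create ioctl; a hedged sketch, assuming an
 * open primary node (dumb-buffer ioctls are not render-node accessible):
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int example_create_dumb(int fd, uint32_t w, uint32_t h,
			       struct drm_mode_create_dumb *out)
{
	struct drm_mode_create_dumb args = {0};

	args.width = w;
	args.height = h;
	args.bpp = 32;	/* e.g. XRGB8888 */

	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &args))
		return -1;
	*out = args;	/* kernel fills handle, pitch and size */
	return 0;
}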
 982
 983#if defined(CONFIG_DEBUG_FS)
 984static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
 985{
 986	struct amdgpu_device *adev = m->private;
 987	struct drm_device *dev = adev_to_drm(adev);
 988	struct drm_file *file;
 989	int r;
 990
 991	r = mutex_lock_interruptible(&dev->filelist_mutex);
 992	if (r)
 993		return r;
 994
 995	list_for_each_entry(file, &dev->filelist, lhead) {
 996		struct task_struct *task;
 997		struct drm_gem_object *gobj;
 998		struct pid *pid;
 999		int id;
1000
1001		/*
1002		 * Although we have a valid reference on file->pid, that does
1003		 * not guarantee that the task_struct who called get_pid() is
1004		 * still alive (e.g. get_pid(current) => fork() => exit()).
1005		 * Therefore, we need to protect this ->comm access using RCU.
1006		 */
1007		rcu_read_lock();
1008		pid = rcu_dereference(file->pid);
1009		task = pid_task(pid, PIDTYPE_TGID);
1010		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
1011			   task ? task->comm : "<unknown>");
1012		rcu_read_unlock();
1013
1014		spin_lock(&file->table_lock);
1015		idr_for_each_entry(&file->object_idr, gobj, id) {
1016			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
1017
1018			amdgpu_bo_print_info(id, bo, m);
1019		}
1020		spin_unlock(&file->table_lock);
1021	}
1022
1023	mutex_unlock(&dev->filelist_mutex);
1024	return 0;
1025}
1026
1027DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
1028
1029#endif
1030
1031void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
1032{
1033#if defined(CONFIG_DEBUG_FS)
1034	struct drm_minor *minor = adev_to_drm(adev)->primary;
1035	struct dentry *root = minor->debugfs_root;
1036
1037	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
1038			    &amdgpu_debugfs_gem_info_fops);
1039#endif
1040}
v5.4
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/module.h>
 30#include <linux/pagemap.h>
 31#include <linux/pci.h>
 32
 33#include <drm/amdgpu_drm.h>
 34#include <drm/drm_debugfs.h>
 35
 36#include "amdgpu.h"
 37#include "amdgpu_display.h"
 38#include "amdgpu_xgmi.h"
 39
 40void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 41{
 42	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 43
 44	if (robj) {
 45		amdgpu_mn_unregister(robj);
 46		amdgpu_bo_unref(&robj);
 47	}
 48}
 49
 50int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 51			     int alignment, u32 initial_domain,
 52			     u64 flags, enum ttm_bo_type type,
 53			     struct dma_resv *resv,
 54			     struct drm_gem_object **obj)
 55{
 56	struct amdgpu_bo *bo;
 57	struct amdgpu_bo_param bp;
 58	int r;
 59
 60	memset(&bp, 0, sizeof(bp));
 61	*obj = NULL;
 62
 63	bp.size = size;
 64	bp.byte_align = alignment;
 65	bp.type = type;
 66	bp.resv = resv;
 67	bp.preferred_domain = initial_domain;
 68retry:
 69	bp.flags = flags;
 70	bp.domain = initial_domain;
 71	r = amdgpu_bo_create(adev, &bp, &bo);
 72	if (r) {
 73		if (r != -ERESTARTSYS) {
 74			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 75				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 76				goto retry;
 77			}
 78
 79			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 80				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 81				goto retry;
 82			}
 83			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 84				  size, initial_domain, alignment, r);
 85		}
 86		return r;
 87	}
 88	*obj = &bo->tbo.base;
 89
 90	return 0;
 91}
 92
 93void amdgpu_gem_force_release(struct amdgpu_device *adev)
 94{
 95	struct drm_device *ddev = adev->ddev;
 96	struct drm_file *file;
 97
 98	mutex_lock(&ddev->filelist_mutex);
 99
100	list_for_each_entry(file, &ddev->filelist, lhead) {
101		struct drm_gem_object *gobj;
102		int handle;
103
104		WARN_ONCE(1, "Still active user space clients!\n");
105		spin_lock(&file->table_lock);
106		idr_for_each_entry(&file->object_idr, gobj, handle) {
107			WARN_ONCE(1, "And also active allocations!\n");
108			drm_gem_object_put_unlocked(gobj);
109		}
110		idr_destroy(&file->object_idr);
111		spin_unlock(&file->table_lock);
112	}
113
114	mutex_unlock(&ddev->filelist_mutex);
115}
116
 117/*
 118 * Called from drm_gem_handle_create, which appears in both the new and
 119 * open ioctl cases.
 120 */
121int amdgpu_gem_object_open(struct drm_gem_object *obj,
122			   struct drm_file *file_priv)
123{
124	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
125	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
126	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
127	struct amdgpu_vm *vm = &fpriv->vm;
128	struct amdgpu_bo_va *bo_va;
129	struct mm_struct *mm;
130	int r;
131
132	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
133	if (mm && mm != current->mm)
134		return -EPERM;
135
136	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
137	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
138		return -EPERM;
139
140	r = amdgpu_bo_reserve(abo, false);
141	if (r)
142		return r;
143
144	bo_va = amdgpu_vm_bo_find(vm, abo);
145	if (!bo_va) {
146		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
147	} else {
148		++bo_va->ref_count;
149	}
150	amdgpu_bo_unreserve(abo);
151	return 0;
152}
153
154void amdgpu_gem_object_close(struct drm_gem_object *obj,
155			     struct drm_file *file_priv)
156{
157	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
158	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
159	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
160	struct amdgpu_vm *vm = &fpriv->vm;
161
162	struct amdgpu_bo_list_entry vm_pd;
163	struct list_head list, duplicates;
164	struct ttm_validate_buffer tv;
165	struct ww_acquire_ctx ticket;
166	struct amdgpu_bo_va *bo_va;
167	int r;
168
169	INIT_LIST_HEAD(&list);
170	INIT_LIST_HEAD(&duplicates);
171
172	tv.bo = &bo->tbo;
173	tv.num_shared = 1;
174	list_add(&tv.head, &list);
175
176	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
177
178	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
179	if (r) {
180		dev_err(adev->dev, "leaking bo va because "
181			"we fail to reserve bo (%d)\n", r);
182		return;
183	}
184	bo_va = amdgpu_vm_bo_find(vm, bo);
185	if (bo_va && --bo_va->ref_count == 0) {
186		amdgpu_vm_bo_rmv(adev, bo_va);
187
188		if (amdgpu_vm_ready(vm)) {
189			struct dma_fence *fence = NULL;
190
191			r = amdgpu_vm_clear_freed(adev, vm, &fence);
192			if (unlikely(r)) {
193				dev_err(adev->dev, "failed to clear page "
194					"tables on GEM object close (%d)\n", r);
195			}
196
197			if (fence) {
198				amdgpu_bo_fence(bo, fence, true);
199				dma_fence_put(fence);
200			}
201		}
202	}
203	ttm_eu_backoff_reservation(&ticket, &list);
204}
205
206/*
207 * GEM ioctls.
208 */
209int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
210			    struct drm_file *filp)
211{
212	struct amdgpu_device *adev = dev->dev_private;
213	struct amdgpu_fpriv *fpriv = filp->driver_priv;
214	struct amdgpu_vm *vm = &fpriv->vm;
215	union drm_amdgpu_gem_create *args = data;
216	uint64_t flags = args->in.domain_flags;
217	uint64_t size = args->in.bo_size;
218	struct dma_resv *resv = NULL;
219	struct drm_gem_object *gobj;
220	uint32_t handle;
221	int r;
222
223	/* reject invalid gem flags */
224	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
225		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
226		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
227		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
228		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
229		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
230
231		return -EINVAL;
232
233	/* reject invalid gem domains */
234	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
235		return -EINVAL;
236
237	/* create a gem object to contain this object in */
238	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
239	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
240		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
241			/* if gds bo is created from user space, it must be
242			 * passed to bo list
243			 */
244			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
245			return -EINVAL;
246		}
247		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
248	}
249
250	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
251		r = amdgpu_bo_reserve(vm->root.base.bo, false);
252		if (r)
253			return r;
254
255		resv = vm->root.base.bo->tbo.base.resv;
256	}
257
258	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
259				     (u32)(0xffffffff & args->in.domains),
260				     flags, ttm_bo_type_device, resv, &gobj);
261	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
262		if (!r) {
263			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
264
265			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
266		}
267		amdgpu_bo_unreserve(vm->root.base.bo);
268	}
269	if (r)
270		return r;
271
272	r = drm_gem_handle_create(filp, gobj, &handle);
273	/* drop reference from allocate - handle holds it now */
274	drm_gem_object_put_unlocked(gobj);
275	if (r)
276		return r;
277
278	memset(args, 0, sizeof(*args));
279	args->out.handle = handle;
280	return 0;
281}
282
283int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
284			     struct drm_file *filp)
285{
286	struct ttm_operation_ctx ctx = { true, false };
287	struct amdgpu_device *adev = dev->dev_private;
288	struct drm_amdgpu_gem_userptr *args = data;
289	struct drm_gem_object *gobj;
290	struct amdgpu_bo *bo;
291	uint32_t handle;
292	int r;
293
294	args->addr = untagged_addr(args->addr);
295
296	if (offset_in_page(args->addr | args->size))
297		return -EINVAL;
298
299	/* reject unknown flag values */
300	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
301	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
302	    AMDGPU_GEM_USERPTR_REGISTER))
303		return -EINVAL;
304
305	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
306	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
307
 308		/* if we want to write to it we must install an MMU notifier */
309		return -EACCES;
310	}
311
312	/* create a gem object to contain this object in */
313	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
314				     0, ttm_bo_type_device, NULL, &gobj);
315	if (r)
316		return r;
317
318	bo = gem_to_amdgpu_bo(gobj);
319	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
320	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
321	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
322	if (r)
323		goto release_object;
324
325	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
326		r = amdgpu_mn_register(bo, args->addr);
327		if (r)
328			goto release_object;
329	}
330
331	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
332		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
333		if (r)
334			goto release_object;
335
336		r = amdgpu_bo_reserve(bo, true);
337		if (r)
338			goto user_pages_done;
339
340		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
341		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
342		amdgpu_bo_unreserve(bo);
343		if (r)
344			goto user_pages_done;
345	}
346
347	r = drm_gem_handle_create(filp, gobj, &handle);
348	if (r)
349		goto user_pages_done;
350
351	args->handle = handle;
352
353user_pages_done:
354	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
355		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
356
357release_object:
358	drm_gem_object_put_unlocked(gobj);
359
360	return r;
361}
362
363int amdgpu_mode_dumb_mmap(struct drm_file *filp,
364			  struct drm_device *dev,
365			  uint32_t handle, uint64_t *offset_p)
366{
367	struct drm_gem_object *gobj;
368	struct amdgpu_bo *robj;
369
370	gobj = drm_gem_object_lookup(filp, handle);
371	if (gobj == NULL) {
372		return -ENOENT;
373	}
374	robj = gem_to_amdgpu_bo(gobj);
375	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
376	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
377		drm_gem_object_put_unlocked(gobj);
378		return -EPERM;
379	}
380	*offset_p = amdgpu_bo_mmap_offset(robj);
381	drm_gem_object_put_unlocked(gobj);
382	return 0;
383}
384
385int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
386			  struct drm_file *filp)
387{
388	union drm_amdgpu_gem_mmap *args = data;
389	uint32_t handle = args->in.handle;
390	memset(args, 0, sizeof(*args));
391	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
392}
393
394/**
395 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
396 *
397 * @timeout_ns: timeout in ns
398 *
399 * Calculate the timeout in jiffies from an absolute timeout in ns.
400 */
401unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
402{
403	unsigned long timeout_jiffies;
404	ktime_t timeout;
405
 406	/* clamp timeout if it's too large */
407	if (((int64_t)timeout_ns) < 0)
408		return MAX_SCHEDULE_TIMEOUT;
409
410	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
411	if (ktime_to_ns(timeout) < 0)
412		return 0;
413
414	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
 415	/* clamp timeout to avoid unsigned -> signed overflow */
 416	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
417		return MAX_SCHEDULE_TIMEOUT - 1;
418
419	return timeout_jiffies;
420}
421
422int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
423			      struct drm_file *filp)
424{
425	union drm_amdgpu_gem_wait_idle *args = data;
426	struct drm_gem_object *gobj;
427	struct amdgpu_bo *robj;
428	uint32_t handle = args->in.handle;
429	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
430	int r = 0;
431	long ret;
432
433	gobj = drm_gem_object_lookup(filp, handle);
434	if (gobj == NULL) {
435		return -ENOENT;
436	}
437	robj = gem_to_amdgpu_bo(gobj);
438	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
439						  timeout);
440
441	/* ret == 0 means not signaled,
442	 * ret > 0 means signaled
443	 * ret < 0 means interrupted before timeout
444	 */
445	if (ret >= 0) {
446		memset(args, 0, sizeof(*args));
447		args->out.status = (ret == 0);
448	} else
449		r = ret;
450
451	drm_gem_object_put_unlocked(gobj);
452	return r;
453}
454
455int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
456				struct drm_file *filp)
457{
458	struct drm_amdgpu_gem_metadata *args = data;
459	struct drm_gem_object *gobj;
460	struct amdgpu_bo *robj;
461	int r = -1;
462
 463	DRM_DEBUG("%d\n", args->handle);
464	gobj = drm_gem_object_lookup(filp, args->handle);
465	if (gobj == NULL)
466		return -ENOENT;
467	robj = gem_to_amdgpu_bo(gobj);
468
469	r = amdgpu_bo_reserve(robj, false);
470	if (unlikely(r != 0))
471		goto out;
472
473	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
474		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
475		r = amdgpu_bo_get_metadata(robj, args->data.data,
476					   sizeof(args->data.data),
477					   &args->data.data_size_bytes,
478					   &args->data.flags);
479	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
480		if (args->data.data_size_bytes > sizeof(args->data.data)) {
481			r = -EINVAL;
482			goto unreserve;
483		}
484		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
485		if (!r)
486			r = amdgpu_bo_set_metadata(robj, args->data.data,
487						   args->data.data_size_bytes,
488						   args->data.flags);
489	}
490
491unreserve:
492	amdgpu_bo_unreserve(robj);
493out:
494	drm_gem_object_put_unlocked(gobj);
495	return r;
496}
497
498/**
 499 * amdgpu_gem_va_update_vm - update the bo_va in its VM
500 *
501 * @adev: amdgpu_device pointer
502 * @vm: vm to update
503 * @bo_va: bo_va to update
504 * @operation: map, unmap or clear
505 *
506 * Update the bo_va directly after setting its address. Errors are not
507 * vital here, so they are not reported back to userspace.
508 */
509static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
510				    struct amdgpu_vm *vm,
511				    struct amdgpu_bo_va *bo_va,
512				    uint32_t operation)
513{
514	int r;
515
516	if (!amdgpu_vm_ready(vm))
517		return;
518
519	r = amdgpu_vm_clear_freed(adev, vm, NULL);
520	if (r)
521		goto error;
522
523	if (operation == AMDGPU_VA_OP_MAP ||
524	    operation == AMDGPU_VA_OP_REPLACE) {
525		r = amdgpu_vm_bo_update(adev, bo_va, false);
526		if (r)
527			goto error;
528	}
529
530	r = amdgpu_vm_update_directories(adev, vm);
531
532error:
533	if (r && r != -ERESTARTSYS)
534		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
535}
536
537int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
538			  struct drm_file *filp)
539{
540	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
541		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
542		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
543	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
544		AMDGPU_VM_PAGE_PRT;
545
546	struct drm_amdgpu_gem_va *args = data;
547	struct drm_gem_object *gobj;
548	struct amdgpu_device *adev = dev->dev_private;
549	struct amdgpu_fpriv *fpriv = filp->driver_priv;
550	struct amdgpu_bo *abo;
551	struct amdgpu_bo_va *bo_va;
552	struct amdgpu_bo_list_entry vm_pd;
553	struct ttm_validate_buffer tv;
554	struct ww_acquire_ctx ticket;
555	struct list_head list, duplicates;
556	uint64_t va_flags;
557	int r = 0;
558
559	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
560		dev_dbg(&dev->pdev->dev,
561			"va_address 0x%LX is in reserved area 0x%LX\n",
562			args->va_address, AMDGPU_VA_RESERVED_SIZE);
563		return -EINVAL;
564	}
565
566	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
567	    args->va_address < AMDGPU_GMC_HOLE_END) {
568		dev_dbg(&dev->pdev->dev,
569			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
570			args->va_address, AMDGPU_GMC_HOLE_START,
571			AMDGPU_GMC_HOLE_END);
572		return -EINVAL;
573	}
574
575	args->va_address &= AMDGPU_GMC_HOLE_MASK;
576
577	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
578		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
579			args->flags);
580		return -EINVAL;
581	}
582
583	switch (args->operation) {
584	case AMDGPU_VA_OP_MAP:
585	case AMDGPU_VA_OP_UNMAP:
586	case AMDGPU_VA_OP_CLEAR:
587	case AMDGPU_VA_OP_REPLACE:
588		break;
589	default:
590		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
591			args->operation);
592		return -EINVAL;
593	}
594
595	INIT_LIST_HEAD(&list);
596	INIT_LIST_HEAD(&duplicates);
597	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
598	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
599		gobj = drm_gem_object_lookup(filp, args->handle);
600		if (gobj == NULL)
601			return -ENOENT;
602		abo = gem_to_amdgpu_bo(gobj);
603		tv.bo = &abo->tbo;
604		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
605			tv.num_shared = 1;
606		else
607			tv.num_shared = 0;
608		list_add(&tv.head, &list);
609	} else {
610		gobj = NULL;
611		abo = NULL;
612	}
613
614	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
615
616	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
617	if (r)
618		goto error_unref;
619
620	if (abo) {
621		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
622		if (!bo_va) {
623			r = -ENOENT;
624			goto error_backoff;
625		}
626	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
627		bo_va = fpriv->prt_va;
628	} else {
629		bo_va = NULL;
630	}
631
632	switch (args->operation) {
633	case AMDGPU_VA_OP_MAP:
634		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
635		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
636				     args->offset_in_bo, args->map_size,
637				     va_flags);
638		break;
639	case AMDGPU_VA_OP_UNMAP:
640		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
641		break;
642
643	case AMDGPU_VA_OP_CLEAR:
644		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
645						args->va_address,
646						args->map_size);
647		break;
648	case AMDGPU_VA_OP_REPLACE:
649		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
650		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
651					     args->offset_in_bo, args->map_size,
652					     va_flags);
653		break;
654	default:
655		break;
656	}
657	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
658		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
659					args->operation);
660
661error_backoff:
662	ttm_eu_backoff_reservation(&ticket, &list);
663
664error_unref:
665	drm_gem_object_put_unlocked(gobj);
666	return r;
667}
668
669int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
670			struct drm_file *filp)
671{
672	struct amdgpu_device *adev = dev->dev_private;
673	struct drm_amdgpu_gem_op *args = data;
674	struct drm_gem_object *gobj;
675	struct amdgpu_vm_bo_base *base;
676	struct amdgpu_bo *robj;
677	int r;
678
679	gobj = drm_gem_object_lookup(filp, args->handle);
680	if (gobj == NULL) {
681		return -ENOENT;
682	}
683	robj = gem_to_amdgpu_bo(gobj);
684
685	r = amdgpu_bo_reserve(robj, false);
686	if (unlikely(r))
687		goto out;
688
689	switch (args->op) {
690	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
691		struct drm_amdgpu_gem_create_in info;
692		void __user *out = u64_to_user_ptr(args->value);
693
694		info.bo_size = robj->tbo.base.size;
695		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
696		info.domains = robj->preferred_domains;
697		info.domain_flags = robj->flags;
698		amdgpu_bo_unreserve(robj);
699		if (copy_to_user(out, &info, sizeof(info)))
700			r = -EFAULT;
701		break;
702	}
703	case AMDGPU_GEM_OP_SET_PLACEMENT:
704		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
705			r = -EINVAL;
706			amdgpu_bo_unreserve(robj);
707			break;
708		}
709		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
710			r = -EPERM;
711			amdgpu_bo_unreserve(robj);
712			break;
713		}
714		for (base = robj->vm_bo; base; base = base->next)
715			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
716				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
717				r = -EINVAL;
718				amdgpu_bo_unreserve(robj);
719				goto out;
720			}
721
722
723		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
724							AMDGPU_GEM_DOMAIN_GTT |
725							AMDGPU_GEM_DOMAIN_CPU);
726		robj->allowed_domains = robj->preferred_domains;
727		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
728			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
729
730		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
731			amdgpu_vm_bo_invalidate(adev, robj, true);
732
733		amdgpu_bo_unreserve(robj);
734		break;
735	default:
736		amdgpu_bo_unreserve(robj);
737		r = -EINVAL;
738	}
739
740out:
741	drm_gem_object_put_unlocked(gobj);
742	return r;
743}
744
745int amdgpu_mode_dumb_create(struct drm_file *file_priv,
746			    struct drm_device *dev,
747			    struct drm_mode_create_dumb *args)
748{
749	struct amdgpu_device *adev = dev->dev_private;
750	struct drm_gem_object *gobj;
751	uint32_t handle;
752	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
753		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
754	u32 domain;
755	int r;
756
757	/*
758	 * The buffer returned from this function should be cleared, but
759	 * it can only be done if the ring is enabled or we'll fail to
760	 * create the buffer.
761	 */
762	if (adev->mman.buffer_funcs_enabled)
763		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
764
765	args->pitch = amdgpu_align_pitch(adev, args->width,
766					 DIV_ROUND_UP(args->bpp, 8), 0);
767	args->size = (u64)args->pitch * args->height;
768	args->size = ALIGN(args->size, PAGE_SIZE);
769	domain = amdgpu_bo_get_preferred_pin_domain(adev,
770				amdgpu_display_supported_domains(adev, flags));
771	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
772				     ttm_bo_type_device, NULL, &gobj);
773	if (r)
774		return -ENOMEM;
775
776	r = drm_gem_handle_create(file_priv, gobj, &handle);
777	/* drop reference from allocate - handle holds it now */
778	drm_gem_object_put_unlocked(gobj);
779	if (r) {
780		return r;
781	}
782	args->handle = handle;
783	return 0;
784}
785
786#if defined(CONFIG_DEBUG_FS)
787
788#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
789	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
790		seq_printf((m), " " #flag);		\
791	}
792
793static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
794{
795	struct drm_gem_object *gobj = ptr;
796	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
797	struct seq_file *m = data;
798
799	struct dma_buf_attachment *attachment;
800	struct dma_buf *dma_buf;
801	unsigned domain;
802	const char *placement;
803	unsigned pin_count;
804
805	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
806	switch (domain) {
807	case AMDGPU_GEM_DOMAIN_VRAM:
808		placement = "VRAM";
809		break;
810	case AMDGPU_GEM_DOMAIN_GTT:
811		placement = " GTT";
812		break;
813	case AMDGPU_GEM_DOMAIN_CPU:
814	default:
815		placement = " CPU";
816		break;
817	}
818	seq_printf(m, "\t0x%08x: %12ld byte %s",
819		   id, amdgpu_bo_size(bo), placement);
820
821	pin_count = READ_ONCE(bo->pin_count);
822	if (pin_count)
823		seq_printf(m, " pin count %d", pin_count);
824
825	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
826	attachment = READ_ONCE(bo->tbo.base.import_attach);
827
828	if (attachment)
829		seq_printf(m, " imported from %p", dma_buf);
830	else if (dma_buf)
831		seq_printf(m, " exported as %p", dma_buf);
832
833	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
834	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
835	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
836	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
837	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
838	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
839	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
840	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
841
842	seq_printf(m, "\n");
843
844	return 0;
845}
846
847static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
848{
849	struct drm_info_node *node = (struct drm_info_node *)m->private;
850	struct drm_device *dev = node->minor->dev;
851	struct drm_file *file;
852	int r;
853
854	r = mutex_lock_interruptible(&dev->filelist_mutex);
855	if (r)
856		return r;
857
858	list_for_each_entry(file, &dev->filelist, lhead) {
859		struct task_struct *task;
860
861		/*
862		 * Although we have a valid reference on file->pid, that does
863		 * not guarantee that the task_struct who called get_pid() is
864		 * still alive (e.g. get_pid(current) => fork() => exit()).
865		 * Therefore, we need to protect this ->comm access using RCU.
866		 */
867		rcu_read_lock();
868		task = pid_task(file->pid, PIDTYPE_PID);
869		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
870			   task ? task->comm : "<unknown>");
871		rcu_read_unlock();
872
873		spin_lock(&file->table_lock);
874		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
875		spin_unlock(&file->table_lock);
876	}
877
878	mutex_unlock(&dev->filelist_mutex);
879	return 0;
880}
881
882static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
883	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
884};
885#endif
886
887int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
888{
889#if defined(CONFIG_DEBUG_FS)
890	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
891#endif
892	return 0;
893}