v5.9: drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/module.h>
 30#include <linux/pagemap.h>
 31#include <linux/pci.h>
 32#include <linux/dma-buf.h>
 33
 34#include <drm/amdgpu_drm.h>
 35#include <drm/drm_debugfs.h>
 36
 37#include "amdgpu.h"
 38#include "amdgpu_display.h"
 39#include "amdgpu_xgmi.h"
 40
 41void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 42{
 43	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 44
 45	if (robj) {
 46		amdgpu_mn_unregister(robj);
 47		amdgpu_bo_unref(&robj);
 48	}
 49}
 50
 51int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 52			     int alignment, u32 initial_domain,
 53			     u64 flags, enum ttm_bo_type type,
 54			     struct dma_resv *resv,
 55			     struct drm_gem_object **obj)
 56{
 57	struct amdgpu_bo *bo;
 58	struct amdgpu_bo_param bp;
 59	int r;
 60
 61	memset(&bp, 0, sizeof(bp));
 62	*obj = NULL;
 63
 64	bp.size = size;
 65	bp.byte_align = alignment;
 66	bp.type = type;
 67	bp.resv = resv;
 68	bp.preferred_domain = initial_domain;
 69retry:
 70	bp.flags = flags;
 71	bp.domain = initial_domain;
 72	r = amdgpu_bo_create(adev, &bp, &bo);
 73	if (r) {
 74		if (r != -ERESTARTSYS) {
 75			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 76				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 77				goto retry;
 78			}
 79
 80			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 81				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 82				goto retry;
 83			}
 84			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 85				  size, initial_domain, alignment, r);
 86		}
 87		return r;
 88	}
 89	*obj = &bo->tbo.base;
 90
 91	return 0;
 92}
 93
 94void amdgpu_gem_force_release(struct amdgpu_device *adev)
 95{
 96	struct drm_device *ddev = adev->ddev;
 97	struct drm_file *file;
 98
 99	mutex_lock(&ddev->filelist_mutex);
100
101	list_for_each_entry(file, &ddev->filelist, lhead) {
102		struct drm_gem_object *gobj;
103		int handle;
104
105		WARN_ONCE(1, "Still active user space clients!\n");
106		spin_lock(&file->table_lock);
107		idr_for_each_entry(&file->object_idr, gobj, handle) {
108			WARN_ONCE(1, "And also active allocations!\n");
109			drm_gem_object_put(gobj);
110		}
111		idr_destroy(&file->object_idr);
112		spin_unlock(&file->table_lock);
113	}
114
115	mutex_unlock(&ddev->filelist_mutex);
116}
117
118/*
119 * Called from drm_gem_handle_create, which appears in both the new and open
120 * ioctl cases.
121 */
122int amdgpu_gem_object_open(struct drm_gem_object *obj,
123			   struct drm_file *file_priv)
124{
125	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
126	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
127	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
128	struct amdgpu_vm *vm = &fpriv->vm;
129	struct amdgpu_bo_va *bo_va;
130	struct mm_struct *mm;
131	int r;
132
133	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
134	if (mm && mm != current->mm)
135		return -EPERM;
136
137	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
138	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
139		return -EPERM;
140
141	r = amdgpu_bo_reserve(abo, false);
142	if (r)
143		return r;
144
145	bo_va = amdgpu_vm_bo_find(vm, abo);
146	if (!bo_va) {
147		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
148	} else {
149		++bo_va->ref_count;
150	}
151	amdgpu_bo_unreserve(abo);
152	return 0;
153}
154
155void amdgpu_gem_object_close(struct drm_gem_object *obj,
156			     struct drm_file *file_priv)
157{
158	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
159	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
160	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
161	struct amdgpu_vm *vm = &fpriv->vm;
162
163	struct amdgpu_bo_list_entry vm_pd;
164	struct list_head list, duplicates;
165	struct dma_fence *fence = NULL;
166	struct ttm_validate_buffer tv;
167	struct ww_acquire_ctx ticket;
168	struct amdgpu_bo_va *bo_va;
169	long r;
170
171	INIT_LIST_HEAD(&list);
172	INIT_LIST_HEAD(&duplicates);
173
174	tv.bo = &bo->tbo;
175	tv.num_shared = 2;
176	list_add(&tv.head, &list);
177
178	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
179
180	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
181	if (r) {
182		dev_err(adev->dev, "leaking bo va because "
183			"we fail to reserve bo (%ld)\n", r);
184		return;
185	}
186	bo_va = amdgpu_vm_bo_find(vm, bo);
187	if (!bo_va || --bo_va->ref_count)
188		goto out_unlock;
189
190	amdgpu_vm_bo_rmv(adev, bo_va);
191	if (!amdgpu_vm_ready(vm))
192		goto out_unlock;
193
194	fence = dma_resv_get_excl(bo->tbo.base.resv);
195	if (fence) {
196		amdgpu_bo_fence(bo, fence, true);
197		fence = NULL;
198	}
199
200	r = amdgpu_vm_clear_freed(adev, vm, &fence);
201	if (r || !fence)
202		goto out_unlock;
203
204	amdgpu_bo_fence(bo, fence, true);
205	dma_fence_put(fence);
206
207out_unlock:
208	if (unlikely(r < 0))
209		dev_err(adev->dev, "failed to clear page "
210			"tables on GEM object close (%ld)\n", r);
211	ttm_eu_backoff_reservation(&ticket, &list);
212}
213
214/*
215 * GEM ioctls.
216 */
217int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
218			    struct drm_file *filp)
219{
220	struct amdgpu_device *adev = dev->dev_private;
221	struct amdgpu_fpriv *fpriv = filp->driver_priv;
222	struct amdgpu_vm *vm = &fpriv->vm;
223	union drm_amdgpu_gem_create *args = data;
224	uint64_t flags = args->in.domain_flags;
225	uint64_t size = args->in.bo_size;
226	struct dma_resv *resv = NULL;
227	struct drm_gem_object *gobj;
228	uint32_t handle;
229	int r;
230
231	/* reject invalid gem flags */
232	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
233		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
234		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
235		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
236		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
237		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
238		      AMDGPU_GEM_CREATE_ENCRYPTED))
239
240		return -EINVAL;
241
242	/* reject invalid gem domains */
243	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
244		return -EINVAL;
245
246	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
247		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
248		return -EINVAL;
249	}
250
251	/* create a gem object to contain this object in */
252	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
253	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
254		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
255			/* if gds bo is created from user space, it must be
256			 * passed to bo list
257			 */
258			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
259			return -EINVAL;
260		}
261		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
262	}
263
264	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
265		r = amdgpu_bo_reserve(vm->root.base.bo, false);
266		if (r)
267			return r;
268
269		resv = vm->root.base.bo->tbo.base.resv;
270	}
271
272	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
273				     (u32)(0xffffffff & args->in.domains),
274				     flags, ttm_bo_type_device, resv, &gobj);
275	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
276		if (!r) {
277			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
278
279			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
280		}
281		amdgpu_bo_unreserve(vm->root.base.bo);
282	}
283	if (r)
284		return r;
285
286	r = drm_gem_handle_create(filp, gobj, &handle);
287	/* drop reference from allocate - handle holds it now */
288	drm_gem_object_put(gobj);
289	if (r)
290		return r;
291
292	memset(args, 0, sizeof(*args));
293	args->out.handle = handle;
294	return 0;
295}
296
297int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
298			     struct drm_file *filp)
299{
300	struct ttm_operation_ctx ctx = { true, false };
301	struct amdgpu_device *adev = dev->dev_private;
302	struct drm_amdgpu_gem_userptr *args = data;
303	struct drm_gem_object *gobj;
304	struct amdgpu_bo *bo;
305	uint32_t handle;
306	int r;
307
308	args->addr = untagged_addr(args->addr);
309
310	if (offset_in_page(args->addr | args->size))
311		return -EINVAL;
312
313	/* reject unknown flag values */
314	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
315	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
316	    AMDGPU_GEM_USERPTR_REGISTER))
317		return -EINVAL;
318
319	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
320	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
321
322		/* if we want to write to it we must install a MMU notifier */
323		return -EACCES;
324	}
325
326	/* create a gem object to contain this object in */
327	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
328				     0, ttm_bo_type_device, NULL, &gobj);
329	if (r)
330		return r;
331
332	bo = gem_to_amdgpu_bo(gobj);
333	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
334	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
335	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
336	if (r)
337		goto release_object;
338
339	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
340		r = amdgpu_mn_register(bo, args->addr);
341		if (r)
342			goto release_object;
343	}
344
345	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
346		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
347		if (r)
348			goto release_object;
349
350		r = amdgpu_bo_reserve(bo, true);
351		if (r)
352			goto user_pages_done;
353
354		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
355		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
356		amdgpu_bo_unreserve(bo);
357		if (r)
358			goto user_pages_done;
359	}
360
361	r = drm_gem_handle_create(filp, gobj, &handle);
362	if (r)
363		goto user_pages_done;
364
365	args->handle = handle;
366
367user_pages_done:
368	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
369		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
370
371release_object:
372	drm_gem_object_put(gobj);
373
374	return r;
375}
376
377int amdgpu_mode_dumb_mmap(struct drm_file *filp,
378			  struct drm_device *dev,
379			  uint32_t handle, uint64_t *offset_p)
380{
381	struct drm_gem_object *gobj;
382	struct amdgpu_bo *robj;
383
384	gobj = drm_gem_object_lookup(filp, handle);
385	if (gobj == NULL) {
386		return -ENOENT;
387	}
388	robj = gem_to_amdgpu_bo(gobj);
389	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
390	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
391		drm_gem_object_put(gobj);
392		return -EPERM;
393	}
394	*offset_p = amdgpu_bo_mmap_offset(robj);
395	drm_gem_object_put(gobj);
396	return 0;
397}
398
399int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
400			  struct drm_file *filp)
401{
402	union drm_amdgpu_gem_mmap *args = data;
403	uint32_t handle = args->in.handle;
404	memset(args, 0, sizeof(*args));
405	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
406}
407
408/**
409 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
410 *
411 * @timeout_ns: timeout in ns
412 *
413 * Calculate the timeout in jiffies from an absolute timeout in ns.
414 */
415unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
416{
417	unsigned long timeout_jiffies;
418	ktime_t timeout;
419
420	/* clamp timeout if it's too large */
421	if (((int64_t)timeout_ns) < 0)
422		return MAX_SCHEDULE_TIMEOUT;
423
424	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
425	if (ktime_to_ns(timeout) < 0)
426		return 0;
427
428	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
429	/* clamp timeout to avoid unsigned -> signed overflow */
430	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
431		return MAX_SCHEDULE_TIMEOUT - 1;
432
433	return timeout_jiffies;
434}
435
436int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
437			      struct drm_file *filp)
438{
439	union drm_amdgpu_gem_wait_idle *args = data;
440	struct drm_gem_object *gobj;
441	struct amdgpu_bo *robj;
442	uint32_t handle = args->in.handle;
443	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
444	int r = 0;
445	long ret;
446
447	gobj = drm_gem_object_lookup(filp, handle);
448	if (gobj == NULL) {
449		return -ENOENT;
450	}
451	robj = gem_to_amdgpu_bo(gobj);
452	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
453						  timeout);
454
455	/* ret == 0 means not signaled,
456	 * ret > 0 means signaled
457	 * ret < 0 means interrupted before timeout
458	 */
459	if (ret >= 0) {
460		memset(args, 0, sizeof(*args));
461		args->out.status = (ret == 0);
462	} else
463		r = ret;
464
465	drm_gem_object_put(gobj);
466	return r;
467}
468
469int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
470				struct drm_file *filp)
471{
472	struct drm_amdgpu_gem_metadata *args = data;
473	struct drm_gem_object *gobj;
474	struct amdgpu_bo *robj;
475	int r = -1;
476
477	DRM_DEBUG("%d \n", args->handle);
478	gobj = drm_gem_object_lookup(filp, args->handle);
479	if (gobj == NULL)
480		return -ENOENT;
481	robj = gem_to_amdgpu_bo(gobj);
482
483	r = amdgpu_bo_reserve(robj, false);
484	if (unlikely(r != 0))
485		goto out;
486
487	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
488		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
489		r = amdgpu_bo_get_metadata(robj, args->data.data,
490					   sizeof(args->data.data),
491					   &args->data.data_size_bytes,
492					   &args->data.flags);
493	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
494		if (args->data.data_size_bytes > sizeof(args->data.data)) {
495			r = -EINVAL;
496			goto unreserve;
497		}
498		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
499		if (!r)
500			r = amdgpu_bo_set_metadata(robj, args->data.data,
501						   args->data.data_size_bytes,
502						   args->data.flags);
503	}
504
505unreserve:
506	amdgpu_bo_unreserve(robj);
507out:
508	drm_gem_object_put(gobj);
509	return r;
510}
511
512/**
513 * amdgpu_gem_va_update_vm - update the bo_va in its VM
514 *
515 * @adev: amdgpu_device pointer
516 * @vm: vm to update
517 * @bo_va: bo_va to update
518 * @operation: map, unmap or clear
519 *
520 * Update the bo_va directly after setting its address. Errors are not
521 * vital here, so they are not reported back to userspace.
522 */
523static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
524				    struct amdgpu_vm *vm,
525				    struct amdgpu_bo_va *bo_va,
526				    uint32_t operation)
527{
528	int r;
529
530	if (!amdgpu_vm_ready(vm))
531		return;
532
533	r = amdgpu_vm_clear_freed(adev, vm, NULL);
534	if (r)
535		goto error;
536
537	if (operation == AMDGPU_VA_OP_MAP ||
538	    operation == AMDGPU_VA_OP_REPLACE) {
539		r = amdgpu_vm_bo_update(adev, bo_va, false);
540		if (r)
541			goto error;
542	}
543
544	r = amdgpu_vm_update_pdes(adev, vm, false);
545
546error:
547	if (r && r != -ERESTARTSYS)
548		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
549}
550
551/**
552 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
553 *
554 * @adev: amdgpu_device pointer
555 * @flags: GEM UAPI flags
556 *
557 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
558 */
559uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
560{
561	uint64_t pte_flag = 0;
562
563	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
564		pte_flag |= AMDGPU_PTE_EXECUTABLE;
565	if (flags & AMDGPU_VM_PAGE_READABLE)
566		pte_flag |= AMDGPU_PTE_READABLE;
567	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
568		pte_flag |= AMDGPU_PTE_WRITEABLE;
569	if (flags & AMDGPU_VM_PAGE_PRT)
570		pte_flag |= AMDGPU_PTE_PRT;
571
572	if (adev->gmc.gmc_funcs->map_mtype)
573		pte_flag |= amdgpu_gmc_map_mtype(adev,
574						 flags & AMDGPU_VM_MTYPE_MASK);
575
576	return pte_flag;
577}
578
579int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
580			  struct drm_file *filp)
581{
582	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
583		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
584		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
585	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
586		AMDGPU_VM_PAGE_PRT;
587
588	struct drm_amdgpu_gem_va *args = data;
589	struct drm_gem_object *gobj;
590	struct amdgpu_device *adev = dev->dev_private;
591	struct amdgpu_fpriv *fpriv = filp->driver_priv;
592	struct amdgpu_bo *abo;
593	struct amdgpu_bo_va *bo_va;
594	struct amdgpu_bo_list_entry vm_pd;
595	struct ttm_validate_buffer tv;
596	struct ww_acquire_ctx ticket;
597	struct list_head list, duplicates;
598	uint64_t va_flags;
599	int r = 0;
600
601	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
602		dev_dbg(&dev->pdev->dev,
603			"va_address 0x%LX is in reserved area 0x%LX\n",
604			args->va_address, AMDGPU_VA_RESERVED_SIZE);
605		return -EINVAL;
606	}
607
608	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
609	    args->va_address < AMDGPU_GMC_HOLE_END) {
610		dev_dbg(&dev->pdev->dev,
611			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
612			args->va_address, AMDGPU_GMC_HOLE_START,
613			AMDGPU_GMC_HOLE_END);
614		return -EINVAL;
615	}
616
617	args->va_address &= AMDGPU_GMC_HOLE_MASK;
618
619	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
620		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
621			args->flags);
622		return -EINVAL;
623	}
624
625	switch (args->operation) {
626	case AMDGPU_VA_OP_MAP:
627	case AMDGPU_VA_OP_UNMAP:
628	case AMDGPU_VA_OP_CLEAR:
629	case AMDGPU_VA_OP_REPLACE:
630		break;
631	default:
632		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
633			args->operation);
634		return -EINVAL;
635	}
636
637	INIT_LIST_HEAD(&list);
638	INIT_LIST_HEAD(&duplicates);
639	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
640	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
641		gobj = drm_gem_object_lookup(filp, args->handle);
642		if (gobj == NULL)
643			return -ENOENT;
644		abo = gem_to_amdgpu_bo(gobj);
645		tv.bo = &abo->tbo;
646		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
647			tv.num_shared = 1;
648		else
649			tv.num_shared = 0;
650		list_add(&tv.head, &list);
651	} else {
652		gobj = NULL;
653		abo = NULL;
654	}
655
656	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
657
658	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
659	if (r)
660		goto error_unref;
661
662	if (abo) {
663		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
664		if (!bo_va) {
665			r = -ENOENT;
666			goto error_backoff;
667		}
668	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
669		bo_va = fpriv->prt_va;
670	} else {
671		bo_va = NULL;
672	}
673
674	switch (args->operation) {
675	case AMDGPU_VA_OP_MAP:
676		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
677		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
678				     args->offset_in_bo, args->map_size,
679				     va_flags);
680		break;
681	case AMDGPU_VA_OP_UNMAP:
682		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
683		break;
684
685	case AMDGPU_VA_OP_CLEAR:
686		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
687						args->va_address,
688						args->map_size);
689		break;
690	case AMDGPU_VA_OP_REPLACE:
691		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
692		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
693					     args->offset_in_bo, args->map_size,
694					     va_flags);
695		break;
696	default:
697		break;
698	}
699	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
700		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
701					args->operation);
702
703error_backoff:
704	ttm_eu_backoff_reservation(&ticket, &list);
705
706error_unref:
707	drm_gem_object_put(gobj);
708	return r;
709}
710
711int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
712			struct drm_file *filp)
713{
714	struct amdgpu_device *adev = dev->dev_private;
715	struct drm_amdgpu_gem_op *args = data;
716	struct drm_gem_object *gobj;
717	struct amdgpu_vm_bo_base *base;
718	struct amdgpu_bo *robj;
719	int r;
720
721	gobj = drm_gem_object_lookup(filp, args->handle);
722	if (gobj == NULL) {
723		return -ENOENT;
724	}
725	robj = gem_to_amdgpu_bo(gobj);
726
727	r = amdgpu_bo_reserve(robj, false);
728	if (unlikely(r))
729		goto out;
730
731	switch (args->op) {
732	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
733		struct drm_amdgpu_gem_create_in info;
734		void __user *out = u64_to_user_ptr(args->value);
735
736		info.bo_size = robj->tbo.base.size;
737		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
738		info.domains = robj->preferred_domains;
739		info.domain_flags = robj->flags;
740		amdgpu_bo_unreserve(robj);
741		if (copy_to_user(out, &info, sizeof(info)))
742			r = -EFAULT;
743		break;
744	}
745	case AMDGPU_GEM_OP_SET_PLACEMENT:
746		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
747			r = -EINVAL;
748			amdgpu_bo_unreserve(robj);
749			break;
750		}
751		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
752			r = -EPERM;
753			amdgpu_bo_unreserve(robj);
754			break;
755		}
756		for (base = robj->vm_bo; base; base = base->next)
757			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
758				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
759				r = -EINVAL;
760				amdgpu_bo_unreserve(robj);
761				goto out;
762			}
763
764
765		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
766							AMDGPU_GEM_DOMAIN_GTT |
767							AMDGPU_GEM_DOMAIN_CPU);
768		robj->allowed_domains = robj->preferred_domains;
769		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
770			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
771
772		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
773			amdgpu_vm_bo_invalidate(adev, robj, true);
774
775		amdgpu_bo_unreserve(robj);
776		break;
777	default:
778		amdgpu_bo_unreserve(robj);
779		r = -EINVAL;
780	}
781
782out:
783	drm_gem_object_put(gobj);
784	return r;
785}
786
787int amdgpu_mode_dumb_create(struct drm_file *file_priv,
788			    struct drm_device *dev,
789			    struct drm_mode_create_dumb *args)
790{
791	struct amdgpu_device *adev = dev->dev_private;
792	struct drm_gem_object *gobj;
793	uint32_t handle;
794	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
795		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
796	u32 domain;
797	int r;
798
799	/*
800	 * The buffer returned from this function should be cleared, but
801	 * it can only be done if the ring is enabled or we'll fail to
802	 * create the buffer.
803	 */
804	if (adev->mman.buffer_funcs_enabled)
805		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
806
807	args->pitch = amdgpu_align_pitch(adev, args->width,
808					 DIV_ROUND_UP(args->bpp, 8), 0);
809	args->size = (u64)args->pitch * args->height;
810	args->size = ALIGN(args->size, PAGE_SIZE);
811	domain = amdgpu_bo_get_preferred_pin_domain(adev,
812				amdgpu_display_supported_domains(adev, flags));
813	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
814				     ttm_bo_type_device, NULL, &gobj);
815	if (r)
816		return -ENOMEM;
817
818	r = drm_gem_handle_create(file_priv, gobj, &handle);
819	/* drop reference from allocate - handle holds it now */
820	drm_gem_object_put(gobj);
821	if (r) {
822		return r;
823	}
824	args->handle = handle;
825	return 0;
826}
827
828#if defined(CONFIG_DEBUG_FS)
829
830#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
831	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
832		seq_printf((m), " " #flag);		\
833	}
834
835static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
836{
837	struct drm_gem_object *gobj = ptr;
838	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
839	struct seq_file *m = data;
840
841	struct dma_buf_attachment *attachment;
842	struct dma_buf *dma_buf;
843	unsigned domain;
844	const char *placement;
845	unsigned pin_count;
846
847	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
848	switch (domain) {
849	case AMDGPU_GEM_DOMAIN_VRAM:
850		placement = "VRAM";
851		break;
852	case AMDGPU_GEM_DOMAIN_GTT:
853		placement = " GTT";
854		break;
855	case AMDGPU_GEM_DOMAIN_CPU:
856	default:
857		placement = " CPU";
858		break;
859	}
860	seq_printf(m, "\t0x%08x: %12ld byte %s",
861		   id, amdgpu_bo_size(bo), placement);
862
863	pin_count = READ_ONCE(bo->pin_count);
864	if (pin_count)
865		seq_printf(m, " pin count %d", pin_count);
866
867	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
868	attachment = READ_ONCE(bo->tbo.base.import_attach);
869
870	if (attachment)
871		seq_printf(m, " imported from %p%s", dma_buf,
872			   attachment->peer2peer ? " P2P" : "");
873	else if (dma_buf)
874		seq_printf(m, " exported as %p", dma_buf);
875
876	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
877	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
878	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
879	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
880	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
881	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
882	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
883	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
884
885	seq_printf(m, "\n");
886
887	return 0;
888}
889
890static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
891{
892	struct drm_info_node *node = (struct drm_info_node *)m->private;
893	struct drm_device *dev = node->minor->dev;
894	struct drm_file *file;
895	int r;
896
897	r = mutex_lock_interruptible(&dev->filelist_mutex);
898	if (r)
899		return r;
900
901	list_for_each_entry(file, &dev->filelist, lhead) {
902		struct task_struct *task;
903
904		/*
905		 * Although we have a valid reference on file->pid, that does
906		 * not guarantee that the task_struct who called get_pid() is
907		 * still alive (e.g. get_pid(current) => fork() => exit()).
908		 * Therefore, we need to protect this ->comm access using RCU.
909		 */
910		rcu_read_lock();
911		task = pid_task(file->pid, PIDTYPE_PID);
912		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
913			   task ? task->comm : "<unknown>");
914		rcu_read_unlock();
915
916		spin_lock(&file->table_lock);
917		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
918		spin_unlock(&file->table_lock);
919	}
920
921	mutex_unlock(&dev->filelist_mutex);
922	return 0;
923}
924
925static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
926	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
927};
928#endif
929
930int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
931{
932#if defined(CONFIG_DEBUG_FS)
933	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
934					ARRAY_SIZE(amdgpu_debugfs_gem_list));
935#endif
936	return 0;
937}
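
A note on how the handlers above are exercised: user space reaches these GEM ioctls through the DRM render node. The sketch below (a minimal illustration, not part of the kernel file; the device path, buffer size and header include paths are assumptions, and error cleanup is trimmed) creates a buffer with DRM_IOCTL_AMDGPU_GEM_CREATE, asks for its fake mmap offset with DRM_IOCTL_AMDGPU_GEM_MMAP, and then maps it with mmap() on the DRM file descriptor. The same flow applies to the v5.14.15 listing that follows.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <xf86drm.h>        /* drmIoctl(), from libdrm */
#include <amdgpu_drm.h>     /* amdgpu UAPI header; the install path varies between distros */

/* Allocate a 1 MiB CPU-visible VRAM buffer and map it into the process. */
static int example_alloc_and_map(void **cpu_ptr)
{
	union drm_amdgpu_gem_create create;
	union drm_amdgpu_gem_mmap map;
	int fd;

	fd = open("/dev/dri/renderD128", O_RDWR);	/* illustrative render node */
	if (fd < 0)
		return -1;

	memset(&create, 0, sizeof(create));
	create.in.bo_size = 1 << 20;
	create.in.alignment = 4096;
	create.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	create.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &create))
		return -1;	/* served by amdgpu_gem_create_ioctl() */

	memset(&map, 0, sizeof(map));
	map.in.handle = create.out.handle;
	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &map))
		return -1;	/* served by amdgpu_gem_mmap_ioctl(), returns the fake offset */

	*cpu_ptr = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED,
			fd, map.out.addr_ptr);
	return *cpu_ptr == MAP_FAILED ? -1 : 0;
}

Note that when the create ioctl cannot satisfy the request, the handler retries: first without AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, then with GTT added as a fallback domain, as the retry label in both listings shows.
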
v5.14.15: drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/module.h>
 30#include <linux/pagemap.h>
 31#include <linux/pci.h>
 32#include <linux/dma-buf.h>
 33
 34#include <drm/amdgpu_drm.h>
 35#include <drm/drm_drv.h>
 36#include <drm/drm_gem_ttm_helper.h>
 37
 38#include "amdgpu.h"
 39#include "amdgpu_display.h"
 40#include "amdgpu_dma_buf.h"
 41#include "amdgpu_xgmi.h"
 42
 43static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
 44
 45static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
 46{
 47	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
 48	struct drm_device *ddev = bo->base.dev;
 49	vm_fault_t ret;
 50	int idx;
 51
 52	ret = ttm_bo_vm_reserve(bo, vmf);
 53	if (ret)
 54		return ret;
 55
 56	if (drm_dev_enter(ddev, &idx)) {
 57		ret = amdgpu_bo_fault_reserve_notify(bo);
 58		if (ret) {
 59			drm_dev_exit(idx);
 60			goto unlock;
 61		}
 62
 63		 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
 64						TTM_BO_VM_NUM_PREFAULT, 1);
 65
 66		 drm_dev_exit(idx);
 67	} else {
 68		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
 69	}
 70	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
 71		return ret;
 72
 73unlock:
 74	dma_resv_unlock(bo->base.resv);
 75	return ret;
 76}
 77
 78static const struct vm_operations_struct amdgpu_gem_vm_ops = {
 79	.fault = amdgpu_gem_fault,
 80	.open = ttm_bo_vm_open,
 81	.close = ttm_bo_vm_close,
 82	.access = ttm_bo_vm_access
 83};
 84
 85static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 86{
 87	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 88
 89	if (robj) {
 90		amdgpu_mn_unregister(robj);
 91		amdgpu_bo_unref(&robj);
 92	}
 93}
 94
 95int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 96			     int alignment, u32 initial_domain,
 97			     u64 flags, enum ttm_bo_type type,
 98			     struct dma_resv *resv,
 99			     struct drm_gem_object **obj)
100{
101	struct amdgpu_bo *bo;
102	struct amdgpu_bo_user *ubo;
103	struct amdgpu_bo_param bp;
104	int r;
105
106	memset(&bp, 0, sizeof(bp));
107	*obj = NULL;
108
109	bp.size = size;
110	bp.byte_align = alignment;
111	bp.type = type;
112	bp.resv = resv;
113	bp.preferred_domain = initial_domain;
114	bp.flags = flags;
115	bp.domain = initial_domain;
116	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
117
118	r = amdgpu_bo_create_user(adev, &bp, &ubo);
119	if (r)
120		return r;
121
122	bo = &ubo->bo;
123	*obj = &bo->tbo.base;
124	(*obj)->funcs = &amdgpu_gem_object_funcs;
125
126	return 0;
127}
128
129void amdgpu_gem_force_release(struct amdgpu_device *adev)
130{
131	struct drm_device *ddev = adev_to_drm(adev);
132	struct drm_file *file;
133
134	mutex_lock(&ddev->filelist_mutex);
135
136	list_for_each_entry(file, &ddev->filelist, lhead) {
137		struct drm_gem_object *gobj;
138		int handle;
139
140		WARN_ONCE(1, "Still active user space clients!\n");
141		spin_lock(&file->table_lock);
142		idr_for_each_entry(&file->object_idr, gobj, handle) {
143			WARN_ONCE(1, "And also active allocations!\n");
144			drm_gem_object_put(gobj);
145		}
146		idr_destroy(&file->object_idr);
147		spin_unlock(&file->table_lock);
148	}
149
150	mutex_unlock(&ddev->filelist_mutex);
151}
152
153/*
154 * Called from drm_gem_handle_create, which appears in both the new and open
155 * ioctl cases.
156 */
157static int amdgpu_gem_object_open(struct drm_gem_object *obj,
158				  struct drm_file *file_priv)
159{
160	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
161	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
162	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
163	struct amdgpu_vm *vm = &fpriv->vm;
164	struct amdgpu_bo_va *bo_va;
165	struct mm_struct *mm;
166	int r;
167
168	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
169	if (mm && mm != current->mm)
170		return -EPERM;
171
172	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
173	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
174		return -EPERM;
175
176	r = amdgpu_bo_reserve(abo, false);
177	if (r)
178		return r;
179
180	bo_va = amdgpu_vm_bo_find(vm, abo);
181	if (!bo_va) {
182		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
183	} else {
184		++bo_va->ref_count;
185	}
186	amdgpu_bo_unreserve(abo);
187	return 0;
188}
189
190static void amdgpu_gem_object_close(struct drm_gem_object *obj,
191				    struct drm_file *file_priv)
192{
193	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
194	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
195	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
196	struct amdgpu_vm *vm = &fpriv->vm;
197
198	struct amdgpu_bo_list_entry vm_pd;
199	struct list_head list, duplicates;
200	struct dma_fence *fence = NULL;
201	struct ttm_validate_buffer tv;
202	struct ww_acquire_ctx ticket;
203	struct amdgpu_bo_va *bo_va;
204	long r;
205
206	INIT_LIST_HEAD(&list);
207	INIT_LIST_HEAD(&duplicates);
208
209	tv.bo = &bo->tbo;
210	tv.num_shared = 2;
211	list_add(&tv.head, &list);
212
213	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
214
215	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
216	if (r) {
217		dev_err(adev->dev, "leaking bo va because "
218			"we fail to reserve bo (%ld)\n", r);
219		return;
220	}
221	bo_va = amdgpu_vm_bo_find(vm, bo);
222	if (!bo_va || --bo_va->ref_count)
223		goto out_unlock;
224
225	amdgpu_vm_bo_rmv(adev, bo_va);
226	if (!amdgpu_vm_ready(vm))
227		goto out_unlock;
228
229	fence = dma_resv_excl_fence(bo->tbo.base.resv);
230	if (fence) {
231		amdgpu_bo_fence(bo, fence, true);
232		fence = NULL;
233	}
234
235	r = amdgpu_vm_clear_freed(adev, vm, &fence);
236	if (r || !fence)
237		goto out_unlock;
238
239	amdgpu_bo_fence(bo, fence, true);
240	dma_fence_put(fence);
241
242out_unlock:
243	if (unlikely(r < 0))
244		dev_err(adev->dev, "failed to clear page "
245			"tables on GEM object close (%ld)\n", r);
246	ttm_eu_backoff_reservation(&ticket, &list);
247}
248
249static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
250{
251	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
252
253	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
254		return -EPERM;
255	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
256		return -EPERM;
257
258	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
259	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
260	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
261	 * becoming writable and makes is_cow_mapping(vm_flags) false.
262	 */
263	if (is_cow_mapping(vma->vm_flags) &&
264	    !(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
265		vma->vm_flags &= ~VM_MAYWRITE;
266
267	return drm_gem_ttm_mmap(obj, vma);
268}
269
270static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
271	.free = amdgpu_gem_object_free,
272	.open = amdgpu_gem_object_open,
273	.close = amdgpu_gem_object_close,
274	.export = amdgpu_gem_prime_export,
275	.vmap = drm_gem_ttm_vmap,
276	.vunmap = drm_gem_ttm_vunmap,
277	.mmap = amdgpu_gem_object_mmap,
278	.vm_ops = &amdgpu_gem_vm_ops,
279};
280
281/*
282 * GEM ioctls.
283 */
284int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
285			    struct drm_file *filp)
286{
287	struct amdgpu_device *adev = drm_to_adev(dev);
288	struct amdgpu_fpriv *fpriv = filp->driver_priv;
289	struct amdgpu_vm *vm = &fpriv->vm;
290	union drm_amdgpu_gem_create *args = data;
291	uint64_t flags = args->in.domain_flags;
292	uint64_t size = args->in.bo_size;
293	struct dma_resv *resv = NULL;
294	struct drm_gem_object *gobj;
295	uint32_t handle, initial_domain;
296	int r;
297
298	/* reject invalid gem flags */
299	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
300		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
301		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
302		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
303		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
304		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
305		      AMDGPU_GEM_CREATE_ENCRYPTED))
306
307		return -EINVAL;
308
309	/* reject invalid gem domains */
310	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
311		return -EINVAL;
312
313	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
314		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
315		return -EINVAL;
316	}
317
318	/* create a gem object to contain this object in */
319	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
320	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
321		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
322			/* if gds bo is created from user space, it must be
323			 * passed to bo list
324			 */
325			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
326			return -EINVAL;
327		}
328		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
329	}
330
331	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
332		r = amdgpu_bo_reserve(vm->root.bo, false);
333		if (r)
334			return r;
335
336		resv = vm->root.bo->tbo.base.resv;
337	}
338
339	initial_domain = (u32)(0xffffffff & args->in.domains);
340retry:
341	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
342				     initial_domain,
343				     flags, ttm_bo_type_device, resv, &gobj);
344	if (r && r != -ERESTARTSYS) {
345		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
346			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
347			goto retry;
348		}
349
350		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
351			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
352			goto retry;
353		}
354		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
355				size, initial_domain, args->in.alignment, r);
356	}
357
358	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
359		if (!r) {
360			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
361
362			abo->parent = amdgpu_bo_ref(vm->root.bo);
363		}
364		amdgpu_bo_unreserve(vm->root.bo);
365	}
366	if (r)
367		return r;
368
369	r = drm_gem_handle_create(filp, gobj, &handle);
370	/* drop reference from allocate - handle holds it now */
371	drm_gem_object_put(gobj);
372	if (r)
373		return r;
374
375	memset(args, 0, sizeof(*args));
376	args->out.handle = handle;
377	return 0;
378}
379
380int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
381			     struct drm_file *filp)
382{
383	struct ttm_operation_ctx ctx = { true, false };
384	struct amdgpu_device *adev = drm_to_adev(dev);
385	struct drm_amdgpu_gem_userptr *args = data;
386	struct drm_gem_object *gobj;
387	struct amdgpu_bo *bo;
388	uint32_t handle;
389	int r;
390
391	args->addr = untagged_addr(args->addr);
392
393	if (offset_in_page(args->addr | args->size))
394		return -EINVAL;
395
396	/* reject unknown flag values */
397	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
398	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
399	    AMDGPU_GEM_USERPTR_REGISTER))
400		return -EINVAL;
401
402	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
403	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
404
405		/* if we want to write to it we must install a MMU notifier */
406		return -EACCES;
407	}
408
409	/* create a gem object to contain this object in */
410	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
411				     0, ttm_bo_type_device, NULL, &gobj);
412	if (r)
413		return r;
414
415	bo = gem_to_amdgpu_bo(gobj);
416	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
417	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
418	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
419	if (r)
420		goto release_object;
421
422	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
423		r = amdgpu_mn_register(bo, args->addr);
424		if (r)
425			goto release_object;
426	}
427
428	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
429		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
430		if (r)
431			goto release_object;
432
433		r = amdgpu_bo_reserve(bo, true);
434		if (r)
435			goto user_pages_done;
436
437		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
438		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
439		amdgpu_bo_unreserve(bo);
440		if (r)
441			goto user_pages_done;
442	}
443
444	r = drm_gem_handle_create(filp, gobj, &handle);
445	if (r)
446		goto user_pages_done;
447
448	args->handle = handle;
449
450user_pages_done:
451	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
452		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
453
454release_object:
455	drm_gem_object_put(gobj);
456
457	return r;
458}
459
460int amdgpu_mode_dumb_mmap(struct drm_file *filp,
461			  struct drm_device *dev,
462			  uint32_t handle, uint64_t *offset_p)
463{
464	struct drm_gem_object *gobj;
465	struct amdgpu_bo *robj;
466
467	gobj = drm_gem_object_lookup(filp, handle);
468	if (gobj == NULL) {
469		return -ENOENT;
470	}
471	robj = gem_to_amdgpu_bo(gobj);
472	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
473	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
474		drm_gem_object_put(gobj);
475		return -EPERM;
476	}
477	*offset_p = amdgpu_bo_mmap_offset(robj);
478	drm_gem_object_put(gobj);
479	return 0;
480}
481
482int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
483			  struct drm_file *filp)
484{
485	union drm_amdgpu_gem_mmap *args = data;
486	uint32_t handle = args->in.handle;
487	memset(args, 0, sizeof(*args));
488	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
489}
490
491/**
492 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
493 *
494 * @timeout_ns: timeout in ns
495 *
496 * Calculate the timeout in jiffies from an absolute timeout in ns.
497 */
498unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
499{
500	unsigned long timeout_jiffies;
501	ktime_t timeout;
502
503	/* clamp timeout if it's too large */
504	if (((int64_t)timeout_ns) < 0)
505		return MAX_SCHEDULE_TIMEOUT;
506
507	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
508	if (ktime_to_ns(timeout) < 0)
509		return 0;
510
511	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
512	/* clamp timeout to avoid unsigned -> signed overflow */
513	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
514		return MAX_SCHEDULE_TIMEOUT - 1;
515
516	return timeout_jiffies;
517}
518
519int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
520			      struct drm_file *filp)
521{
522	union drm_amdgpu_gem_wait_idle *args = data;
523	struct drm_gem_object *gobj;
524	struct amdgpu_bo *robj;
525	uint32_t handle = args->in.handle;
526	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
527	int r = 0;
528	long ret;
529
530	gobj = drm_gem_object_lookup(filp, handle);
531	if (gobj == NULL) {
532		return -ENOENT;
533	}
534	robj = gem_to_amdgpu_bo(gobj);
535	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
536
537	/* ret == 0 means not signaled,
538	 * ret > 0 means signaled
539	 * ret < 0 means interrupted before timeout
540	 */
541	if (ret >= 0) {
542		memset(args, 0, sizeof(*args));
543		args->out.status = (ret == 0);
544	} else
545		r = ret;
546
547	drm_gem_object_put(gobj);
548	return r;
549}
550
551int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
552				struct drm_file *filp)
553{
554	struct drm_amdgpu_gem_metadata *args = data;
555	struct drm_gem_object *gobj;
556	struct amdgpu_bo *robj;
557	int r = -1;
558
559	DRM_DEBUG("%d \n", args->handle);
560	gobj = drm_gem_object_lookup(filp, args->handle);
561	if (gobj == NULL)
562		return -ENOENT;
563	robj = gem_to_amdgpu_bo(gobj);
564
565	r = amdgpu_bo_reserve(robj, false);
566	if (unlikely(r != 0))
567		goto out;
568
569	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
570		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
571		r = amdgpu_bo_get_metadata(robj, args->data.data,
572					   sizeof(args->data.data),
573					   &args->data.data_size_bytes,
574					   &args->data.flags);
575	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
576		if (args->data.data_size_bytes > sizeof(args->data.data)) {
577			r = -EINVAL;
578			goto unreserve;
579		}
580		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
581		if (!r)
582			r = amdgpu_bo_set_metadata(robj, args->data.data,
583						   args->data.data_size_bytes,
584						   args->data.flags);
585	}
586
587unreserve:
588	amdgpu_bo_unreserve(robj);
589out:
590	drm_gem_object_put(gobj);
591	return r;
592}
593
594/**
595 * amdgpu_gem_va_update_vm - update the bo_va in its VM
596 *
597 * @adev: amdgpu_device pointer
598 * @vm: vm to update
599 * @bo_va: bo_va to update
600 * @operation: map, unmap or clear
601 *
602 * Update the bo_va directly after setting its address. Errors are not
603 * vital here, so they are not reported back to userspace.
604 */
605static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
606				    struct amdgpu_vm *vm,
607				    struct amdgpu_bo_va *bo_va,
608				    uint32_t operation)
609{
610	int r;
611
612	if (!amdgpu_vm_ready(vm))
613		return;
614
615	r = amdgpu_vm_clear_freed(adev, vm, NULL);
616	if (r)
617		goto error;
618
619	if (operation == AMDGPU_VA_OP_MAP ||
620	    operation == AMDGPU_VA_OP_REPLACE) {
621		r = amdgpu_vm_bo_update(adev, bo_va, false);
622		if (r)
623			goto error;
624	}
625
626	r = amdgpu_vm_update_pdes(adev, vm, false);
627
628error:
629	if (r && r != -ERESTARTSYS)
630		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
631}
632
633/**
634 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
635 *
636 * @adev: amdgpu_device pointer
637 * @flags: GEM UAPI flags
638 *
639 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
640 */
641uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
642{
643	uint64_t pte_flag = 0;
644
645	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
646		pte_flag |= AMDGPU_PTE_EXECUTABLE;
647	if (flags & AMDGPU_VM_PAGE_READABLE)
648		pte_flag |= AMDGPU_PTE_READABLE;
649	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
650		pte_flag |= AMDGPU_PTE_WRITEABLE;
651	if (flags & AMDGPU_VM_PAGE_PRT)
652		pte_flag |= AMDGPU_PTE_PRT;
653
654	if (adev->gmc.gmc_funcs->map_mtype)
655		pte_flag |= amdgpu_gmc_map_mtype(adev,
656						 flags & AMDGPU_VM_MTYPE_MASK);
657
658	return pte_flag;
659}
660
661int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
662			  struct drm_file *filp)
663{
664	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
665		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
666		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
667	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
668		AMDGPU_VM_PAGE_PRT;
669
670	struct drm_amdgpu_gem_va *args = data;
671	struct drm_gem_object *gobj;
672	struct amdgpu_device *adev = drm_to_adev(dev);
673	struct amdgpu_fpriv *fpriv = filp->driver_priv;
674	struct amdgpu_bo *abo;
675	struct amdgpu_bo_va *bo_va;
676	struct amdgpu_bo_list_entry vm_pd;
677	struct ttm_validate_buffer tv;
678	struct ww_acquire_ctx ticket;
679	struct list_head list, duplicates;
680	uint64_t va_flags;
681	uint64_t vm_size;
682	int r = 0;
683
684	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
685		dev_dbg(dev->dev,
686			"va_address 0x%LX is in reserved area 0x%LX\n",
687			args->va_address, AMDGPU_VA_RESERVED_SIZE);
688		return -EINVAL;
689	}
690
691	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
692	    args->va_address < AMDGPU_GMC_HOLE_END) {
693		dev_dbg(dev->dev,
694			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
695			args->va_address, AMDGPU_GMC_HOLE_START,
696			AMDGPU_GMC_HOLE_END);
697		return -EINVAL;
698	}
699
700	args->va_address &= AMDGPU_GMC_HOLE_MASK;
701
702	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
703	vm_size -= AMDGPU_VA_RESERVED_SIZE;
704	if (args->va_address + args->map_size > vm_size) {
705		dev_dbg(dev->dev,
706			"va_address 0x%llx is in top reserved area 0x%llx\n",
707			args->va_address + args->map_size, vm_size);
708		return -EINVAL;
709	}
710
711	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
712		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
713			args->flags);
714		return -EINVAL;
715	}
716
717	switch (args->operation) {
718	case AMDGPU_VA_OP_MAP:
719	case AMDGPU_VA_OP_UNMAP:
720	case AMDGPU_VA_OP_CLEAR:
721	case AMDGPU_VA_OP_REPLACE:
722		break;
723	default:
724		dev_dbg(dev->dev, "unsupported operation %d\n",
725			args->operation);
726		return -EINVAL;
727	}
728
729	INIT_LIST_HEAD(&list);
730	INIT_LIST_HEAD(&duplicates);
731	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
732	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
733		gobj = drm_gem_object_lookup(filp, args->handle);
734		if (gobj == NULL)
735			return -ENOENT;
736		abo = gem_to_amdgpu_bo(gobj);
737		tv.bo = &abo->tbo;
738		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
739			tv.num_shared = 1;
740		else
741			tv.num_shared = 0;
742		list_add(&tv.head, &list);
743	} else {
744		gobj = NULL;
745		abo = NULL;
746	}
747
748	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
749
750	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
751	if (r)
752		goto error_unref;
753
754	if (abo) {
755		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
756		if (!bo_va) {
757			r = -ENOENT;
758			goto error_backoff;
759		}
760	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
761		bo_va = fpriv->prt_va;
762	} else {
763		bo_va = NULL;
764	}
765
766	switch (args->operation) {
767	case AMDGPU_VA_OP_MAP:
768		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
769		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
770				     args->offset_in_bo, args->map_size,
771				     va_flags);
772		break;
773	case AMDGPU_VA_OP_UNMAP:
774		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
775		break;
776
777	case AMDGPU_VA_OP_CLEAR:
778		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
779						args->va_address,
780						args->map_size);
781		break;
782	case AMDGPU_VA_OP_REPLACE:
783		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
784		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
785					     args->offset_in_bo, args->map_size,
786					     va_flags);
787		break;
788	default:
789		break;
790	}
791	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
792		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
793					args->operation);
794
795error_backoff:
796	ttm_eu_backoff_reservation(&ticket, &list);
797
798error_unref:
799	drm_gem_object_put(gobj);
800	return r;
801}
802
803int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
804			struct drm_file *filp)
805{
806	struct amdgpu_device *adev = drm_to_adev(dev);
807	struct drm_amdgpu_gem_op *args = data;
808	struct drm_gem_object *gobj;
809	struct amdgpu_vm_bo_base *base;
810	struct amdgpu_bo *robj;
811	int r;
812
813	gobj = drm_gem_object_lookup(filp, args->handle);
814	if (gobj == NULL) {
815		return -ENOENT;
816	}
817	robj = gem_to_amdgpu_bo(gobj);
818
819	r = amdgpu_bo_reserve(robj, false);
820	if (unlikely(r))
821		goto out;
822
823	switch (args->op) {
824	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
825		struct drm_amdgpu_gem_create_in info;
826		void __user *out = u64_to_user_ptr(args->value);
827
828		info.bo_size = robj->tbo.base.size;
829		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
830		info.domains = robj->preferred_domains;
831		info.domain_flags = robj->flags;
832		amdgpu_bo_unreserve(robj);
833		if (copy_to_user(out, &info, sizeof(info)))
834			r = -EFAULT;
835		break;
836	}
837	case AMDGPU_GEM_OP_SET_PLACEMENT:
838		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
839			r = -EINVAL;
840			amdgpu_bo_unreserve(robj);
841			break;
842		}
843		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
844			r = -EPERM;
845			amdgpu_bo_unreserve(robj);
846			break;
847		}
848		for (base = robj->vm_bo; base; base = base->next)
849			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
850				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
851				r = -EINVAL;
852				amdgpu_bo_unreserve(robj);
853				goto out;
854			}
855
856
857		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
858							AMDGPU_GEM_DOMAIN_GTT |
859							AMDGPU_GEM_DOMAIN_CPU);
860		robj->allowed_domains = robj->preferred_domains;
861		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
862			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
863
864		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
865			amdgpu_vm_bo_invalidate(adev, robj, true);
866
867		amdgpu_bo_unreserve(robj);
868		break;
869	default:
870		amdgpu_bo_unreserve(robj);
871		r = -EINVAL;
872	}
873
874out:
875	drm_gem_object_put(gobj);
876	return r;
877}
878
879int amdgpu_mode_dumb_create(struct drm_file *file_priv,
880			    struct drm_device *dev,
881			    struct drm_mode_create_dumb *args)
882{
883	struct amdgpu_device *adev = drm_to_adev(dev);
884	struct drm_gem_object *gobj;
885	uint32_t handle;
886	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
887		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
888	u32 domain;
889	int r;
890
891	/*
892	 * The buffer returned from this function should be cleared, but
893	 * it can only be done if the ring is enabled or we'll fail to
894	 * create the buffer.
895	 */
896	if (adev->mman.buffer_funcs_enabled)
897		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
898
899	args->pitch = amdgpu_align_pitch(adev, args->width,
900					 DIV_ROUND_UP(args->bpp, 8), 0);
901	args->size = (u64)args->pitch * args->height;
902	args->size = ALIGN(args->size, PAGE_SIZE);
903	domain = amdgpu_bo_get_preferred_pin_domain(adev,
904				amdgpu_display_supported_domains(adev, flags));
905	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
906				     ttm_bo_type_device, NULL, &gobj);
907	if (r)
908		return -ENOMEM;
909
910	r = drm_gem_handle_create(file_priv, gobj, &handle);
911	/* drop reference from allocate - handle holds it now */
912	drm_gem_object_put(gobj);
913	if (r) {
914		return r;
915	}
916	args->handle = handle;
917	return 0;
918}
919
920#if defined(CONFIG_DEBUG_FS)
921static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
922{
923	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
924	struct drm_device *dev = adev_to_drm(adev);
925	struct drm_file *file;
926	int r;
927
928	r = mutex_lock_interruptible(&dev->filelist_mutex);
929	if (r)
930		return r;
931
932	list_for_each_entry(file, &dev->filelist, lhead) {
933		struct task_struct *task;
934		struct drm_gem_object *gobj;
935		int id;
936
937		/*
938		 * Although we have a valid reference on file->pid, that does
939		 * not guarantee that the task_struct who called get_pid() is
940		 * still alive (e.g. get_pid(current) => fork() => exit()).
941		 * Therefore, we need to protect this ->comm access using RCU.
942		 */
943		rcu_read_lock();
944		task = pid_task(file->pid, PIDTYPE_PID);
945		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
946			   task ? task->comm : "<unknown>");
947		rcu_read_unlock();
948
949		spin_lock(&file->table_lock);
950		idr_for_each_entry(&file->object_idr, gobj, id) {
951			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
952
953			amdgpu_bo_print_info(id, bo, m);
954		}
955		spin_unlock(&file->table_lock);
956	}
957
958	mutex_unlock(&dev->filelist_mutex);
959	return 0;
960}
961
962DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
963
964#endif
965
966void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
967{
968#if defined(CONFIG_DEBUG_FS)
969	struct drm_minor *minor = adev_to_drm(adev)->primary;
970	struct dentry *root = minor->debugfs_root;
971
972	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
973			    &amdgpu_debugfs_gem_info_fops);
974#endif
975}
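
amdgpu_gem_va_ioctl() above is the handler a user-space driver uses to bind a buffer object into the per-process GPU virtual address space. A hedged sketch of a map operation follows, reusing a render-node fd and GEM handle obtained as in the earlier example; the GPU virtual address and size below are illustrative assumptions, not values taken from the listing.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>     /* struct drm_amdgpu_gem_va, AMDGPU_VA_OP_* and AMDGPU_VM_PAGE_* flags */

/* Map an existing GEM buffer at a caller-chosen GPU virtual address. */
static int example_gpu_map(int fd, uint32_t handle, uint64_t size)
{
	struct drm_amdgpu_gem_va va;

	memset(&va, 0, sizeof(va));
	va.handle = handle;
	va.operation = AMDGPU_VA_OP_MAP;
	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	va.va_address = 0x100000000ull;	/* 4 GiB, illustrative; must avoid the reserved area and the GMC hole */
	va.offset_in_bo = 0;
	va.map_size = size;		/* must be page aligned */

	/* amdgpu_gem_va_ioctl() validates the range and flags, reserves the BO and
	 * the page directory, then calls amdgpu_vm_bo_map() and updates the VM. */
	return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &va);
}

In practice a user-space driver does not hard-code addresses like this; libdrm_amdgpu manages the VA space (for example through amdgpu_va_range_alloc()) and issues the same ioctl underneath.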