v4.17 (drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c)
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
  28#include <linux/ktime.h>
 29#include <linux/pagemap.h>
 30#include <drm/drmP.h>
 31#include <drm/amdgpu_drm.h>
 32#include "amdgpu.h"
 33
 34void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 35{
 36	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 37
 38	if (robj) {
 39		amdgpu_mn_unregister(robj);
 40		amdgpu_bo_unref(&robj);
 41	}
 42}
 43
 44int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 45			     int alignment, u32 initial_domain,
 46			     u64 flags, enum ttm_bo_type type,
 47			     struct reservation_object *resv,
 48			     struct drm_gem_object **obj)
 49{
 50	struct amdgpu_bo *bo;
 51	int r;
 52
 53	*obj = NULL;
 54	/* At least align on page size */
 55	if (alignment < PAGE_SIZE) {
 56		alignment = PAGE_SIZE;
 57	}
 58
 59retry:
 60	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
 61			     flags, type, resv, &bo);
 62	if (r) {
 63		if (r != -ERESTARTSYS) {
 64			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 65				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 66				goto retry;
 67			}
 68
 69			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 70				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 71				goto retry;
 72			}
 73			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 74				  size, initial_domain, alignment, r);
 75		}
 76		return r;
 77	}
 78	*obj = &bo->gem_base;
 79
 80	return 0;
 81}
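
For illustration, a minimal sketch of a driver-internal call to this helper (the caller and its adev variable are hypothetical). Note the retry logic above transparently drops AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED and then widens a pure-VRAM request to VRAM|GTT under memory pressure:

	struct drm_gem_object *gobj = NULL;
	int r;

	/* 64 KiB GTT buffer; alignment 0 is raised to PAGE_SIZE by the helper */
	r = amdgpu_gem_object_create(adev, 64 * 1024, 0, AMDGPU_GEM_DOMAIN_GTT,
				     0, ttm_bo_type_device, NULL, &gobj);
	if (r)
		return r;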
 82
 83void amdgpu_gem_force_release(struct amdgpu_device *adev)
 84{
 85	struct drm_device *ddev = adev->ddev;
 86	struct drm_file *file;
 87
 88	mutex_lock(&ddev->filelist_mutex);
 89
 90	list_for_each_entry(file, &ddev->filelist, lhead) {
 91		struct drm_gem_object *gobj;
 92		int handle;
 93
 94		WARN_ONCE(1, "Still active user space clients!\n");
 95		spin_lock(&file->table_lock);
 96		idr_for_each_entry(&file->object_idr, gobj, handle) {
 97			WARN_ONCE(1, "And also active allocations!\n");
 98			drm_gem_object_put_unlocked(gobj);
 99		}
100		idr_destroy(&file->object_idr);
101		spin_unlock(&file->table_lock);
102	}
103
104	mutex_unlock(&ddev->filelist_mutex);
105}
106
107/*
108 * Called from drm_gem_handle_create, which appears in both the new and open
109 * ioctl cases.
110 */
111int amdgpu_gem_object_open(struct drm_gem_object *obj,
112			   struct drm_file *file_priv)
113{
114	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
115	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
116	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
117	struct amdgpu_vm *vm = &fpriv->vm;
118	struct amdgpu_bo_va *bo_va;
119	struct mm_struct *mm;
120	int r;
121
122	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
123	if (mm && mm != current->mm)
124		return -EPERM;
125
126	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
127	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
128		return -EPERM;
129
130	r = amdgpu_bo_reserve(abo, false);
131	if (r)
132		return r;
133
134	bo_va = amdgpu_vm_bo_find(vm, abo);
135	if (!bo_va) {
136		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
137	} else {
138		++bo_va->ref_count;
139	}
140	amdgpu_bo_unreserve(abo);
141	return 0;
142}
143
144void amdgpu_gem_object_close(struct drm_gem_object *obj,
145			     struct drm_file *file_priv)
146{
147	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
148	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
149	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
150	struct amdgpu_vm *vm = &fpriv->vm;
151
152	struct amdgpu_bo_list_entry vm_pd;
153	struct list_head list, duplicates;
154	struct ttm_validate_buffer tv;
155	struct ww_acquire_ctx ticket;
156	struct amdgpu_bo_va *bo_va;
157	int r;
158
159	INIT_LIST_HEAD(&list);
160	INIT_LIST_HEAD(&duplicates);
161
162	tv.bo = &bo->tbo;
163	tv.shared = true;
164	list_add(&tv.head, &list);
165
166	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
167
168	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
169	if (r) {
170		dev_err(adev->dev, "leaking bo va because "
171			"we failed to reserve the bo (%d)\n", r);
172		return;
173	}
174	bo_va = amdgpu_vm_bo_find(vm, bo);
175	if (bo_va && --bo_va->ref_count == 0) {
176		amdgpu_vm_bo_rmv(adev, bo_va);
177
178		if (amdgpu_vm_ready(vm)) {
179			struct dma_fence *fence = NULL;
180
181			r = amdgpu_vm_clear_freed(adev, vm, &fence);
182			if (unlikely(r)) {
183				dev_err(adev->dev, "failed to clear page "
184					"tables on GEM object close (%d)\n", r);
185			}
186
187			if (fence) {
188				amdgpu_bo_fence(bo, fence, true);
189				dma_fence_put(fence);
190			}
191		}
192	}
193	ttm_eu_backoff_reservation(&ticket, &list);
194}
195
196/*
197 * GEM ioctls.
198 */
199int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
200			    struct drm_file *filp)
201{
202	struct amdgpu_device *adev = dev->dev_private;
203	struct amdgpu_fpriv *fpriv = filp->driver_priv;
204	struct amdgpu_vm *vm = &fpriv->vm;
205	union drm_amdgpu_gem_create *args = data;
206	uint64_t flags = args->in.domain_flags;
207	uint64_t size = args->in.bo_size;
208	struct reservation_object *resv = NULL;
209	struct drm_gem_object *gobj;
210	uint32_t handle;
211	int r;
212
213	/* reject invalid gem flags */
214	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
215		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
216		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
217		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
218		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
219		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
220
221		return -EINVAL;
222
223	/* reject invalid gem domains */
224	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
225				 AMDGPU_GEM_DOMAIN_GTT |
226				 AMDGPU_GEM_DOMAIN_VRAM |
227				 AMDGPU_GEM_DOMAIN_GDS |
228				 AMDGPU_GEM_DOMAIN_GWS |
229				 AMDGPU_GEM_DOMAIN_OA))
230		return -EINVAL;
231
232	/* create a GEM object to hold this allocation */
233	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
234	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
235		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
236		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
237			size = size << AMDGPU_GDS_SHIFT;
238		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
239			size = size << AMDGPU_GWS_SHIFT;
240		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
241			size = size << AMDGPU_OA_SHIFT;
242		else
243			return -EINVAL;
244	}
245	size = roundup(size, PAGE_SIZE);
246
247	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
248		r = amdgpu_bo_reserve(vm->root.base.bo, false);
249		if (r)
250			return r;
251
252		resv = vm->root.base.bo->tbo.resv;
253	}
254
255	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
256				     (u32)(0xffffffff & args->in.domains),
257				     flags, false, resv, &gobj);
258	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
259		if (!r) {
260			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
261
262			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
263		}
264		amdgpu_bo_unreserve(vm->root.base.bo);
265	}
266	if (r)
267		return r;
268
269	r = drm_gem_handle_create(filp, gobj, &handle);
270	/* drop reference from allocate - handle holds it now */
271	drm_gem_object_put_unlocked(gobj);
272	if (r)
273		return r;
274
275	memset(args, 0, sizeof(*args));
276	args->out.handle = handle;
277	return 0;
278}
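
For illustration, a userspace sketch (not part of the driver) of exercising this ioctl through libdrm; example_gem_create and fd are hypothetical names, while the ioctl number and structure come from amdgpu_drm.h:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/amdgpu_drm.h>

/* hypothetical helper: create a 1 MiB GTT buffer and return its GEM handle */
static int example_gem_create(int fd, uint32_t *handle)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = 1 << 20;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
	args.in.domain_flags = 0;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;	/* the kernel zeroed args and set out.handle */
	return 0;
}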
279
280int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
281			     struct drm_file *filp)
282{
283	struct ttm_operation_ctx ctx = { true, false };
284	struct amdgpu_device *adev = dev->dev_private;
285	struct drm_amdgpu_gem_userptr *args = data;
286	struct drm_gem_object *gobj;
287	struct amdgpu_bo *bo;
288	uint32_t handle;
289	int r;
290
291	if (offset_in_page(args->addr | args->size))
292		return -EINVAL;
293
294	/* reject unknown flag values */
295	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
296	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
297	    AMDGPU_GEM_USERPTR_REGISTER))
298		return -EINVAL;
299
300	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
301	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
302
303		/* if we want to write to it we must install an MMU notifier */
304		return -EACCES;
305	}
306
307	/* create a GEM object to hold this allocation */
308	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
309				     0, 0, NULL, &gobj);
310	if (r)
311		return r;
312
313	bo = gem_to_amdgpu_bo(gobj);
314	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
315	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
316	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
317	if (r)
318		goto release_object;
319
320	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
321		r = amdgpu_mn_register(bo, args->addr);
322		if (r)
323			goto release_object;
324	}
325
326	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
327		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
328						 bo->tbo.ttm->pages);
329		if (r)
330			goto release_object;
331
332		r = amdgpu_bo_reserve(bo, true);
333		if (r)
334			goto free_pages;
335
336		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
337		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
338		amdgpu_bo_unreserve(bo);
339		if (r)
340			goto free_pages;
341	}
342
343	r = drm_gem_handle_create(filp, gobj, &handle);
344	/* drop reference from allocate - handle holds it now */
345	drm_gem_object_put_unlocked(gobj);
346	if (r)
347		return r;
348
349	args->handle = handle;
350	return 0;
351
352free_pages:
353	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
354
355release_object:
356	drm_gem_object_put_unlocked(gobj);
357
358	return r;
359}
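
A corresponding userspace sketch, assuming ptr is a page-aligned anonymous allocation of size bytes and fd an open render node (all hypothetical). As enforced above, a writable mapping must also pass AMDGPU_GEM_USERPTR_REGISTER so an MMU notifier gets installed:

	struct drm_amdgpu_gem_userptr args;

	memset(&args, 0, sizeof(args));
	args.addr = (uint64_t)(uintptr_t)ptr;	/* page aligned */
	args.size = size;			/* multiple of the page size */
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY |
		     AMDGPU_GEM_USERPTR_REGISTER |
		     AMDGPU_GEM_USERPTR_VALIDATE;

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args))
		return -1;
	/* args.handle now names the userptr BO */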
360
361int amdgpu_mode_dumb_mmap(struct drm_file *filp,
362			  struct drm_device *dev,
363			  uint32_t handle, uint64_t *offset_p)
364{
365	struct drm_gem_object *gobj;
366	struct amdgpu_bo *robj;
367
368	gobj = drm_gem_object_lookup(filp, handle);
369	if (gobj == NULL) {
370		return -ENOENT;
371	}
372	robj = gem_to_amdgpu_bo(gobj);
373	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
374	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
375		drm_gem_object_put_unlocked(gobj);
376		return -EPERM;
377	}
378	*offset_p = amdgpu_bo_mmap_offset(robj);
379	drm_gem_object_put_unlocked(gobj);
380	return 0;
381}
382
383int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
384			  struct drm_file *filp)
385{
386	union drm_amdgpu_gem_mmap *args = data;
387	uint32_t handle = args->in.handle;
388	memset(args, 0, sizeof(*args));
389	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
390}
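
From userspace this is a two-step dance, sketched below with hypothetical fd, handle and size (needs <sys/mman.h>): fetch the fake mmap offset via the ioctl, then hand it to mmap(2) on the DRM file descriptor:

	union drm_amdgpu_gem_mmap args;
	void *cpu;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args))
		return -1;

	cpu = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, args.out.addr_ptr);
	if (cpu == MAP_FAILED)
		return -1;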
391
392/**
393 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
394 *
395 * @timeout_ns: timeout in ns
396 *
397 * Calculate the timeout in jiffies from an absolute timeout in ns.
398 */
399unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
400{
401	unsigned long timeout_jiffies;
402	ktime_t timeout;
403
404	/* clamp timeout if it's too large */
405	if (((int64_t)timeout_ns) < 0)
406		return MAX_SCHEDULE_TIMEOUT;
407
408	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
409	if (ktime_to_ns(timeout) < 0)
410		return 0;
411
412	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
413	/* clamp timeout to avoid unsigned -> signed overflow */
414	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
415		return MAX_SCHEDULE_TIMEOUT - 1;
416
417	return timeout_jiffies;
418}
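
Worked example: with HZ=250, a deadline 100 ms in the future becomes nsecs_to_jiffies(100000000) = 25 jiffies; a deadline already in the past returns 0, turning the wait into a single poll; and any value with bit 63 set reads as negative, yielding an unbounded MAX_SCHEDULE_TIMEOUT wait.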
419
420int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
421			      struct drm_file *filp)
422{
423	union drm_amdgpu_gem_wait_idle *args = data;
424	struct drm_gem_object *gobj;
425	struct amdgpu_bo *robj;
426	uint32_t handle = args->in.handle;
427	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
428	int r = 0;
429	long ret;
430
431	gobj = drm_gem_object_lookup(filp, handle);
432	if (gobj == NULL) {
433		return -ENOENT;
434	}
435	robj = gem_to_amdgpu_bo(gobj);
436	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
437						  timeout);
438
439	/* ret == 0 means not signaled,
440	 * ret > 0 means signaled
441	 * ret < 0 means interrupted before timeout
442	 */
443	if (ret >= 0) {
444		memset(args, 0, sizeof(*args));
445		args->out.status = (ret == 0);
446	} else
447		r = ret;
448
449	drm_gem_object_put_unlocked(gobj);
450	return r;
451}
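
Because the timeout is absolute, a userspace caller adds its slack to the current time first; a sketch with hypothetical fd and handle, using CLOCK_MONOTONIC to match the kernel's ktime_get() time base:

	union drm_amdgpu_gem_wait_idle args;
	struct timespec ts;

	memset(&args, 0, sizeof(args));
	clock_gettime(CLOCK_MONOTONIC, &ts);
	args.in.handle = handle;
	args.in.timeout = (uint64_t)ts.tv_sec * 1000000000ull +
			  (uint64_t)ts.tv_nsec + 100000000ull;	/* now + 100 ms */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &args))
		return -1;
	/* args.out.status == 0: idle; non-zero: still busy at the deadline */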
452
453int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
454				struct drm_file *filp)
455{
456	struct drm_amdgpu_gem_metadata *args = data;
457	struct drm_gem_object *gobj;
458	struct amdgpu_bo *robj;
459	int r = -1;
460
461	DRM_DEBUG("%d\n", args->handle);
462	gobj = drm_gem_object_lookup(filp, args->handle);
463	if (gobj == NULL)
464		return -ENOENT;
465	robj = gem_to_amdgpu_bo(gobj);
466
467	r = amdgpu_bo_reserve(robj, false);
468	if (unlikely(r != 0))
469		goto out;
470
471	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
472		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
473		r = amdgpu_bo_get_metadata(robj, args->data.data,
474					   sizeof(args->data.data),
475					   &args->data.data_size_bytes,
476					   &args->data.flags);
477	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
478		if (args->data.data_size_bytes > sizeof(args->data.data)) {
479			r = -EINVAL;
480			goto unreserve;
481		}
482		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
483		if (!r)
484			r = amdgpu_bo_set_metadata(robj, args->data.data,
485						   args->data.data_size_bytes,
486						   args->data.flags);
487	}
488
489unreserve:
490	amdgpu_bo_unreserve(robj);
491out:
492	drm_gem_object_put_unlocked(gobj);
493	return r;
494}
495
496/**
497 * amdgpu_gem_va_update_vm - update the bo_va in its VM
498 *
499 * @adev: amdgpu_device pointer
500 * @vm: vm to update
501 * @bo_va: bo_va to update
502 * @list: validation list
503 * @operation: map, unmap or clear
504 *
505 * Update the bo_va directly after setting its address. Errors are not
506 * vital here, so they are not reported back to userspace.
507 */
508static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
509				    struct amdgpu_vm *vm,
510				    struct amdgpu_bo_va *bo_va,
511				    struct list_head *list,
512				    uint32_t operation)
513{
514	int r;
515
516	if (!amdgpu_vm_ready(vm))
517		return;
518
519	r = amdgpu_vm_clear_freed(adev, vm, NULL);
520	if (r)
521		goto error;
522
523	if (operation == AMDGPU_VA_OP_MAP ||
524	    operation == AMDGPU_VA_OP_REPLACE) {
525		r = amdgpu_vm_bo_update(adev, bo_va, false);
526		if (r)
527			goto error;
528	}
529
530	r = amdgpu_vm_update_directories(adev, vm);
531
532error:
533	if (r && r != -ERESTARTSYS)
534		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
535}
536
537int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
538			  struct drm_file *filp)
539{
540	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
541		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
542		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
543	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
544		AMDGPU_VM_PAGE_PRT;
545
546	struct drm_amdgpu_gem_va *args = data;
547	struct drm_gem_object *gobj;
548	struct amdgpu_device *adev = dev->dev_private;
549	struct amdgpu_fpriv *fpriv = filp->driver_priv;
550	struct amdgpu_bo *abo;
551	struct amdgpu_bo_va *bo_va;
552	struct amdgpu_bo_list_entry vm_pd;
553	struct ttm_validate_buffer tv;
554	struct ww_acquire_ctx ticket;
555	struct list_head list, duplicates;
556	uint64_t va_flags;
557	int r = 0;
558
559	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
560		dev_dbg(&dev->pdev->dev,
561			"va_address 0x%LX is in reserved area 0x%LX\n",
562			args->va_address, AMDGPU_VA_RESERVED_SIZE);
563		return -EINVAL;
564	}
565
566	if (args->va_address >= AMDGPU_VA_HOLE_START &&
567	    args->va_address < AMDGPU_VA_HOLE_END) {
568		dev_dbg(&dev->pdev->dev,
569			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
570			args->va_address, AMDGPU_VA_HOLE_START,
571			AMDGPU_VA_HOLE_END);
572		return -EINVAL;
573	}
574
575	args->va_address &= AMDGPU_VA_HOLE_MASK;
576
577	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
578		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
579			args->flags);
580		return -EINVAL;
581	}
582
583	switch (args->operation) {
584	case AMDGPU_VA_OP_MAP:
585	case AMDGPU_VA_OP_UNMAP:
586	case AMDGPU_VA_OP_CLEAR:
587	case AMDGPU_VA_OP_REPLACE:
588		break;
589	default:
590		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
591			args->operation);
592		return -EINVAL;
593	}
594
595	INIT_LIST_HEAD(&list);
596	INIT_LIST_HEAD(&duplicates);
597	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
598	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
599		gobj = drm_gem_object_lookup(filp, args->handle);
600		if (gobj == NULL)
601			return -ENOENT;
602		abo = gem_to_amdgpu_bo(gobj);
603		tv.bo = &abo->tbo;
604		tv.shared = false;
605		list_add(&tv.head, &list);
606	} else {
607		gobj = NULL;
608		abo = NULL;
609	}
610
611	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
612
613	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
614	if (r)
615		goto error_unref;
616
617	if (abo) {
618		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
619		if (!bo_va) {
620			r = -ENOENT;
621			goto error_backoff;
622		}
623	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
624		bo_va = fpriv->prt_va;
625	} else {
626		bo_va = NULL;
627	}
628
629	switch (args->operation) {
630	case AMDGPU_VA_OP_MAP:
631		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
632					args->map_size);
633		if (r)
634			goto error_backoff;
635
636		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
637		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
638				     args->offset_in_bo, args->map_size,
639				     va_flags);
640		break;
641	case AMDGPU_VA_OP_UNMAP:
642		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
643		break;
644
645	case AMDGPU_VA_OP_CLEAR:
646		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
647						args->va_address,
648						args->map_size);
649		break;
650	case AMDGPU_VA_OP_REPLACE:
651		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
652					args->map_size);
653		if (r)
654			goto error_backoff;
655
656		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
657		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
658					     args->offset_in_bo, args->map_size,
659					     va_flags);
660		break;
661	default:
662		break;
663	}
664	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
665		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
666					args->operation);
667
668error_backoff:
669	ttm_eu_backoff_reservation(&ticket, &list);
670
671error_unref:
672	drm_gem_object_put_unlocked(gobj);
673	return r;
674}
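
For completeness, a hypothetical userspace sketch mapping an existing BO read/write at a chosen GPU virtual address va, which must stay clear of the reserved range and the VA hole rejected above:

	struct drm_amdgpu_gem_va args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.operation = AMDGPU_VA_OP_MAP;
	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	args.va_address = va;		/* page aligned */
	args.offset_in_bo = 0;
	args.map_size = size;		/* page aligned */

	if (drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args))
		return -1;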
675
676int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
677			struct drm_file *filp)
678{
679	struct amdgpu_device *adev = dev->dev_private;
680	struct drm_amdgpu_gem_op *args = data;
681	struct drm_gem_object *gobj;
682	struct amdgpu_bo *robj;
683	int r;
684
685	gobj = drm_gem_object_lookup(filp, args->handle);
686	if (gobj == NULL) {
687		return -ENOENT;
688	}
689	robj = gem_to_amdgpu_bo(gobj);
690
691	r = amdgpu_bo_reserve(robj, false);
692	if (unlikely(r))
693		goto out;
694
695	switch (args->op) {
696	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
697		struct drm_amdgpu_gem_create_in info;
698		void __user *out = u64_to_user_ptr(args->value);
699
700		info.bo_size = robj->gem_base.size;
701		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
702		info.domains = robj->preferred_domains;
703		info.domain_flags = robj->flags;
704		amdgpu_bo_unreserve(robj);
705		if (copy_to_user(out, &info, sizeof(info)))
706			r = -EFAULT;
707		break;
708	}
709	case AMDGPU_GEM_OP_SET_PLACEMENT:
710		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
711			r = -EINVAL;
712			amdgpu_bo_unreserve(robj);
713			break;
714		}
715		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
716			r = -EPERM;
717			amdgpu_bo_unreserve(robj);
718			break;
719		}
720		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
721							AMDGPU_GEM_DOMAIN_GTT |
722							AMDGPU_GEM_DOMAIN_CPU);
723		robj->allowed_domains = robj->preferred_domains;
724		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
725			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
726
727		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
728			amdgpu_vm_bo_invalidate(adev, robj, true);
729
730		amdgpu_bo_unreserve(robj);
731		break;
732	default:
733		amdgpu_bo_unreserve(robj);
734		r = -EINVAL;
735	}
736
737out:
738	drm_gem_object_put_unlocked(gobj);
739	return r;
740}
741
742int amdgpu_mode_dumb_create(struct drm_file *file_priv,
743			    struct drm_device *dev,
744			    struct drm_mode_create_dumb *args)
745{
746	struct amdgpu_device *adev = dev->dev_private;
747	struct drm_gem_object *gobj;
748	uint32_t handle;
749	int r;
750
751	args->pitch = amdgpu_align_pitch(adev, args->width,
752					 DIV_ROUND_UP(args->bpp, 8), 0);
753	args->size = (u64)args->pitch * args->height;
754	args->size = ALIGN(args->size, PAGE_SIZE);
755
756	r = amdgpu_gem_object_create(adev, args->size, 0,
757				     AMDGPU_GEM_DOMAIN_VRAM,
758				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
759				     false, NULL, &gobj);
760	if (r)
761		return -ENOMEM;
762
763	r = drm_gem_handle_create(file_priv, gobj, &handle);
764	/* drop reference from allocate - handle holds it now */
765	drm_gem_object_put_unlocked(gobj);
766	if (r) {
767		return r;
768	}
769	args->handle = handle;
770	return 0;
771}
772
773#if defined(CONFIG_DEBUG_FS)
774static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
775{
776	struct drm_gem_object *gobj = ptr;
777	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
778	struct seq_file *m = data;
779
780	unsigned domain;
781	const char *placement;
782	unsigned pin_count;
783	uint64_t offset;
784
785	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
786	switch (domain) {
787	case AMDGPU_GEM_DOMAIN_VRAM:
788		placement = "VRAM";
789		break;
790	case AMDGPU_GEM_DOMAIN_GTT:
791		placement = " GTT";
792		break;
793	case AMDGPU_GEM_DOMAIN_CPU:
794	default:
795		placement = " CPU";
796		break;
797	}
798	seq_printf(m, "\t0x%08x: %12ld byte %s",
799		   id, amdgpu_bo_size(bo), placement);
800
801	offset = READ_ONCE(bo->tbo.mem.start);
802	if (offset != AMDGPU_BO_INVALID_OFFSET)
803		seq_printf(m, " @ 0x%010Lx", offset);
804
805	pin_count = READ_ONCE(bo->pin_count);
806	if (pin_count)
807		seq_printf(m, " pin count %d", pin_count);
808	seq_printf(m, "\n");
809
810	return 0;
811}
812
813static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
814{
815	struct drm_info_node *node = (struct drm_info_node *)m->private;
816	struct drm_device *dev = node->minor->dev;
817	struct drm_file *file;
818	int r;
819
820	r = mutex_lock_interruptible(&dev->filelist_mutex);
821	if (r)
822		return r;
823
824	list_for_each_entry(file, &dev->filelist, lhead) {
825		struct task_struct *task;
826
827		/*
828		 * Although we have a valid reference on file->pid, that does
829		 * not guarantee that the task_struct who called get_pid() is
830		 * still alive (e.g. get_pid(current) => fork() => exit()).
831		 * Therefore, we need to protect this ->comm access using RCU.
832		 */
833		rcu_read_lock();
834		task = pid_task(file->pid, PIDTYPE_PID);
835		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
836			   task ? task->comm : "<unknown>");
837		rcu_read_unlock();
838
839		spin_lock(&file->table_lock);
840		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
841		spin_unlock(&file->table_lock);
842	}
843
844	mutex_unlock(&dev->filelist_mutex);
845	return 0;
846}
847
848static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
849	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
850};
851#endif
852
853int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
854{
855#if defined(CONFIG_DEBUG_FS)
856	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
857#endif
858	return 0;
859}
v6.8 (drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c)
   1/*
   2 * Copyright 2008 Advanced Micro Devices, Inc.
   3 * Copyright 2008 Red Hat Inc.
   4 * Copyright 2009 Jerome Glisse.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the "Software"),
   8 * to deal in the Software without restriction, including without limitation
   9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10 * and/or sell copies of the Software, and to permit persons to whom the
  11 * Software is furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22 * OTHER DEALINGS IN THE SOFTWARE.
  23 *
  24 * Authors: Dave Airlie
  25 *          Alex Deucher
  26 *          Jerome Glisse
  27 */
  28#include <linux/ktime.h>
  29#include <linux/module.h>
  30#include <linux/pagemap.h>
  31#include <linux/pci.h>
  32#include <linux/dma-buf.h>
  33
  34#include <drm/amdgpu_drm.h>
  35#include <drm/drm_drv.h>
  36#include <drm/drm_exec.h>
  37#include <drm/drm_gem_ttm_helper.h>
  38#include <drm/ttm/ttm_tt.h>
  39
  40#include "amdgpu.h"
  41#include "amdgpu_display.h"
  42#include "amdgpu_dma_buf.h"
  43#include "amdgpu_hmm.h"
  44#include "amdgpu_xgmi.h"
  45
  46static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
  47
  48static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
  49{
  50	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
  51	struct drm_device *ddev = bo->base.dev;
  52	vm_fault_t ret;
  53	int idx;
  54
  55	ret = ttm_bo_vm_reserve(bo, vmf);
  56	if (ret)
  57		return ret;
  58
  59	if (drm_dev_enter(ddev, &idx)) {
  60		ret = amdgpu_bo_fault_reserve_notify(bo);
  61		if (ret) {
  62			drm_dev_exit(idx);
  63			goto unlock;
  64		}
  65
  66		ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
  67					       TTM_BO_VM_NUM_PREFAULT);
  68
  69		drm_dev_exit(idx);
  70	} else {
  71		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
  72	}
  73	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
  74		return ret;
  75
  76unlock:
  77	dma_resv_unlock(bo->base.resv);
  78	return ret;
  79}
  80
  81static const struct vm_operations_struct amdgpu_gem_vm_ops = {
  82	.fault = amdgpu_gem_fault,
  83	.open = ttm_bo_vm_open,
  84	.close = ttm_bo_vm_close,
  85	.access = ttm_bo_vm_access
  86};
  87
  88static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
  89{
  90	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
  91
  92	if (robj) {
  93		amdgpu_hmm_unregister(robj);
  94		amdgpu_bo_unref(&robj);
  95	}
  96}
  97
  98int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
  99			     int alignment, u32 initial_domain,
 100			     u64 flags, enum ttm_bo_type type,
 101			     struct dma_resv *resv,
 102			     struct drm_gem_object **obj, int8_t xcp_id_plus1)
 103{
 104	struct amdgpu_bo *bo;
 105	struct amdgpu_bo_user *ubo;
 106	struct amdgpu_bo_param bp;
 107	int r;
 108
 109	memset(&bp, 0, sizeof(bp));
 110	*obj = NULL;
 111
 112	bp.size = size;
 113	bp.byte_align = alignment;
 114	bp.type = type;
 115	bp.resv = resv;
 116	bp.preferred_domain = initial_domain;
 117	bp.flags = flags;
 118	bp.domain = initial_domain;
 119	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 120	bp.xcp_id_plus1 = xcp_id_plus1;
 121
 122	r = amdgpu_bo_create_user(adev, &bp, &ubo);
 123	if (r)
 124		return r;
 125
 126	bo = &ubo->bo;
 127	*obj = &bo->tbo.base;
 128	(*obj)->funcs = &amdgpu_gem_object_funcs;
 129
 130	return 0;
 131}
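
The v6.8 variant routes everything through amdgpu_bo_param and grows an xcp_id_plus1 argument; a minimal internal-caller sketch, where passing 0 is assumed to mean no explicit accelerator partition (mirroring the fpriv->xcp_id + 1 convention used by the ioctls below):

	struct drm_gem_object *gobj = NULL;
	int r;

	r = amdgpu_gem_object_create(adev, PAGE_SIZE, 0, AMDGPU_GEM_DOMAIN_GTT,
				     0, ttm_bo_type_device, NULL, &gobj, 0);
	if (r)
		return r;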
 132
 133void amdgpu_gem_force_release(struct amdgpu_device *adev)
 134{
 135	struct drm_device *ddev = adev_to_drm(adev);
 136	struct drm_file *file;
 137
 138	mutex_lock(&ddev->filelist_mutex);
 139
 140	list_for_each_entry(file, &ddev->filelist, lhead) {
 141		struct drm_gem_object *gobj;
 142		int handle;
 143
 144		WARN_ONCE(1, "Still active user space clients!\n");
 145		spin_lock(&file->table_lock);
 146		idr_for_each_entry(&file->object_idr, gobj, handle) {
 147			WARN_ONCE(1, "And also active allocations!\n");
 148			drm_gem_object_put(gobj);
 149		}
 150		idr_destroy(&file->object_idr);
 151		spin_unlock(&file->table_lock);
 152	}
 153
 154	mutex_unlock(&ddev->filelist_mutex);
 155}
 156
 157/*
 158 * Called from drm_gem_handle_create, which appears in both the new and open
 159 * ioctl cases.
 160 */
 161static int amdgpu_gem_object_open(struct drm_gem_object *obj,
 162				  struct drm_file *file_priv)
 163{
 164	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
 165	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 166	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 167	struct amdgpu_vm *vm = &fpriv->vm;
 168	struct amdgpu_bo_va *bo_va;
 169	struct mm_struct *mm;
 170	int r;
 171
 172	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
 173	if (mm && mm != current->mm)
 174		return -EPERM;
 175
 176	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
 177	    abo->tbo.base.resv != vm->root.bo->tbo.base.resv)
 178		return -EPERM;
 179
 180	r = amdgpu_bo_reserve(abo, false);
 181	if (r)
 182		return r;
 183
 184	bo_va = amdgpu_vm_bo_find(vm, abo);
 185	if (!bo_va)
 186		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
 187	else
 188		++bo_va->ref_count;
 189	amdgpu_bo_unreserve(abo);
 190	return 0;
 191}
 192
 193static void amdgpu_gem_object_close(struct drm_gem_object *obj,
 194				    struct drm_file *file_priv)
 195{
 196	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 197	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 198	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 199	struct amdgpu_vm *vm = &fpriv->vm;
 200
 201	struct dma_fence *fence = NULL;
 202	struct amdgpu_bo_va *bo_va;
 203	struct drm_exec exec;
 204	long r;
 205
 206	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
 207	drm_exec_until_all_locked(&exec) {
 208		r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
 209		drm_exec_retry_on_contention(&exec);
 210		if (unlikely(r))
 211			goto out_unlock;
 212
 213		r = amdgpu_vm_lock_pd(vm, &exec, 0);
 214		drm_exec_retry_on_contention(&exec);
 215		if (unlikely(r))
 216			goto out_unlock;
 217	}
 218
 219	bo_va = amdgpu_vm_bo_find(vm, bo);
 220	if (!bo_va || --bo_va->ref_count)
 221		goto out_unlock;
 222
 223	amdgpu_vm_bo_del(adev, bo_va);
 224	if (!amdgpu_vm_ready(vm))
 225		goto out_unlock;
 226
 227	r = amdgpu_vm_clear_freed(adev, vm, &fence);
 228	if (unlikely(r < 0))
 229		dev_err(adev->dev, "failed to clear page "
 230			"tables on GEM object close (%ld)\n", r);
 231	if (r || !fence)
 232		goto out_unlock;
 233
 234	amdgpu_bo_fence(bo, fence, true);
 235	dma_fence_put(fence);
 236
 237out_unlock:
 238	if (r)
 239		dev_err(adev->dev, "leaking bo va (%ld)\n", r);
 240	drm_exec_fini(&exec);
 241}
 242
 243static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 244{
 245	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 246
 247	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 248		return -EPERM;
 249	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 250		return -EPERM;
 251
 252	/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
 253	 * for debugger access to invisible VRAM. Should have used MAP_SHARED
 254	 * instead. Clearing VM_MAYWRITE prevents the mapping from ever
 255	 * becoming writable and makes is_cow_mapping(vm_flags) false.
 256	 */
 257	if (is_cow_mapping(vma->vm_flags) &&
 258	    !(vma->vm_flags & VM_ACCESS_FLAGS))
 259		vm_flags_clear(vma, VM_MAYWRITE);
 260
 261	return drm_gem_ttm_mmap(obj, vma);
 262}
 263
 264static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
 265	.free = amdgpu_gem_object_free,
 266	.open = amdgpu_gem_object_open,
 267	.close = amdgpu_gem_object_close,
 268	.export = amdgpu_gem_prime_export,
 269	.vmap = drm_gem_ttm_vmap,
 270	.vunmap = drm_gem_ttm_vunmap,
 271	.mmap = amdgpu_gem_object_mmap,
 272	.vm_ops = &amdgpu_gem_vm_ops,
 273};
 274
 275/*
 276 * GEM ioctls.
 277 */
 278int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 279			    struct drm_file *filp)
 280{
 281	struct amdgpu_device *adev = drm_to_adev(dev);
 282	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 283	struct amdgpu_vm *vm = &fpriv->vm;
 284	union drm_amdgpu_gem_create *args = data;
 285	uint64_t flags = args->in.domain_flags;
 286	uint64_t size = args->in.bo_size;
 287	struct dma_resv *resv = NULL;
 288	struct drm_gem_object *gobj;
 289	uint32_t handle, initial_domain;
 290	int r;
 291
 292	/* reject DOORBELLs until userspace code that uses them is available */
 293	if (args->in.domains & AMDGPU_GEM_DOMAIN_DOORBELL)
 294		return -EINVAL;
 295
 296	/* reject invalid gem flags */
 297	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 298		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 299		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 300		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
 301		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
 302		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
 303		      AMDGPU_GEM_CREATE_ENCRYPTED |
 304		      AMDGPU_GEM_CREATE_DISCARDABLE))
 305		return -EINVAL;
 306
 307	/* reject invalid gem domains */
 308	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
 309		return -EINVAL;
 310
 311	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
 312		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
 313		return -EINVAL;
 314	}
 315
 316	/* create a GEM object to hold this allocation */
 317	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 318	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
 319		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 320			/* if a GDS bo is created from user space, it must be
 321			 * passed in via the bo list
 322			 */
 323			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
 324			return -EINVAL;
 325		}
 326		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 327	}
 328
 329	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 330		r = amdgpu_bo_reserve(vm->root.bo, false);
 331		if (r)
 332			return r;
 333
 334		resv = vm->root.bo->tbo.base.resv;
 335	}
 336
 337	initial_domain = (u32)(0xffffffff & args->in.domains);
 338retry:
 339	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 340				     initial_domain,
 341				     flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
 342	if (r && r != -ERESTARTSYS) {
 343		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 344			flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 345			goto retry;
 346		}
 347
 348		if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 349			initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 350			goto retry;
 351		}
 352		DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
 353				size, initial_domain, args->in.alignment, r);
 354	}
 355
 356	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 357		if (!r) {
 358			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 359
 360			abo->parent = amdgpu_bo_ref(vm->root.bo);
 361		}
 362		amdgpu_bo_unreserve(vm->root.bo);
 363	}
 364	if (r)
 365		return r;
 366
 367	r = drm_gem_handle_create(filp, gobj, &handle);
 368	/* drop reference from allocate - handle holds it now */
 369	drm_gem_object_put(gobj);
 370	if (r)
 371		return r;
 372
 373	memset(args, 0, sizeof(*args));
 374	args->out.handle = handle;
 375	return 0;
 376}
 377
 378int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 379			     struct drm_file *filp)
 380{
 381	struct ttm_operation_ctx ctx = { true, false };
 382	struct amdgpu_device *adev = drm_to_adev(dev);
 383	struct drm_amdgpu_gem_userptr *args = data;
 384	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 385	struct drm_gem_object *gobj;
 386	struct hmm_range *range;
 387	struct amdgpu_bo *bo;
 388	uint32_t handle;
 389	int r;
 390
 391	args->addr = untagged_addr(args->addr);
 392
 393	if (offset_in_page(args->addr | args->size))
 394		return -EINVAL;
 395
 396	/* reject unknown flag values */
 397	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
 398	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
 399	    AMDGPU_GEM_USERPTR_REGISTER))
 400		return -EINVAL;
 401
 402	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
 403	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
 404
 405		/* if we want to write to it we must install an MMU notifier */
 406		return -EACCES;
 407	}
 408
 409	/* create a GEM object to hold this allocation */
 410	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
 411				     0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
 412	if (r)
 413		return r;
 414
 415	bo = gem_to_amdgpu_bo(gobj);
 416	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
 417	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
 418	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 419	if (r)
 420		goto release_object;
 421
 422	r = amdgpu_hmm_register(bo, args->addr);
 423	if (r)
 424			goto release_object;
 425
 426	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
 427		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
 428						 &range);
 429		if (r)
 430			goto release_object;
 431
 432		r = amdgpu_bo_reserve(bo, true);
 433		if (r)
 434			goto user_pages_done;
 435
 436		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 437		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 438		amdgpu_bo_unreserve(bo);
 439		if (r)
 440			goto user_pages_done;
 441	}
 442
 443	r = drm_gem_handle_create(filp, gobj, &handle);
 444	if (r)
 445		goto user_pages_done;
 446
 447	args->handle = handle;
 448
 449user_pages_done:
 450	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
 451		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 452
 453release_object:
 454	drm_gem_object_put(gobj);
 455
 456	return r;
 457}
 458
 459int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 460			  struct drm_device *dev,
 461			  uint32_t handle, uint64_t *offset_p)
 462{
 463	struct drm_gem_object *gobj;
 464	struct amdgpu_bo *robj;
 465
 466	gobj = drm_gem_object_lookup(filp, handle);
 467	if (!gobj)
 468		return -ENOENT;
 469
 470	robj = gem_to_amdgpu_bo(gobj);
 471	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 472	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
 473		drm_gem_object_put(gobj);
 474		return -EPERM;
 475	}
 476	*offset_p = amdgpu_bo_mmap_offset(robj);
 477	drm_gem_object_put(gobj);
 478	return 0;
 479}
 480
 481int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
 482			  struct drm_file *filp)
 483{
 484	union drm_amdgpu_gem_mmap *args = data;
 485	uint32_t handle = args->in.handle;
 486
 487	memset(args, 0, sizeof(*args));
 488	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
 489}
 490
 491/**
 492 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 493 *
 494 * @timeout_ns: timeout in ns
 495 *
 496 * Calculate the timeout in jiffies from an absolute timeout in ns.
 497 */
 498unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 499{
 500	unsigned long timeout_jiffies;
 501	ktime_t timeout;
 502
 503	/* clamp timeout if it's too large */
 504	if (((int64_t)timeout_ns) < 0)
 505		return MAX_SCHEDULE_TIMEOUT;
 506
 507	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
 508	if (ktime_to_ns(timeout) < 0)
 509		return 0;
 510
 511	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
 512	/* clamp timeout to avoid unsigned -> signed overflow */
 513	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
 514		return MAX_SCHEDULE_TIMEOUT - 1;
 515
 516	return timeout_jiffies;
 517}
 518
 519int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 520			      struct drm_file *filp)
 521{
 522	union drm_amdgpu_gem_wait_idle *args = data;
 523	struct drm_gem_object *gobj;
 524	struct amdgpu_bo *robj;
 525	uint32_t handle = args->in.handle;
 526	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
 527	int r = 0;
 528	long ret;
 529
 530	gobj = drm_gem_object_lookup(filp, handle);
 531	if (!gobj)
 532		return -ENOENT;
 533
 534	robj = gem_to_amdgpu_bo(gobj);
 535	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
 536				    true, timeout);
 537
 538	/* ret == 0 means not signaled,
 539	 * ret > 0 means signaled
 540	 * ret < 0 means interrupted before timeout
 541	 */
 542	if (ret >= 0) {
 543		memset(args, 0, sizeof(*args));
 544		args->out.status = (ret == 0);
 545	} else
 546		r = ret;
 547
 548	drm_gem_object_put(gobj);
 549	return r;
 550}
 551
 552int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
 553				struct drm_file *filp)
 554{
 555	struct drm_amdgpu_gem_metadata *args = data;
 556	struct drm_gem_object *gobj;
 557	struct amdgpu_bo *robj;
 558	int r = -1;
 559
 560	DRM_DEBUG("%d\n", args->handle);
 561	gobj = drm_gem_object_lookup(filp, args->handle);
 562	if (gobj == NULL)
 563		return -ENOENT;
 564	robj = gem_to_amdgpu_bo(gobj);
 565
 566	r = amdgpu_bo_reserve(robj, false);
 567	if (unlikely(r != 0))
 568		goto out;
 569
 570	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
 571		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
 572		r = amdgpu_bo_get_metadata(robj, args->data.data,
 573					   sizeof(args->data.data),
 574					   &args->data.data_size_bytes,
 575					   &args->data.flags);
 576	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
 577		if (args->data.data_size_bytes > sizeof(args->data.data)) {
 578			r = -EINVAL;
 579			goto unreserve;
 580		}
 581		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
 582		if (!r)
 583			r = amdgpu_bo_set_metadata(robj, args->data.data,
 584						   args->data.data_size_bytes,
 585						   args->data.flags);
 586	}
 587
 588unreserve:
 589	amdgpu_bo_unreserve(robj);
 590out:
 591	drm_gem_object_put(gobj);
 592	return r;
 593}
 594
 595/**
 596 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 597 *
 598 * @adev: amdgpu_device pointer
 599 * @vm: vm to update
 600 * @bo_va: bo_va to update
 601 * @operation: map, unmap or clear
 602 *
 603 * Update the bo_va directly after setting its address. Errors are not
 604 * vital here, so they are not reported back to userspace.
 605 */
 606static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 607				    struct amdgpu_vm *vm,
 608				    struct amdgpu_bo_va *bo_va,
 609				    uint32_t operation)
 610{
 611	int r;
 612
 613	if (!amdgpu_vm_ready(vm))
 614		return;
 615
 616	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 617	if (r)
 618		goto error;
 619
 620	if (operation == AMDGPU_VA_OP_MAP ||
 621	    operation == AMDGPU_VA_OP_REPLACE) {
 622		r = amdgpu_vm_bo_update(adev, bo_va, false);
 623		if (r)
 624			goto error;
 625	}
 626
 627	r = amdgpu_vm_update_pdes(adev, vm, false);
 628
 629error:
 630	if (r && r != -ERESTARTSYS)
 631		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 632}
 633
 634/**
 635 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 636 *
 637 * @adev: amdgpu_device pointer
 638 * @flags: GEM UAPI flags
 639 *
 640 * Returns the GEM UAPI flags translated into hardware PTE flags for the ASIC.
 641 */
 642uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
 643{
 644	uint64_t pte_flag = 0;
 645
 646	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
 647		pte_flag |= AMDGPU_PTE_EXECUTABLE;
 648	if (flags & AMDGPU_VM_PAGE_READABLE)
 649		pte_flag |= AMDGPU_PTE_READABLE;
 650	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
 651		pte_flag |= AMDGPU_PTE_WRITEABLE;
 652	if (flags & AMDGPU_VM_PAGE_PRT)
 653		pte_flag |= AMDGPU_PTE_PRT;
 654	if (flags & AMDGPU_VM_PAGE_NOALLOC)
 655		pte_flag |= AMDGPU_PTE_NOALLOC;
 656
 657	if (adev->gmc.gmc_funcs->map_mtype)
 658		pte_flag |= amdgpu_gmc_map_mtype(adev,
 659						 flags & AMDGPU_VM_MTYPE_MASK);
 660
 661	return pte_flag;
 662}
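
For example, a map request carrying AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE with the default MTYPE comes back as AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE, plus whatever amdgpu_gmc_map_mtype() encodes for the ASIC's default memory type.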
 663
 664int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 665			  struct drm_file *filp)
 666{
 667	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
 668		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 669		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
 670		AMDGPU_VM_PAGE_NOALLOC;
 671	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
 672		AMDGPU_VM_PAGE_PRT;
 673
 674	struct drm_amdgpu_gem_va *args = data;
 675	struct drm_gem_object *gobj;
 676	struct amdgpu_device *adev = drm_to_adev(dev);
 677	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 678	struct amdgpu_bo *abo;
 679	struct amdgpu_bo_va *bo_va;
 680	struct drm_exec exec;
 681	uint64_t va_flags;
 682	uint64_t vm_size;
 683	int r = 0;
 684
 685	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
 686		dev_dbg(dev->dev,
 687			"va_address 0x%llx is in reserved area 0x%llx\n",
 688			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 689		return -EINVAL;
 690	}
 691
 692	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 693	    args->va_address < AMDGPU_GMC_HOLE_END) {
 694		dev_dbg(dev->dev,
 695			"va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
 696			args->va_address, AMDGPU_GMC_HOLE_START,
 697			AMDGPU_GMC_HOLE_END);
 698		return -EINVAL;
 699	}
 700
 701	args->va_address &= AMDGPU_GMC_HOLE_MASK;
 702
 703	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 704	vm_size -= AMDGPU_VA_RESERVED_SIZE;
 705	if (args->va_address + args->map_size > vm_size) {
 706		dev_dbg(dev->dev,
 707			"va_address 0x%llx is in top reserved area 0x%llx\n",
 708			args->va_address + args->map_size, vm_size);
 709		return -EINVAL;
 710	}
 711
 712	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
 713		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
 714			args->flags);
 715		return -EINVAL;
 716	}
 717
 718	switch (args->operation) {
 719	case AMDGPU_VA_OP_MAP:
 720	case AMDGPU_VA_OP_UNMAP:
 721	case AMDGPU_VA_OP_CLEAR:
 722	case AMDGPU_VA_OP_REPLACE:
 723		break;
 724	default:
 725		dev_dbg(dev->dev, "unsupported operation %d\n",
 726			args->operation);
 727		return -EINVAL;
 728	}
 729
 730	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 731	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 732		gobj = drm_gem_object_lookup(filp, args->handle);
 733		if (gobj == NULL)
 734			return -ENOENT;
 735		abo = gem_to_amdgpu_bo(gobj);
 736	} else {
 737		gobj = NULL;
 738		abo = NULL;
 739	}
 740
 741	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
 742		      DRM_EXEC_IGNORE_DUPLICATES, 0);
 743	drm_exec_until_all_locked(&exec) {
 744		if (gobj) {
 745			r = drm_exec_lock_obj(&exec, gobj);
 746			drm_exec_retry_on_contention(&exec);
 747			if (unlikely(r))
 748				goto error;
 749		}
 750
 751		r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
 752		drm_exec_retry_on_contention(&exec);
 753		if (unlikely(r))
 754			goto error;
 755	}
 756
 757	if (abo) {
 758		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
 759		if (!bo_va) {
 760			r = -ENOENT;
 761			goto error;
 762		}
 763	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
 764		bo_va = fpriv->prt_va;
 765	} else {
 766		bo_va = NULL;
 767	}
 768
 769	switch (args->operation) {
 770	case AMDGPU_VA_OP_MAP:
 771		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 772		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 773				     args->offset_in_bo, args->map_size,
 774				     va_flags);
 775		break;
 776	case AMDGPU_VA_OP_UNMAP:
 777		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
 778		break;
 779
 780	case AMDGPU_VA_OP_CLEAR:
 781		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
 782						args->va_address,
 783						args->map_size);
 784		break;
 785	case AMDGPU_VA_OP_REPLACE:
 786		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 787		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 788					     args->offset_in_bo, args->map_size,
 789					     va_flags);
 790		break;
 791	default:
 792		break;
 793	}
 794	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm)
 795		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
 796					args->operation);
 797
 798error:
 799	drm_exec_fini(&exec);
 800	drm_gem_object_put(gobj);
 801	return r;
 802}
 803
 804int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 805			struct drm_file *filp)
 806{
 807	struct amdgpu_device *adev = drm_to_adev(dev);
 808	struct drm_amdgpu_gem_op *args = data;
 809	struct drm_gem_object *gobj;
 810	struct amdgpu_vm_bo_base *base;
 811	struct amdgpu_bo *robj;
 812	int r;
 813
 814	gobj = drm_gem_object_lookup(filp, args->handle);
 815	if (!gobj)
 816		return -ENOENT;
 817
 818	robj = gem_to_amdgpu_bo(gobj);
 819
 820	r = amdgpu_bo_reserve(robj, false);
 821	if (unlikely(r))
 822		goto out;
 823
 824	switch (args->op) {
 825	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
 826		struct drm_amdgpu_gem_create_in info;
 827		void __user *out = u64_to_user_ptr(args->value);
 828
 829		info.bo_size = robj->tbo.base.size;
 830		info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
 831		info.domains = robj->preferred_domains;
 832		info.domain_flags = robj->flags;
 833		amdgpu_bo_unreserve(robj);
 834		if (copy_to_user(out, &info, sizeof(info)))
 835			r = -EFAULT;
 836		break;
 837	}
 838	case AMDGPU_GEM_OP_SET_PLACEMENT:
 839		if (robj->tbo.base.import_attach &&
 840		    args->value & AMDGPU_GEM_DOMAIN_VRAM) {
 841			r = -EINVAL;
 842			amdgpu_bo_unreserve(robj);
 843			break;
 844		}
 845		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 846			r = -EPERM;
 847			amdgpu_bo_unreserve(robj);
 848			break;
 849		}
 850		for (base = robj->vm_bo; base; base = base->next)
 851			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
 852				amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
 853				r = -EINVAL;
 854				amdgpu_bo_unreserve(robj);
 855				goto out;
 856			}
 857
 858
 859		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
 860							AMDGPU_GEM_DOMAIN_GTT |
 861							AMDGPU_GEM_DOMAIN_CPU);
 862		robj->allowed_domains = robj->preferred_domains;
 863		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 864			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 865
 866		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 867			amdgpu_vm_bo_invalidate(adev, robj, true);
 868
 869		amdgpu_bo_unreserve(robj);
 870		break;
 871	default:
 872		amdgpu_bo_unreserve(robj);
 873		r = -EINVAL;
 874	}
 875
 876out:
 877	drm_gem_object_put(gobj);
 878	return r;
 879}
 880
 881static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
 882				  int width,
 883				  int cpp,
 884				  bool tiled)
 885{
 886	int aligned = width;
 887	int pitch_mask = 0;
 888
 889	switch (cpp) {
 890	case 1:
 891		pitch_mask = 255;
 892		break;
 893	case 2:
 894		pitch_mask = 127;
 895		break;
 896	case 3:
 897	case 4:
 898		pitch_mask = 63;
 899		break;
 900	}
 901
 902	aligned += pitch_mask;
 903	aligned &= ~pitch_mask;
 904	return aligned * cpp;
 905}
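
Worked example: a 1021-pixel-wide, 32 bpp surface has cpp = 4 and therefore pitch_mask = 63, so aligned = (1021 + 63) & ~63 = 1024 pixels and the function returns 1024 * 4 = 4096 bytes.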
 906
 907int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 908			    struct drm_device *dev,
 909			    struct drm_mode_create_dumb *args)
 910{
 911	struct amdgpu_device *adev = drm_to_adev(dev);
 912	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 913	struct drm_gem_object *gobj;
 914	uint32_t handle;
 915	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 916		    AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 917		    AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 918	u32 domain;
 919	int r;
 920
 921	/*
 922	 * The buffer returned from this function should be cleared, but
 923	 * it can only be done if the ring is enabled or we'll fail to
 924	 * create the buffer.
 925	 */
 926	if (adev->mman.buffer_funcs_enabled)
 927		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
 928
 929	args->pitch = amdgpu_gem_align_pitch(adev, args->width,
 930					     DIV_ROUND_UP(args->bpp, 8), 0);
 931	args->size = (u64)args->pitch * args->height;
 932	args->size = ALIGN(args->size, PAGE_SIZE);
 933	domain = amdgpu_bo_get_preferred_domain(adev,
 934				amdgpu_display_supported_domains(adev, flags));
 935	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
 936				     ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
 937	if (r)
 938		return -ENOMEM;
 939
 940	r = drm_gem_handle_create(file_priv, gobj, &handle);
 941	/* drop reference from allocate - handle holds it now */
 942	drm_gem_object_put(gobj);
 943	if (r)
 944		return r;
 945
 946	args->handle = handle;
 947	return 0;
 948}
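
A sketch of the matching userspace side (fd hypothetical), using the generic dumb-buffer ioctl; the kernel fills in handle, pitch and size on return:

	struct drm_mode_create_dumb creq;

	memset(&creq, 0, sizeof(creq));
	creq.width = 1920;
	creq.height = 1080;
	creq.bpp = 32;

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq))
		return -1;
	/* creq.handle, creq.pitch (bytes) and creq.size are now valid */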
 949
 950#if defined(CONFIG_DEBUG_FS)
 951static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
 952{
 953	struct amdgpu_device *adev = m->private;
 954	struct drm_device *dev = adev_to_drm(adev);
 955	struct drm_file *file;
 956	int r;
 957
 958	r = mutex_lock_interruptible(&dev->filelist_mutex);
 959	if (r)
 960		return r;
 961
 962	list_for_each_entry(file, &dev->filelist, lhead) {
 963		struct task_struct *task;
 964		struct drm_gem_object *gobj;
 965		struct pid *pid;
 966		int id;
 967
 968		/*
 969		 * Although we have a valid reference on file->pid, that does
 970		 * not guarantee that the task_struct who called get_pid() is
 971		 * still alive (e.g. get_pid(current) => fork() => exit()).
 972		 * Therefore, we need to protect this ->comm access using RCU.
 973		 */
 974		rcu_read_lock();
 975		pid = rcu_dereference(file->pid);
 976		task = pid_task(pid, PIDTYPE_TGID);
 977		seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
 978			   task ? task->comm : "<unknown>");
 979		rcu_read_unlock();
 980
 981		spin_lock(&file->table_lock);
 982		idr_for_each_entry(&file->object_idr, gobj, id) {
 983			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 984
 985			amdgpu_bo_print_info(id, bo, m);
 986		}
 987		spin_unlock(&file->table_lock);
 988	}
 989
 990	mutex_unlock(&dev->filelist_mutex);
 991	return 0;
 992}
 993
 994DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
 995
 996#endif
 997
 998void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
 999{
1000#if defined(CONFIG_DEBUG_FS)
1001	struct drm_minor *minor = adev_to_drm(adev)->primary;
1002	struct dentry *root = minor->debugfs_root;
1003
1004	debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
1005			    &amdgpu_debugfs_gem_info_fops);
1006#endif
1007}