v5.9 (drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c)
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/module.h>
 30#include <linux/pagemap.h>
 31#include <linux/pci.h>
 32#include <linux/dma-buf.h>
 33
 34#include <drm/amdgpu_drm.h>
 35#include <drm/drm_debugfs.h>
 36
 37#include "amdgpu.h"
 38#include "amdgpu_display.h"
 39#include "amdgpu_xgmi.h"
 40
 41void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 42{
 43	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 44
 45	if (robj) {
 46		amdgpu_mn_unregister(robj);
 47		amdgpu_bo_unref(&robj);
 48	}
 49}
 50
 51int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 52			     int alignment, u32 initial_domain,
 53			     u64 flags, enum ttm_bo_type type,
 54			     struct dma_resv *resv,
 55			     struct drm_gem_object **obj)
 56{
 57	struct amdgpu_bo *bo;
 58	struct amdgpu_bo_param bp;
 59	int r;
 60
 61	memset(&bp, 0, sizeof(bp));
 62	*obj = NULL;
 63
 64	bp.size = size;
 65	bp.byte_align = alignment;
 66	bp.type = type;
 67	bp.resv = resv;
 68	bp.preferred_domain = initial_domain;
 69retry:
 70	bp.flags = flags;
 71	bp.domain = initial_domain;
 72	r = amdgpu_bo_create(adev, &bp, &bo);
 73	if (r) {
 74		if (r != -ERESTARTSYS) {
 75			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
 76				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 77				goto retry;
 78			}
 79
 80			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 81				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 82				goto retry;
 83			}
 84			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 85				  size, initial_domain, alignment, r);
 86		}
 87		return r;
 88	}
 89	*obj = &bo->tbo.base;
 90
 91	return 0;
 92}
 93
 94void amdgpu_gem_force_release(struct amdgpu_device *adev)
 95{
 96	struct drm_device *ddev = adev->ddev;
 97	struct drm_file *file;
 98
 99	mutex_lock(&ddev->filelist_mutex);
100
101	list_for_each_entry(file, &ddev->filelist, lhead) {
102		struct drm_gem_object *gobj;
103		int handle;
104
105		WARN_ONCE(1, "Still active user space clients!\n");
106		spin_lock(&file->table_lock);
107		idr_for_each_entry(&file->object_idr, gobj, handle) {
108			WARN_ONCE(1, "And also active allocations!\n");
109			drm_gem_object_put(gobj);
110		}
111		idr_destroy(&file->object_idr);
112		spin_unlock(&file->table_lock);
113	}
114
115	mutex_unlock(&ddev->filelist_mutex);
116}
117
118/*
 119 * Called from drm_gem_handle_create(), which appears in both the new and
 120 * open ioctl paths.
121 */
122int amdgpu_gem_object_open(struct drm_gem_object *obj,
123			   struct drm_file *file_priv)
124{
125	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
126	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
127	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
128	struct amdgpu_vm *vm = &fpriv->vm;
129	struct amdgpu_bo_va *bo_va;
130	struct mm_struct *mm;
131	int r;
132
133	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
134	if (mm && mm != current->mm)
135		return -EPERM;
136
137	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
138	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
139		return -EPERM;
140
141	r = amdgpu_bo_reserve(abo, false);
142	if (r)
143		return r;
144
145	bo_va = amdgpu_vm_bo_find(vm, abo);
146	if (!bo_va) {
147		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
148	} else {
149		++bo_va->ref_count;
150	}
151	amdgpu_bo_unreserve(abo);
152	return 0;
153}
154
155void amdgpu_gem_object_close(struct drm_gem_object *obj,
156			     struct drm_file *file_priv)
157{
158	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
159	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
160	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
161	struct amdgpu_vm *vm = &fpriv->vm;
162
163	struct amdgpu_bo_list_entry vm_pd;
164	struct list_head list, duplicates;
165	struct dma_fence *fence = NULL;
166	struct ttm_validate_buffer tv;
167	struct ww_acquire_ctx ticket;
168	struct amdgpu_bo_va *bo_va;
169	long r;
170
171	INIT_LIST_HEAD(&list);
172	INIT_LIST_HEAD(&duplicates);
173
174	tv.bo = &bo->tbo;
175	tv.num_shared = 2;
176	list_add(&tv.head, &list);
177
178	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
179
180	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
181	if (r) {
182		dev_err(adev->dev, "leaking bo va because "
183			"we fail to reserve bo (%ld)\n", r);
184		return;
185	}
186	bo_va = amdgpu_vm_bo_find(vm, bo);
187	if (!bo_va || --bo_va->ref_count)
188		goto out_unlock;
189
190	amdgpu_vm_bo_rmv(adev, bo_va);
191	if (!amdgpu_vm_ready(vm))
192		goto out_unlock;
193
194	fence = dma_resv_get_excl(bo->tbo.base.resv);
195	if (fence) {
196		amdgpu_bo_fence(bo, fence, true);
197		fence = NULL;
198	}
199
200	r = amdgpu_vm_clear_freed(adev, vm, &fence);
201	if (r || !fence)
202		goto out_unlock;
203
204	amdgpu_bo_fence(bo, fence, true);
205	dma_fence_put(fence);
206
207out_unlock:
208	if (unlikely(r < 0))
209		dev_err(adev->dev, "failed to clear page "
210			"tables on GEM object close (%ld)\n", r);
211	ttm_eu_backoff_reservation(&ticket, &list);
212}
213
214/*
215 * GEM ioctls.
216 */
217int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
218			    struct drm_file *filp)
219{
220	struct amdgpu_device *adev = dev->dev_private;
221	struct amdgpu_fpriv *fpriv = filp->driver_priv;
222	struct amdgpu_vm *vm = &fpriv->vm;
223	union drm_amdgpu_gem_create *args = data;
224	uint64_t flags = args->in.domain_flags;
225	uint64_t size = args->in.bo_size;
226	struct dma_resv *resv = NULL;
227	struct drm_gem_object *gobj;
228	uint32_t handle;
229	int r;
230
231	/* reject invalid gem flags */
232	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
233		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
234		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
235		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
236		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
237		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
238		      AMDGPU_GEM_CREATE_ENCRYPTED))
239
240		return -EINVAL;
241
242	/* reject invalid gem domains */
243	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
244		return -EINVAL;
245
246	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
247		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
248		return -EINVAL;
249	}
250
251	/* create a gem object to contain this object in */
252	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
253	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
254		if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 255			/* a GDS BO created from user space must be
 256			 * passed to the bo list
257			 */
258			DRM_ERROR("GDS bo cannot be per-vm-bo\n");
259			return -EINVAL;
260		}
261		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
262	}
263
264	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
265		r = amdgpu_bo_reserve(vm->root.base.bo, false);
266		if (r)
267			return r;
268
269		resv = vm->root.base.bo->tbo.base.resv;
270	}
271
272	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
273				     (u32)(0xffffffff & args->in.domains),
274				     flags, ttm_bo_type_device, resv, &gobj);
275	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
276		if (!r) {
277			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
278
279			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
280		}
281		amdgpu_bo_unreserve(vm->root.base.bo);
282	}
283	if (r)
284		return r;
285
286	r = drm_gem_handle_create(filp, gobj, &handle);
287	/* drop reference from allocate - handle holds it now */
288	drm_gem_object_put(gobj);
289	if (r)
290		return r;
291
292	memset(args, 0, sizeof(*args));
293	args->out.handle = handle;
294	return 0;
295}
296
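/*
 * Editor's note: a minimal userspace sketch of driving the create ioctl
 * above through the UAPI in amdgpu_drm.h. `fd` is assumed to be an
 * already-open render node (e.g. /dev/dri/renderD128), the include path
 * assumes a libdrm-style header install, and error handling is trimmed.
 */
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_gem_create(int fd, __u64 size, __u32 *handle)
{
	union drm_amdgpu_gem_create args = {0};

	args.in.bo_size = size;			/* rounded up to page size by the driver */
	args.in.alignment = 0;			/* 0: let the driver pick page alignment */
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM; /* may fall back to GTT, see retry above */
	args.in.domain_flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return -1;

	*handle = args.out.handle;		/* per-fd GEM handle, not a pointer */
	return 0;
}
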
297int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
298			     struct drm_file *filp)
299{
300	struct ttm_operation_ctx ctx = { true, false };
301	struct amdgpu_device *adev = dev->dev_private;
302	struct drm_amdgpu_gem_userptr *args = data;
303	struct drm_gem_object *gobj;
304	struct amdgpu_bo *bo;
305	uint32_t handle;
306	int r;
307
308	args->addr = untagged_addr(args->addr);
309
310	if (offset_in_page(args->addr | args->size))
311		return -EINVAL;
312
313	/* reject unknown flag values */
314	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
315	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
316	    AMDGPU_GEM_USERPTR_REGISTER))
317		return -EINVAL;
318
319	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
320	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
321
 322	/* if we want to write to it we must install an MMU notifier */
323		return -EACCES;
324	}
325
326	/* create a gem object to contain this object in */
327	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
328				     0, ttm_bo_type_device, NULL, &gobj);
329	if (r)
330		return r;
331
332	bo = gem_to_amdgpu_bo(gobj);
333	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
334	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
335	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
336	if (r)
337		goto release_object;
338
339	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
340		r = amdgpu_mn_register(bo, args->addr);
341		if (r)
342			goto release_object;
343	}
344
345	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
346		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
347		if (r)
348			goto release_object;
349
350		r = amdgpu_bo_reserve(bo, true);
351		if (r)
352			goto user_pages_done;
353
354		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
355		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
356		amdgpu_bo_unreserve(bo);
357		if (r)
358			goto user_pages_done;
359	}
360
361	r = drm_gem_handle_create(filp, gobj, &handle);
362	if (r)
363		goto user_pages_done;
364
365	args->handle = handle;
366
367user_pages_done:
368	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
369		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
370
371release_object:
372	drm_gem_object_put(gobj);
373
374	return r;
375}
376
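/*
 * Editor's note: a matching userspace sketch for the userptr ioctl above.
 * addr and size must be page aligned (the offset_in_page() check), and a
 * writable mapping must also pass AMDGPU_GEM_USERPTR_REGISTER so an MMU
 * notifier can be installed. `fd` and the include path are assumptions as
 * in the earlier sketch.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_gem_userptr(int fd, void *cpu_ptr, size_t size, __u32 *handle)
{
	struct drm_amdgpu_gem_userptr args = {0};

	args.addr = (__u64)(uintptr_t)cpu_ptr;	/* page aligned */
	args.size = size;			/* multiple of the page size */
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY |	/* no file-backed pages */
		     AMDGPU_GEM_USERPTR_REGISTER |	/* install the MMU notifier */
		     AMDGPU_GEM_USERPTR_VALIDATE;	/* fault and validate pages up front */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &args))
		return -1;

	*handle = args.handle;
	return 0;
}
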
377int amdgpu_mode_dumb_mmap(struct drm_file *filp,
378			  struct drm_device *dev,
379			  uint32_t handle, uint64_t *offset_p)
380{
381	struct drm_gem_object *gobj;
382	struct amdgpu_bo *robj;
383
384	gobj = drm_gem_object_lookup(filp, handle);
385	if (gobj == NULL) {
386		return -ENOENT;
387	}
388	robj = gem_to_amdgpu_bo(gobj);
389	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
390	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
391		drm_gem_object_put(gobj);
392		return -EPERM;
393	}
394	*offset_p = amdgpu_bo_mmap_offset(robj);
395	drm_gem_object_put(gobj);
396	return 0;
397}
398
399int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
400			  struct drm_file *filp)
401{
402	union drm_amdgpu_gem_mmap *args = data;
403	uint32_t handle = args->in.handle;
404	memset(args, 0, sizeof(*args));
405	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
406}
407
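/*
 * Editor's note: a sketch of how the fake offset produced above is
 * consumed. amdgpu_bo_mmap_offset() returns an offset into the DRM file,
 * not a CPU address, so userspace hands it straight to mmap() on the
 * same fd; `fd` and `handle` are assumed as before.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/amdgpu_drm.h>

static void *example_gem_mmap(int fd, __u32 handle, size_t size)
{
	union drm_amdgpu_gem_mmap args = {0};

	args.in.handle = handle;
	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args))
		return MAP_FAILED;

	/* out.addr_ptr is the fake offset; the fault handler does the rest */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, (off_t)args.out.addr_ptr);
}
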
408/**
409 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
410 *
411 * @timeout_ns: timeout in ns
412 *
413 * Calculate the timeout in jiffies from an absolute timeout in ns.
414 */
415unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
416{
417	unsigned long timeout_jiffies;
418	ktime_t timeout;
419
 420	/* clamp timeout if it's too large */
421	if (((int64_t)timeout_ns) < 0)
422		return MAX_SCHEDULE_TIMEOUT;
423
424	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
425	if (ktime_to_ns(timeout) < 0)
426		return 0;
427
428	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
 429	/* clamp timeout to avoid unsigned -> signed overflow */
 430	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
431		return MAX_SCHEDULE_TIMEOUT - 1;
432
433	return timeout_jiffies;
434}
435
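/*
 * Editor's note: the timeouts passed to the wait ioctls are *absolute*
 * and compared against ktime_get(), i.e. CLOCK_MONOTONIC; values with the
 * sign bit set mean "wait forever" (MAX_SCHEDULE_TIMEOUT above). A sketch
 * of how userspace would build "N ns from now" (assumed helper, not UAPI):
 */
#include <stdint.h>
#include <time.h>

static uint64_t example_abs_timeout_ns(uint64_t relative_ns)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* same clock base as ktime_get() */
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec +
	       relative_ns;
}
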
436int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
437			      struct drm_file *filp)
438{
439	union drm_amdgpu_gem_wait_idle *args = data;
440	struct drm_gem_object *gobj;
441	struct amdgpu_bo *robj;
442	uint32_t handle = args->in.handle;
443	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
444	int r = 0;
445	long ret;
446
447	gobj = drm_gem_object_lookup(filp, handle);
448	if (gobj == NULL) {
449		return -ENOENT;
450	}
451	robj = gem_to_amdgpu_bo(gobj);
452	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
453						  timeout);
454
455	/* ret == 0 means not signaled,
456	 * ret > 0 means signaled
457	 * ret < 0 means interrupted before timeout
458	 */
459	if (ret >= 0) {
460		memset(args, 0, sizeof(*args));
461		args->out.status = (ret == 0);
462	} else
463		r = ret;
464
465	drm_gem_object_put(gobj);
466	return r;
467}
468
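/*
 * Editor's note: a sketch pairing the helper above with the wait ioctl.
 * out.status is 1 when the wait timed out (BO still busy) and 0 once all
 * fences have signaled; `fd` and `handle` are assumed as before.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_gem_wait_idle(int fd, __u32 handle, uint64_t abs_timeout_ns)
{
	union drm_amdgpu_gem_wait_idle args = {0};

	args.in.handle = handle;
	args.in.timeout = abs_timeout_ns;	/* absolute CLOCK_MONOTONIC ns */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &args))
		return -errno;

	return args.out.status ? -EBUSY : 0;	/* -EBUSY: timed out, still busy */
}
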
469int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
470				struct drm_file *filp)
471{
472	struct drm_amdgpu_gem_metadata *args = data;
473	struct drm_gem_object *gobj;
474	struct amdgpu_bo *robj;
475	int r = -1;
476
477	DRM_DEBUG("%d \n", args->handle);
478	gobj = drm_gem_object_lookup(filp, args->handle);
479	if (gobj == NULL)
480		return -ENOENT;
481	robj = gem_to_amdgpu_bo(gobj);
482
483	r = amdgpu_bo_reserve(robj, false);
484	if (unlikely(r != 0))
485		goto out;
486
487	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
488		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
489		r = amdgpu_bo_get_metadata(robj, args->data.data,
490					   sizeof(args->data.data),
491					   &args->data.data_size_bytes,
492					   &args->data.flags);
493	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
494		if (args->data.data_size_bytes > sizeof(args->data.data)) {
495			r = -EINVAL;
496			goto unreserve;
497		}
498		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
499		if (!r)
500			r = amdgpu_bo_set_metadata(robj, args->data.data,
501						   args->data.data_size_bytes,
502						   args->data.flags);
503	}
504
505unreserve:
506	amdgpu_bo_unreserve(robj);
507out:
508	drm_gem_object_put(gobj);
509	return r;
510}
511
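/*
 * Editor's note: a sketch of the SET side of the metadata ioctl above, as
 * used by display stacks to attach tiling/format blobs to a shared BO.
 * The 64-dword data array and the op codes are UAPI; the blob contents
 * are opaque to the kernel.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_set_metadata(int fd, __u32 handle,
				const void *blob, __u32 blob_size)
{
	struct drm_amdgpu_gem_metadata args = {0};

	if (blob_size > sizeof(args.data.data))
		return -1;	/* the kernel rejects oversized blobs with -EINVAL */

	args.handle = handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.data_size_bytes = blob_size;
	memcpy(args.data.data, blob, blob_size);

	return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_METADATA, &args);
}
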
512/**
 513 * amdgpu_gem_va_update_vm - update the bo_va in its VM
514 *
515 * @adev: amdgpu_device pointer
516 * @vm: vm to update
517 * @bo_va: bo_va to update
518 * @operation: map, unmap or clear
519 *
520 * Update the bo_va directly after setting its address. Errors are not
521 * vital here, so they are not reported back to userspace.
522 */
523static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
524				    struct amdgpu_vm *vm,
525				    struct amdgpu_bo_va *bo_va,
526				    uint32_t operation)
527{
528	int r;
529
530	if (!amdgpu_vm_ready(vm))
531		return;
532
533	r = amdgpu_vm_clear_freed(adev, vm, NULL);
534	if (r)
535		goto error;
536
537	if (operation == AMDGPU_VA_OP_MAP ||
538	    operation == AMDGPU_VA_OP_REPLACE) {
539		r = amdgpu_vm_bo_update(adev, bo_va, false);
540		if (r)
541			goto error;
542	}
543
544	r = amdgpu_vm_update_pdes(adev, vm, false);
545
546error:
547	if (r && r != -ERESTARTSYS)
548		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
549}
550
551/**
552 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
553 *
554 * @adev: amdgpu_device pointer
555 * @flags: GEM UAPI flags
556 *
557 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
558 */
559uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
560{
561	uint64_t pte_flag = 0;
562
563	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
564		pte_flag |= AMDGPU_PTE_EXECUTABLE;
565	if (flags & AMDGPU_VM_PAGE_READABLE)
566		pte_flag |= AMDGPU_PTE_READABLE;
567	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
568		pte_flag |= AMDGPU_PTE_WRITEABLE;
569	if (flags & AMDGPU_VM_PAGE_PRT)
570		pte_flag |= AMDGPU_PTE_PRT;
571
572	if (adev->gmc.gmc_funcs->map_mtype)
573		pte_flag |= amdgpu_gmc_map_mtype(adev,
574						 flags & AMDGPU_VM_MTYPE_MASK);
575
576	return pte_flag;
577}
578
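/*
 * Editor's note: a worked example of the translation above. For a typical
 * readable+writable mapping request:
 *
 *   amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE |
 *                                 AMDGPU_VM_PAGE_WRITEABLE)
 *       == AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE
 *          | <ASIC-specific MTYPE bits from gmc_funcs->map_mtype()>
 */
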
579int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
580			  struct drm_file *filp)
581{
582	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
583		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
584		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
585	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
586		AMDGPU_VM_PAGE_PRT;
587
588	struct drm_amdgpu_gem_va *args = data;
589	struct drm_gem_object *gobj;
590	struct amdgpu_device *adev = dev->dev_private;
591	struct amdgpu_fpriv *fpriv = filp->driver_priv;
592	struct amdgpu_bo *abo;
593	struct amdgpu_bo_va *bo_va;
594	struct amdgpu_bo_list_entry vm_pd;
595	struct ttm_validate_buffer tv;
596	struct ww_acquire_ctx ticket;
597	struct list_head list, duplicates;
598	uint64_t va_flags;
599	int r = 0;
600
601	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
602		dev_dbg(&dev->pdev->dev,
603			"va_address 0x%LX is in reserved area 0x%LX\n",
604			args->va_address, AMDGPU_VA_RESERVED_SIZE);
605		return -EINVAL;
606	}
607
608	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
609	    args->va_address < AMDGPU_GMC_HOLE_END) {
610		dev_dbg(&dev->pdev->dev,
611			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
612			args->va_address, AMDGPU_GMC_HOLE_START,
613			AMDGPU_GMC_HOLE_END);
614		return -EINVAL;
615	}
616
617	args->va_address &= AMDGPU_GMC_HOLE_MASK;
618
619	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
620		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
621			args->flags);
622		return -EINVAL;
623	}
624
625	switch (args->operation) {
626	case AMDGPU_VA_OP_MAP:
627	case AMDGPU_VA_OP_UNMAP:
628	case AMDGPU_VA_OP_CLEAR:
629	case AMDGPU_VA_OP_REPLACE:
630		break;
631	default:
632		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
633			args->operation);
634		return -EINVAL;
635	}
636
637	INIT_LIST_HEAD(&list);
638	INIT_LIST_HEAD(&duplicates);
639	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
640	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
641		gobj = drm_gem_object_lookup(filp, args->handle);
642		if (gobj == NULL)
643			return -ENOENT;
644		abo = gem_to_amdgpu_bo(gobj);
645		tv.bo = &abo->tbo;
646		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
647			tv.num_shared = 1;
648		else
649			tv.num_shared = 0;
650		list_add(&tv.head, &list);
651	} else {
652		gobj = NULL;
653		abo = NULL;
654	}
655
656	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
657
658	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
659	if (r)
660		goto error_unref;
661
662	if (abo) {
663		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
664		if (!bo_va) {
665			r = -ENOENT;
666			goto error_backoff;
667		}
668	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
669		bo_va = fpriv->prt_va;
670	} else {
671		bo_va = NULL;
672	}
673
674	switch (args->operation) {
675	case AMDGPU_VA_OP_MAP:
676		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
677		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
678				     args->offset_in_bo, args->map_size,
679				     va_flags);
680		break;
681	case AMDGPU_VA_OP_UNMAP:
682		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
683		break;
684
685	case AMDGPU_VA_OP_CLEAR:
686		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
687						args->va_address,
688						args->map_size);
689		break;
690	case AMDGPU_VA_OP_REPLACE:
691		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
692		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
693					     args->offset_in_bo, args->map_size,
694					     va_flags);
695		break;
696	default:
697		break;
698	}
699	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
700		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
701					args->operation);
702
703error_backoff:
704	ttm_eu_backoff_reservation(&ticket, &list);
705
706error_unref:
707	drm_gem_object_put(gobj);
708	return r;
709}
710
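/*
 * Editor's note: a userspace sketch of the VA ioctl above, mapping a BO
 * at a caller-chosen GPU virtual address. The address must stay out of
 * the reserved area at the bottom of the VA space and out of the GMC
 * hole, both checked above; the concrete `gpu_va` value is the caller's
 * (e.g. libdrm_amdgpu's VA manager) to pick.
 */
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int example_gem_va_map(int fd, __u32 handle, __u64 gpu_va, __u64 size)
{
	struct drm_amdgpu_gem_va args = {0};

	args.handle = handle;
	args.operation = AMDGPU_VA_OP_MAP;
	args.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	args.va_address = gpu_va;	/* above AMDGPU_VA_RESERVED_SIZE */
	args.offset_in_bo = 0;
	args.map_size = size;		/* page aligned */

	return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &args);
}
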
711int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
712			struct drm_file *filp)
713{
714	struct amdgpu_device *adev = dev->dev_private;
715	struct drm_amdgpu_gem_op *args = data;
716	struct drm_gem_object *gobj;
717	struct amdgpu_vm_bo_base *base;
718	struct amdgpu_bo *robj;
719	int r;
720
721	gobj = drm_gem_object_lookup(filp, args->handle);
722	if (gobj == NULL) {
723		return -ENOENT;
724	}
725	robj = gem_to_amdgpu_bo(gobj);
726
727	r = amdgpu_bo_reserve(robj, false);
728	if (unlikely(r))
729		goto out;
730
731	switch (args->op) {
732	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
733		struct drm_amdgpu_gem_create_in info;
734		void __user *out = u64_to_user_ptr(args->value);
735
736		info.bo_size = robj->tbo.base.size;
737		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
738		info.domains = robj->preferred_domains;
739		info.domain_flags = robj->flags;
740		amdgpu_bo_unreserve(robj);
741		if (copy_to_user(out, &info, sizeof(info)))
742			r = -EFAULT;
743		break;
744	}
745	case AMDGPU_GEM_OP_SET_PLACEMENT:
746		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
747			r = -EINVAL;
748			amdgpu_bo_unreserve(robj);
749			break;
750		}
751		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
752			r = -EPERM;
753			amdgpu_bo_unreserve(robj);
754			break;
755		}
756		for (base = robj->vm_bo; base; base = base->next)
757			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
758				amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
759				r = -EINVAL;
760				amdgpu_bo_unreserve(robj);
761				goto out;
762			}
763
764
765		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
766							AMDGPU_GEM_DOMAIN_GTT |
767							AMDGPU_GEM_DOMAIN_CPU);
768		robj->allowed_domains = robj->preferred_domains;
769		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
770			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
771
772		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
773			amdgpu_vm_bo_invalidate(adev, robj, true);
774
775		amdgpu_bo_unreserve(robj);
776		break;
777	default:
778		amdgpu_bo_unreserve(robj);
779		r = -EINVAL;
780	}
781
782out:
783	drm_gem_object_put(gobj);
784	return r;
785}
786
787int amdgpu_mode_dumb_create(struct drm_file *file_priv,
788			    struct drm_device *dev,
789			    struct drm_mode_create_dumb *args)
790{
791	struct amdgpu_device *adev = dev->dev_private;
792	struct drm_gem_object *gobj;
793	uint32_t handle;
794	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
795		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
796	u32 domain;
797	int r;
798
799	/*
800	 * The buffer returned from this function should be cleared, but
801	 * it can only be done if the ring is enabled or we'll fail to
802	 * create the buffer.
803	 */
804	if (adev->mman.buffer_funcs_enabled)
805		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
806
807	args->pitch = amdgpu_align_pitch(adev, args->width,
808					 DIV_ROUND_UP(args->bpp, 8), 0);
809	args->size = (u64)args->pitch * args->height;
810	args->size = ALIGN(args->size, PAGE_SIZE);
811	domain = amdgpu_bo_get_preferred_pin_domain(adev,
812				amdgpu_display_supported_domains(adev, flags));
813	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
814				     ttm_bo_type_device, NULL, &gobj);
815	if (r)
816		return -ENOMEM;
817
818	r = drm_gem_handle_create(file_priv, gobj, &handle);
819	/* drop reference from allocate - handle holds it now */
820	drm_gem_object_put(gobj);
821	if (r) {
822		return r;
823	}
824	args->handle = handle;
825	return 0;
826}
827
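/*
 * Editor's note: the generic KMS entry point that lands in the function
 * above. This sketch allocates a 1920x1080 32bpp scanout buffer through
 * the core DRM dumb-buffer ioctl; it assumes a KMS-capable fd.
 */
#include <sys/ioctl.h>
#include <drm/drm.h>

static int example_create_dumb(int fd, __u32 *handle, __u32 *pitch, __u64 *size)
{
	struct drm_mode_create_dumb args = {0};

	args.width = 1920;
	args.height = 1080;
	args.bpp = 32;			/* DIV_ROUND_UP(bpp, 8) above gives 4 bytes/pixel */

	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &args))
		return -1;

	*handle = args.handle;
	*pitch = args.pitch;		/* aligned by amdgpu_align_pitch() */
	*size = args.size;		/* page aligned, pitch * height */
	return 0;
}
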
828#if defined(CONFIG_DEBUG_FS)
829
830#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
831	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
832		seq_printf((m), " " #flag);		\
833	}
834
835static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
836{
837	struct drm_gem_object *gobj = ptr;
838	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
839	struct seq_file *m = data;
840
841	struct dma_buf_attachment *attachment;
842	struct dma_buf *dma_buf;
843	unsigned domain;
844	const char *placement;
845	unsigned pin_count;
846
847	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
848	switch (domain) {
849	case AMDGPU_GEM_DOMAIN_VRAM:
850		placement = "VRAM";
851		break;
852	case AMDGPU_GEM_DOMAIN_GTT:
853		placement = " GTT";
854		break;
855	case AMDGPU_GEM_DOMAIN_CPU:
856	default:
857		placement = " CPU";
858		break;
859	}
860	seq_printf(m, "\t0x%08x: %12ld byte %s",
861		   id, amdgpu_bo_size(bo), placement);
862
863	pin_count = READ_ONCE(bo->pin_count);
864	if (pin_count)
865		seq_printf(m, " pin count %d", pin_count);
866
867	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
868	attachment = READ_ONCE(bo->tbo.base.import_attach);
869
870	if (attachment)
871		seq_printf(m, " imported from %p%s", dma_buf,
872			   attachment->peer2peer ? " P2P" : "");
873	else if (dma_buf)
874		seq_printf(m, " exported as %p", dma_buf);
875
876	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
877	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
878	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
879	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
880	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
881	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
882	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
883	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
884
885	seq_printf(m, "\n");
886
887	return 0;
888}
889
890static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
891{
892	struct drm_info_node *node = (struct drm_info_node *)m->private;
893	struct drm_device *dev = node->minor->dev;
894	struct drm_file *file;
895	int r;
896
897	r = mutex_lock_interruptible(&dev->filelist_mutex);
898	if (r)
899		return r;
900
901	list_for_each_entry(file, &dev->filelist, lhead) {
902		struct task_struct *task;
903
904		/*
905		 * Although we have a valid reference on file->pid, that does
906		 * not guarantee that the task_struct who called get_pid() is
907		 * still alive (e.g. get_pid(current) => fork() => exit()).
908		 * Therefore, we need to protect this ->comm access using RCU.
909		 */
910		rcu_read_lock();
911		task = pid_task(file->pid, PIDTYPE_PID);
912		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
913			   task ? task->comm : "<unknown>");
914		rcu_read_unlock();
915
916		spin_lock(&file->table_lock);
917		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
918		spin_unlock(&file->table_lock);
919	}
920
921	mutex_unlock(&dev->filelist_mutex);
922	return 0;
923}
924
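/*
 * Editor's note: the resulting debugfs file
 * (/sys/kernel/debug/dri/<minor>/amdgpu_gem_info) therefore looks roughly
 * like the following; the values are invented for illustration, the
 * layout follows the seq_printf() calls above:
 *
 *   pid     1234 command Xorg:
 *           0x00000001:      2097152 byte VRAM pin count 1 CPU_ACCESS_REQUIRED
 *           0x00000002:      1048576 byte  GTT NO_CPU_ACCESS
 */
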
925static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
926	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
927};
928#endif
929
930int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
931{
932#if defined(CONFIG_DEBUG_FS)
933	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
934					ARRAY_SIZE(amdgpu_debugfs_gem_list));
935#endif
936	return 0;
937}
v4.10.11 (drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c)
  1/*
  2 * Copyright 2008 Advanced Micro Devices, Inc.
  3 * Copyright 2008 Red Hat Inc.
  4 * Copyright 2009 Jerome Glisse.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the "Software"),
  8 * to deal in the Software without restriction, including without limitation
  9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 10 * and/or sell copies of the Software, and to permit persons to whom the
 11 * Software is furnished to do so, subject to the following conditions:
 12 *
 13 * The above copyright notice and this permission notice shall be included in
 14 * all copies or substantial portions of the Software.
 15 *
 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 22 * OTHER DEALINGS IN THE SOFTWARE.
 23 *
 24 * Authors: Dave Airlie
 25 *          Alex Deucher
 26 *          Jerome Glisse
 27 */
 28#include <linux/ktime.h>
 29#include <linux/pagemap.h>
 30#include <drm/drmP.h>
 31#include <drm/amdgpu_drm.h>
 32#include "amdgpu.h"
 33
 34void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 35{
 36	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
 37
 38	if (robj) {
 39		if (robj->gem_base.import_attach)
 40			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
 41		amdgpu_mn_unregister(robj);
 42		amdgpu_bo_unref(&robj);
 43	}
 44}
 45
 46int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 47				int alignment, u32 initial_domain,
 48				u64 flags, bool kernel,
 49				struct drm_gem_object **obj)
 50{
 51	struct amdgpu_bo *robj;
 52	unsigned long max_size;
 53	int r;
 54
 55	*obj = NULL;
 56	/* At least align on page size */
 57	if (alignment < PAGE_SIZE) {
 58		alignment = PAGE_SIZE;
 59	}
 60
 61	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
 62		/* Maximum bo size is the unpinned gtt size since we use the gtt to
 63		 * handle vram to system pool migrations.
 64		 */
 65		max_size = adev->mc.gtt_size - adev->gart_pin_size;
 66		if (size > max_size) {
 67			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
 68				  size >> 20, max_size >> 20);
 69			return -ENOMEM;
 70		}
 71	}
 72retry:
 73	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
 74			     flags, NULL, NULL, &robj);
 75	if (r) {
 76		if (r != -ERESTARTSYS) {
 77			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
 78				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
 79				goto retry;
 80			}
 81			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
 82				  size, initial_domain, alignment, r);
 83		}
 84		return r;
 85	}
 86	*obj = &robj->gem_base;
 87
 88	return 0;
 89}
 90
 91void amdgpu_gem_force_release(struct amdgpu_device *adev)
 92{
 93	struct drm_device *ddev = adev->ddev;
 94	struct drm_file *file;
 95
 96	mutex_lock(&ddev->filelist_mutex);
 97
 98	list_for_each_entry(file, &ddev->filelist, lhead) {
 99		struct drm_gem_object *gobj;
100		int handle;
101
102		WARN_ONCE(1, "Still active user space clients!\n");
103		spin_lock(&file->table_lock);
104		idr_for_each_entry(&file->object_idr, gobj, handle) {
105			WARN_ONCE(1, "And also active allocations!\n");
106			drm_gem_object_unreference_unlocked(gobj);
107		}
108		idr_destroy(&file->object_idr);
109		spin_unlock(&file->table_lock);
110	}
111
112	mutex_unlock(&ddev->filelist_mutex);
113}
114
115/*
 116 * Called from drm_gem_handle_create(), which appears in both the new and
 117 * open ioctl paths.
118 */
119int amdgpu_gem_object_open(struct drm_gem_object *obj,
120			   struct drm_file *file_priv)
121{
122	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
123	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
124	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
125	struct amdgpu_vm *vm = &fpriv->vm;
126	struct amdgpu_bo_va *bo_va;
127	int r;
128	r = amdgpu_bo_reserve(abo, false);
129	if (r)
130		return r;
131
132	bo_va = amdgpu_vm_bo_find(vm, abo);
133	if (!bo_va) {
134		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
135	} else {
136		++bo_va->ref_count;
137	}
138	amdgpu_bo_unreserve(abo);
139	return 0;
140}
141
142void amdgpu_gem_object_close(struct drm_gem_object *obj,
143			     struct drm_file *file_priv)
144{
145	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
146	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
147	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
148	struct amdgpu_vm *vm = &fpriv->vm;
149
150	struct amdgpu_bo_list_entry vm_pd;
151	struct list_head list, duplicates;
152	struct ttm_validate_buffer tv;
153	struct ww_acquire_ctx ticket;
154	struct amdgpu_bo_va *bo_va;
155	int r;
156
157	INIT_LIST_HEAD(&list);
158	INIT_LIST_HEAD(&duplicates);
159
160	tv.bo = &bo->tbo;
161	tv.shared = true;
162	list_add(&tv.head, &list);
163
164	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
165
166	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
167	if (r) {
168		dev_err(adev->dev, "leaking bo va because "
169			"we fail to reserve bo (%d)\n", r);
170		return;
171	}
172	bo_va = amdgpu_vm_bo_find(vm, bo);
173	if (bo_va) {
174		if (--bo_va->ref_count == 0) {
175			amdgpu_vm_bo_rmv(adev, bo_va);
176		}
177	}
178	ttm_eu_backoff_reservation(&ticket, &list);
179}
180
181static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
182{
183	if (r == -EDEADLK) {
184		r = amdgpu_gpu_reset(adev);
185		if (!r)
186			r = -EAGAIN;
187	}
188	return r;
189}
190
191/*
192 * GEM ioctls.
193 */
194int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
195			    struct drm_file *filp)
196{
197	struct amdgpu_device *adev = dev->dev_private;
198	union drm_amdgpu_gem_create *args = data;
199	uint64_t size = args->in.bo_size;
200	struct drm_gem_object *gobj;
201	uint32_t handle;
202	bool kernel = false;
203	int r;
204
205	/* create a gem object to contain this object in */
206	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
207	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
208		kernel = true;
209		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
210			size = size << AMDGPU_GDS_SHIFT;
211		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
212			size = size << AMDGPU_GWS_SHIFT;
213		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
214			size = size << AMDGPU_OA_SHIFT;
215		else {
216			r = -EINVAL;
217			goto error_unlock;
218		}
219	}
220	size = roundup(size, PAGE_SIZE);
221
222	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
223				     (u32)(0xffffffff & args->in.domains),
224				     args->in.domain_flags,
225				     kernel, &gobj);
226	if (r)
227		goto error_unlock;
228
229	r = drm_gem_handle_create(filp, gobj, &handle);
230	/* drop reference from allocate - handle holds it now */
231	drm_gem_object_unreference_unlocked(gobj);
232	if (r)
233		goto error_unlock;
234
235	memset(args, 0, sizeof(*args));
236	args->out.handle = handle;
237	return 0;
238
239error_unlock:
240	r = amdgpu_gem_handle_lockup(adev, r);
241	return r;
242}
243
244int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
245			     struct drm_file *filp)
246{
247	struct amdgpu_device *adev = dev->dev_private;
248	struct drm_amdgpu_gem_userptr *args = data;
249	struct drm_gem_object *gobj;
250	struct amdgpu_bo *bo;
251	uint32_t handle;
252	int r;
253
254	if (offset_in_page(args->addr | args->size))
255		return -EINVAL;
256
257	/* reject unknown flag values */
258	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
259	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
260	    AMDGPU_GEM_USERPTR_REGISTER))
261		return -EINVAL;
262
263	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
264	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
265
 266		/* if we want to write to it we must install an MMU notifier */
267		return -EACCES;
268	}
269
270	/* create a gem object to contain this object in */
271	r = amdgpu_gem_object_create(adev, args->size, 0,
272				     AMDGPU_GEM_DOMAIN_CPU, 0,
273				     0, &gobj);
274	if (r)
275		goto handle_lockup;
276
277	bo = gem_to_amdgpu_bo(gobj);
278	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
279	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
280	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
281	if (r)
282		goto release_object;
283
284	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
285		r = amdgpu_mn_register(bo, args->addr);
286		if (r)
287			goto release_object;
288	}
289
290	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
291		down_read(&current->mm->mmap_sem);
292
293		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
294						 bo->tbo.ttm->pages);
295		if (r)
296			goto unlock_mmap_sem;
297
298		r = amdgpu_bo_reserve(bo, true);
299		if (r)
300			goto free_pages;
301
302		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
303		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
304		amdgpu_bo_unreserve(bo);
305		if (r)
306			goto free_pages;
307
308		up_read(&current->mm->mmap_sem);
309	}
310
311	r = drm_gem_handle_create(filp, gobj, &handle);
312	/* drop reference from allocate - handle holds it now */
313	drm_gem_object_unreference_unlocked(gobj);
314	if (r)
315		goto handle_lockup;
316
317	args->handle = handle;
318	return 0;
319
320free_pages:
321	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
322
323unlock_mmap_sem:
324	up_read(&current->mm->mmap_sem);
325
326release_object:
327	drm_gem_object_unreference_unlocked(gobj);
328
329handle_lockup:
330	r = amdgpu_gem_handle_lockup(adev, r);
331
332	return r;
333}
334
335int amdgpu_mode_dumb_mmap(struct drm_file *filp,
336			  struct drm_device *dev,
337			  uint32_t handle, uint64_t *offset_p)
338{
339	struct drm_gem_object *gobj;
340	struct amdgpu_bo *robj;
341
342	gobj = drm_gem_object_lookup(filp, handle);
343	if (gobj == NULL) {
344		return -ENOENT;
345	}
346	robj = gem_to_amdgpu_bo(gobj);
347	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
348	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
349		drm_gem_object_unreference_unlocked(gobj);
350		return -EPERM;
351	}
352	*offset_p = amdgpu_bo_mmap_offset(robj);
353	drm_gem_object_unreference_unlocked(gobj);
354	return 0;
355}
356
357int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
358			  struct drm_file *filp)
359{
360	union drm_amdgpu_gem_mmap *args = data;
361	uint32_t handle = args->in.handle;
362	memset(args, 0, sizeof(*args));
363	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
364}
365
366/**
367 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
368 *
369 * @timeout_ns: timeout in ns
370 *
371 * Calculate the timeout in jiffies from an absolute timeout in ns.
372 */
373unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
374{
375	unsigned long timeout_jiffies;
376	ktime_t timeout;
377
 378	/* clamp timeout if it's too large */
379	if (((int64_t)timeout_ns) < 0)
380		return MAX_SCHEDULE_TIMEOUT;
381
382	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
383	if (ktime_to_ns(timeout) < 0)
384		return 0;
385
386	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
 387	/* clamp timeout to avoid unsigned -> signed overflow */
 388	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
389		return MAX_SCHEDULE_TIMEOUT - 1;
390
391	return timeout_jiffies;
392}
393
394int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
395			      struct drm_file *filp)
396{
397	struct amdgpu_device *adev = dev->dev_private;
398	union drm_amdgpu_gem_wait_idle *args = data;
399	struct drm_gem_object *gobj;
400	struct amdgpu_bo *robj;
401	uint32_t handle = args->in.handle;
402	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
403	int r = 0;
404	long ret;
405
406	gobj = drm_gem_object_lookup(filp, handle);
407	if (gobj == NULL) {
408		return -ENOENT;
409	}
410	robj = gem_to_amdgpu_bo(gobj);
411	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
412						  timeout);
413
414	/* ret == 0 means not signaled,
415	 * ret > 0 means signaled
416	 * ret < 0 means interrupted before timeout
417	 */
418	if (ret >= 0) {
419		memset(args, 0, sizeof(*args));
420		args->out.status = (ret == 0);
421	} else
422		r = ret;
423
424	drm_gem_object_unreference_unlocked(gobj);
425	r = amdgpu_gem_handle_lockup(adev, r);
426	return r;
427}
428
429int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
430				struct drm_file *filp)
431{
432	struct drm_amdgpu_gem_metadata *args = data;
433	struct drm_gem_object *gobj;
434	struct amdgpu_bo *robj;
435	int r = -1;
436
437	DRM_DEBUG("%d \n", args->handle);
438	gobj = drm_gem_object_lookup(filp, args->handle);
439	if (gobj == NULL)
440		return -ENOENT;
441	robj = gem_to_amdgpu_bo(gobj);
442
443	r = amdgpu_bo_reserve(robj, false);
444	if (unlikely(r != 0))
445		goto out;
446
447	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
448		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
449		r = amdgpu_bo_get_metadata(robj, args->data.data,
450					   sizeof(args->data.data),
451					   &args->data.data_size_bytes,
452					   &args->data.flags);
453	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
454		if (args->data.data_size_bytes > sizeof(args->data.data)) {
455			r = -EINVAL;
456			goto unreserve;
457		}
458		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
459		if (!r)
460			r = amdgpu_bo_set_metadata(robj, args->data.data,
461						   args->data.data_size_bytes,
462						   args->data.flags);
463	}
464
465unreserve:
466	amdgpu_bo_unreserve(robj);
467out:
468	drm_gem_object_unreference_unlocked(gobj);
469	return r;
470}
471
472static int amdgpu_gem_va_check(void *param, struct amdgpu_bo *bo)
473{
474	unsigned domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
475
476	/* if anything is swapped out don't swap it in here,
477	   just abort and wait for the next CS */
478
479	return domain == AMDGPU_GEM_DOMAIN_CPU ? -ERESTARTSYS : 0;
480}
481
482/**
 483 * amdgpu_gem_va_update_vm - update the bo_va in its VM
484 *
485 * @adev: amdgpu_device pointer
486 * @bo_va: bo_va to update
487 *
 488 * Update the bo_va directly after setting its address. Errors are not
489 * vital here, so they are not reported back to userspace.
490 */
491static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
492				    struct amdgpu_bo_va *bo_va,
493				    uint32_t operation)
494{
495	struct ttm_validate_buffer tv, *entry;
496	struct amdgpu_bo_list_entry vm_pd;
497	struct ww_acquire_ctx ticket;
498	struct list_head list, duplicates;
499	unsigned domain;
500	int r;
501
502	INIT_LIST_HEAD(&list);
503	INIT_LIST_HEAD(&duplicates);
504
505	tv.bo = &bo_va->bo->tbo;
506	tv.shared = true;
507	list_add(&tv.head, &list);
508
509	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
510
511	/* Provide duplicates to avoid -EALREADY */
512	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
513	if (r)
514		goto error_print;
515
516	list_for_each_entry(entry, &list, head) {
517		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
518		/* if anything is swapped out don't swap it in here,
519		   just abort and wait for the next CS */
520		if (domain == AMDGPU_GEM_DOMAIN_CPU)
521			goto error_unreserve;
522	}
523	r = amdgpu_vm_validate_pt_bos(adev, bo_va->vm, amdgpu_gem_va_check,
524				      NULL);
525	if (r)
526		goto error_unreserve;
527
528	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
529	if (r)
530		goto error_unreserve;
531
532	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
533	if (r)
534		goto error_unreserve;
535
536	if (operation == AMDGPU_VA_OP_MAP)
537		r = amdgpu_vm_bo_update(adev, bo_va, false);
538
539error_unreserve:
540	ttm_eu_backoff_reservation(&ticket, &list);
541
542error_print:
543	if (r && r != -ERESTARTSYS)
544		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
545}
546
547int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
548			  struct drm_file *filp)
549{
550	struct drm_amdgpu_gem_va *args = data;
551	struct drm_gem_object *gobj;
552	struct amdgpu_device *adev = dev->dev_private;
553	struct amdgpu_fpriv *fpriv = filp->driver_priv;
554	struct amdgpu_bo *abo;
555	struct amdgpu_bo_va *bo_va;
556	struct amdgpu_bo_list_entry vm_pd;
557	struct ttm_validate_buffer tv;
558	struct ww_acquire_ctx ticket;
559	struct list_head list, duplicates;
560	uint32_t invalid_flags, va_flags = 0;
561	int r = 0;
562
563	if (!adev->vm_manager.enabled)
564		return -ENOTTY;
565
566	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
567		dev_err(&dev->pdev->dev,
568			"va_address 0x%lX is in reserved area 0x%X\n",
569			(unsigned long)args->va_address,
570			AMDGPU_VA_RESERVED_SIZE);
571		return -EINVAL;
572	}
573
574	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
575			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
576	if ((args->flags & invalid_flags)) {
577		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
578			args->flags, invalid_flags);
579		return -EINVAL;
580	}
581
582	switch (args->operation) {
583	case AMDGPU_VA_OP_MAP:
584	case AMDGPU_VA_OP_UNMAP:
585		break;
586	default:
587		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
588			args->operation);
589		return -EINVAL;
590	}
591
592	gobj = drm_gem_object_lookup(filp, args->handle);
593	if (gobj == NULL)
594		return -ENOENT;
595	abo = gem_to_amdgpu_bo(gobj);
596	INIT_LIST_HEAD(&list);
597	INIT_LIST_HEAD(&duplicates);
598	tv.bo = &abo->tbo;
599	tv.shared = true;
600	list_add(&tv.head, &list);
601
602	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
603
604	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
605	if (r) {
606		drm_gem_object_unreference_unlocked(gobj);
607		return r;
608	}
609
610	bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
611	if (!bo_va) {
612		ttm_eu_backoff_reservation(&ticket, &list);
613		drm_gem_object_unreference_unlocked(gobj);
614		return -ENOENT;
615	}
616
617	switch (args->operation) {
618	case AMDGPU_VA_OP_MAP:
619		if (args->flags & AMDGPU_VM_PAGE_READABLE)
620			va_flags |= AMDGPU_PTE_READABLE;
621		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
622			va_flags |= AMDGPU_PTE_WRITEABLE;
623		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
624			va_flags |= AMDGPU_PTE_EXECUTABLE;
625		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
626				     args->offset_in_bo, args->map_size,
627				     va_flags);
628		break;
629	case AMDGPU_VA_OP_UNMAP:
630		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
631		break;
632	default:
633		break;
634	}
635	ttm_eu_backoff_reservation(&ticket, &list);
636	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
637	    !amdgpu_vm_debug)
638		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
639
640	drm_gem_object_unreference_unlocked(gobj);
641	return r;
642}
643
644int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
645			struct drm_file *filp)
646{
647	struct drm_amdgpu_gem_op *args = data;
648	struct drm_gem_object *gobj;
649	struct amdgpu_bo *robj;
650	int r;
651
652	gobj = drm_gem_object_lookup(filp, args->handle);
653	if (gobj == NULL) {
654		return -ENOENT;
655	}
656	robj = gem_to_amdgpu_bo(gobj);
657
658	r = amdgpu_bo_reserve(robj, false);
659	if (unlikely(r))
660		goto out;
661
662	switch (args->op) {
663	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
664		struct drm_amdgpu_gem_create_in info;
665		void __user *out = (void __user *)(long)args->value;
666
667		info.bo_size = robj->gem_base.size;
668		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
669		info.domains = robj->prefered_domains;
670		info.domain_flags = robj->flags;
671		amdgpu_bo_unreserve(robj);
672		if (copy_to_user(out, &info, sizeof(info)))
673			r = -EFAULT;
674		break;
675	}
676	case AMDGPU_GEM_OP_SET_PLACEMENT:
677		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
678			r = -EPERM;
679			amdgpu_bo_unreserve(robj);
680			break;
681		}
682		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
683							AMDGPU_GEM_DOMAIN_GTT |
684							AMDGPU_GEM_DOMAIN_CPU);
685		robj->allowed_domains = robj->prefered_domains;
686		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
687			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
688
689		amdgpu_bo_unreserve(robj);
690		break;
691	default:
692		amdgpu_bo_unreserve(robj);
693		r = -EINVAL;
694	}
695
696out:
697	drm_gem_object_unreference_unlocked(gobj);
698	return r;
699}
700
701int amdgpu_mode_dumb_create(struct drm_file *file_priv,
702			    struct drm_device *dev,
703			    struct drm_mode_create_dumb *args)
704{
705	struct amdgpu_device *adev = dev->dev_private;
706	struct drm_gem_object *gobj;
707	uint32_t handle;
708	int r;
709
710	args->pitch = amdgpu_align_pitch(adev, args->width,
711					 DIV_ROUND_UP(args->bpp, 8), 0);
712	args->size = (u64)args->pitch * args->height;
713	args->size = ALIGN(args->size, PAGE_SIZE);
714
715	r = amdgpu_gem_object_create(adev, args->size, 0,
716				     AMDGPU_GEM_DOMAIN_VRAM,
717				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
718				     ttm_bo_type_device,
719				     &gobj);
720	if (r)
721		return -ENOMEM;
722
723	r = drm_gem_handle_create(file_priv, gobj, &handle);
724	/* drop reference from allocate - handle holds it now */
725	drm_gem_object_unreference_unlocked(gobj);
726	if (r) {
727		return r;
728	}
729	args->handle = handle;
730	return 0;
731}
732
733#if defined(CONFIG_DEBUG_FS)
734static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
735{
736	struct drm_gem_object *gobj = ptr;
737	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
738	struct seq_file *m = data;
739
740	unsigned domain;
741	const char *placement;
742	unsigned pin_count;
743
744	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
745	switch (domain) {
746	case AMDGPU_GEM_DOMAIN_VRAM:
747		placement = "VRAM";
748		break;
749	case AMDGPU_GEM_DOMAIN_GTT:
750		placement = " GTT";
751		break;
752	case AMDGPU_GEM_DOMAIN_CPU:
753	default:
754		placement = " CPU";
755		break;
756	}
757	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
758		   id, amdgpu_bo_size(bo), placement,
759		   amdgpu_bo_gpu_offset(bo));
760
761	pin_count = ACCESS_ONCE(bo->pin_count);
762	if (pin_count)
763		seq_printf(m, " pin count %d", pin_count);
764	seq_printf(m, "\n");
765
766	return 0;
767}
768
769static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
770{
771	struct drm_info_node *node = (struct drm_info_node *)m->private;
772	struct drm_device *dev = node->minor->dev;
773	struct drm_file *file;
774	int r;
775
776	r = mutex_lock_interruptible(&dev->filelist_mutex);
777	if (r)
778		return r;
779
780	list_for_each_entry(file, &dev->filelist, lhead) {
781		struct task_struct *task;
782
783		/*
784		 * Although we have a valid reference on file->pid, that does
785		 * not guarantee that the task_struct who called get_pid() is
786		 * still alive (e.g. get_pid(current) => fork() => exit()).
787		 * Therefore, we need to protect this ->comm access using RCU.
788		 */
789		rcu_read_lock();
790		task = pid_task(file->pid, PIDTYPE_PID);
791		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
792			   task ? task->comm : "<unknown>");
793		rcu_read_unlock();
794
795		spin_lock(&file->table_lock);
796		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
797		spin_unlock(&file->table_lock);
798	}
799
800	mutex_unlock(&dev->filelist_mutex);
801	return 0;
802}
803
804static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
805	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
806};
807#endif
808
809int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
810{
811#if defined(CONFIG_DEBUG_FS)
812	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
813#endif
814	return 0;
815}