v4.17
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
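
	/*
	 * Editor's note: the retry loop below implements a two-stage
	 * fallback on allocation failure. First the
	 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED flag is dropped (it
	 * restricts the BO to CPU-visible VRAM) and the allocation is
	 * retried; then a pure VRAM request is widened to VRAM|GTT, so
	 * only a failure of the most permissive placement is returned
	 * to the caller.
	 */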
retry:
	r = amdgpu_bo_create(adev, size, alignment, initial_domain,
			     flags, type, resv, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}
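
/*
 * Editor's note: a BO is tracked in each per-process VM by a single
 * amdgpu_bo_va; its ref_count counts how many GEM handles of that client
 * reference the BO. open above bumps the count and close below drops it,
 * tearing the VM mappings down only when the last handle goes away.
 */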

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}
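
/*
 * Editor's note: the ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation()
 * pair above locks the BO together with the VM page directory under a
 * single ww_acquire_ctx ticket, so the wound/wait deadlock-avoidance
 * protocol covers the whole validation list rather than one buffer at a
 * time.
 */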

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}
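
/*
 * Editor's note: a minimal user-space sketch of exercising this ioctl
 * through libdrm's generic drmCommandWriteRead() helper; the fd and the
 * chosen sizes are illustrative assumptions, not part of this file:
 *
 *	union drm_amdgpu_gem_create args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.in.bo_size = 4096;
 *	args.in.alignment = 4096;
 *	args.in.domains = AMDGPU_GEM_DOMAIN_GTT;
 *	if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE,
 *				&args, sizeof(args)) == 0)
 *		printf("BO handle %u\n", args.out.handle);
 */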

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, 0, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}
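
/*
 * Editor's note: the flag policy above is worth spelling out. A writable
 * userptr BO is only accepted together with AMDGPU_GEM_USERPTR_REGISTER,
 * because an MMU notifier is the driver's only way to react when user
 * space unmaps or remaps the pages; AMDGPU_GEM_USERPTR_VALIDATE
 * additionally pins and GTT-validates the pages up front, so failures
 * surface at creation time instead of at first use.
 */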

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
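
/*
 * Editor's note: worked example under assumed values. With an absolute
 * deadline 500 ms in the future, ktime_sub() yields ~500000000 ns and
 * nsecs_to_jiffies() returns ~125 jiffies at HZ=250. A timeout_ns with
 * the top bit set (e.g. ~0ull) is treated as "wait forever" and becomes
 * MAX_SCHEDULE_TIMEOUT; a deadline already in the past becomes 0.
 */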

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}
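
/*
 * Editor's note: the out convention above is easy to misread: a zero
 * args->out.status means all of the BO's fences signaled within the
 * timeout, while a nonzero status means the wait expired with the BO
 * still busy; only an interrupted wait is reported as an ioctl error.
 */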

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE) {
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			goto error;
	}

	r = amdgpu_vm_update_directories(adev, vm);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_VA_HOLE_START &&
	    args->va_address < AMDGPU_VA_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_VA_HOLE_START,
			AMDGPU_VA_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_VA_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}
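
	/*
	 * Editor's note: the double mask test above accepts args->flags if
	 * they fit entirely within either valid_flags (regular mappings)
	 * or prt_flags (PRT mappings); only a combination that escapes
	 * both masks is rejected.
	 */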

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);
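
	/*
	 * Editor's note: worked example under assumed values. For a
	 * 1920x1080 XRGB8888 dumb buffer, bpp = 32 gives
	 * DIV_ROUND_UP(32, 8) = 4 bytes per pixel, so the unaligned pitch
	 * is 7680 bytes; 7680 * 1080 = 8294400 bytes, which is already a
	 * multiple of the 4 KiB page size, so ALIGN() leaves it unchanged.
	 */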

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = READ_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}
v4.6
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
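
	/*
	 * Editor's note: v4.6 still bounds every allocation by the
	 * currently unpinned GTT size, because evicting VRAM to the
	 * system pool goes through a GTT mapping; the v4.17 copy above
	 * no longer carries this check. Example with assumed numbers: a
	 * 4 GiB GTT with 512 MiB pinned rejects any BO larger than
	 * 3584 MiB here.
	 */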
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->struct_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->struct_mutex);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
	return 0;
}

void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
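
/*
 * Editor's note: -EDEADLK is how the lower layers report a hung GPU
 * here, so this helper triggers a full GPU reset and, if the reset
 * succeeds, converts the error to -EAGAIN so user space simply retries
 * the ioctl. The v4.17 copy above has no equivalent; reset handling
 * moved out of the GEM ioctl paths in later kernels.
 */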

/*
 * GEM ioctls.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	     !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_print;

	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	list_for_each_entry(entry, &duplicates, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_print:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_amdgpu_bo(gobj);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	tv.bo = &rbo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	tv_pd.bo = &fpriv->vm.page_directory->tbo;
	tv_pd.shared = true;
	list_add(&tv_pd.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	ttm_eu_backoff_reservation(&ticket, &list);
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
	    !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->struct_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}